1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-annotate.c, see those files for further
6 * copyright notes.
7 */
8
9#include <errno.h>
10#include <inttypes.h>
11#include <libgen.h>
12#include <stdlib.h>
13#include "util.h" // hex_width()
14#include "ui/ui.h"
15#include "sort.h"
16#include "build-id.h"
17#include "color.h"
18#include "config.h"
19#include "disasm.h"
20#include "dso.h"
21#include "env.h"
22#include "map.h"
23#include "maps.h"
24#include "symbol.h"
25#include "srcline.h"
26#include "units.h"
27#include "debug.h"
28#include "annotate.h"
29#include "annotate-data.h"
30#include "evsel.h"
31#include "evlist.h"
32#include "bpf-event.h"
33#include "bpf-utils.h"
34#include "block-range.h"
35#include "string2.h"
36#include "dwarf-regs.h"
37#include "util/event.h"
38#include "util/sharded_mutex.h"
39#include "arch/common.h"
40#include "namespaces.h"
41#include "thread.h"
42#include "hashmap.h"
43#include <regex.h>
44#include <linux/bitops.h>
45#include <linux/kernel.h>
46#include <linux/string.h>
47#include <linux/zalloc.h>
48#include <subcmd/parse-options.h>
49#include <subcmd/run-command.h>
50
51/* FIXME: For the HE_COLORSET */
52#include "ui/browser.h"
53
54/*
55 * FIXME: Using the same values as slang.h,
56 * but that header may not be available everywhere
57 */
58#define LARROW_CHAR ((unsigned char)',')
59#define RARROW_CHAR ((unsigned char)'+')
60#define DARROW_CHAR ((unsigned char)'.')
61#define UARROW_CHAR ((unsigned char)'-')
62
63#include <linux/ctype.h>
64
65/* global annotation options */
66struct annotation_options annotate_opts;
67
68/* Data type collection debug statistics */
69struct annotated_data_stat ann_data_stat;
70LIST_HEAD(ann_insn_stat);
71
72/* Pseudo data types */
73struct annotated_data_type stackop_type = {
74 .self = {
75 .type_name = (char *)"(stack operation)",
76 .children = LIST_HEAD_INIT(stackop_type.self.children),
77 },
78};
79
80struct annotated_data_type canary_type = {
81 .self = {
82 .type_name = (char *)"(stack canary)",
83 .children = LIST_HEAD_INIT(canary_type.self.children),
84 },
85};
86
87/* symbol histogram: key = offset << 16 | evsel->core.idx */
88static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
89{
90 return (key >> 16) + (key & 0xffff);
91}
92
93static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
94{
95 return key1 == key2;
96}
97
98static struct annotated_source *annotated_source__new(void)
99{
100 struct annotated_source *src = zalloc(sizeof(*src));
101
102 if (src != NULL)
103 INIT_LIST_HEAD(&src->source);
104
105 return src;
106}
107
108static __maybe_unused void annotated_source__delete(struct annotated_source *src)
109{
110 struct hashmap_entry *cur;
111 size_t bkt;
112
113 if (src == NULL)
114 return;
115
116 if (src->samples) {
117 hashmap__for_each_entry(src->samples, cur, bkt)
118 zfree(&cur->pvalue);
119 hashmap__free(src->samples);
120 }
121 zfree(&src->histograms);
122 free(src);
123}
124
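/*
 * Allocate one sym_hist per event plus the hashmap that holds the
 * per-(offset, event) sample counts.  On failure the histograms are
 * freed again and -1 is returned.
 */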
125static int annotated_source__alloc_histograms(struct annotated_source *src,
126 int nr_hists)
127{
128 src->nr_histograms = nr_hists;
129 src->histograms = calloc(nr_hists, sizeof(*src->histograms));
130
131 if (src->histograms == NULL)
132 return -1;
133
134 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
135 if (src->samples == NULL)
136 zfree(&src->histograms);
137
138 return src->histograms ? 0 : -1;
139}
140
141void symbol__annotate_zero_histograms(struct symbol *sym)
142{
143 struct annotation *notes = symbol__annotation(sym);
144
145 annotation__lock(notes);
146 if (notes->src != NULL) {
147 memset(notes->src->histograms, 0,
148 notes->src->nr_histograms * sizeof(*notes->src->histograms));
149 hashmap__clear(notes->src->samples);
150 }
151 if (notes->branch && notes->branch->cycles_hist) {
152 memset(notes->branch->cycles_hist, 0,
153 symbol__size(sym) * sizeof(struct cyc_hist));
154 }
155 annotation__unlock(notes);
156}
157
158static int __symbol__account_cycles(struct cyc_hist *ch,
159 u64 start,
160 unsigned offset, unsigned cycles,
161 unsigned have_start)
162{
163 /*
164 * For now we can only account one basic block per
 165	 * final jump. But multiple blocks could be overlapping.
 166	 * Always account the longest one. So when a shorter
 167	 * one has already been seen, throw it away.
168 *
169 * We separately always account the full cycles.
170 */
171 ch[offset].num_aggr++;
172 ch[offset].cycles_aggr += cycles;
173
174 if (cycles > ch[offset].cycles_max)
175 ch[offset].cycles_max = cycles;
176
177 if (ch[offset].cycles_min) {
178 if (cycles && cycles < ch[offset].cycles_min)
179 ch[offset].cycles_min = cycles;
180 } else
181 ch[offset].cycles_min = cycles;
182
183 if (!have_start && ch[offset].have_start)
184 return 0;
185 if (ch[offset].num) {
186 if (have_start && (!ch[offset].have_start ||
187 ch[offset].start > start)) {
188 ch[offset].have_start = 0;
189 ch[offset].cycles = 0;
190 ch[offset].num = 0;
191 if (ch[offset].reset < 0xffff)
192 ch[offset].reset++;
193 } else if (have_start &&
194 ch[offset].start < start)
195 return 0;
196 }
197
198 if (ch[offset].num < NUM_SPARKS)
199 ch[offset].cycles_spark[ch[offset].num] = cycles;
200
201 ch[offset].have_start = have_start;
202 ch[offset].start = start;
203 ch[offset].cycles += cycles;
204 ch[offset].num++;
205 return 0;
206}
207
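/*
 * Record one sample at @addr for event @evidx: bump the per-event
 * histogram totals and the per-(offset, event) entry in the samples
 * hashmap, e.g. a hit at offset 0x10 for event 2 is keyed as
 * (0x10 << 16) | 2.  Returns -ERANGE if @addr falls outside the symbol.
 */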
208static int __symbol__inc_addr_samples(struct map_symbol *ms,
209 struct annotated_source *src, int evidx, u64 addr,
210 struct perf_sample *sample)
211{
212 struct symbol *sym = ms->sym;
213 long hash_key;
214 u64 offset;
215 struct sym_hist *h;
216 struct sym_hist_entry *entry;
217
218 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
219
220 if ((addr < sym->start || addr >= sym->end) &&
221 (addr != sym->end || sym->start != sym->end)) {
222 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
223 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
224 return -ERANGE;
225 }
226
227 offset = addr - sym->start;
228 h = annotated_source__histogram(src, evidx);
229 if (h == NULL) {
230 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
231 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
232 return -ENOMEM;
233 }
234
235 hash_key = offset << 16 | evidx;
236 if (!hashmap__find(src->samples, hash_key, &entry)) {
237 entry = zalloc(sizeof(*entry));
238 if (entry == NULL)
239 return -ENOMEM;
240
241 if (hashmap__add(src->samples, hash_key, entry) < 0)
242 return -ENOMEM;
243 }
244
245 h->nr_samples++;
246 h->period += sample->period;
247 entry->nr_samples++;
248 entry->period += sample->period;
249
250 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
251 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
252 sym->start, sym->name, addr, addr - sym->start, evidx,
253 entry->nr_samples, entry->period);
254 return 0;
255}
256
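/* Lazily allocate the branch data (cycles/IPC bookkeeping) for @notes. */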
257struct annotated_branch *annotation__get_branch(struct annotation *notes)
258{
259 if (notes == NULL)
260 return NULL;
261
262 if (notes->branch == NULL)
263 notes->branch = zalloc(sizeof(*notes->branch));
264
265 return notes->branch;
266}
267
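/*
 * Lazily allocate the cycles histogram: one struct cyc_hist per byte
 * of the symbol, indexed by instruction offset.
 */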
268static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
269{
270 struct annotation *notes = symbol__annotation(sym);
271 struct annotated_branch *branch;
272
273 branch = annotation__get_branch(notes);
274 if (branch == NULL)
275 return NULL;
276
277 if (branch->cycles_hist == NULL) {
278 const size_t size = symbol__size(sym);
279
280 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
281 }
282
283 return branch->cycles_hist;
284}
285
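/*
 * Get or create the annotated source for @sym, making sure the
 * per-event histograms for @nr_hists events are allocated.
 */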
286struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
287{
288 struct annotation *notes = symbol__annotation(sym);
289
290 if (notes->src == NULL) {
291 notes->src = annotated_source__new();
292 if (notes->src == NULL)
293 return NULL;
294 goto alloc_histograms;
295 }
296
297 if (notes->src->histograms == NULL) {
298alloc_histograms:
299 annotated_source__alloc_histograms(notes->src, nr_hists);
300 }
301
302 return notes->src;
303}
304
305static int symbol__inc_addr_samples(struct map_symbol *ms,
306 struct evsel *evsel, u64 addr,
307 struct perf_sample *sample)
308{
309 struct symbol *sym = ms->sym;
310 struct annotated_source *src;
311
312 if (sym == NULL)
313 return 0;
314 src = symbol__hists(sym, evsel->evlist->core.nr_entries);
315 return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
316}
317
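/*
 * Validate @addr (and @start, if set) against the symbol boundaries and
 * account @cycles at the corresponding offset.  A @start at or past
 * @addr is cleared, so the block start is only kept when it precedes @addr.
 */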
318static int symbol__account_cycles(u64 addr, u64 start,
319 struct symbol *sym, unsigned cycles)
320{
321 struct cyc_hist *cycles_hist;
322 unsigned offset;
323
324 if (sym == NULL)
325 return 0;
326 cycles_hist = symbol__cycles_hist(sym);
327 if (cycles_hist == NULL)
328 return -ENOMEM;
329 if (addr < sym->start || addr >= sym->end)
330 return -ERANGE;
331
332 if (start) {
333 if (start < sym->start || start >= sym->end)
334 return -ERANGE;
335 if (start >= addr)
336 start = 0;
337 }
338 offset = addr - sym->start;
339 return __symbol__account_cycles(cycles_hist,
340 start ? start - sym->start : 0,
341 offset, cycles,
342 !!start);
343}
344
345int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
346 struct addr_map_symbol *start,
347 unsigned cycles)
348{
349 u64 saddr = 0;
350 int err;
351
352 if (!cycles)
353 return 0;
354
355 /*
356 * Only set start when IPC can be computed. We can only
357 * compute it when the basic block is completely in a single
358 * function.
359 * Special case the case when the jump is elsewhere, but
360 * it starts on the function start.
361 */
362 if (start &&
363 (start->ms.sym == ams->ms.sym ||
364 (ams->ms.sym &&
365 start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
366 saddr = start->al_addr;
367 if (saddr == 0)
368 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
369 ams->addr,
370 start ? start->addr : 0,
371 ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
372 saddr);
373 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
374 if (err)
375 pr_debug2("account_cycles failed %d\n", err);
376 return err;
377}
378
379struct annotation_line *annotated_source__get_line(struct annotated_source *src,
380 s64 offset)
381{
382 struct annotation_line *al;
383
384 list_for_each_entry(al, &src->source, node) {
385 if (al->offset == offset)
386 return al;
387 }
388 return NULL;
389}
390
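/* Count the instruction lines between offsets @start and @end, inclusive. */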
391static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
392{
393 struct annotation_line *al;
394 unsigned n_insn = 0;
395
396 al = annotated_source__get_line(notes->src, start);
397 if (al == NULL)
398 return 0;
399
 400	list_for_each_entry_from(al, &notes->src->source, node) {
401 if (al->offset == -1)
402 continue;
403 if ((u64)al->offset > end)
404 break;
405 n_insn++;
406 }
407 return n_insn;
408}
409
410static void annotated_branch__delete(struct annotated_branch *branch)
411{
412 if (branch) {
413 zfree(&branch->cycles_hist);
414 free(branch);
415 }
416}
417
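/*
 * Compute the IPC of the basic block [@start, @end] from @ch and copy it
 * to every instruction line in the block that does not have one yet,
 * updating the branch coverage statistics along the way.
 */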
418static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
419{
420 unsigned n_insn;
421 unsigned int cover_insn = 0;
422
423 n_insn = annotation__count_insn(notes, start, end);
424 if (n_insn && ch->num && ch->cycles) {
425 struct annotation_line *al;
426 struct annotated_branch *branch;
427 float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
428
429 /* Hide data when there are too many overlaps. */
430 if (ch->reset >= 0x7fff)
431 return;
432
433 al = annotated_source__get_line(notes->src, start);
434 if (al == NULL)
435 return;
436
 437		list_for_each_entry_from(al, &notes->src->source, node) {
438 if (al->offset == -1)
439 continue;
440 if ((u64)al->offset > end)
441 break;
442 if (al->cycles && al->cycles->ipc == 0.0) {
443 al->cycles->ipc = ipc;
444 cover_insn++;
445 }
446 }
447
448 branch = annotation__get_branch(notes);
449 if (cover_insn && branch) {
450 branch->hit_cycles += ch->cycles;
451 branch->hit_insn += n_insn * ch->num;
452 branch->cover_insn += cover_insn;
453 }
454 }
455}
456
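/*
 * Walk the cycles histogram from the end of the symbol and attach the
 * per-line cycle info (IPC, avg/min/max cycles).  On allocation failure
 * the entries attached so far are freed again.
 */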
457static int annotation__compute_ipc(struct annotation *notes, size_t size)
458{
459 int err = 0;
460 s64 offset;
461
462 if (!notes->branch || !notes->branch->cycles_hist)
463 return 0;
464
465 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
466 notes->branch->hit_cycles = 0;
467 notes->branch->hit_insn = 0;
468 notes->branch->cover_insn = 0;
469
470 annotation__lock(notes);
471 for (offset = size - 1; offset >= 0; --offset) {
472 struct cyc_hist *ch;
473
 474		ch = &notes->branch->cycles_hist[offset];
475 if (ch && ch->cycles) {
476 struct annotation_line *al;
477
478 al = annotated_source__get_line(notes->src, offset);
479 if (al && al->cycles == NULL) {
480 al->cycles = zalloc(sizeof(*al->cycles));
481 if (al->cycles == NULL) {
482 err = ENOMEM;
483 break;
484 }
485 }
486 if (ch->have_start)
487 annotation__count_and_fill(notes, ch->start, offset, ch);
488 if (al && ch->num_aggr) {
489 al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
490 al->cycles->max = ch->cycles_max;
491 al->cycles->min = ch->cycles_min;
492 }
493 }
494 }
495
496 if (err) {
497 while (++offset < (s64)size) {
 498			struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
499
500 if (ch && ch->cycles) {
501 struct annotation_line *al;
502
503 al = annotated_source__get_line(notes->src, offset);
504 if (al)
505 zfree(&al->cycles);
506 }
507 }
508 }
509
510 annotation__unlock(notes);
511 return 0;
512}
513
514int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
515 struct evsel *evsel)
516{
517 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
518}
519
520int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
521 struct evsel *evsel, u64 ip)
522{
523 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
524}
525
526
527void annotation__exit(struct annotation *notes)
528{
529 annotated_source__delete(notes->src);
530 annotated_branch__delete(notes->branch);
531}
532
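/*
 * struct annotation does not embed its own mutex; locking goes through a
 * global sharded_mutex, with the shard picked by hashing the annotation
 * pointer.  It is created on first use and sized by the number of CPUs.
 */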
533static struct sharded_mutex *sharded_mutex;
534
535static void annotation__init_sharded_mutex(void)
536{
537 /* As many mutexes as there are CPUs. */
538 sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
539}
540
541static size_t annotation__hash(const struct annotation *notes)
542{
543 return (size_t)notes;
544}
545
546static struct mutex *annotation__get_mutex(const struct annotation *notes)
547{
548 static pthread_once_t once = PTHREAD_ONCE_INIT;
549
550 pthread_once(&once, annotation__init_sharded_mutex);
551 if (!sharded_mutex)
552 return NULL;
553
554 return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
555}
556
557void annotation__lock(struct annotation *notes)
558 NO_THREAD_SAFETY_ANALYSIS
559{
560 struct mutex *mutex = annotation__get_mutex(notes);
561
562 if (mutex)
563 mutex_lock(mutex);
564}
565
566void annotation__unlock(struct annotation *notes)
567 NO_THREAD_SAFETY_ANALYSIS
568{
569 struct mutex *mutex = annotation__get_mutex(notes);
570
571 if (mutex)
572 mutex_unlock(mutex);
573}
574
575bool annotation__trylock(struct annotation *notes)
576{
577 struct mutex *mutex = annotation__get_mutex(notes);
578
579 if (!mutex)
580 return false;
581
582 return mutex_trylock(mutex);
583}
584
585void annotation_line__add(struct annotation_line *al, struct list_head *head)
586{
587 list_add_tail(&al->node, head);
588}
589
590struct annotation_line *
591annotation_line__next(struct annotation_line *pos, struct list_head *head)
592{
593 list_for_each_entry_continue(pos, head, node)
594 if (pos->offset >= 0)
595 return pos;
596
597 return NULL;
598}
599
600static const char *annotate__address_color(struct block_range *br)
601{
602 double cov = block_range__coverage(br);
603
604 if (cov >= 0) {
605 /* mark red for >75% coverage */
606 if (cov > 0.75)
607 return PERF_COLOR_RED;
608
609 /* mark dull for <1% coverage */
610 if (cov < 0.01)
611 return PERF_COLOR_NORMAL;
612 }
613
614 return PERF_COLOR_MAGENTA;
615}
616
617static const char *annotate__asm_color(struct block_range *br)
618{
619 double cov = block_range__coverage(br);
620
621 if (cov >= 0) {
622 /* mark dull for <1% coverage */
623 if (cov < 0.01)
624 return PERF_COLOR_NORMAL;
625 }
626
627 return PERF_COLOR_BLUE;
628}
629
630static void annotate__branch_printf(struct block_range *br, u64 addr)
631{
632 bool emit_comment = true;
633
634 if (!br)
635 return;
636
637#if 1
638 if (br->is_target && br->start == addr) {
639 struct block_range *branch = br;
640 double p;
641
642 /*
643 * Find matching branch to our target.
644 */
645 while (!branch->is_branch)
646 branch = block_range__next(branch);
647
648 p = 100 *(double)br->entry / branch->coverage;
649
650 if (p > 0.1) {
651 if (emit_comment) {
652 emit_comment = false;
653 printf("\t#");
654 }
655
656 /*
657 * The percentage of coverage joined at this target in relation
658 * to the next branch.
659 */
660 printf(" +%.2f%%", p);
661 }
662 }
663#endif
664 if (br->is_branch && br->end == addr) {
665 double p = 100*(double)br->taken / br->coverage;
666
667 if (p > 0.1) {
668 if (emit_comment) {
669 emit_comment = false;
670 printf("\t#");
671 }
672
673 /*
674 * The percentage of coverage leaving at this branch, and
675 * its prediction ratio.
676 */
677 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
678 }
679 }
680}
681
682static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
683{
684 s64 offset = dl->al.offset;
685 const u64 addr = start + offset;
686 struct block_range *br;
687
688 br = block_range__find(addr);
689 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
690 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
691 annotate__branch_printf(br, addr);
692 return 0;
693}
694
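/*
 * Print one annotation line.  Returns 0 when the line was printed, 1 when
 * it was filtered by @max_lines and -1 when it was filtered by @min_pcnt
 * (or is a non-IP line while source context is being queued).
 */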
695static int
696annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
697 struct evsel *evsel, u64 len, int min_pcnt, int printed,
698 int max_lines, struct annotation_line *queue, int addr_fmt_width,
699 int percent_type)
700{
701 struct disasm_line *dl = container_of(al, struct disasm_line, al);
702 static const char *prev_line;
703
704 if (al->offset != -1) {
705 double max_percent = 0.0;
706 int i, nr_percent = 1;
707 const char *color;
708 struct annotation *notes = symbol__annotation(sym);
709
710 for (i = 0; i < al->data_nr; i++) {
711 double percent;
712
713 percent = annotation_data__percent(&al->data[i],
714 percent_type);
715
716 if (percent > max_percent)
717 max_percent = percent;
718 }
719
720 if (al->data_nr > nr_percent)
721 nr_percent = al->data_nr;
722
723 if (max_percent < min_pcnt)
724 return -1;
725
726 if (max_lines && printed >= max_lines)
727 return 1;
728
729 if (queue != NULL) {
 730			list_for_each_entry_from(queue, &notes->src->source, node) {
731 if (queue == al)
732 break;
733 annotation_line__print(queue, sym, start, evsel, len,
734 0, 0, 1, NULL, addr_fmt_width,
735 percent_type);
736 }
737 }
738
739 color = get_percent_color(max_percent);
740
741 for (i = 0; i < nr_percent; i++) {
742 struct annotation_data *data = &al->data[i];
743 double percent;
744
745 percent = annotation_data__percent(data, percent_type);
746 color = get_percent_color(percent);
747
748 if (symbol_conf.show_total_period)
749 color_fprintf(stdout, color, " %11" PRIu64,
750 data->he.period);
751 else if (symbol_conf.show_nr_samples)
752 color_fprintf(stdout, color, " %7" PRIu64,
753 data->he.nr_samples);
754 else
755 color_fprintf(stdout, color, " %7.2f", percent);
756 }
757
758 printf(" : ");
759
760 disasm_line__print(dl, start, addr_fmt_width);
761
762 /*
 763		 * Also color the filename and line number if needed, with
 764		 * the same color as the percentage. Don't print it twice
 765		 * for consecutive colored addresses with the same filename:line.
766 */
767 if (al->path) {
768 if (!prev_line || strcmp(prev_line, al->path)) {
769 color_fprintf(stdout, color, " // %s", al->path);
770 prev_line = al->path;
771 }
772 }
773
774 printf("\n");
775 } else if (max_lines && printed >= max_lines)
776 return 1;
777 else {
778 int width = symbol_conf.show_total_period ? 12 : 8;
779
780 if (queue)
781 return -1;
782
783 if (evsel__is_group_event(evsel))
784 width *= evsel->core.nr_members;
785
786 if (!*al->line)
787 printf(" %*s:\n", width, " ");
788 else
789 printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
790 }
791
792 return 0;
793}
794
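/*
 * Sum the samples and period of @evsel in the range [@offset, @end) and
 * convert them into the local/global hits and period percentages.
 */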
795static void calc_percent(struct annotation *notes,
796 struct evsel *evsel,
797 struct annotation_data *data,
798 s64 offset, s64 end)
799{
800 struct hists *hists = evsel__hists(evsel);
801 int evidx = evsel->core.idx;
802 struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
803 unsigned int hits = 0;
804 u64 period = 0;
805
806 while (offset < end) {
807 struct sym_hist_entry *entry;
808
809 entry = annotated_source__hist_entry(notes->src, evidx, offset);
810 if (entry) {
811 hits += entry->nr_samples;
812 period += entry->period;
813 }
814 ++offset;
815 }
816
817 if (sym_hist->nr_samples) {
818 data->he.period = period;
819 data->he.nr_samples = hits;
820 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
821 }
822
823 if (hists->stats.nr_non_filtered_samples)
824 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
825
826 if (sym_hist->period)
827 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
828
829 if (hists->stats.total_period)
830 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
831}
832
833static void annotation__calc_percent(struct annotation *notes,
834 struct evsel *leader, s64 len)
835{
836 struct annotation_line *al, *next;
837 struct evsel *evsel;
838
 839	list_for_each_entry(al, &notes->src->source, node) {
840 s64 end;
841 int i = 0;
842
843 if (al->offset == -1)
844 continue;
845
 846		next = annotation_line__next(al, &notes->src->source);
847 end = next ? next->offset : len;
848
849 for_each_group_evsel(evsel, leader) {
850 struct annotation_data *data;
851
852 BUG_ON(i >= al->data_nr);
853
854 data = &al->data[i++];
855
856 calc_percent(notes, evsel, data, al->offset, end);
857 }
858 }
859}
860
861void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
862{
863 struct annotation *notes = symbol__annotation(sym);
864
865 annotation__calc_percent(notes, evsel, symbol__size(sym));
866}
867
868static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
869{
870 struct perf_env *env = evsel__env(evsel);
871 const char *arch_name = perf_env__arch(env);
872 struct arch *arch;
873 int err;
874
875 if (!arch_name) {
876 *parch = NULL;
877 return errno;
878 }
879
880 *parch = arch = arch__find(arch_name);
881 if (arch == NULL) {
882 pr_err("%s: unsupported arch %s\n", __func__, arch_name);
883 return ENOTSUP;
884 }
885
886 if (arch->init) {
887 err = arch->init(arch, env ? env->cpuid : NULL);
888 if (err) {
889 pr_err("%s: failed to initialize %s arch priv area\n",
890 __func__, arch->name);
891 return err;
892 }
893 }
894 return 0;
895}
896
897int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
898 struct arch **parch)
899{
900 struct symbol *sym = ms->sym;
901 struct annotation *notes = symbol__annotation(sym);
902 struct annotate_args args = {
903 .evsel = evsel,
904 .options = &annotate_opts,
905 };
906 struct arch *arch = NULL;
907 int err;
908
909 err = evsel__get_arch(evsel, &arch);
910 if (err < 0)
911 return err;
912
913 if (parch)
914 *parch = arch;
915
 916	if (notes->src && !list_empty(&notes->src->source))
917 return 0;
918
919 args.arch = arch;
920 args.ms = *ms;
921
922 if (notes->src == NULL) {
923 notes->src = annotated_source__new();
924 if (notes->src == NULL)
925 return -1;
926 }
927
928 if (annotate_opts.full_addr)
929 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
930 else
931 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
932
933 return symbol__disassemble(sym, &args);
934}
935
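/*
 * Insert @al into the rbtree keyed by source file:line path.  If the path
 * is already present, only its percentages are added to the existing node.
 */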
936static void insert_source_line(struct rb_root *root, struct annotation_line *al)
937{
938 struct annotation_line *iter;
939 struct rb_node **p = &root->rb_node;
940 struct rb_node *parent = NULL;
941 unsigned int percent_type = annotate_opts.percent_type;
942 int i, ret;
943
944 while (*p != NULL) {
945 parent = *p;
946 iter = rb_entry(parent, struct annotation_line, rb_node);
947
948 ret = strcmp(iter->path, al->path);
949 if (ret == 0) {
950 for (i = 0; i < al->data_nr; i++) {
951 iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
952 percent_type);
953 }
954 return;
955 }
956
957 if (ret < 0)
958 p = &(*p)->rb_left;
959 else
960 p = &(*p)->rb_right;
961 }
962
963 for (i = 0; i < al->data_nr; i++) {
964 al->data[i].percent_sum = annotation_data__percent(&al->data[i],
965 percent_type);
966 }
967
968 rb_link_node(&al->rb_node, parent, p);
969 rb_insert_color(&al->rb_node, root);
970}
971
972static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
973{
974 int i;
975
976 for (i = 0; i < a->data_nr; i++) {
977 if (a->data[i].percent_sum == b->data[i].percent_sum)
978 continue;
979 return a->data[i].percent_sum > b->data[i].percent_sum;
980 }
981
982 return 0;
983}
984
985static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
986{
987 struct annotation_line *iter;
988 struct rb_node **p = &root->rb_node;
989 struct rb_node *parent = NULL;
990
991 while (*p != NULL) {
992 parent = *p;
993 iter = rb_entry(parent, struct annotation_line, rb_node);
994
995 if (cmp_source_line(al, iter))
996 p = &(*p)->rb_left;
997 else
998 p = &(*p)->rb_right;
999 }
1000
1001 rb_link_node(&al->rb_node, parent, p);
1002 rb_insert_color(&al->rb_node, root);
1003}
1004
1005static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1006{
1007 struct annotation_line *al;
1008 struct rb_node *node;
1009
1010 node = rb_first(src_root);
1011 while (node) {
1012 struct rb_node *next;
1013
1014 al = rb_entry(node, struct annotation_line, rb_node);
1015 next = rb_next(node);
1016 rb_erase(node, src_root);
1017
1018 __resort_source_line(dest_root, al);
1019 node = next;
1020 }
1021}
1022
1023static void print_summary(struct rb_root *root, const char *filename)
1024{
1025 struct annotation_line *al;
1026 struct rb_node *node;
1027
1028 printf("\nSorted summary for file %s\n", filename);
1029 printf("----------------------------------------------\n\n");
1030
1031 if (RB_EMPTY_ROOT(root)) {
1032 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1033 return;
1034 }
1035
1036 node = rb_first(root);
1037 while (node) {
1038 double percent, percent_max = 0.0;
1039 const char *color;
1040 char *path;
1041 int i;
1042
1043 al = rb_entry(node, struct annotation_line, rb_node);
1044 for (i = 0; i < al->data_nr; i++) {
1045 percent = al->data[i].percent_sum;
1046 color = get_percent_color(percent);
1047 color_fprintf(stdout, color, " %7.2f", percent);
1048
1049 if (percent > percent_max)
1050 percent_max = percent;
1051 }
1052
1053 path = al->path;
1054 color = get_percent_color(percent_max);
1055 color_fprintf(stdout, color, " %s\n", path);
1056
1057 node = rb_next(node);
1058 }
1059}
1060
1061static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
1062{
1063 int evidx = evsel->core.idx;
1064 struct annotation *notes = symbol__annotation(sym);
1065 struct sym_hist *h = annotation__histogram(notes, evidx);
1066 u64 len = symbol__size(sym), offset;
1067
1068 for (offset = 0; offset < len; ++offset) {
1069 struct sym_hist_entry *entry;
1070
1071 entry = annotated_source__hist_entry(notes->src, evidx, offset);
1072 if (entry && entry->nr_samples != 0)
1073 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1074 sym->start + offset, entry->nr_samples);
1075 }
1076 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1077}
1078
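/*
 * Width needed to print the highest address in the function, i.e. that of
 * the last line that has an instruction offset.
 */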
1079static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
1080{
1081 char bf[32];
1082 struct annotation_line *line;
1083
1084 list_for_each_entry_reverse(line, lines, node) {
1085 if (line->offset != -1)
1086 return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
1087 }
1088
1089 return 0;
1090}
1091
1092int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
1093{
1094 struct map *map = ms->map;
1095 struct symbol *sym = ms->sym;
1096 struct dso *dso = map__dso(map);
1097 char *filename;
1098 const char *d_filename;
1099 const char *evsel_name = evsel__name(evsel);
1100 struct annotation *notes = symbol__annotation(sym);
1101 struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
1102 struct annotation_line *pos, *queue = NULL;
1103 struct annotation_options *opts = &annotate_opts;
1104 u64 start = map__rip_2objdump(map, sym->start);
1105 int printed = 2, queue_len = 0, addr_fmt_width;
1106 int more = 0;
1107 bool context = opts->context;
1108 u64 len;
1109 int width = symbol_conf.show_total_period ? 12 : 8;
1110 int graph_dotted_len;
1111 char buf[512];
1112
1113 filename = strdup(dso__long_name(dso));
1114 if (!filename)
1115 return -ENOMEM;
1116
1117 if (opts->full_path)
1118 d_filename = filename;
1119 else
1120 d_filename = basename(filename);
1121
1122 len = symbol__size(sym);
1123
1124 if (evsel__is_group_event(evsel)) {
1125 width *= evsel->core.nr_members;
1126 evsel__group_desc(evsel, buf, sizeof(buf));
1127 evsel_name = buf;
1128 }
1129
1130 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
1131 "percent: %s)\n",
1132 width, width, symbol_conf.show_total_period ? "Period" :
1133 symbol_conf.show_nr_samples ? "Samples" : "Percent",
1134 d_filename, evsel_name, h->nr_samples,
1135 percent_type_str(opts->percent_type));
1136
1137 printf("%-*.*s----\n",
1138 graph_dotted_len, graph_dotted_len, graph_dotted_line);
1139
1140 if (verbose > 0)
1141 symbol__annotate_hits(sym, evsel);
1142
 1143	addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
1144
 1145	list_for_each_entry(pos, &notes->src->source, node) {
1146 int err;
1147
1148 if (context && queue == NULL) {
1149 queue = pos;
1150 queue_len = 0;
1151 }
1152
1153 err = annotation_line__print(pos, sym, start, evsel, len,
1154 opts->min_pcnt, printed, opts->max_lines,
1155 queue, addr_fmt_width, opts->percent_type);
1156
1157 switch (err) {
1158 case 0:
1159 ++printed;
1160 if (context) {
1161 printed += queue_len;
1162 queue = NULL;
1163 queue_len = 0;
1164 }
1165 break;
1166 case 1:
1167 /* filtered by max_lines */
1168 ++more;
1169 break;
1170 case -1:
1171 default:
1172 /*
 1173			 * Filtered by min_pcnt or non-IP lines when
1174 * context != 0
1175 */
1176 if (!context)
1177 break;
1178 if (queue_len == context)
1179 queue = list_entry(queue->node.next, typeof(*queue), node);
1180 else
1181 ++queue_len;
1182 break;
1183 }
1184 }
1185
1186 free(filename);
1187
1188 return more;
1189}
1190
1191static void FILE__set_percent_color(void *fp __maybe_unused,
1192 double percent __maybe_unused,
1193 bool current __maybe_unused)
1194{
1195}
1196
1197static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
1198 int nr __maybe_unused, bool current __maybe_unused)
1199{
1200 return 0;
1201}
1202
1203static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
1204{
1205 return 0;
1206}
1207
1208static void FILE__printf(void *fp, const char *fmt, ...)
1209{
1210 va_list args;
1211
1212 va_start(args, fmt);
1213 vfprintf(fp, fmt, args);
1214 va_end(args);
1215}
1216
1217static void FILE__write_graph(void *fp, int graph)
1218{
1219 const char *s;
1220 switch (graph) {
1221
1222 case DARROW_CHAR: s = "↓"; break;
1223 case UARROW_CHAR: s = "↑"; break;
1224 case LARROW_CHAR: s = "←"; break;
1225 case RARROW_CHAR: s = "→"; break;
1226 default: s = "?"; break;
1227 }
1228
1229 fputs(s, fp);
1230}
1231
1232static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
1233{
1234 struct annotation *notes = symbol__annotation(sym);
1235 struct annotation_write_ops wops = {
1236 .first_line = true,
1237 .obj = fp,
1238 .set_color = FILE__set_color,
1239 .set_percent_color = FILE__set_percent_color,
1240 .set_jumps_percent_color = FILE__set_jumps_percent_color,
1241 .printf = FILE__printf,
1242 .write_graph = FILE__write_graph,
1243 };
1244 struct annotation_line *al;
1245
 1246	list_for_each_entry(al, &notes->src->source, node) {
1247 if (annotation_line__filter(al))
1248 continue;
1249 annotation_line__write(al, notes, &wops);
1250 fputc('\n', fp);
1251 wops.first_line = false;
1252 }
1253
1254 return 0;
1255}
1256
1257int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
1258{
1259 const char *ev_name = evsel__name(evsel);
1260 char buf[1024];
1261 char *filename;
1262 int err = -1;
1263 FILE *fp;
1264
1265 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1266 return -1;
1267
1268 fp = fopen(filename, "w");
1269 if (fp == NULL)
1270 goto out_free_filename;
1271
1272 if (evsel__is_group_event(evsel)) {
1273 evsel__group_desc(evsel, buf, sizeof(buf));
1274 ev_name = buf;
1275 }
1276
1277 fprintf(fp, "%s() %s\nEvent: %s\n\n",
1278 ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
1279 symbol__annotate_fprintf2(ms->sym, fp);
1280
1281 fclose(fp);
1282 err = 0;
1283out_free_filename:
1284 free(filename);
1285 return err;
1286}
1287
1288void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1289{
1290 struct annotation *notes = symbol__annotation(sym);
1291 struct sym_hist *h = annotation__histogram(notes, evidx);
1292
1293 memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
1294}
1295
1296void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1297{
1298 struct annotation *notes = symbol__annotation(sym);
1299 struct sym_hist *h = annotation__histogram(notes, evidx);
1300 struct annotation_line *al;
1301
1302 h->nr_samples = 0;
 1303	list_for_each_entry(al, &notes->src->source, node) {
1304 struct sym_hist_entry *entry;
1305
1306 if (al->offset == -1)
1307 continue;
1308
1309 entry = annotated_source__hist_entry(notes->src, evidx, al->offset);
1310 if (entry == NULL)
1311 continue;
1312
1313 entry->nr_samples = entry->nr_samples * 7 / 8;
1314 h->nr_samples += entry->nr_samples;
1315 }
1316}
1317
1318void annotated_source__purge(struct annotated_source *as)
1319{
1320 struct annotation_line *al, *n;
1321
1322 list_for_each_entry_safe(al, n, &as->source, node) {
1323 list_del_init(&al->node);
1324 disasm_line__free(disasm_line(al));
1325 }
1326}
1327
1328static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1329{
1330 size_t printed;
1331
1332 if (dl->al.offset == -1)
1333 return fprintf(fp, "%s\n", dl->al.line);
1334
1335 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
1336
1337 if (dl->ops.raw[0] != '\0') {
1338 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1339 dl->ops.raw);
1340 }
1341
1342 return printed + fprintf(fp, "\n");
1343}
1344
1345size_t disasm__fprintf(struct list_head *head, FILE *fp)
1346{
1347 struct disasm_line *pos;
1348 size_t printed = 0;
1349
1350 list_for_each_entry(pos, head, al.node)
1351 printed += disasm_line__fprintf(pos, fp);
1352
1353 return printed;
1354}
1355
1356bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1357{
1358 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1359 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1360 dl->ops.target.offset >= (s64)symbol__size(sym))
1361 return false;
1362
1363 return true;
1364}
1365
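/*
 * For every valid local jump, bump the jump_sources count of its target
 * line and track the maximum, used later to size the jumps column.
 */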
1366static void
1367annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
1368{
1369 struct annotation_line *al;
1370
1371 /* PLT symbols contain external offsets */
1372 if (strstr(sym->name, "@plt"))
1373 return;
1374
 1375	list_for_each_entry(al, &notes->src->source, node) {
1376 struct disasm_line *dl;
1377 struct annotation_line *target;
1378
1379 dl = disasm_line(al);
1380
1381 if (!disasm_line__is_valid_local_jump(dl, sym))
1382 continue;
1383
1384 target = annotated_source__get_line(notes->src,
1385 dl->ops.target.offset);
1386 /*
1387 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
1388 * have to adjust to the previous offset?
1389 */
1390 if (target == NULL)
1391 continue;
1392
1393 if (++target->jump_sources > notes->src->max_jump_sources)
1394 notes->src->max_jump_sources = target->jump_sources;
1395 }
1396}
1397
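/*
 * Number the lines (all lines and asm-only lines separately) and record
 * the longest line, mainly for the annotate browser's navigation and layout.
 */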
1398static void annotation__set_index(struct annotation *notes)
1399{
1400 struct annotation_line *al;
1401 struct annotated_source *src = notes->src;
1402
1403 src->widths.max_line_len = 0;
1404 src->nr_entries = 0;
1405 src->nr_asm_entries = 0;
1406
1407 list_for_each_entry(al, &src->source, node) {
1408 size_t line_len = strlen(al->line);
1409
1410 if (src->widths.max_line_len < line_len)
1411 src->widths.max_line_len = line_len;
1412 al->idx = src->nr_entries++;
1413 if (al->offset != -1)
1414 al->idx_asm = src->nr_asm_entries++;
1415 else
1416 al->idx_asm = -1;
1417 }
1418}
1419
1420static inline int width_jumps(int n)
1421{
1422 if (n >= 100)
1423 return 5;
1424 if (n / 10)
1425 return 2;
1426 return 1;
1427}
1428
1429static int annotation__max_ins_name(struct annotation *notes)
1430{
1431 int max_name = 0, len;
1432 struct annotation_line *al;
1433
 1434	list_for_each_entry(al, &notes->src->source, node) {
1435 if (al->offset == -1)
1436 continue;
1437
1438 len = strlen(disasm_line(al)->ins.name);
1439 if (max_name < len)
1440 max_name = len;
1441 }
1442
1443 return max_name;
1444}
1445
1446static void
1447annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
1448{
1449 notes->src->widths.addr = notes->src->widths.target =
1450 notes->src->widths.min_addr = hex_width(symbol__size(sym));
1451 notes->src->widths.max_addr = hex_width(sym->end);
1452 notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
1453 notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
1454}
1455
1456void annotation__update_column_widths(struct annotation *notes)
1457{
1458 if (annotate_opts.use_offset)
1459 notes->src->widths.target = notes->src->widths.min_addr;
1460 else if (annotate_opts.full_addr)
1461 notes->src->widths.target = BITS_PER_LONG / 4;
1462 else
1463 notes->src->widths.target = notes->src->widths.max_addr;
1464
1465 notes->src->widths.addr = notes->src->widths.target;
1466
1467 if (annotate_opts.show_nr_jumps)
1468 notes->src->widths.addr += notes->src->widths.jumps + 1;
1469}
1470
1471void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
1472{
1473 annotate_opts.full_addr = !annotate_opts.full_addr;
1474
1475 if (annotate_opts.full_addr)
1476 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1477 else
1478 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1479
1480 annotation__update_column_widths(notes);
1481}
1482
1483static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
1484 struct rb_root *root)
1485{
1486 struct annotation_line *al;
1487 struct rb_root tmp_root = RB_ROOT;
1488
 1489	list_for_each_entry(al, &notes->src->source, node) {
1490 double percent_max = 0.0;
1491 u64 addr;
1492 int i;
1493
1494 for (i = 0; i < al->data_nr; i++) {
1495 double percent;
1496
1497 percent = annotation_data__percent(&al->data[i],
1498 annotate_opts.percent_type);
1499
1500 if (percent > percent_max)
1501 percent_max = percent;
1502 }
1503
1504 if (percent_max <= 0.5)
1505 continue;
1506
1507 addr = map__rip_2objdump(ms->map, ms->sym->start);
1508 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1509 false, true, ms->sym->start + al->offset);
1510 insert_source_line(&tmp_root, al);
1511 }
1512
1513 resort_source_line(root, &tmp_root);
1514}
1515
1516static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
1517{
1518 struct annotation *notes = symbol__annotation(ms->sym);
1519
1520 annotation__calc_lines(notes, ms, root);
1521}
1522
1523int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
1524{
1525 struct dso *dso = map__dso(ms->map);
1526 struct symbol *sym = ms->sym;
1527 struct rb_root source_line = RB_ROOT;
1528 struct hists *hists = evsel__hists(evsel);
1529 char buf[1024];
1530 int err;
1531
1532 err = symbol__annotate2(ms, evsel, NULL);
1533 if (err) {
1534 char msg[BUFSIZ];
1535
1536 dso__set_annotate_warned(dso);
1537 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1538 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1539 return -1;
1540 }
1541
1542 if (annotate_opts.print_lines) {
1543 srcline_full_filename = annotate_opts.full_path;
1544 symbol__calc_lines(ms, &source_line);
1545 print_summary(&source_line, dso__long_name(dso));
1546 }
1547
1548 hists__scnprintf_title(hists, buf, sizeof(buf));
1549 fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
1550 buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
1551 symbol__annotate_fprintf2(sym, stdout);
1552
1553 annotated_source__purge(symbol__annotation(sym)->src);
1554
1555 return 0;
1556}
1557
1558int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
1559{
1560 struct dso *dso = map__dso(ms->map);
1561 struct symbol *sym = ms->sym;
1562 struct rb_root source_line = RB_ROOT;
1563 int err;
1564
1565 err = symbol__annotate(ms, evsel, NULL);
1566 if (err) {
1567 char msg[BUFSIZ];
1568
1569 dso__set_annotate_warned(dso);
1570 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1571 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1572 return -1;
1573 }
1574
1575 symbol__calc_percent(sym, evsel);
1576
1577 if (annotate_opts.print_lines) {
1578 srcline_full_filename = annotate_opts.full_path;
1579 symbol__calc_lines(ms, &source_line);
1580 print_summary(&source_line, dso__long_name(dso));
1581 }
1582
1583 symbol__annotate_printf(ms, evsel);
1584
1585 annotated_source__purge(symbol__annotation(sym)->src);
1586
1587 return 0;
1588}
1589
1590bool ui__has_annotation(void)
1591{
1592 return use_browser == 1 && perf_hpp_list.sym;
1593}
1594
1595
1596static double annotation_line__max_percent(struct annotation_line *al,
1597 struct annotation *notes,
1598 unsigned int percent_type)
1599{
1600 double percent_max = 0.0;
1601 int i;
1602
1603 for (i = 0; i < notes->src->nr_events; i++) {
1604 double percent;
1605
1606 percent = annotation_data__percent(&al->data[i],
1607 percent_type);
1608
1609 if (percent > percent_max)
1610 percent_max = percent;
1611 }
1612
1613 return percent_max;
1614}
1615
1616static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
1617 void *obj, char *bf, size_t size,
1618 void (*obj__printf)(void *obj, const char *fmt, ...),
1619 void (*obj__write_graph)(void *obj, int graph))
1620{
1621 if (dl->ins.ops && dl->ins.ops->scnprintf) {
1622 if (ins__is_jump(&dl->ins)) {
1623 bool fwd;
1624
1625 if (dl->ops.target.outside)
1626 goto call_like;
1627 fwd = dl->ops.target.offset > dl->al.offset;
1628 obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
1629 obj__printf(obj, " ");
1630 } else if (ins__is_call(&dl->ins)) {
1631call_like:
1632 obj__write_graph(obj, RARROW_CHAR);
1633 obj__printf(obj, " ");
1634 } else if (ins__is_ret(&dl->ins)) {
1635 obj__write_graph(obj, LARROW_CHAR);
1636 obj__printf(obj, " ");
1637 } else {
1638 obj__printf(obj, " ");
1639 }
1640 } else {
1641 obj__printf(obj, " ");
1642 }
1643
1644 disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
1645 notes->src->widths.max_ins_name);
1646}
1647
1648static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1649{
1650 double ipc = 0.0, coverage = 0.0;
1651 struct annotated_branch *branch = annotation__get_branch(notes);
1652
1653 if (branch && branch->hit_cycles)
1654 ipc = branch->hit_insn / ((double)branch->hit_cycles);
1655
1656 if (branch && branch->total_insn) {
1657 coverage = branch->cover_insn * 100.0 /
1658 ((double)branch->total_insn);
1659 }
1660
1661 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1662 ipc, coverage);
1663}
1664
1665static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
1666 bool first_line, bool current_entry, bool change_color, int width,
1667 void *obj, unsigned int percent_type,
1668 int (*obj__set_color)(void *obj, int color),
1669 void (*obj__set_percent_color)(void *obj, double percent, bool current),
1670 int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
1671 void (*obj__printf)(void *obj, const char *fmt, ...),
1672 void (*obj__write_graph)(void *obj, int graph))
1673
1674{
1675 double percent_max = annotation_line__max_percent(al, notes, percent_type);
1676 int pcnt_width = annotation__pcnt_width(notes),
1677 cycles_width = annotation__cycles_width(notes);
1678 bool show_title = false;
1679 char bf[256];
1680 int printed;
1681
1682 if (first_line && (al->offset == -1 || percent_max == 0.0)) {
1683 if (notes->branch && al->cycles) {
1684 if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
1685 show_title = true;
1686 } else
1687 show_title = true;
1688 }
1689
1690 if (al->offset != -1 && percent_max != 0.0) {
1691 int i;
1692
1693 for (i = 0; i < notes->src->nr_events; i++) {
1694 double percent;
1695
1696 percent = annotation_data__percent(&al->data[i], percent_type);
1697
1698 obj__set_percent_color(obj, percent, current_entry);
1699 if (symbol_conf.show_total_period) {
1700 obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
1701 } else if (symbol_conf.show_nr_samples) {
1702 obj__printf(obj, "%6" PRIu64 " ",
1703 al->data[i].he.nr_samples);
1704 } else {
1705 obj__printf(obj, "%6.2f ", percent);
1706 }
1707 }
1708 } else {
1709 obj__set_percent_color(obj, 0, current_entry);
1710
1711 if (!show_title)
1712 obj__printf(obj, "%-*s", pcnt_width, " ");
1713 else {
1714 obj__printf(obj, "%-*s", pcnt_width,
1715 symbol_conf.show_total_period ? "Period" :
1716 symbol_conf.show_nr_samples ? "Samples" : "Percent");
1717 }
1718 }
1719
1720 if (notes->branch) {
1721 if (al->cycles && al->cycles->ipc)
1722 obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
1723 else if (!show_title)
1724 obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
1725 else
1726 obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
1727
1728 if (!annotate_opts.show_minmax_cycle) {
1729 if (al->cycles && al->cycles->avg)
1730 obj__printf(obj, "%*" PRIu64 " ",
1731 ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
1732 else if (!show_title)
1733 obj__printf(obj, "%*s",
1734 ANNOTATION__CYCLES_WIDTH, " ");
1735 else
1736 obj__printf(obj, "%*s ",
1737 ANNOTATION__CYCLES_WIDTH - 1,
1738 "Cycle");
1739 } else {
1740 if (al->cycles) {
1741 char str[32];
1742
1743 scnprintf(str, sizeof(str),
1744 "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
1745 al->cycles->avg, al->cycles->min,
1746 al->cycles->max);
1747
1748 obj__printf(obj, "%*s ",
1749 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1750 str);
1751 } else if (!show_title)
1752 obj__printf(obj, "%*s",
1753 ANNOTATION__MINMAX_CYCLES_WIDTH,
1754 " ");
1755 else
1756 obj__printf(obj, "%*s ",
1757 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1758 "Cycle(min/max)");
1759 }
1760
1761 if (show_title && !*al->line) {
1762 ipc_coverage_string(bf, sizeof(bf), notes);
1763 obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
1764 }
1765 }
1766
1767 obj__printf(obj, " ");
1768
1769 if (!*al->line)
1770 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
1771 else if (al->offset == -1) {
1772 if (al->line_nr && annotate_opts.show_linenr)
1773 printed = scnprintf(bf, sizeof(bf), "%-*d ",
1774 notes->src->widths.addr + 1, al->line_nr);
1775 else
1776 printed = scnprintf(bf, sizeof(bf), "%-*s ",
1777 notes->src->widths.addr, " ");
1778 obj__printf(obj, bf);
1779 obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
1780 } else {
1781 u64 addr = al->offset;
1782 int color = -1;
1783
1784 if (!annotate_opts.use_offset)
1785 addr += notes->src->start;
1786
1787 if (!annotate_opts.use_offset) {
1788 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
1789 } else {
1790 if (al->jump_sources &&
1791 annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
1792 if (annotate_opts.show_nr_jumps) {
1793 int prev;
1794 printed = scnprintf(bf, sizeof(bf), "%*d ",
1795 notes->src->widths.jumps,
1796 al->jump_sources);
1797 prev = obj__set_jumps_percent_color(obj, al->jump_sources,
1798 current_entry);
1799 obj__printf(obj, bf);
1800 obj__set_color(obj, prev);
1801 }
1802print_addr:
1803 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
1804 notes->src->widths.target, addr);
1805 } else if (ins__is_call(&disasm_line(al)->ins) &&
1806 annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
1807 goto print_addr;
1808 } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
1809 goto print_addr;
1810 } else {
1811 printed = scnprintf(bf, sizeof(bf), "%-*s ",
1812 notes->src->widths.addr, " ");
1813 }
1814 }
1815
1816 if (change_color)
1817 color = obj__set_color(obj, HE_COLORSET_ADDR);
1818 obj__printf(obj, bf);
1819 if (change_color)
1820 obj__set_color(obj, color);
1821
1822 disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
1823
1824 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
1825 }
1826
1827}
1828
1829void annotation_line__write(struct annotation_line *al, struct annotation *notes,
1830 struct annotation_write_ops *wops)
1831{
1832 __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
1833 wops->change_color, wops->width, wops->obj,
1834 annotate_opts.percent_type,
1835 wops->set_color, wops->set_percent_color,
1836 wops->set_jumps_percent_color, wops->printf,
1837 wops->write_graph);
1838}
1839
1840int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
1841 struct arch **parch)
1842{
1843 struct symbol *sym = ms->sym;
1844 struct annotation *notes = symbol__annotation(sym);
1845 size_t size = symbol__size(sym);
1846 int nr_pcnt = 1, err;
1847
1848 if (evsel__is_group_event(evsel))
1849 nr_pcnt = evsel->core.nr_members;
1850
1851 err = symbol__annotate(ms, evsel, parch);
1852 if (err)
1853 return err;
1854
1855 symbol__calc_percent(sym, evsel);
1856
1857 annotation__set_index(notes);
1858 annotation__mark_jump_targets(notes, sym);
1859
1860 err = annotation__compute_ipc(notes, size);
1861 if (err)
1862 return err;
1863
1864 annotation__init_column_widths(notes, sym);
1865 notes->src->nr_events = nr_pcnt;
1866
1867 annotation__update_column_widths(notes);
1868 sym->annotate2 = 1;
1869
1870 return 0;
1871}
1872
1873static int annotation__config(const char *var, const char *value, void *data)
1874{
1875 struct annotation_options *opt = data;
1876
1877 if (!strstarts(var, "annotate."))
1878 return 0;
1879
1880 if (!strcmp(var, "annotate.offset_level")) {
1881 perf_config_u8(&opt->offset_level, "offset_level", value);
1882
1883 if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
1884 opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
1885 else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
1886 opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
1887 } else if (!strcmp(var, "annotate.hide_src_code")) {
1888 opt->hide_src_code = perf_config_bool("hide_src_code", value);
1889 } else if (!strcmp(var, "annotate.jump_arrows")) {
1890 opt->jump_arrows = perf_config_bool("jump_arrows", value);
1891 } else if (!strcmp(var, "annotate.show_linenr")) {
1892 opt->show_linenr = perf_config_bool("show_linenr", value);
1893 } else if (!strcmp(var, "annotate.show_nr_jumps")) {
1894 opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
1895 } else if (!strcmp(var, "annotate.show_nr_samples")) {
1896 symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
1897 value);
1898 } else if (!strcmp(var, "annotate.show_total_period")) {
1899 symbol_conf.show_total_period = perf_config_bool("show_total_period",
1900 value);
1901 } else if (!strcmp(var, "annotate.use_offset")) {
1902 opt->use_offset = perf_config_bool("use_offset", value);
1903 } else if (!strcmp(var, "annotate.disassembler_style")) {
1904 opt->disassembler_style = strdup(value);
1905 if (!opt->disassembler_style) {
1906 pr_err("Not enough memory for annotate.disassembler_style\n");
1907 return -1;
1908 }
1909 } else if (!strcmp(var, "annotate.objdump")) {
1910 opt->objdump_path = strdup(value);
1911 if (!opt->objdump_path) {
1912 pr_err("Not enough memory for annotate.objdump\n");
1913 return -1;
1914 }
1915 } else if (!strcmp(var, "annotate.addr2line")) {
1916 symbol_conf.addr2line_path = strdup(value);
1917 if (!symbol_conf.addr2line_path) {
1918 pr_err("Not enough memory for annotate.addr2line\n");
1919 return -1;
1920 }
1921 } else if (!strcmp(var, "annotate.demangle")) {
1922 symbol_conf.demangle = perf_config_bool("demangle", value);
1923 } else if (!strcmp(var, "annotate.demangle_kernel")) {
1924 symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
1925 } else {
1926 pr_debug("%s variable unknown, ignoring...", var);
1927 }
1928
1929 return 0;
1930}
1931
1932void annotation_options__init(void)
1933{
1934 struct annotation_options *opt = &annotate_opts;
1935
1936 memset(opt, 0, sizeof(*opt));
1937
1938 /* Default values. */
1939 opt->use_offset = true;
1940 opt->jump_arrows = true;
1941 opt->annotate_src = true;
1942 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
1943 opt->percent_type = PERCENT_PERIOD_LOCAL;
1944}
1945
1946void annotation_options__exit(void)
1947{
1948 zfree(&annotate_opts.disassembler_style);
1949 zfree(&annotate_opts.objdump_path);
1950}
1951
1952void annotation_config__init(void)
1953{
1954 perf_config(annotation__config, &annotate_opts);
1955}
1956
1957static unsigned int parse_percent_type(char *str1, char *str2)
1958{
1959 unsigned int type = (unsigned int) -1;
1960
1961 if (!strcmp("period", str1)) {
1962 if (!strcmp("local", str2))
1963 type = PERCENT_PERIOD_LOCAL;
1964 else if (!strcmp("global", str2))
1965 type = PERCENT_PERIOD_GLOBAL;
1966 }
1967
1968 if (!strcmp("hits", str1)) {
1969 if (!strcmp("local", str2))
1970 type = PERCENT_HITS_LOCAL;
1971 else if (!strcmp("global", str2))
1972 type = PERCENT_HITS_GLOBAL;
1973 }
1974
1975 return type;
1976}
1977
1978int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
1979 int unset __maybe_unused)
1980{
1981 unsigned int type;
1982 char *str1, *str2;
1983 int err = -1;
1984
1985 str1 = strdup(_str);
1986 if (!str1)
1987 return -ENOMEM;
1988
1989 str2 = strchr(str1, '-');
1990 if (!str2)
1991 goto out;
1992
1993 *str2++ = 0;
1994
1995 type = parse_percent_type(str1, str2);
1996 if (type == (unsigned int) -1)
1997 type = parse_percent_type(str2, str1);
1998 if (type != (unsigned int) -1) {
1999 annotate_opts.percent_type = type;
2000 err = 0;
2001 }
2002
2003out:
2004 free(str1);
2005 return err;
2006}
2007
2008int annotate_check_args(void)
2009{
2010 struct annotation_options *args = &annotate_opts;
2011
2012 if (args->prefix_strip && !args->prefix) {
2013 pr_err("--prefix-strip requires --prefix\n");
2014 return -1;
2015 }
2016 return 0;
2017}
2018
2019/*
 2020 * Get the register number and access offset from the given instruction.
 2021 * It assumes the AT&T x86 asm format like OFFSET(REG).  The format may
 2022 * need to be revisited when other architectures are handled.
 2023 * Fills the register and offset in @op_loc and returns 0 on success.
2024 */
2025static int extract_reg_offset(struct arch *arch, const char *str,
2026 struct annotated_op_loc *op_loc)
2027{
2028 char *p;
2029 char *regname;
2030
2031 if (arch->objdump.register_char == 0)
2032 return -1;
2033
2034 /*
 2035	 * It should start with the offset, but it's possible to omit a 0
 2036	 * offset in the asm, so 0(%rax) is the same as (%rax).
 2037	 *
 2038	 * However, it can also start with a segment selector register like
 2039	 * %gs:0x18(%rbx). In that case the segment part should be skipped.
2040 */
2041 if (*str == arch->objdump.register_char) {
2042 if (arch__is(arch, "x86")) {
2043 /* FIXME: Handle other segment registers */
2044 if (!strncmp(str, "%gs:", 4))
2045 op_loc->segment = INSN_SEG_X86_GS;
2046 }
2047
2048 while (*str && !isdigit(*str) &&
2049 *str != arch->objdump.memory_ref_char)
2050 str++;
2051 }
2052
2053 op_loc->offset = strtol(str, &p, 0);
2054
2055 p = strchr(p, arch->objdump.register_char);
2056 if (p == NULL)
2057 return -1;
2058
2059 regname = strdup(p);
2060 if (regname == NULL)
2061 return -1;
2062
2063 op_loc->reg1 = get_dwarf_regnum(regname, 0);
2064 free(regname);
2065
2066 /* Get the second register */
2067 if (op_loc->multi_regs) {
2068 p = strchr(p + 1, arch->objdump.register_char);
2069 if (p == NULL)
2070 return -1;
2071
2072 regname = strdup(p);
2073 if (regname == NULL)
2074 return -1;
2075
2076 op_loc->reg2 = get_dwarf_regnum(regname, 0);
2077 free(regname);
2078 }
2079 return 0;
2080}
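
/*
 * Worked examples for the parsing above (x86 AT&T syntax, assuming
 * register_char == '%' and memory_ref_char == '('; register numbers come
 * from get_dwarf_regnum()):
 *
 *	"0x18(%rbx)"        -> offset = 0x18, reg1 = regnum of %rbx
 *	"(%rax)"            -> offset = 0,    reg1 = regnum of %rax
 *	"%gs:0x18(%rbx)"    -> segment = INSN_SEG_X86_GS, offset = 0x18,
 *	                       reg1 = regnum of %rbx
 *	"-0x8(%rbp,%rcx,8)" -> offset = -0x8, reg1 = regnum of %rbp and,
 *	                       if multi_regs is set, reg2 = regnum of %rcx
 */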
2081
2082/**
2083 * annotate_get_insn_location - Get location of instruction
2084 * @arch: the architecture info
2085 * @dl: the target instruction
2086 * @loc: a buffer to save the data
2087 *
2088 * Get detailed location info (register and offset) for the instruction.
2089 * It records both the source and target operands and whether each
2090 * accesses a memory location. The offset field is meaningful only when
2091 * the corresponding mem flag is set. The reg2 field is meaningful only
2092 * when the multi_regs flag is set.
2093 *
2094 * Some examples on x86:
2095 *
2096 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0
2097 * # dst_reg1 = rcx, dst_mem = 0
2098 *
2099 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
2100 * # dst_reg1 = r8, dst_mem = 0
2101 *
2102 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2103 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2104 * # dst_multi_regs = 1, dst_offset = 8
2105 */
2106int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
2107 struct annotated_insn_loc *loc)
2108{
2109 struct ins_operands *ops;
2110 struct annotated_op_loc *op_loc;
2111 int i;
2112
2113 if (ins__is_lock(&dl->ins))
2114 ops = dl->ops.locked.ops;
2115 else
2116 ops = &dl->ops;
2117
2118 if (ops == NULL)
2119 return -1;
2120
2121 memset(loc, 0, sizeof(*loc));
2122
2123 for_each_insn_op_loc(loc, i, op_loc) {
2124 const char *insn_str = ops->source.raw;
2125 bool multi_regs = ops->source.multi_regs;
2126
2127 if (i == INSN_OP_TARGET) {
2128 insn_str = ops->target.raw;
2129 multi_regs = ops->target.multi_regs;
2130 }
2131
2132 /* Invalidate the register by default */
2133 op_loc->reg1 = -1;
2134 op_loc->reg2 = -1;
2135
2136 if (insn_str == NULL)
2137 continue;
2138
2139 if (strchr(insn_str, arch->objdump.memory_ref_char)) {
2140 op_loc->mem_ref = true;
2141 op_loc->multi_regs = multi_regs;
2142 extract_reg_offset(arch, insn_str, op_loc);
2143 } else {
2144 char *s, *p = NULL;
2145
2146 if (arch__is(arch, "x86")) {
2147 /* FIXME: Handle other segment registers */
2148 if (!strncmp(insn_str, "%gs:", 4)) {
2149 op_loc->segment = INSN_SEG_X86_GS;
2150 op_loc->offset = strtol(insn_str + 4,
2151 &p, 0);
2152 if (p && p != insn_str + 4)
2153 op_loc->imm = true;
2154 continue;
2155 }
2156 }
2157
2158 s = strdup(insn_str);
2159 if (s == NULL)
2160 return -1;
2161
2162 if (*s == arch->objdump.register_char)
2163 op_loc->reg1 = get_dwarf_regnum(s, 0);
2164 else if (*s == arch->objdump.imm_char) {
2165 op_loc->offset = strtol(s + 1, &p, 0);
2166 if (p && p != s + 1)
2167 op_loc->imm = true;
2168 }
2169 free(s);
2170 }
2171 }
2172
2173 return 0;
2174}
2175
2176static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
2177 bool allow_update)
2178{
2179 struct disasm_line *dl;
2180 struct annotation *notes;
2181
2182 notes = symbol__annotation(sym);
2183
2184 list_for_each_entry(dl, &notes->src->source, al.node) {
2185 if (dl->al.offset == -1)
2186 continue;
2187
2188 if (sym->start + dl->al.offset == ip) {
2189 /*
2190 * llvm-objdump places "lock" on a separate line and
2191 * in that case, we want to get the next line.
2192 */
2193 if (ins__is_lock(&dl->ins) &&
2194 *dl->ops.raw == '\0' && allow_update) {
2195 ip++;
2196 continue;
2197 }
2198 return dl;
2199 }
2200 }
2201 return NULL;
2202}
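
/*
 * Example of the "lock" special case above (illustrative offsets):
 * llvm-objdump may emit
 *
 *	1234:	lock
 *	1235:	cmpxchg %rsi,(%rdi)
 *
 * A sample at offset 0x1234 would match the bare "lock" line, so with
 * allow_update the lookup moves on to the following line, which carries
 * the actual operands.
 */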
2203
2204static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2205 const char *name)
2206{
2207 struct annotated_item_stat *istat;
2208
2209 list_for_each_entry(istat, head, list) {
2210 if (!strcmp(istat->name, name))
2211 return istat;
2212 }
2213
2214 istat = zalloc(sizeof(*istat));
2215 if (istat == NULL)
2216 return NULL;
2217
2218 istat->name = strdup(name);
2219 if (istat->name == NULL) {
2220 free(istat);
2221 return NULL;
2222 }
2223
2224 list_add_tail(&istat->list, head);
2225 return istat;
2226}
2227
2228static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
2229{
2230 if (arch__is(arch, "x86")) {
2231 if (!strncmp(dl->ins.name, "push", 4) ||
2232 !strncmp(dl->ins.name, "pop", 3) ||
2233 !strncmp(dl->ins.name, "ret", 3))
2234 return true;
2235 }
2236
2237 return false;
2238}
2239
2240static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
2241{
2242 /* On x86_64, %gs:40 is used for stack canary */
2243 if (arch__is(arch, "x86")) {
2244 if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2245 loc->offset == 40)
2246 return true;
2247 }
2248
2249 return false;
2250}
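
/*
 * Example: on x86_64 the kernel stack protector typically loads the canary
 * with something like
 *
 *	mov %gs:0x28,%rax	# 0x28 == 40
 *
 * which annotate_get_insn_location() records as segment = INSN_SEG_X86_GS,
 * imm = true and offset = 40, matching the check above.
 */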
2251
2252static struct disasm_line *
2253annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
2254{
2255 struct list_head *sources = &notes->src->source;
2256 struct disasm_line *prev;
2257
2258 if (curr == list_first_entry(sources, struct disasm_line, al.node))
2259 return NULL;
2260
2261 prev = list_prev_entry(curr, al.node);
2262 while (prev->al.offset == -1 &&
2263 prev != list_first_entry(sources, struct disasm_line, al.node))
2264 prev = list_prev_entry(prev, al.node);
2265
2266 if (prev->al.offset == -1)
2267 return NULL;
2268
2269 return prev;
2270}
2271
2272static struct disasm_line *
2273annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
2274{
2275 struct list_head *sources = &notes->src->source;
2276 struct disasm_line *next;
2277
2278 if (curr == list_last_entry(sources, struct disasm_line, al.node))
2279 return NULL;
2280
2281 next = list_next_entry(curr, al.node);
2282 while (next->al.offset == -1 &&
2283 next != list_last_entry(sources, struct disasm_line, al.node))
2284 next = list_next_entry(next, al.node);
2285
2286 if (next->al.offset == -1)
2287 return NULL;
2288
2289 return next;
2290}
2291
2292u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
2293 struct disasm_line *dl)
2294{
2295 struct annotation *notes;
2296 struct disasm_line *next;
2297 u64 addr;
2298
2299 notes = symbol__annotation(ms->sym);
2300 /*
2301 * PC-relative addressing starts from the next instruction address,
2302 * but the IP is for the current instruction. Since disasm_line
2303 * doesn't have the instruction size, calculate it using the next
2304 * disasm_line. If it's the last one, we can use the symbol's end
2305 * address directly.
2306 */
2307 next = annotation__next_asm_line(notes, dl);
2308 if (next == NULL)
2309 addr = ms->sym->end + offset;
2310 else
2311 addr = ip + (next->al.offset - dl->al.offset) + offset;
2312
2313 return map__rip_2objdump(ms->map, addr);
2314}
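
/*
 * Worked example (made-up numbers): for
 *
 *	1000:	mov 0x2345(%rip),%rax	# 7-byte instruction
 *	1007:	...
 *
 * with ip at offset 0x1000, the next disasm line is at offset 0x1007 and
 * the operand offset is 0x2345, so the referenced address is
 * ip + (0x1007 - 0x1000) + 0x2345 = ip + 0x234c, which is then converted
 * with map__rip_2objdump().
 */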
2315
2316/**
2317 * hist_entry__get_data_type - find data type for given hist entry
2318 * @he: hist entry
2319 *
2320 * This function first annotates the instruction at @he->ip and extracts
2321 * register and offset info from it. Then it searches the DWARF debug
2322 * info to find the variable and its type information using the address,
2323 * register, and offset.
2324 */
2325struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
2326{
2327 struct map_symbol *ms = &he->ms;
2328 struct evsel *evsel = hists_to_evsel(he->hists);
2329 struct arch *arch;
2330 struct disasm_line *dl;
2331 struct annotated_insn_loc loc;
2332 struct annotated_op_loc *op_loc;
2333 struct annotated_data_type *mem_type;
2334 struct annotated_item_stat *istat;
2335 u64 ip = he->ip;
2336 int i;
2337
2338 ann_data_stat.total++;
2339
2340 if (ms->map == NULL || ms->sym == NULL) {
2341 ann_data_stat.no_sym++;
2342 return NULL;
2343 }
2344
2345 if (!symbol_conf.init_annotation) {
2346 ann_data_stat.no_sym++;
2347 return NULL;
2348 }
2349
2350 /* Make sure it has the disasm of the function */
2351 if (symbol__annotate(ms, evsel, &arch) < 0) {
2352 ann_data_stat.no_insn++;
2353 return NULL;
2354 }
2355
2356 /*
2357 * Get a disasm to extract the location from the insn.
2358 * This is too slow...
2359 */
2360 dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
2361 if (dl == NULL) {
2362 ann_data_stat.no_insn++;
2363 return NULL;
2364 }
2365
2366retry:
2367 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2368 if (istat == NULL) {
2369 ann_data_stat.no_insn++;
2370 return NULL;
2371 }
2372
2373 if (annotate_get_insn_location(arch, dl, &loc) < 0) {
2374 ann_data_stat.no_insn_ops++;
2375 istat->bad++;
2376 return NULL;
2377 }
2378
2379 if (is_stack_operation(arch, dl)) {
2380 istat->good++;
2381 he->mem_type_off = 0;
2382 return &stackop_type;
2383 }
2384
2385 for_each_insn_op_loc(&loc, i, op_loc) {
2386 struct data_loc_info dloc = {
2387 .arch = arch,
2388 .thread = he->thread,
2389 .ms = ms,
2390 /* Recalculate IP for LOCK prefix or insn fusion */
2391 .ip = ms->sym->start + dl->al.offset,
2392 .cpumode = he->cpumode,
2393 .op = op_loc,
2394 };
2395
2396 if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
2397 continue;
2398
2399 /* Recalculate IP because of LOCK prefix or insn fusion */
2400 ip = ms->sym->start + dl->al.offset;
2401
2402 /* PC-relative addressing */
2403 if (op_loc->reg1 == DWARF_REG_PC) {
2404 dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
2405 op_loc->offset, dl);
2406 }
2407
2408 /* Per-CPU access in the kernel - pretend it's PC-relative addressing */
2409 if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
2410 op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
2411 dloc.var_addr = op_loc->offset;
2412 op_loc->reg1 = DWARF_REG_PC;
2413 }
2414
2415 mem_type = find_data_type(&dloc);
2416
2417 if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
2418 istat->good++;
2419 he->mem_type_off = 0;
2420 return &canary_type;
2421 }
2422
2423 if (mem_type)
2424 istat->good++;
2425 else
2426 istat->bad++;
2427
2428 if (symbol_conf.annotate_data_sample) {
2429 annotated_data_type__update_samples(mem_type, evsel,
2430 dloc.type_offset,
2431 he->stat.nr_events,
2432 he->stat.period);
2433 }
2434 he->mem_type_off = dloc.type_offset;
2435 return mem_type;
2436 }
2437
2438 /*
2439 * Some instructions can be fused so that the actual memory access
2440 * comes from the previous instruction.
2441 */
2442 if (dl->al.offset > 0) {
2443 struct annotation *notes;
2444 struct disasm_line *prev_dl;
2445
2446 notes = symbol__annotation(ms->sym);
2447 prev_dl = annotation__prev_asm_line(notes, dl);
2448
2449 if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
2450 dl = prev_dl;
2451 goto retry;
2452 }
2453 }
2454
2455 ann_data_stat.no_mem_ops++;
2456 istat->bad++;
2457 return NULL;
2458}
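
/*
 * Example of the fused-instruction fallback above (illustrative): with
 * macro-fusion a sample on the branch may really belong to the memory
 * access of the preceding instruction, e.g.
 *
 *	cmpl $0x0,0x10(%rdi)
 *	je   <somewhere>
 *
 * If the sample IP points at the "je" and no memory operand is found
 * there, the code retries with the preceding "cmpl" and uses its
 * 0x10(%rdi) operand for the data type lookup.
 */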
2459
2460/* Basic block traversal (BFS) data structure */
2461struct basic_block_data {
2462 struct list_head queue;
2463 struct list_head visited;
2464};
2465
2466/*
2467 * During the traversal, it needs to know the parent block that the current
2468 * block started from. Note that a single basic block can be the parent of
2469 * two child basic blocks (in the case of a conditional jump).
2470 */
2471struct basic_block_link {
2472 struct list_head node;
2473 struct basic_block_link *parent;
2474 struct annotated_basic_block *bb;
2475};
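
/*
 * Example: a conditional jump ends a block and can create two children
 * that share the same parent link:
 *
 *	[BB0 ... jne 1f] -> [BB1: fall-through]  (parent = BB0)
 *	                 -> [BB2: target at 1f]  (parent = BB0)
 *
 * Following the parent pointers back from the block containing the target
 * yields the path that annotate_get_basic_blocks() returns.
 */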
2476
2477/* Check whether any basic block in the list already contains the offset */
2478static bool basic_block_has_offset(struct list_head *head, s64 offset)
2479{
2480 struct basic_block_link *link;
2481
2482 list_for_each_entry(link, head, node) {
2483 s64 begin_offset = link->bb->begin->al.offset;
2484 s64 end_offset = link->bb->end->al.offset;
2485
2486 if (begin_offset <= offset && offset <= end_offset)
2487 return true;
2488 }
2489 return false;
2490}
2491
2492static bool is_new_basic_block(struct basic_block_data *bb_data,
2493 struct disasm_line *dl)
2494{
2495 s64 offset = dl->al.offset;
2496
2497 if (basic_block_has_offset(&bb_data->visited, offset))
2498 return false;
2499 if (basic_block_has_offset(&bb_data->queue, offset))
2500 return false;
2501 return true;
2502}
2503
2504/* Add a basic block starting from dl and link it to the parent */
2505static int add_basic_block(struct basic_block_data *bb_data,
2506 struct basic_block_link *parent,
2507 struct disasm_line *dl)
2508{
2509 struct annotated_basic_block *bb;
2510 struct basic_block_link *link;
2511
2512 if (dl == NULL)
2513 return -1;
2514
2515 if (!is_new_basic_block(bb_data, dl))
2516 return 0;
2517
2518 bb = zalloc(sizeof(*bb));
2519 if (bb == NULL)
2520 return -1;
2521
2522 bb->begin = dl;
2523 bb->end = dl;
2524 INIT_LIST_HEAD(&bb->list);
2525
2526 link = malloc(sizeof(*link));
2527 if (link == NULL) {
2528 free(bb);
2529 return -1;
2530 }
2531
2532 link->bb = bb;
2533 link->parent = parent;
2534 list_add_tail(&link->node, &bb_data->queue);
2535 return 0;
2536}
2537
2538/* Returns true when it finds the target in the current basic block */
2539static bool process_basic_block(struct basic_block_data *bb_data,
2540 struct basic_block_link *link,
2541 struct symbol *sym, u64 target)
2542{
2543 struct disasm_line *dl, *next_dl, *last_dl;
2544 struct annotation *notes = symbol__annotation(sym);
2545 bool found = false;
2546
2547 dl = link->bb->begin;
2548 /* Check if it's already visited */
2549 if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
2550 return false;
2551
2552 last_dl = list_last_entry(&notes->src->source,
2553 struct disasm_line, al.node);
2554 if (last_dl->al.offset == -1)
2555 last_dl = annotation__prev_asm_line(notes, last_dl);
2556
2557 if (last_dl == NULL)
2558 return false;
2559
2560 list_for_each_entry_from(dl, &notes->src->source, al.node) {
2561 /* Skip comment or debug info line */
2562 if (dl->al.offset == -1)
2563 continue;
2564 /* Found the target instruction */
2565 if (sym->start + dl->al.offset == target) {
2566 found = true;
2567 break;
2568 }
2569 /* End of the function, finish the block */
2570 if (dl == last_dl)
2571 break;
2572 /* 'return' instruction finishes the block */
2573 if (ins__is_ret(&dl->ins))
2574 break;
2575 /* normal instructions are part of the basic block */
2576 if (!ins__is_jump(&dl->ins))
2577 continue;
2578 /* jump to a different function, tail call or return */
2579 if (dl->ops.target.outside)
2580 break;
2581 /* jump instruction creates new basic block(s) */
2582 next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
2583 /*allow_update=*/false);
2584 if (next_dl)
2585 add_basic_block(bb_data, link, next_dl);
2586
2587 /*
2588 * FIXME: determine conditional jumps properly.
2589 * Conditional jumps create another basic block with the
2590 * next disasm line.
2591 */
2592 if (!strstr(dl->ins.name, "jmp")) {
2593 next_dl = annotation__next_asm_line(notes, dl);
2594 if (next_dl)
2595 add_basic_block(bb_data, link, next_dl);
2596 }
2597 break;
2598
2599 }
2600 link->bb->end = dl;
2601 return found;
2602}
2603
2604/*
2605 * Once the target basic block is found, build a proper linked list of basic
2606 * blocks by following the parent link recursively.
2607 */
2608static void link_found_basic_blocks(struct basic_block_link *link,
2609 struct list_head *head)
2610{
2611 while (link) {
2612 struct basic_block_link *parent = link->parent;
2613
2614 list_move(&link->bb->list, head);
2615 list_del(&link->node);
2616 free(link);
2617
2618 link = parent;
2619 }
2620}
2621
2622static void delete_basic_blocks(struct basic_block_data *bb_data)
2623{
2624 struct basic_block_link *link, *tmp;
2625
2626 list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
2627 list_del(&link->node);
2628 zfree(&link->bb);
2629 free(link);
2630 }
2631
2632 list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
2633 list_del(&link->node);
2634 zfree(&link->bb);
2635 free(link);
2636 }
2637}
2638
2639/**
2640 * annotate_get_basic_blocks - Get basic blocks for given address range
2641 * @sym: symbol to annotate
2642 * @src: source address
2643 * @dst: destination address
2644 * @head: list head to save basic blocks
2645 *
2646 * This function traverses disasm_lines from @src to @dst and saves them in a
2647 * list of annotated_basic_block at @head. It uses BFS to find the shortest
2648 * path between the two. The basic_block_link maintains parent links so
2649 * that the list of blocks can be built from the start.
2650 */
2651int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
2652 struct list_head *head)
2653{
2654 struct basic_block_data bb_data = {
2655 .queue = LIST_HEAD_INIT(bb_data.queue),
2656 .visited = LIST_HEAD_INIT(bb_data.visited),
2657 };
2658 struct basic_block_link *link;
2659 struct disasm_line *dl;
2660 int ret = -1;
2661
2662 dl = find_disasm_line(sym, src, /*allow_update=*/false);
2663 if (dl == NULL)
2664 return -1;
2665
2666 if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
2667 return -1;
2668
2669 /* Find shortest path from src to dst using BFS */
2670 while (!list_empty(&bb_data.queue)) {
2671 link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
2672
2673 if (process_basic_block(&bb_data, link, sym, dst)) {
2674 link_found_basic_blocks(link, head);
2675 ret = 0;
2676 break;
2677 }
2678 list_move(&link->node, &bb_data.visited);
2679 }
2680 delete_basic_blocks(&bb_data);
2681 return ret;
2682}
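
/*
 * A minimal usage sketch (hypothetical caller, error handling trimmed):
 *
 *	LIST_HEAD(blocks);
 *	struct annotated_basic_block *bb;
 *
 *	if (annotate_get_basic_blocks(sym, branch_from, branch_to, &blocks) == 0) {
 *		list_for_each_entry(bb, &blocks, list)
 *			pr_debug("BB %ld-%ld\n", (long)bb->begin->al.offset,
 *				 (long)bb->end->al.offset);
 *	}
 *
 * On success the annotated_basic_block entries are moved onto @head and
 * are owned by the caller afterwards.
 */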