#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
#include "thread.h"
#include "strlist.h"
#include "vdso.h"

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};

static inline int is_anon_memory(const char *filename)
{
	return strcmp(filename, "//anon") == 0;
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strcmp(filename, "[stack]") ||
	       !strcmp(filename, "[heap]");
}

void map__init(struct map *self, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	self->type = type;
	self->start = start;
	self->end = end;
	self->pgoff = pgoff;
	self->dso = dso;
	self->map_ip = map__map_ip;
	self->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&self->rb_node);
	self->groups = NULL;
	self->referenced = false;
	self->erange_warned = false;
}

struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
		     u64 pgoff, u32 pid, char *filename,
		     enum map_type type)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso;

		anon = is_anon_memory(filename);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (vdso) {
			pgoff = 0;
			dso = vdso__dso_findnew(dsos__list);
		} else
			dso = __dsos__findnew(dsos__list, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(self, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			self->map_ip = self->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (no_dso)
				dso__set_loaded(dso, self->type);
		}
	}
	return self;
out_delete:
	free(self);
	return NULL;
}
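
/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * PERF_RECORD_MMAP handling typically builds a map straight from the event
 * fields and hands it to the owning thread; "event", "machine" and "thread"
 * are assumed names here.
 *
 *	struct map *map = map__new(&machine->user_dsos, event->mmap.start,
 *				   event->mmap.len, event->mmap.pgoff,
 *				   event->mmap.pid, event->mmap.filename,
 *				   MAP__FUNCTION);
 *	if (map != NULL)
 *		thread__insert_map(thread, map);
 */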

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where we'll only know where it starts
 * and ends after we load all the symbols.
 */
struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
	struct map *map = calloc(1, (sizeof(*map) +
				    (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols.
		 */
		map__init(map, type, start, 0, 0, dso);
	}

	return map;
}
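
/*
 * Intended calling sequence (editor's sketch): since ->end is unknown until
 * the symbols are in, a caller would create the map, load the symbols and
 * only then fix up the address range; "kernel_dso" is an assumed name.
 *
 *	struct map *map = map__new2(start, kernel_dso, MAP__FUNCTION);
 *	if (map != NULL && map__load(map, NULL) >= 0) {
 *		map__fixup_start(map);
 *		map__fixup_end(map);
 *	}
 */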

void map__delete(struct map *self)
{
	free(self);
}

void map__fixup_start(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->start = sym->start;
	}
}

void map__fixup_end(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *self, symbol_filter_t filter)
{
	const char *name = self->dso->long_name;
	int nr;

	if (dso__loaded(self->dso, self->type))
		return 0;

	nr = dso__load(self->dso, self, filter);
	if (nr < 0) {
		if (self->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(self->dso->build_id,
					  sizeof(self->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				   "Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}
	/*
	 * Only applies to the kernel, as its symtabs aren't relative like the
	 * module ones.
	 */
	if (self->dso->kernel)
		map__reloc_vmlinux(self);

	return 0;
}
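
/*
 * Worked example for the DSO__DELETED check above (editor's note): with
 * name = "libfoo.so (deleted)", strlen(name) is 19 and sizeof(DSO__DELETED)
 * is 10, so real_len is 9 and name + real_len + 1 points at "(deleted)";
 * the warning then prints only the first real_len characters, "libfoo.so".
 */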

struct symbol *map__find_symbol(struct map *self, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	return dso__find_symbol(self->dso, self->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
					symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(self->dso, self->type))
		dso__sort_by_name(self->dso, self->type);

	return dso__find_symbol_by_name(self->dso, self->type, name);
}

struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else if (map->dso->name)
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	u64 addr = map->dso->adjust_symbols ?
			map->unmap_ip(map, rip) :	/* RIP -> IP */
			rip;
	return addr;
}
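
/*
 * Example (editor's sketch, assuming the default map__unmap_ip from map.h,
 * i.e. ip + map->start - map->pgoff): for an ET_EXEC binary mapped at
 * start = 0x400000 with pgoff = 0 and adjust_symbols set, a dso-relative
 * rip of 0x1234 becomes the absolute 0x401234 that objdump reports, while
 * for ET_DYN objects the rip is returned unchanged.
 */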

void map_groups__init(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		mg->maps[i] = RB_ROOT;
		INIT_LIST_HEAD(&mg->removed_maps[i]);
	}
	mg->machine = NULL;
}

static void maps__delete(struct rb_root *maps)
{
	struct rb_node *next = rb_first(maps);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, maps);
		map__delete(pos);
	}
}

static void maps__delete_removed(struct list_head *maps)
{
	struct map *pos, *n;

	list_for_each_entry_safe(pos, n, maps, node) {
		list_del(&pos->node);
		map__delete(pos);
	}
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__delete(&mg->maps[i]);
		maps__delete_removed(&mg->removed_maps[i]);
	}
}

void map_groups__flush(struct map_groups *mg)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &mg->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);
			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
		}
	}
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	if (map != NULL) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}
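
/*
 * Typical lookup (editor's illustration): resolving an absolute sample
 * address to a symbol in one call; "machine" and "addr" are assumed names.
 *
 *	struct map *map;
 *	struct symbol *sym = map_groups__find_symbol(&machine->kmaps,
 *						     MAP__FUNCTION, addr,
 *						     &map, NULL);
 */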

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		return sym;
	}

	return NULL;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg,
				  enum map_type type, int verbose, FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
	return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
						 enum map_type type,
						 int verbose, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &mg->removed_maps[type], node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
					       int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   int verbose, FILE *fp)
{
	struct rb_root *root = &mg->maps[map->type];
	struct rb_node *next = rb_first(root);
	int err = 0;

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			before->end = map->start - 1;
			map_groups__insert(mg, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			after->start = map->end + 1;
			map_groups__insert(mg, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
move_map:
		/*
		 * If we have references, just move them to a separate list.
		 */
		if (pos->referenced)
			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
		else
			map__delete(pos);

		if (err)
			return err;
	}

	return 0;
}
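
/*
 * Worked example for the splitting above (editor's note): if an existing
 * map covers [0x1000, 0x5000] and a new map arrives for [0x2000, 0x3000],
 * the old map is removed and replaced by a "before" clone ending at 0x1fff
 * and an "after" clone starting at 0x3001, so only the overlapped range is
 * handed over to the new map.
 */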

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;
	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (new == NULL)
			return -ENOMEM;
		map_groups__insert(mg, new);
	}
	return 0;
}

static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void map__reloc_vmlinux(struct map *self)
{
	struct kmap *kmap = map__kmap(self);
	s64 reloc;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
		return;

	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
		 kmap->ref_reloc_sym->addr);

	if (!reloc)
		return;

	self->map_ip = map__reloc_map_ip;
	self->unmap_ip = map__reloc_unmap_ip;
	self->pgoff = reloc;
}
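
/*
 * Example of the relocation math (editor's sketch): if the reference
 * symbol, typically "_text", is at 0xffffffff81000000 in the vmlinux image
 * (unrelocated_addr) but kallsyms reports it at 0xffffffff81200000 (addr),
 * then reloc = -0x200000 ends up in ->pgoff and map__reloc_map_ip() /
 * map__reloc_unmap_ip() add/subtract it when translating addresses.
 */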

void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

void maps__remove(struct rb_root *self, struct map *map)
{
	rb_erase(&map->rb_node, self);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}
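
/*
 * Editor's note: maps__insert() keys the rbtree on ->start alone, while
 * maps__find() treats each node as the inclusive range [start, end], e.g.
 *
 *	maps__insert(&mg->maps[MAP__FUNCTION], map);
 *	struct map *hit = maps__find(&mg->maps[MAP__FUNCTION], ip);
 *
 * returns "map" for any ip with map->start <= ip <= map->end ("mg", "map"
 * and "ip" are only illustrative names).
 */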

int machine__init(struct machine *self, const char *root_dir, pid_t pid)
{
	map_groups__init(&self->kmaps);
	RB_CLEAR_NODE(&self->rb_node);
	INIT_LIST_HEAD(&self->user_dsos);
	INIT_LIST_HEAD(&self->kernel_dsos);

	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->last_match = NULL;

	self->kmaps.machine = self;
	self->pid = pid;
	self->root_dir = strdup(root_dir);
	if (self->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(self, pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm);
	}

	return 0;
}

static void dsos__delete(struct list_head *self)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, self, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__exit(struct machine *self)
{
	map_groups__exit(&self->kmaps);
	dsos__delete(&self->user_dsos);
	dsos__delete(&self->kernel_dsos);
	free(self->root_dir);
	self->root_dir = NULL;
}

void machine__delete(struct machine *self)
{
	machine__exit(self);
	free(self);
}

struct machine *machines__add(struct rb_root *self, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (!machine)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, self);

	return machine;
}

struct machine *machines__find(struct rb_root *self, pid_t pid)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct rb_root *self, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(self, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(self, pid, root_dir);

out:
	return machine;
}
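
/*
 * Example of the guest lookup above (editor's illustration): with
 * symbol_conf.guestmount = "/tmp/guests" and pid = 1234, the machine's
 * root_dir becomes "/tmp/guests/1234" provided that path is readable;
 * otherwise the failure is reported once per path and NULL is returned.
 */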

void machines__process(struct rb_root *self, machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *self, char *bf, size_t size)
{
	if (machine__is_host(self))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(self))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);

	return bf;
}
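
/*
 * Editor's note on the names produced above: the host kernel map is
 * reported as "[kernel.kallsyms]", the default guest as
 * "[guest.kernel.kallsyms]" and a specific guest, e.g. pid 1234, as
 * "[guest.kernel.kallsyms.1234]".
 */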

void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	for (node = rb_first(machines); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}