// SPDX-License-Identifier: GPL-2.0
#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "map.h"
#include "thread.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>
#include "srcline.h"
#include "namespaces.h"
#include "unwind.h"

static void __maps__insert(struct maps *maps, struct map *map);

static inline int is_anon_memory(const char *filename, u32 flags)
{
	return flags & MAP_HUGETLB ||
	       !strcmp(filename, "//anon") ||
	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5) ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length +
			     strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%s/platforms/%s/arch-%s/usr/lib/%s",
			 ndk, app, arch, libname);

		return true;
	}
	return false;
}
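
/*
 * Illustrative example (added; the values are hypothetical): with
 * APP_ABI=armeabi and APK_PATH=/tmp/apk in the environment, a filename such
 * as "/data/app-lib/com.example-1/libfoo.so" is rewritten above to
 * "/tmp/apk/libs/armeabi//libfoo.so" (libname keeps the leading '/' returned
 * by strrchr(), hence the doubled slash).
 */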

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->start = start;
	map->end = end;
	map->pgoff = pgoff;
	map->reloc = 0;
	map->dso = dso__get(dso);
	map->map_ip = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups = NULL;
	map->erange_warned = false;
	refcount_set(&map->refcnt, 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename, flags);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;
		nsi = nsinfo__get(thread->nsinfo);

		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", nsi->pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* The vdso maps are always on the host and not the
			 * container. Ensure that we don't use setns to look
			 * them up.
			 */
			nnsi = nsinfo__copy(nsi);
			if (nnsi) {
				nsinfo__put(nsi);
				nnsi->need_setns = false;
				nsi = nnsi;
			}
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (!(prot & PROT_EXEC))
				dso__set_loaded(dso);
		}
		dso->nsinfo = nsi;
		dso__put(dso);
	}
	return map;
out_delete:
	nsinfo__put(nsi);
	free(map);
	return NULL;
}
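
/*
 * Usage sketch (added for illustration, not part of the original file):
 * the mmap event handlers build a map from the event fields and hand it to
 * the thread's map groups, dropping their own reference afterwards. The
 * field names below are only indicative:
 *
 *	struct map *map = map__new(machine, event->start, event->len,
 *				   event->pgoff, event->maj, event->min,
 *				   event->ino, event->ino_generation,
 *				   event->prot, event->flags,
 *				   event->filename, thread);
 *	if (map != NULL) {
 *		map_groups__insert(thread->mg, map);
 *		map__put(map);
 *	}
 */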

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, start, 0, 0, dso);
	}

	return map;
}
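
/*
 * Note: for kernel DSOs the calloc() above reserves room for a struct kmap
 * directly behind the struct map; __map__kmap() below relies on that layout
 * when it returns (struct kmap *)(map + 1).
 */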

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return machine__kernel_map(map->groups->machine) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map->dso);
}

static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		map__delete(map);
}

void map__fixup_start(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso))
		return 0;

	nr = dso__load(map->dso, map);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
				 "Restart the long running apps that use it!\n",
				 (int)real_len, name);
		} else {
			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map->dso, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	if (map__load(map) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso))
		dso__sort_by_name(map->dso);

	return dso__find_symbol_by_name(map->dso, name);
}

struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		refcount_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
	if (map == NULL)
		return SRCLINE_UNKNOWN;
	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	int ret = 0;

	if (map && map->dso) {
		char *srcline = map__srcline(map, addr, NULL);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);

		if (kernel_map)
			map = kernel_map;
	}

	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}
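
/*
 * Worked example (illustrative, assuming the default map__unmap_ip() from
 * map.h, i.e. ip + map->start - map->pgoff): for a non-rel dso with
 * adjust_symbols set, start = 0x400000, pgoff = 0 and reloc = 0, a
 * dso-relative rip of 0x1234 comes out as 0x1234 + 0x400000 - 0 - 0 =
 * 0x401234, which is what "objdump --start-address=" expects.
 */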

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}
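
/*
 * Continuing the illustrative numbers above: map__objdump_2mem(map, 0x401234)
 * returns 0x401234 + reloc = 0x401234, map->map_ip() takes that back to
 * 0x1234, and map__rip_2objdump() turns it into 0x401234 again, so the
 * round-trip check suggested in the kernel-doc comment above holds.
 */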

static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	init_rwsem(&maps->lock);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	maps__init(&mg->maps);
	mg->machine = machine;
	refcount_set(&mg->refcnt, 1);
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(&maps->lock);
	__maps__purge(maps);
	up_write(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	maps__exit(&mg->maps);
}

bool map_groups__empty(struct map_groups *mg)
{
	return !maps__first(&mg->maps);
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && refcount_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       u64 addr, struct map **mapp)
{
	struct map *map = map_groups__find(mg, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr));
	}

	return NULL;
}

static bool map__contains_symbol(struct map *map, struct symbol *sym)
{
	u64 ip = map->unmap_ip(map, sym->start);

	return ip >= map->start && ip < map->end;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp)
{
	struct symbol *sym;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	up_read(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       const char *name,
					       struct map **mapp)
{
	return maps__find_symbol_by_name(&mg->maps, name, mapp);
}

int map_groups__find_ams(struct addr_map_symbol *ams)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(&maps->lock);

	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	return maps__fprintf(&mg->maps, fp);
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps, map);
	map->groups = mg;
}

static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(&maps->lock);

	root = &maps->entries;

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		if (pos->end > map->start) {
			first = next;
			if (pos->start <= map->start)
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (pos->start >= map->end)
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map->dso->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	up_write(&maps->lock);
	return err;
}
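
/*
 * Illustrative example: if the tree already holds a map covering
 * [0x1000, 0x4000) and a new map for [0x2000, 0x3000) arrives, the old entry
 * is erased and replaced by two clones of it: "before" spanning
 * [0x1000, 0x2000) (before->end = map->start) and "after" spanning
 * [0x3000, 0x4000) (after->start = map->end), leaving the middle range free
 * for the caller to insert the new map.
 */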

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps, map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread, struct map_groups *parent)
{
	struct map_groups *mg = thread->mg;
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;

		err = unwind__prepare_access(thread, new, NULL);
		if (err)
			goto out_unlock;

		map_groups__insert(mg, new);
		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(&maps->lock);
	return err;
}

static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__insert(maps, map);
	up_write(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__remove(maps, map);
	up_write(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node **p, *parent = NULL;
	struct map *m;

	down_read(&maps->lock);

	p = &maps->entries.rb_node;
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip >= m->end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(&maps->lock);
	return m;
}
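
/*
 * Note: the tree is keyed by map->start (see __maps__insert() above), so the
 * walk in maps__find() returns the map whose [start, end) range contains
 * @ip, or NULL when no mapping covers it.
 */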

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *__map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel)
		return NULL;
	return (struct kmap *)(map + 1);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}