at v5.4-rc2 23 kB view raw
1// SPDX-License-Identifier: GPL-2.0 2#include "symbol.h" 3#include <errno.h> 4#include <inttypes.h> 5#include <limits.h> 6#include <stdlib.h> 7#include <string.h> 8#include <stdio.h> 9#include <unistd.h> 10#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 11#include "dso.h" 12#include "map.h" 13#include "map_symbol.h" 14#include "thread.h" 15#include "vdso.h" 16#include "build-id.h" 17#include "debug.h" 18#include "machine.h" 19#include <linux/string.h> 20#include <linux/zalloc.h> 21#include "srcline.h" 22#include "namespaces.h" 23#include "unwind.h" 24#include "srccode.h" 25#include "ui/ui.h" 26 27static void __maps__insert(struct maps *maps, struct map *map); 28static void __maps__insert_name(struct maps *maps, struct map *map); 29 30static inline int is_anon_memory(const char *filename, u32 flags) 31{ 32 return flags & MAP_HUGETLB || 33 !strcmp(filename, "//anon") || 34 !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) || 35 !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1); 36} 37 38static inline int is_no_dso_memory(const char *filename) 39{ 40 return !strncmp(filename, "[stack", 6) || 41 !strncmp(filename, "/SYSV",5) || 42 !strcmp(filename, "[heap]"); 43} 44 45static inline int is_android_lib(const char *filename) 46{ 47 return !strncmp(filename, "/data/app-lib", 13) || 48 !strncmp(filename, "/system/lib", 11); 49} 50 51static inline bool replace_android_lib(const char *filename, char *newfilename) 52{ 53 const char *libname; 54 char *app_abi; 55 size_t app_abi_length, new_length; 56 size_t lib_length = 0; 57 58 libname = strrchr(filename, '/'); 59 if (libname) 60 lib_length = strlen(libname); 61 62 app_abi = getenv("APP_ABI"); 63 if (!app_abi) 64 return false; 65 66 app_abi_length = strlen(app_abi); 67 68 if (!strncmp(filename, "/data/app-lib", 13)) { 69 char *apk_path; 70 71 if (!app_abi_length) 72 return false; 73 74 new_length = 7 + app_abi_length + lib_length; 75 76 apk_path = getenv("APK_PATH"); 77 if (apk_path) { 78 new_length += strlen(apk_path) + 1; 79 if (new_length > PATH_MAX) 80 return false; 81 snprintf(newfilename, new_length, 82 "%s/libs/%s/%s", apk_path, app_abi, libname); 83 } else { 84 if (new_length > PATH_MAX) 85 return false; 86 snprintf(newfilename, new_length, 87 "libs/%s/%s", app_abi, libname); 88 } 89 return true; 90 } 91 92 if (!strncmp(filename, "/system/lib/", 11)) { 93 char *ndk, *app; 94 const char *arch; 95 size_t ndk_length; 96 size_t app_length; 97 98 ndk = getenv("NDK_ROOT"); 99 app = getenv("APP_PLATFORM"); 100 101 if (!(ndk && app)) 102 return false; 103 104 ndk_length = strlen(ndk); 105 app_length = strlen(app); 106 107 if (!(ndk_length && app_length && app_abi_length)) 108 return false; 109 110 arch = !strncmp(app_abi, "arm", 3) ? "arm" : 111 !strncmp(app_abi, "mips", 4) ? "mips" : 112 !strncmp(app_abi, "x86", 3) ? "x86" : NULL; 113 114 if (!arch) 115 return false; 116 117 new_length = 27 + ndk_length + 118 app_length + lib_length 119 + strlen(arch); 120 121 if (new_length > PATH_MAX) 122 return false; 123 snprintf(newfilename, new_length, 124 "%s/platforms/%s/arch-%s/usr/lib/%s", 125 ndk, app, arch, libname); 126 127 return true; 128 } 129 return false; 130} 131 132void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) 133{ 134 map->start = start; 135 map->end = end; 136 map->pgoff = pgoff; 137 map->reloc = 0; 138 map->dso = dso__get(dso); 139 map->map_ip = map__map_ip; 140 map->unmap_ip = map__unmap_ip; 141 RB_CLEAR_NODE(&map->rb_node); 142 map->groups = NULL; 143 map->erange_warned = false; 144 refcount_set(&map->refcnt, 1); 145} 146 147struct map *map__new(struct machine *machine, u64 start, u64 len, 148 u64 pgoff, u32 d_maj, u32 d_min, u64 ino, 149 u64 ino_gen, u32 prot, u32 flags, char *filename, 150 struct thread *thread) 151{ 152 struct map *map = malloc(sizeof(*map)); 153 struct nsinfo *nsi = NULL; 154 struct nsinfo *nnsi; 155 156 if (map != NULL) { 157 char newfilename[PATH_MAX]; 158 struct dso *dso; 159 int anon, no_dso, vdso, android; 160 161 android = is_android_lib(filename); 162 anon = is_anon_memory(filename, flags); 163 vdso = is_vdso_map(filename); 164 no_dso = is_no_dso_memory(filename); 165 166 map->maj = d_maj; 167 map->min = d_min; 168 map->ino = ino; 169 map->ino_generation = ino_gen; 170 map->prot = prot; 171 map->flags = flags; 172 nsi = nsinfo__get(thread->nsinfo); 173 174 if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) { 175 snprintf(newfilename, sizeof(newfilename), 176 "/tmp/perf-%d.map", nsi->pid); 177 filename = newfilename; 178 } 179 180 if (android) { 181 if (replace_android_lib(filename, newfilename)) 182 filename = newfilename; 183 } 184 185 if (vdso) { 186 /* The vdso maps are always on the host and not the 187 * container. Ensure that we don't use setns to look 188 * them up. 189 */ 190 nnsi = nsinfo__copy(nsi); 191 if (nnsi) { 192 nsinfo__put(nsi); 193 nnsi->need_setns = false; 194 nsi = nnsi; 195 } 196 pgoff = 0; 197 dso = machine__findnew_vdso(machine, thread); 198 } else 199 dso = machine__findnew_dso(machine, filename); 200 201 if (dso == NULL) 202 goto out_delete; 203 204 map__init(map, start, start + len, pgoff, dso); 205 206 if (anon || no_dso) { 207 map->map_ip = map->unmap_ip = identity__map_ip; 208 209 /* 210 * Set memory without DSO as loaded. All map__find_* 211 * functions still return NULL, and we avoid the 212 * unnecessary map__load warning. 213 */ 214 if (!(prot & PROT_EXEC)) 215 dso__set_loaded(dso); 216 } 217 dso->nsinfo = nsi; 218 dso__put(dso); 219 } 220 return map; 221out_delete: 222 nsinfo__put(nsi); 223 free(map); 224 return NULL; 225} 226 227/* 228 * Constructor variant for modules (where we know from /proc/modules where 229 * they are loaded) and for vmlinux, where only after we load all the 230 * symbols we'll know where it starts and ends. 231 */ 232struct map *map__new2(u64 start, struct dso *dso) 233{ 234 struct map *map = calloc(1, (sizeof(*map) + 235 (dso->kernel ? sizeof(struct kmap) : 0))); 236 if (map != NULL) { 237 /* 238 * ->end will be filled after we load all the symbols 239 */ 240 map__init(map, start, 0, 0, dso); 241 } 242 243 return map; 244} 245 246/* 247 * Use this and __map__is_kmodule() for map instances that are in 248 * machine->kmaps, and thus have map->groups->machine all properly set, to 249 * disambiguate between the kernel and modules. 250 * 251 * When the need arises, introduce map__is_{kernel,kmodule)() that 252 * checks (map->groups != NULL && map->groups->machine != NULL && 253 * map->dso->kernel) before calling __map__is_{kernel,kmodule}()) 254 */ 255bool __map__is_kernel(const struct map *map) 256{ 257 return machine__kernel_map(map->groups->machine) == map; 258} 259 260bool __map__is_extra_kernel_map(const struct map *map) 261{ 262 struct kmap *kmap = __map__kmap((struct map *)map); 263 264 return kmap && kmap->name[0]; 265} 266 267bool __map__is_bpf_prog(const struct map *map) 268{ 269 const char *name; 270 271 if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) 272 return true; 273 274 /* 275 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have 276 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can 277 * guess the type based on name. 278 */ 279 name = map->dso->short_name; 280 return name && (strstr(name, "bpf_prog_") == name); 281} 282 283bool map__has_symbols(const struct map *map) 284{ 285 return dso__has_symbols(map->dso); 286} 287 288static void map__exit(struct map *map) 289{ 290 BUG_ON(!RB_EMPTY_NODE(&map->rb_node)); 291 dso__zput(map->dso); 292} 293 294void map__delete(struct map *map) 295{ 296 map__exit(map); 297 free(map); 298} 299 300void map__put(struct map *map) 301{ 302 if (map && refcount_dec_and_test(&map->refcnt)) 303 map__delete(map); 304} 305 306void map__fixup_start(struct map *map) 307{ 308 struct rb_root_cached *symbols = &map->dso->symbols; 309 struct rb_node *nd = rb_first_cached(symbols); 310 if (nd != NULL) { 311 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 312 map->start = sym->start; 313 } 314} 315 316void map__fixup_end(struct map *map) 317{ 318 struct rb_root_cached *symbols = &map->dso->symbols; 319 struct rb_node *nd = rb_last(&symbols->rb_root); 320 if (nd != NULL) { 321 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 322 map->end = sym->end; 323 } 324} 325 326#define DSO__DELETED "(deleted)" 327 328int map__load(struct map *map) 329{ 330 const char *name = map->dso->long_name; 331 int nr; 332 333 if (dso__loaded(map->dso)) 334 return 0; 335 336 nr = dso__load(map->dso, map); 337 if (nr < 0) { 338 if (map->dso->has_build_id) { 339 char sbuild_id[SBUILD_ID_SIZE]; 340 341 build_id__sprintf(map->dso->build_id, 342 sizeof(map->dso->build_id), 343 sbuild_id); 344 pr_debug("%s with build id %s not found", name, sbuild_id); 345 } else 346 pr_debug("Failed to open %s", name); 347 348 pr_debug(", continuing without symbols\n"); 349 return -1; 350 } else if (nr == 0) { 351#ifdef HAVE_LIBELF_SUPPORT 352 const size_t len = strlen(name); 353 const size_t real_len = len - sizeof(DSO__DELETED); 354 355 if (len > sizeof(DSO__DELETED) && 356 strcmp(name + real_len + 1, DSO__DELETED) == 0) { 357 pr_debug("%.*s was updated (is prelink enabled?). " 358 "Restart the long running apps that use it!\n", 359 (int)real_len, name); 360 } else { 361 pr_debug("no symbols found in %s, maybe install a debug package?\n", name); 362 } 363#endif 364 return -1; 365 } 366 367 return 0; 368} 369 370struct symbol *map__find_symbol(struct map *map, u64 addr) 371{ 372 if (map__load(map) < 0) 373 return NULL; 374 375 return dso__find_symbol(map->dso, addr); 376} 377 378struct symbol *map__find_symbol_by_name(struct map *map, const char *name) 379{ 380 if (map__load(map) < 0) 381 return NULL; 382 383 if (!dso__sorted_by_name(map->dso)) 384 dso__sort_by_name(map->dso); 385 386 return dso__find_symbol_by_name(map->dso, name); 387} 388 389struct map *map__clone(struct map *from) 390{ 391 struct map *map = memdup(from, sizeof(*map)); 392 393 if (map != NULL) { 394 refcount_set(&map->refcnt, 1); 395 RB_CLEAR_NODE(&map->rb_node); 396 dso__get(map->dso); 397 map->groups = NULL; 398 } 399 400 return map; 401} 402 403size_t map__fprintf(struct map *map, FILE *fp) 404{ 405 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", 406 map->start, map->end, map->pgoff, map->dso->name); 407} 408 409size_t map__fprintf_dsoname(struct map *map, FILE *fp) 410{ 411 char buf[symbol_conf.pad_output_len_dso + 1]; 412 const char *dsoname = "[unknown]"; 413 414 if (map && map->dso) { 415 if (symbol_conf.show_kernel_path && map->dso->long_name) 416 dsoname = map->dso->long_name; 417 else 418 dsoname = map->dso->name; 419 } 420 421 if (symbol_conf.pad_output_len_dso) { 422 scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname); 423 dsoname = buf; 424 } 425 426 return fprintf(fp, "%s", dsoname); 427} 428 429char *map__srcline(struct map *map, u64 addr, struct symbol *sym) 430{ 431 if (map == NULL) 432 return SRCLINE_UNKNOWN; 433 return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr); 434} 435 436int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 437 FILE *fp) 438{ 439 int ret = 0; 440 441 if (map && map->dso) { 442 char *srcline = map__srcline(map, addr, NULL); 443 if (srcline != SRCLINE_UNKNOWN) 444 ret = fprintf(fp, "%s%s", prefix, srcline); 445 free_srcline(srcline); 446 } 447 return ret; 448} 449 450int map__fprintf_srccode(struct map *map, u64 addr, 451 FILE *fp, 452 struct srccode_state *state) 453{ 454 char *srcfile; 455 int ret = 0; 456 unsigned line; 457 int len; 458 char *srccode; 459 460 if (!map || !map->dso) 461 return 0; 462 srcfile = get_srcline_split(map->dso, 463 map__rip_2objdump(map, addr), 464 &line); 465 if (!srcfile) 466 return 0; 467 468 /* Avoid redundant printing */ 469 if (state && 470 state->srcfile && 471 !strcmp(state->srcfile, srcfile) && 472 state->line == line) { 473 free(srcfile); 474 return 0; 475 } 476 477 srccode = find_sourceline(srcfile, line, &len); 478 if (!srccode) 479 goto out_free_line; 480 481 ret = fprintf(fp, "|%-8d %.*s", line, len, srccode); 482 483 if (state) { 484 state->srcfile = srcfile; 485 state->line = line; 486 } 487 return ret; 488 489out_free_line: 490 free(srcfile); 491 return ret; 492} 493 494 495void srccode_state_free(struct srccode_state *state) 496{ 497 zfree(&state->srcfile); 498 state->line = 0; 499} 500 501/** 502 * map__rip_2objdump - convert symbol start address to objdump address. 503 * @map: memory map 504 * @rip: symbol start address 505 * 506 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. 507 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is 508 * relative to section start. 509 * 510 * Return: Address suitable for passing to "objdump --start-address=" 511 */ 512u64 map__rip_2objdump(struct map *map, u64 rip) 513{ 514 struct kmap *kmap = __map__kmap(map); 515 516 /* 517 * vmlinux does not have program headers for PTI entry trampolines and 518 * kcore may not either. However the trampoline object code is on the 519 * main kernel map, so just use that instead. 520 */ 521 if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) { 522 struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine); 523 524 if (kernel_map) 525 map = kernel_map; 526 } 527 528 if (!map->dso->adjust_symbols) 529 return rip; 530 531 if (map->dso->rel) 532 return rip - map->pgoff; 533 534 /* 535 * kernel modules also have DSO_TYPE_USER in dso->kernel, 536 * but all kernel modules are ET_REL, so won't get here. 537 */ 538 if (map->dso->kernel == DSO_TYPE_USER) 539 return rip + map->dso->text_offset; 540 541 return map->unmap_ip(map, rip) - map->reloc; 542} 543 544/** 545 * map__objdump_2mem - convert objdump address to a memory address. 546 * @map: memory map 547 * @ip: objdump address 548 * 549 * Closely related to map__rip_2objdump(), this function takes an address from 550 * objdump and converts it to a memory address. Note this assumes that @map 551 * contains the address. To be sure the result is valid, check it forwards 552 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip 553 * 554 * Return: Memory address. 555 */ 556u64 map__objdump_2mem(struct map *map, u64 ip) 557{ 558 if (!map->dso->adjust_symbols) 559 return map->unmap_ip(map, ip); 560 561 if (map->dso->rel) 562 return map->unmap_ip(map, ip + map->pgoff); 563 564 /* 565 * kernel modules also have DSO_TYPE_USER in dso->kernel, 566 * but all kernel modules are ET_REL, so won't get here. 567 */ 568 if (map->dso->kernel == DSO_TYPE_USER) 569 return map->unmap_ip(map, ip - map->dso->text_offset); 570 571 return ip + map->reloc; 572} 573 574static void maps__init(struct maps *maps) 575{ 576 maps->entries = RB_ROOT; 577 maps->names = RB_ROOT; 578 init_rwsem(&maps->lock); 579} 580 581void map_groups__init(struct map_groups *mg, struct machine *machine) 582{ 583 maps__init(&mg->maps); 584 mg->machine = machine; 585 refcount_set(&mg->refcnt, 1); 586} 587 588void map_groups__insert(struct map_groups *mg, struct map *map) 589{ 590 maps__insert(&mg->maps, map); 591 map->groups = mg; 592} 593 594static void __maps__purge(struct maps *maps) 595{ 596 struct rb_root *root = &maps->entries; 597 struct rb_node *next = rb_first(root); 598 599 while (next) { 600 struct map *pos = rb_entry(next, struct map, rb_node); 601 602 next = rb_next(&pos->rb_node); 603 rb_erase_init(&pos->rb_node, root); 604 map__put(pos); 605 } 606} 607 608static void __maps__purge_names(struct maps *maps) 609{ 610 struct rb_root *root = &maps->names; 611 struct rb_node *next = rb_first(root); 612 613 while (next) { 614 struct map *pos = rb_entry(next, struct map, rb_node_name); 615 616 next = rb_next(&pos->rb_node_name); 617 rb_erase_init(&pos->rb_node_name, root); 618 map__put(pos); 619 } 620} 621 622static void maps__exit(struct maps *maps) 623{ 624 down_write(&maps->lock); 625 __maps__purge(maps); 626 __maps__purge_names(maps); 627 up_write(&maps->lock); 628} 629 630void map_groups__exit(struct map_groups *mg) 631{ 632 maps__exit(&mg->maps); 633} 634 635bool map_groups__empty(struct map_groups *mg) 636{ 637 return !maps__first(&mg->maps); 638} 639 640struct map_groups *map_groups__new(struct machine *machine) 641{ 642 struct map_groups *mg = zalloc(sizeof(*mg)); 643 644 if (mg != NULL) 645 map_groups__init(mg, machine); 646 647 return mg; 648} 649 650void map_groups__delete(struct map_groups *mg) 651{ 652 map_groups__exit(mg); 653 unwind__finish_access(mg); 654 free(mg); 655} 656 657void map_groups__put(struct map_groups *mg) 658{ 659 if (mg && refcount_dec_and_test(&mg->refcnt)) 660 map_groups__delete(mg); 661} 662 663struct symbol *map_groups__find_symbol(struct map_groups *mg, 664 u64 addr, struct map **mapp) 665{ 666 struct map *map = map_groups__find(mg, addr); 667 668 /* Ensure map is loaded before using map->map_ip */ 669 if (map != NULL && map__load(map) >= 0) { 670 if (mapp != NULL) 671 *mapp = map; 672 return map__find_symbol(map, map->map_ip(map, addr)); 673 } 674 675 return NULL; 676} 677 678static bool map__contains_symbol(struct map *map, struct symbol *sym) 679{ 680 u64 ip = map->unmap_ip(map, sym->start); 681 682 return ip >= map->start && ip < map->end; 683} 684 685struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, 686 struct map **mapp) 687{ 688 struct symbol *sym; 689 struct rb_node *nd; 690 691 down_read(&maps->lock); 692 693 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { 694 struct map *pos = rb_entry(nd, struct map, rb_node); 695 696 sym = map__find_symbol_by_name(pos, name); 697 698 if (sym == NULL) 699 continue; 700 if (!map__contains_symbol(pos, sym)) { 701 sym = NULL; 702 continue; 703 } 704 if (mapp != NULL) 705 *mapp = pos; 706 goto out; 707 } 708 709 sym = NULL; 710out: 711 up_read(&maps->lock); 712 return sym; 713} 714 715struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, 716 const char *name, 717 struct map **mapp) 718{ 719 return maps__find_symbol_by_name(&mg->maps, name, mapp); 720} 721 722int map_groups__find_ams(struct addr_map_symbol *ams) 723{ 724 if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { 725 if (ams->map->groups == NULL) 726 return -1; 727 ams->map = map_groups__find(ams->map->groups, ams->addr); 728 if (ams->map == NULL) 729 return -1; 730 } 731 732 ams->al_addr = ams->map->map_ip(ams->map, ams->addr); 733 ams->sym = map__find_symbol(ams->map, ams->al_addr); 734 735 return ams->sym ? 0 : -1; 736} 737 738static size_t maps__fprintf(struct maps *maps, FILE *fp) 739{ 740 size_t printed = 0; 741 struct rb_node *nd; 742 743 down_read(&maps->lock); 744 745 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { 746 struct map *pos = rb_entry(nd, struct map, rb_node); 747 printed += fprintf(fp, "Map:"); 748 printed += map__fprintf(pos, fp); 749 if (verbose > 2) { 750 printed += dso__fprintf(pos->dso, fp); 751 printed += fprintf(fp, "--\n"); 752 } 753 } 754 755 up_read(&maps->lock); 756 757 return printed; 758} 759 760size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) 761{ 762 return maps__fprintf(&mg->maps, fp); 763} 764 765static void __map_groups__insert(struct map_groups *mg, struct map *map) 766{ 767 __maps__insert(&mg->maps, map); 768 __maps__insert_name(&mg->maps, map); 769 map->groups = mg; 770} 771 772static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) 773{ 774 struct rb_root *root; 775 struct rb_node *next, *first; 776 int err = 0; 777 778 down_write(&maps->lock); 779 780 root = &maps->entries; 781 782 /* 783 * Find first map where end > map->start. 784 * Same as find_vma() in kernel. 785 */ 786 next = root->rb_node; 787 first = NULL; 788 while (next) { 789 struct map *pos = rb_entry(next, struct map, rb_node); 790 791 if (pos->end > map->start) { 792 first = next; 793 if (pos->start <= map->start) 794 break; 795 next = next->rb_left; 796 } else 797 next = next->rb_right; 798 } 799 800 next = first; 801 while (next) { 802 struct map *pos = rb_entry(next, struct map, rb_node); 803 next = rb_next(&pos->rb_node); 804 805 /* 806 * Stop if current map starts after map->end. 807 * Maps are ordered by start: next will not overlap for sure. 808 */ 809 if (pos->start >= map->end) 810 break; 811 812 if (verbose >= 2) { 813 814 if (use_browser) { 815 pr_debug("overlapping maps in %s (disable tui for more info)\n", 816 map->dso->name); 817 } else { 818 fputs("overlapping maps:\n", fp); 819 map__fprintf(map, fp); 820 map__fprintf(pos, fp); 821 } 822 } 823 824 rb_erase_init(&pos->rb_node, root); 825 /* 826 * Now check if we need to create new maps for areas not 827 * overlapped by the new map: 828 */ 829 if (map->start > pos->start) { 830 struct map *before = map__clone(pos); 831 832 if (before == NULL) { 833 err = -ENOMEM; 834 goto put_map; 835 } 836 837 before->end = map->start; 838 __map_groups__insert(pos->groups, before); 839 if (verbose >= 2 && !use_browser) 840 map__fprintf(before, fp); 841 map__put(before); 842 } 843 844 if (map->end < pos->end) { 845 struct map *after = map__clone(pos); 846 847 if (after == NULL) { 848 err = -ENOMEM; 849 goto put_map; 850 } 851 852 after->start = map->end; 853 __map_groups__insert(pos->groups, after); 854 if (verbose >= 2 && !use_browser) 855 map__fprintf(after, fp); 856 map__put(after); 857 } 858put_map: 859 map__put(pos); 860 861 if (err) 862 goto out; 863 } 864 865 err = 0; 866out: 867 up_write(&maps->lock); 868 return err; 869} 870 871int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, 872 FILE *fp) 873{ 874 return maps__fixup_overlappings(&mg->maps, map, fp); 875} 876 877/* 878 * XXX This should not really _copy_ te maps, but refcount them. 879 */ 880int map_groups__clone(struct thread *thread, struct map_groups *parent) 881{ 882 struct map_groups *mg = thread->mg; 883 int err = -ENOMEM; 884 struct map *map; 885 struct maps *maps = &parent->maps; 886 887 down_read(&maps->lock); 888 889 for (map = maps__first(maps); map; map = map__next(map)) { 890 struct map *new = map__clone(map); 891 if (new == NULL) 892 goto out_unlock; 893 894 err = unwind__prepare_access(mg, new, NULL); 895 if (err) 896 goto out_unlock; 897 898 map_groups__insert(mg, new); 899 map__put(new); 900 } 901 902 err = 0; 903out_unlock: 904 up_read(&maps->lock); 905 return err; 906} 907 908static void __maps__insert(struct maps *maps, struct map *map) 909{ 910 struct rb_node **p = &maps->entries.rb_node; 911 struct rb_node *parent = NULL; 912 const u64 ip = map->start; 913 struct map *m; 914 915 while (*p != NULL) { 916 parent = *p; 917 m = rb_entry(parent, struct map, rb_node); 918 if (ip < m->start) 919 p = &(*p)->rb_left; 920 else 921 p = &(*p)->rb_right; 922 } 923 924 rb_link_node(&map->rb_node, parent, p); 925 rb_insert_color(&map->rb_node, &maps->entries); 926 map__get(map); 927} 928 929static void __maps__insert_name(struct maps *maps, struct map *map) 930{ 931 struct rb_node **p = &maps->names.rb_node; 932 struct rb_node *parent = NULL; 933 struct map *m; 934 int rc; 935 936 while (*p != NULL) { 937 parent = *p; 938 m = rb_entry(parent, struct map, rb_node_name); 939 rc = strcmp(m->dso->short_name, map->dso->short_name); 940 if (rc < 0) 941 p = &(*p)->rb_left; 942 else 943 p = &(*p)->rb_right; 944 } 945 rb_link_node(&map->rb_node_name, parent, p); 946 rb_insert_color(&map->rb_node_name, &maps->names); 947 map__get(map); 948} 949 950void maps__insert(struct maps *maps, struct map *map) 951{ 952 down_write(&maps->lock); 953 __maps__insert(maps, map); 954 __maps__insert_name(maps, map); 955 up_write(&maps->lock); 956} 957 958static void __maps__remove(struct maps *maps, struct map *map) 959{ 960 rb_erase_init(&map->rb_node, &maps->entries); 961 map__put(map); 962 963 rb_erase_init(&map->rb_node_name, &maps->names); 964 map__put(map); 965} 966 967void maps__remove(struct maps *maps, struct map *map) 968{ 969 down_write(&maps->lock); 970 __maps__remove(maps, map); 971 up_write(&maps->lock); 972} 973 974struct map *maps__find(struct maps *maps, u64 ip) 975{ 976 struct rb_node *p; 977 struct map *m; 978 979 down_read(&maps->lock); 980 981 p = maps->entries.rb_node; 982 while (p != NULL) { 983 m = rb_entry(p, struct map, rb_node); 984 if (ip < m->start) 985 p = p->rb_left; 986 else if (ip >= m->end) 987 p = p->rb_right; 988 else 989 goto out; 990 } 991 992 m = NULL; 993out: 994 up_read(&maps->lock); 995 return m; 996} 997 998struct map *maps__first(struct maps *maps) 999{ 1000 struct rb_node *first = rb_first(&maps->entries); 1001 1002 if (first) 1003 return rb_entry(first, struct map, rb_node); 1004 return NULL; 1005} 1006 1007struct map *map__next(struct map *map) 1008{ 1009 struct rb_node *next = rb_next(&map->rb_node); 1010 1011 if (next) 1012 return rb_entry(next, struct map, rb_node); 1013 return NULL; 1014} 1015 1016struct kmap *__map__kmap(struct map *map) 1017{ 1018 if (!map->dso || !map->dso->kernel) 1019 return NULL; 1020 return (struct kmap *)(map + 1); 1021} 1022 1023struct kmap *map__kmap(struct map *map) 1024{ 1025 struct kmap *kmap = __map__kmap(map); 1026 1027 if (!kmap) 1028 pr_err("Internal error: map__kmap with a non-kernel map\n"); 1029 return kmap; 1030} 1031 1032struct map_groups *map__kmaps(struct map *map) 1033{ 1034 struct kmap *kmap = map__kmap(map); 1035 1036 if (!kmap || !kmap->kmaps) { 1037 pr_err("Internal error: map__kmaps with a non-kernel map\n"); 1038 return NULL; 1039 } 1040 return kmap->kmaps; 1041}