Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf maps: Merge 'struct maps' with 'struct map_groups'

And pick the shortest name: 'struct maps'.

The split existed because we used to have two groups of maps, one for
functions and one for variables, but that only complicated things:
sometimes we needed to figure out what was at some address and then had
to first try it on the functions group and, if that failed, fall back to
the variables one.

That split is long gone, so for quite a while we had only one struct
maps per struct map_groups, simplify things by combining those structs.

The first patch is the minimum needed to merge both; follow-up patches will
rename 'thread->mg' to 'thread->maps', etc.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-hom6639ro7020o708trhxh59@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+209 -275
+1 -1
tools/perf/arch/arm/tests/dwarf-unwind.c
··· 26 26 27 27 sp = (unsigned long) regs[PERF_REG_ARM_SP]; 28 28 29 - map = map_groups__find(thread->mg, (u64)sp); 29 + map = maps__find(thread->mg, (u64)sp); 30 30 if (!map) { 31 31 pr_debug("failed to get stack map\n"); 32 32 free(buf);
+1 -1
tools/perf/arch/arm64/tests/dwarf-unwind.c
··· 26 26 27 27 sp = (unsigned long) regs[PERF_REG_ARM64_SP]; 28 28 29 - map = map_groups__find(thread->mg, (u64)sp); 29 + map = maps__find(thread->mg, (u64)sp); 30 30 if (!map) { 31 31 pr_debug("failed to get stack map\n"); 32 32 free(buf);
+1 -1
tools/perf/arch/powerpc/tests/dwarf-unwind.c
··· 27 27 28 28 sp = (unsigned long) regs[PERF_REG_POWERPC_R1]; 29 29 30 - map = map_groups__find(thread->mg, (u64)sp); 30 + map = maps__find(thread->mg, (u64)sp); 31 31 if (!map) { 32 32 pr_debug("failed to get stack map\n"); 33 33 free(buf);
+1 -1
tools/perf/arch/s390/annotate/instructions.c
··· 38 38 return -1; 39 39 target.addr = map__objdump_2mem(map, ops->target.addr); 40 40 41 - if (map_groups__find_ams(ms->mg, &target) == 0 && 41 + if (maps__find_ams(ms->mg, &target) == 0 && 42 42 map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr) 43 43 ops->target.sym = target.ms.sym; 44 44
+1 -1
tools/perf/arch/x86/tests/dwarf-unwind.c
··· 27 27 28 28 sp = (unsigned long) regs[PERF_REG_X86_SP]; 29 29 30 - map = map_groups__find(thread->mg, (u64)sp); 30 + map = maps__find(thread->mg, (u64)sp); 31 31 if (!map) { 32 32 pr_debug("failed to get stack map\n"); 33 33 free(buf);
+2 -3
tools/perf/arch/x86/util/event.c
··· 18 18 { 19 19 int rc = 0; 20 20 struct map *pos; 21 - struct map_groups *kmaps = &machine->kmaps; 22 - struct maps *maps = &kmaps->maps; 21 + struct maps *kmaps = &machine->kmaps; 23 22 union perf_event *event = zalloc(sizeof(event->mmap) + 24 23 machine->id_hdr_size); 25 24 ··· 28 29 return -1; 29 30 } 30 31 31 - maps__for_each_entry(maps, pos) { 32 + maps__for_each_entry(kmaps, pos) { 32 33 struct kmap *kmap; 33 34 size_t size; 34 35
+1 -6
tools/perf/builtin-report.c
··· 780 780 return printed; 781 781 } 782 782 783 - static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp) 784 - { 785 - return maps__fprintf_task(&mg->maps, indent, fp); 786 - } 787 - 788 783 static void task__print_level(struct task *task, FILE *fp, int level) 789 784 { 790 785 struct thread *thread = task->thread; ··· 790 795 791 796 fprintf(fp, "%s\n", thread__comm_str(thread)); 792 797 793 - map_groups__fprintf_task(thread->mg, comm_indent, fp); 798 + maps__fprintf_task(thread->mg, comm_indent, fp); 794 799 795 800 if (!list_empty(&task->children)) { 796 801 list_for_each_entry(child, &task->children, list)
+8 -8
tools/perf/tests/map_groups.c
··· 13 13 u64 end; 14 14 }; 15 15 16 - static int check_maps(struct map_def *merged, unsigned int size, struct map_groups *mg) 16 + static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps) 17 17 { 18 18 struct map *map; 19 19 unsigned int i = 0; 20 20 21 - map_groups__for_each_entry(mg, map) { 21 + maps__for_each_entry(maps, map) { 22 22 if (i > 0) 23 23 TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size)); 24 24 ··· 35 35 36 36 int test__map_groups__merge_in(struct test *t __maybe_unused, int subtest __maybe_unused) 37 37 { 38 - struct map_groups mg; 38 + struct maps mg; 39 39 unsigned int i; 40 40 struct map_def bpf_progs[] = { 41 41 { "bpf_prog_1", 200, 300 }, ··· 64 64 struct map *map_kcore1, *map_kcore2, *map_kcore3; 65 65 int ret; 66 66 67 - map_groups__init(&mg, NULL); 67 + maps__init(&mg, NULL); 68 68 69 69 for (i = 0; i < ARRAY_SIZE(bpf_progs); i++) { 70 70 struct map *map; ··· 74 74 75 75 map->start = bpf_progs[i].start; 76 76 map->end = bpf_progs[i].end; 77 - map_groups__insert(&mg, map); 77 + maps__insert(&mg, map); 78 78 map__put(map); 79 79 } 80 80 ··· 99 99 map_kcore3->start = 880; 100 100 map_kcore3->end = 1100; 101 101 102 - ret = map_groups__merge_in(&mg, map_kcore1); 102 + ret = maps__merge_in(&mg, map_kcore1); 103 103 TEST_ASSERT_VAL("failed to merge map", !ret); 104 104 105 105 ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg); 106 106 TEST_ASSERT_VAL("merge check failed", !ret); 107 107 108 - ret = map_groups__merge_in(&mg, map_kcore2); 108 + ret = maps__merge_in(&mg, map_kcore2); 109 109 TEST_ASSERT_VAL("failed to merge map", !ret); 110 110 111 111 ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg); 112 112 TEST_ASSERT_VAL("merge check failed", !ret); 113 113 114 - ret = map_groups__merge_in(&mg, map_kcore3); 114 + ret = maps__merge_in(&mg, map_kcore3); 115 115 TEST_ASSERT_VAL("failed to merge map", !ret); 116 116 117 117 ret = check_maps(merged3, ARRAY_SIZE(merged3), &mg);
+3 -3
tools/perf/tests/thread-mg-share.c
··· 12 12 /* thread group */ 13 13 struct thread *leader; 14 14 struct thread *t1, *t2, *t3; 15 - struct map_groups *mg; 15 + struct maps *mg; 16 16 17 17 /* other process */ 18 18 struct thread *other, *other_leader; 19 - struct map_groups *other_mg; 19 + struct maps *other_mg; 20 20 21 21 /* 22 22 * This test create 2 processes abstractions (struct thread) 23 23 * with several threads and checks they properly share and 24 - * maintain map groups info (struct map_groups). 24 + * maintain maps info (struct maps). 25 25 * 26 26 * thread group (pid: 0, tids: 0, 1, 2, 3) 27 27 * other group (pid: 4, tids: 4, 5)
+4 -5
tools/perf/tests/vmlinux-kallsyms.c
··· 190 190 * so use the short name, less descriptive but the same ("[kernel]" in 191 191 * both cases. 192 192 */ 193 - pair = map_groups__find_by_name(&kallsyms.kmaps, 194 - (map->dso->kernel ? 195 - map->dso->short_name : 196 - map->dso->name)); 193 + pair = maps__find_by_name(&kallsyms.kmaps, (map->dso->kernel ? 194 + map->dso->short_name : 195 + map->dso->name)); 197 196 if (pair) { 198 197 pair->priv = 1; 199 198 } else { ··· 212 213 mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start); 213 214 mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end); 214 215 215 - pair = map_groups__find(&kallsyms.kmaps, mem_start); 216 + pair = maps__find(&kallsyms.kmaps, mem_start); 216 217 if (pair == NULL || pair->priv) 217 218 continue; 218 219
+1 -1
tools/perf/ui/stdio/hist.c
··· 885 885 } 886 886 887 887 if (h->ms.map == NULL && verbose > 1) { 888 - map_groups__fprintf(h->thread->mg, fp); 888 + maps__fprintf(h->thread->mg, fp); 889 889 fprintf(fp, "%.10s end\n", graph_dotted_line); 890 890 } 891 891 }
+3 -3
tools/perf/util/annotate.c
··· 271 271 find_target: 272 272 target.addr = map__objdump_2mem(map, ops->target.addr); 273 273 274 - if (map_groups__find_ams(ms->mg, &target) == 0 && 274 + if (maps__find_ams(ms->mg, &target) == 0 && 275 275 map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr) 276 276 ops->target.sym = target.ms.sym; 277 277 ··· 391 391 * Actual navigation will come next, with further understanding of how 392 392 * the symbol searching and disassembly should be done. 393 393 */ 394 - if (map_groups__find_ams(ms->mg, &target) == 0 && 394 + if (maps__find_ams(ms->mg, &target) == 0 && 395 395 map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr) 396 396 ops->target.sym = target.ms.sym; 397 397 ··· 1545 1545 .ms = { .map = map, }, 1546 1546 }; 1547 1547 1548 - if (!map_groups__find_ams(args->ms.mg, &target) && 1548 + if (!maps__find_ams(args->ms.mg, &target) && 1549 1549 target.ms.sym->start == target.al_addr) 1550 1550 dl->ops.target.sym = target.ms.sym; 1551 1551 }
+1 -3
tools/perf/util/bpf-event.c
··· 52 52 for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) { 53 53 u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms); 54 54 u64 addr = addrs[i]; 55 - struct map *map; 56 - 57 - map = map_groups__find(&machine->kmaps, addr); 55 + struct map *map = maps__find(&machine->kmaps, addr); 58 56 59 57 if (map) { 60 58 map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
+1 -1
tools/perf/util/cs-etm.c
··· 2569 2569 if (err) 2570 2570 goto err_delete_thread; 2571 2571 2572 - if (thread__init_map_groups(etm->unknown_thread, etm->machine)) { 2572 + if (thread__init_maps(etm->unknown_thread, etm->machine)) { 2573 2573 err = -ENOMEM; 2574 2574 goto err_delete_thread; 2575 2575 }
+2 -2
tools/perf/util/event.c
··· 457 457 struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, 458 458 struct addr_location *al) 459 459 { 460 - struct map_groups *mg = thread->mg; 460 + struct maps *mg = thread->mg; 461 461 struct machine *machine = mg->machine; 462 462 bool load_map = false; 463 463 ··· 500 500 return NULL; 501 501 } 502 502 503 - al->map = map_groups__find(mg, al->addr); 503 + al->map = maps__find(mg, al->addr); 504 504 if (al->map != NULL) { 505 505 /* 506 506 * Kernel maps might be changed when loading symbols so loading
+1 -1
tools/perf/util/intel-pt.c
··· 3296 3296 err = thread__set_comm(pt->unknown_thread, "unknown", 0); 3297 3297 if (err) 3298 3298 goto err_delete_thread; 3299 - if (thread__init_map_groups(pt->unknown_thread, pt->machine)) { 3299 + if (thread__init_maps(pt->unknown_thread, pt->machine)) { 3300 3300 err = -ENOMEM; 3301 3301 goto err_delete_thread; 3302 3302 }
+30 -36
tools/perf/util/machine.c
··· 86 86 int err = -ENOMEM; 87 87 88 88 memset(machine, 0, sizeof(*machine)); 89 - map_groups__init(&machine->kmaps, machine); 89 + maps__init(&machine->kmaps, machine); 90 90 RB_CLEAR_NODE(&machine->rb_node); 91 91 dsos__init(&machine->dsos); 92 92 ··· 217 217 return; 218 218 219 219 machine__destroy_kernel_maps(machine); 220 - map_groups__exit(&machine->kmaps); 220 + maps__exit(&machine->kmaps); 221 221 dsos__exit(&machine->dsos); 222 222 machine__exit_vdso(machine); 223 223 zfree(&machine->root_dir); ··· 413 413 goto out_err; 414 414 415 415 if (!leader->mg) 416 - leader->mg = map_groups__new(machine); 416 + leader->mg = maps__new(machine); 417 417 418 418 if (!leader->mg) 419 419 goto out_err; ··· 427 427 * tid. Consequently there never should be any maps on a thread 428 428 * with an unknown pid. Just print an error if there are. 429 429 */ 430 - if (!map_groups__empty(th->mg)) 430 + if (!maps__empty(th->mg)) 431 431 pr_err("Discarding thread maps for %d:%d\n", 432 432 th->pid_, th->tid); 433 - map_groups__put(th->mg); 433 + maps__put(th->mg); 434 434 } 435 435 436 - th->mg = map_groups__get(leader->mg); 436 + th->mg = maps__get(leader->mg); 437 437 out_put: 438 438 thread__put(leader); 439 439 return; ··· 536 536 rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost); 537 537 538 538 /* 539 - * We have to initialize map_groups separately 540 - * after rb tree is updated. 539 + * We have to initialize maps separately after rb tree is updated. 541 540 * 542 541 * The reason is that we call machine__findnew_thread 543 - * within thread__init_map_groups to find the thread 542 + * within thread__init_maps to find the thread 544 543 * leader and that would screwed the rb tree. 
545 544 */ 546 - if (thread__init_map_groups(th, machine)) { 545 + if (thread__init_maps(th, machine)) { 547 546 rb_erase_cached(&th->rb_node, &threads->entries); 548 547 RB_CLEAR_NODE(&th->rb_node); 549 548 thread__put(th); ··· 723 724 struct perf_sample *sample __maybe_unused) 724 725 { 725 726 struct symbol *sym; 726 - struct map *map; 727 + struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr); 727 728 728 - map = map_groups__find(&machine->kmaps, event->ksymbol.addr); 729 729 if (!map) { 730 730 map = dso__new_map(event->ksymbol.name); 731 731 if (!map) ··· 732 734 733 735 map->start = event->ksymbol.addr; 734 736 map->end = map->start + event->ksymbol.len; 735 - map_groups__insert(&machine->kmaps, map); 737 + maps__insert(&machine->kmaps, map); 736 738 } 737 739 738 740 sym = symbol__new(map->map_ip(map, map->start), ··· 750 752 { 751 753 struct map *map; 752 754 753 - map = map_groups__find(&machine->kmaps, event->ksymbol.addr); 755 + map = maps__find(&machine->kmaps, event->ksymbol.addr); 754 756 if (map) 755 - map_groups__remove(&machine->kmaps, map); 757 + maps__remove(&machine->kmaps, map); 756 758 757 759 return 0; 758 760 } ··· 788 790 if (map == NULL) 789 791 goto out; 790 792 791 - map_groups__insert(&machine->kmaps, map); 793 + maps__insert(&machine->kmaps, map); 792 794 793 - /* Put the map here because map_groups__insert alread got it */ 795 + /* Put the map here because maps__insert alread got it */ 794 796 map__put(map); 795 797 out: 796 798 /* put the dso here, corresponding to machine__findnew_module_dso */ ··· 975 977 kmap->kmaps = &machine->kmaps; 976 978 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); 977 979 978 - map_groups__insert(&machine->kmaps, map); 980 + maps__insert(&machine->kmaps, map); 979 981 980 982 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n", 981 983 kmap->name, map->start, map->end); ··· 1020 1022 int machine__map_x86_64_entry_trampolines(struct machine *machine, 1021 1023 struct dso *kernel) 
1022 1024 { 1023 - struct map_groups *kmaps = &machine->kmaps; 1024 - struct maps *maps = &kmaps->maps; 1025 + struct maps *kmaps = &machine->kmaps; 1025 1026 int nr_cpus_avail, cpu; 1026 1027 bool found = false; 1027 1028 struct map *map; ··· 1030 1033 * In the vmlinux case, pgoff is a virtual address which must now be 1031 1034 * mapped to a vmlinux offset. 1032 1035 */ 1033 - maps__for_each_entry(maps, map) { 1036 + maps__for_each_entry(kmaps, map) { 1034 1037 struct kmap *kmap = __map__kmap(map); 1035 1038 struct map *dest_map; 1036 1039 1037 1040 if (!kmap || !is_entry_trampoline(kmap->name)) 1038 1041 continue; 1039 1042 1040 - dest_map = map_groups__find(kmaps, map->pgoff); 1043 + dest_map = maps__find(kmaps, map->pgoff); 1041 1044 if (dest_map != map) 1042 1045 map->pgoff = dest_map->map_ip(dest_map, map->pgoff); 1043 1046 found = true; ··· 1099 1102 return -1; 1100 1103 1101 1104 kmap->kmaps = &machine->kmaps; 1102 - map_groups__insert(&machine->kmaps, map); 1105 + maps__insert(&machine->kmaps, map); 1103 1106 1104 1107 return 0; 1105 1108 } ··· 1113 1116 return; 1114 1117 1115 1118 kmap = map__kmap(map); 1116 - map_groups__remove(&machine->kmaps, map); 1119 + maps__remove(&machine->kmaps, map); 1117 1120 if (kmap && kmap->ref_reloc_sym) { 1118 1121 zfree((char **)&kmap->ref_reloc_sym->name); 1119 1122 zfree(&kmap->ref_reloc_sym); ··· 1208 1211 * kernel, with modules between them, fixup the end of all 1209 1212 * sections. 
1210 1213 */ 1211 - map_groups__fixup_end(&machine->kmaps); 1214 + maps__fixup_end(&machine->kmaps); 1212 1215 } 1213 1216 1214 1217 return ret; ··· 1259 1262 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; 1260 1263 } 1261 1264 1262 - static int map_groups__set_module_path(struct map_groups *mg, const char *path, 1263 - struct kmod_path *m) 1265 + static int maps__set_module_path(struct maps *mg, const char *path, struct kmod_path *m) 1264 1266 { 1265 1267 char *long_name; 1266 - struct map *map = map_groups__find_by_name(mg, m->name); 1268 + struct map *map = maps__find_by_name(mg, m->name); 1267 1269 1268 1270 if (map == NULL) 1269 1271 return 0; ··· 1286 1290 return 0; 1287 1291 } 1288 1292 1289 - static int map_groups__set_modules_path_dir(struct map_groups *mg, 1290 - const char *dir_name, int depth) 1293 + static int maps__set_modules_path_dir(struct maps *mg, const char *dir_name, int depth) 1291 1294 { 1292 1295 struct dirent *dent; 1293 1296 DIR *dir = opendir(dir_name); ··· 1318 1323 continue; 1319 1324 } 1320 1325 1321 - ret = map_groups__set_modules_path_dir(mg, path, 1322 - depth + 1); 1326 + ret = maps__set_modules_path_dir(mg, path, depth + 1); 1323 1327 if (ret < 0) 1324 1328 goto out; 1325 1329 } else { ··· 1329 1335 goto out; 1330 1336 1331 1337 if (m.kmod) 1332 - ret = map_groups__set_module_path(mg, path, &m); 1338 + ret = maps__set_module_path(mg, path, &m); 1333 1339 1334 1340 zfree(&m.name); 1335 1341 ··· 1356 1362 machine->root_dir, version); 1357 1363 free(version); 1358 1364 1359 - return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); 1365 + return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0); 1360 1366 } 1361 1367 int __weak arch__fix_module_text_start(u64 *start __maybe_unused, 1362 1368 u64 *size __maybe_unused, ··· 1429 1435 struct map *map = machine__kernel_map(machine); 1430 1436 1431 1437 map__get(map); 1432 - map_groups__remove(&machine->kmaps, map); 1438 + maps__remove(&machine->kmaps, 
map); 1433 1439 1434 1440 machine__set_kernel_mmap(machine, start, end); 1435 1441 1436 - map_groups__insert(&machine->kmaps, map); 1442 + maps__insert(&machine->kmaps, map); 1437 1443 map__put(map); 1438 1444 } 1439 1445
+4 -4
tools/perf/util/machine.h
··· 51 51 struct vdso_info *vdso_info; 52 52 struct perf_env *env; 53 53 struct dsos dsos; 54 - struct map_groups kmaps; 54 + struct maps kmaps; 55 55 struct map *vmlinux_map; 56 56 u64 kernel_start; 57 57 pid_t *current_tid; ··· 83 83 static inline 84 84 struct maps *machine__kernel_maps(struct machine *machine) 85 85 { 86 - return &machine->kmaps.maps; 86 + return &machine->kmaps; 87 87 } 88 88 89 89 int machine__get_kernel_start(struct machine *machine); ··· 212 212 struct symbol *machine__find_kernel_symbol(struct machine *machine, u64 addr, 213 213 struct map **mapp) 214 214 { 215 - return map_groups__find_symbol(&machine->kmaps, addr, mapp); 215 + return maps__find_symbol(&machine->kmaps, addr, mapp); 216 216 } 217 217 218 218 static inline ··· 220 220 const char *name, 221 221 struct map **mapp) 222 222 { 223 - return map_groups__find_symbol_by_name(&machine->kmaps, name, mapp); 223 + return maps__find_symbol_by_name(&machine->kmaps, name, mapp); 224 224 } 225 225 226 226 int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
+37 -68
tools/perf/util/map.c
··· 512 512 return ip + map->reloc; 513 513 } 514 514 515 - static void maps__init(struct maps *maps) 515 + void maps__init(struct maps *mg, struct machine *machine) 516 516 { 517 - maps->entries = RB_ROOT; 518 - init_rwsem(&maps->lock); 519 - } 520 - 521 - void map_groups__init(struct map_groups *mg, struct machine *machine) 522 - { 523 - maps__init(&mg->maps); 517 + mg->entries = RB_ROOT; 518 + init_rwsem(&mg->lock); 524 519 mg->machine = machine; 525 520 mg->last_search_by_name = NULL; 526 521 mg->nr_maps = 0; ··· 523 528 refcount_set(&mg->refcnt, 1); 524 529 } 525 530 526 - static void __map_groups__free_maps_by_name(struct map_groups *mg) 531 + static void __maps__free_maps_by_name(struct maps *mg) 527 532 { 528 533 /* 529 534 * Free everything to try to do it from the rbtree in the next search ··· 532 537 mg->nr_maps_allocated = 0; 533 538 } 534 539 535 - void map_groups__insert(struct map_groups *mg, struct map *map) 540 + void maps__insert(struct maps *mg, struct map *map) 536 541 { 537 - struct maps *maps = &mg->maps; 542 + struct maps *maps = mg; 538 543 539 544 down_write(&maps->lock); 540 545 __maps__insert(maps, map); ··· 550 555 struct map **maps_by_name = realloc(mg->maps_by_name, nr_allocate * sizeof(map)); 551 556 552 557 if (maps_by_name == NULL) { 553 - __map_groups__free_maps_by_name(mg); 558 + __maps__free_maps_by_name(maps); 554 559 return; 555 560 } 556 561 ··· 558 563 mg->nr_maps_allocated = nr_allocate; 559 564 } 560 565 mg->maps_by_name[mg->nr_maps - 1] = map; 561 - __map_groups__sort_by_name(mg); 566 + __maps__sort_by_name(maps); 562 567 } 563 568 up_write(&maps->lock); 564 569 } ··· 569 574 map__put(map); 570 575 } 571 576 572 - void map_groups__remove(struct map_groups *mg, struct map *map) 577 + void maps__remove(struct maps *mg, struct map *map) 573 578 { 574 - struct maps *maps = &mg->maps; 579 + struct maps *maps = mg; 575 580 down_write(&maps->lock); 576 581 if (mg->last_search_by_name == map) 577 582 mg->last_search_by_name = 
NULL; ··· 579 584 __maps__remove(maps, map); 580 585 --mg->nr_maps; 581 586 if (mg->maps_by_name) 582 - __map_groups__free_maps_by_name(mg); 587 + __maps__free_maps_by_name(maps); 583 588 up_write(&maps->lock); 584 589 } 585 590 ··· 593 598 } 594 599 } 595 600 596 - static void maps__exit(struct maps *maps) 601 + void maps__exit(struct maps *maps) 597 602 { 598 603 down_write(&maps->lock); 599 604 __maps__purge(maps); 600 605 up_write(&maps->lock); 601 606 } 602 607 603 - void map_groups__exit(struct map_groups *mg) 608 + bool maps__empty(struct maps *maps) 604 609 { 605 - maps__exit(&mg->maps); 610 + return !maps__first(maps); 606 611 } 607 612 608 - bool map_groups__empty(struct map_groups *mg) 613 + struct maps *maps__new(struct machine *machine) 609 614 { 610 - return !maps__first(&mg->maps); 611 - } 612 - 613 - struct map_groups *map_groups__new(struct machine *machine) 614 - { 615 - struct map_groups *mg = zalloc(sizeof(*mg)); 615 + struct maps *mg = zalloc(sizeof(*mg)), *maps = mg; 616 616 617 617 if (mg != NULL) 618 - map_groups__init(mg, machine); 618 + maps__init(maps, machine); 619 619 620 620 return mg; 621 621 } 622 622 623 - void map_groups__delete(struct map_groups *mg) 623 + void maps__delete(struct maps *mg) 624 624 { 625 - map_groups__exit(mg); 625 + maps__exit(mg); 626 626 unwind__finish_access(mg); 627 627 free(mg); 628 628 } 629 629 630 - void map_groups__put(struct map_groups *mg) 630 + void maps__put(struct maps *mg) 631 631 { 632 632 if (mg && refcount_dec_and_test(&mg->refcnt)) 633 - map_groups__delete(mg); 633 + maps__delete(mg); 634 634 } 635 635 636 - struct symbol *map_groups__find_symbol(struct map_groups *mg, 637 - u64 addr, struct map **mapp) 636 + struct symbol *maps__find_symbol(struct maps *mg, u64 addr, struct map **mapp) 638 637 { 639 - struct map *map = map_groups__find(mg, addr); 638 + struct map *map = maps__find(mg, addr); 640 639 641 640 /* Ensure map is loaded before using map->map_ip */ 642 641 if (map != NULL && 
map__load(map) >= 0) { ··· 649 660 return ip >= map->start && ip < map->end; 650 661 } 651 662 652 - static struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, 653 - struct map **mapp) 663 + struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp) 654 664 { 655 665 struct symbol *sym; 656 666 struct map *pos; ··· 676 688 return sym; 677 689 } 678 690 679 - struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, 680 - const char *name, 681 - struct map **mapp) 682 - { 683 - return maps__find_symbol_by_name(&mg->maps, name, mapp); 684 - } 685 - 686 - int map_groups__find_ams(struct map_groups *mg, struct addr_map_symbol *ams) 691 + int maps__find_ams(struct maps *mg, struct addr_map_symbol *ams) 687 692 { 688 693 if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) { 689 694 if (mg == NULL) 690 695 return -1; 691 - ams->ms.map = map_groups__find(mg, ams->addr); 696 + ams->ms.map = maps__find(mg, ams->addr); 692 697 if (ams->ms.map == NULL) 693 698 return -1; 694 699 } ··· 692 711 return ams->ms.sym ? 
0 : -1; 693 712 } 694 713 695 - static size_t maps__fprintf(struct maps *maps, FILE *fp) 714 + size_t maps__fprintf(struct maps *maps, FILE *fp) 696 715 { 697 716 size_t printed = 0; 698 717 struct map *pos; ··· 713 732 return printed; 714 733 } 715 734 716 - size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) 735 + int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) 717 736 { 718 - return maps__fprintf(&mg->maps, fp); 719 - } 720 - 721 - static void __map_groups__insert(struct map_groups *mg, struct map *map) 722 - { 723 - __maps__insert(&mg->maps, map); 724 - } 725 - 726 - int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp) 727 - { 728 - struct maps *maps = &mg->maps; 729 737 struct rb_root *root; 730 738 struct rb_node *next, *first; 731 739 int err = 0; ··· 779 809 } 780 810 781 811 before->end = map->start; 782 - __map_groups__insert(mg, before); 812 + __maps__insert(maps, before); 783 813 if (verbose >= 2 && !use_browser) 784 814 map__fprintf(before, fp); 785 815 map__put(before); ··· 796 826 after->start = map->end; 797 827 after->pgoff += map->end - pos->start; 798 828 assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end)); 799 - __map_groups__insert(mg, after); 829 + __maps__insert(maps, after); 800 830 if (verbose >= 2 && !use_browser) 801 831 map__fprintf(after, fp); 802 832 map__put(after); ··· 817 847 /* 818 848 * XXX This should not really _copy_ te maps, but refcount them. 
819 849 */ 820 - int map_groups__clone(struct thread *thread, struct map_groups *parent) 850 + int maps__clone(struct thread *thread, struct maps *parent) 821 851 { 822 - struct map_groups *mg = thread->mg; 852 + struct maps *mg = thread->mg; 823 853 int err = -ENOMEM; 824 854 struct map *map; 825 - struct maps *maps = &parent->maps; 826 855 827 - down_read(&maps->lock); 856 + down_read(&parent->lock); 828 857 829 - maps__for_each_entry(maps, map) { 858 + maps__for_each_entry(parent, map) { 830 859 struct map *new = map__clone(map); 831 860 if (new == NULL) 832 861 goto out_unlock; ··· 834 865 if (err) 835 866 goto out_unlock; 836 867 837 - map_groups__insert(mg, new); 868 + maps__insert(mg, new); 838 869 map__put(new); 839 870 } 840 871 841 872 err = 0; 842 873 out_unlock: 843 - up_read(&maps->lock); 874 + up_read(&parent->lock); 844 875 return err; 845 876 } 846 877 ··· 928 959 return kmap; 929 960 } 930 961 931 - struct map_groups *map__kmaps(struct map *map) 962 + struct maps *map__kmaps(struct map *map) 932 963 { 933 964 struct kmap *kmap = map__kmap(map); 934 965
+2 -2
tools/perf/util/map.h
··· 12 12 #include <linux/types.h> 13 13 14 14 struct dso; 15 - struct map_groups; 15 + struct maps; 16 16 struct machine; 17 17 18 18 struct map { ··· 42 42 43 43 struct kmap *__map__kmap(struct map *map); 44 44 struct kmap *map__kmap(struct map *map); 45 - struct map_groups *map__kmaps(struct map *map); 45 + struct maps *map__kmaps(struct map *map); 46 46 47 47 static inline u64 map__map_ip(struct map *map, u64 ip) 48 48 {
+23 -37
tools/perf/util/map_groups.h
··· 12 12 struct ref_reloc_sym; 13 13 struct machine; 14 14 struct map; 15 + struct maps; 15 16 struct thread; 16 - 17 - struct maps { 18 - struct rb_root entries; 19 - struct rw_semaphore lock; 20 - }; 21 17 22 18 struct map *maps__find(struct maps *maps, u64 addr); 23 19 struct map *maps__first(struct maps *maps); ··· 25 29 #define maps__for_each_entry_safe(maps, map, next) \ 26 30 for (map = maps__first(maps), next = map__next(map); map; map = next, next = map__next(map)) 27 31 28 - struct map_groups { 29 - struct maps maps; 32 + struct maps { 33 + struct rb_root entries; 34 + struct rw_semaphore lock; 30 35 struct machine *machine; 31 36 struct map *last_search_by_name; 32 37 struct map **maps_by_name; ··· 44 47 45 48 struct kmap { 46 49 struct ref_reloc_sym *ref_reloc_sym; 47 - struct map_groups *kmaps; 50 + struct maps *kmaps; 48 51 char name[KMAP_NAME_LEN]; 49 52 }; 50 53 51 - struct map_groups *map_groups__new(struct machine *machine); 52 - void map_groups__delete(struct map_groups *mg); 53 - bool map_groups__empty(struct map_groups *mg); 54 + struct maps *maps__new(struct machine *machine); 55 + void maps__delete(struct maps *mg); 56 + bool maps__empty(struct maps *mg); 54 57 55 - static inline struct map_groups *map_groups__get(struct map_groups *mg) 58 + static inline struct maps *maps__get(struct maps *mg) 56 59 { 57 60 if (mg) 58 61 refcount_inc(&mg->refcnt); 59 62 return mg; 60 63 } 61 64 62 - void map_groups__put(struct map_groups *mg); 63 - void map_groups__init(struct map_groups *mg, struct machine *machine); 64 - void map_groups__exit(struct map_groups *mg); 65 - int map_groups__clone(struct thread *thread, struct map_groups *parent); 66 - size_t map_groups__fprintf(struct map_groups *mg, FILE *fp); 65 + void maps__put(struct maps *mg); 66 + void maps__init(struct maps *mg, struct machine *machine); 67 + void maps__exit(struct maps *mg); 68 + int maps__clone(struct thread *thread, struct maps *parent); 69 + size_t maps__fprintf(struct maps *mg, 
FILE *fp); 67 70 68 - void map_groups__insert(struct map_groups *mg, struct map *map); 71 + void maps__insert(struct maps *mg, struct map *map); 69 72 70 - void map_groups__remove(struct map_groups *mg, struct map *map); 73 + void maps__remove(struct maps *mg, struct map *map); 71 74 72 - static inline struct map *map_groups__find(struct map_groups *mg, u64 addr) 73 - { 74 - return maps__find(&mg->maps, addr); 75 - } 76 - 77 - #define map_groups__for_each_entry(mg, map) \ 78 - for (map = maps__first(&mg->maps); map; map = map__next(map)) 79 - 80 - #define map_groups__for_each_entry_safe(mg, map, next) \ 81 - for (map = maps__first(&mg->maps), next = map__next(map); map; map = next, next = map__next(map)) 82 - 83 - struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp); 84 - struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp); 75 + struct symbol *maps__find_symbol(struct maps *mg, u64 addr, struct map **mapp); 76 + struct symbol *maps__find_symbol_by_name(struct maps *mg, const char *name, struct map **mapp); 85 77 86 78 struct addr_map_symbol; 87 79 88 - int map_groups__find_ams(struct map_groups *mg, struct addr_map_symbol *ams); 80 + int maps__find_ams(struct maps *mg, struct addr_map_symbol *ams); 89 81 90 - int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp); 82 + int maps__fixup_overlappings(struct maps *mg, struct map *map, FILE *fp); 91 83 92 - struct map *map_groups__find_by_name(struct map_groups *mg, const char *name); 84 + struct map *maps__find_by_name(struct maps *mg, const char *name); 93 85 94 - int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map); 86 + int maps__merge_in(struct maps *kmaps, struct map *new_map); 95 87 96 - void __map_groups__sort_by_name(struct map_groups *mg); 88 + void __maps__sort_by_name(struct maps *mg); 97 89 98 90 #endif // __PERF_MAP_GROUPS_H
+2 -2
tools/perf/util/map_symbol.h
··· 4 4 5 5 #include <linux/types.h> 6 6 7 - struct map_groups; 7 + struct maps; 8 8 struct map; 9 9 struct symbol; 10 10 11 11 struct map_symbol { 12 - struct map_groups *mg; 12 + struct maps *mg; 13 13 struct map *map; 14 14 struct symbol *sym; 15 15 };
+1 -1
tools/perf/util/probe-event.c
··· 321 321 char module_name[128]; 322 322 323 323 snprintf(module_name, sizeof(module_name), "[%s]", module); 324 - map = map_groups__find_by_name(&host_machine->kmaps, module_name); 324 + map = maps__find_by_name(&host_machine->kmaps, module_name); 325 325 if (map) { 326 326 dso = map->dso; 327 327 goto found;
+7 -7
tools/perf/util/symbol-elf.c
··· 844 844 845 845 static int dso__process_kernel_symbol(struct dso *dso, struct map *map, 846 846 GElf_Sym *sym, GElf_Shdr *shdr, 847 - struct map_groups *kmaps, struct kmap *kmap, 847 + struct maps *kmaps, struct kmap *kmap, 848 848 struct dso **curr_dsop, struct map **curr_mapp, 849 849 const char *section_name, 850 850 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel) ··· 876 876 /* Ensure maps are correctly ordered */ 877 877 if (kmaps) { 878 878 map__get(map); 879 - map_groups__remove(kmaps, map); 880 - map_groups__insert(kmaps, map); 879 + maps__remove(kmaps, map); 880 + maps__insert(kmaps, map); 881 881 map__put(map); 882 882 } 883 883 } ··· 902 902 903 903 snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name); 904 904 905 - curr_map = map_groups__find_by_name(kmaps, dso_name); 905 + curr_map = maps__find_by_name(kmaps, dso_name); 906 906 if (curr_map == NULL) { 907 907 u64 start = sym->st_value; 908 908 ··· 928 928 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 929 929 } 930 930 curr_dso->symtab_type = dso->symtab_type; 931 - map_groups__insert(kmaps, curr_map); 931 + maps__insert(kmaps, curr_map); 932 932 /* 933 933 * Add it before we drop the referece to curr_map, i.e. while 934 934 * we still are sure to have a reference to this DSO via ··· 950 950 struct symsrc *runtime_ss, int kmodule) 951 951 { 952 952 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; 953 - struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL; 953 + struct maps *kmaps = kmap ? map__kmaps(map) : NULL; 954 954 struct map *curr_map = map; 955 955 struct dso *curr_dso = dso; 956 956 Elf_Data *symstrs, *secstrs; ··· 1162 1162 * We need to fixup this here too because we create new 1163 1163 * maps here, for things like vsyscall sections. 1164 1164 */ 1165 - map_groups__fixup_end(kmaps); 1165 + maps__fixup_end(kmaps); 1166 1166 } 1167 1167 } 1168 1168 err = nr;
+33 -34
tools/perf/util/symbol.c
··· 239 239 curr->end = roundup(curr->start, 4096) + 4096; 240 240 } 241 241 242 - void map_groups__fixup_end(struct map_groups *mg) 242 + void maps__fixup_end(struct maps *mg) 243 243 { 244 - struct maps *maps = &mg->maps; 244 + struct maps *maps = mg; 245 245 struct map *prev = NULL, *curr; 246 246 247 247 down_write(&maps->lock); ··· 698 698 return kallsyms__parse(filename, dso, map__process_kallsym_symbol); 699 699 } 700 700 701 - static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso) 701 + static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) 702 702 { 703 703 struct map *curr_map; 704 704 struct symbol *pos; ··· 724 724 if (module) 725 725 *module = '\0'; 726 726 727 - curr_map = map_groups__find(kmaps, pos->start); 727 + curr_map = maps__find(kmaps, pos->start); 728 728 729 729 if (!curr_map) { 730 730 symbol__delete(pos); ··· 751 751 * kernel range is broken in several maps, named [kernel].N, as we don't have 752 752 * the original ELF section names vmlinux have. 
753 753 */ 754 - static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta, 755 - struct map *initial_map) 754 + static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, 755 + struct map *initial_map) 756 756 { 757 757 struct machine *machine; 758 758 struct map *curr_map = initial_map; ··· 797 797 dso__set_loaded(curr_map->dso); 798 798 } 799 799 800 - curr_map = map_groups__find_by_name(kmaps, module); 800 + curr_map = maps__find_by_name(kmaps, module); 801 801 if (curr_map == NULL) { 802 802 pr_debug("%s/proc/{kallsyms,modules} " 803 803 "inconsistency while looking " ··· 864 864 } 865 865 866 866 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 867 - map_groups__insert(kmaps, curr_map); 867 + maps__insert(kmaps, curr_map); 868 868 ++kernel_range; 869 869 } else if (delta) { 870 870 /* Kernel was relocated at boot time */ ··· 1049 1049 return ret; 1050 1050 } 1051 1051 1052 - static int do_validate_kcore_modules(const char *filename, 1053 - struct map_groups *kmaps) 1052 + static int do_validate_kcore_modules(const char *filename, struct maps *kmaps) 1054 1053 { 1055 1054 struct rb_root modules = RB_ROOT; 1056 1055 struct map *old_map; ··· 1059 1060 if (err) 1060 1061 return err; 1061 1062 1062 - map_groups__for_each_entry(kmaps, old_map) { 1063 + maps__for_each_entry(kmaps, old_map) { 1063 1064 struct module_info *mi; 1064 1065 1065 1066 if (!__map__is_kmodule(old_map)) { ··· 1106 1107 static int validate_kcore_modules(const char *kallsyms_filename, 1107 1108 struct map *map) 1108 1109 { 1109 - struct map_groups *kmaps = map__kmaps(map); 1110 + struct maps *kmaps = map__kmaps(map); 1110 1111 char modules_filename[PATH_MAX]; 1111 1112 1112 1113 if (!kmaps) ··· 1166 1167 } 1167 1168 1168 1169 /* 1169 - * Merges map into map_groups by splitting the new map 1170 - * within the existing map regions. 1170 + * Merges map into maps by splitting the new map within the existing map 1171 + * regions. 
1171 1172 */ 1172 - int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) 1173 + int maps__merge_in(struct maps *kmaps, struct map *new_map) 1173 1174 { 1174 1175 struct map *old_map; 1175 1176 LIST_HEAD(merged); 1176 1177 1177 - map_groups__for_each_entry(kmaps, old_map) { 1178 + maps__for_each_entry(kmaps, old_map) { 1178 1179 /* no overload with this one */ 1179 1180 if (new_map->end < old_map->start || 1180 1181 new_map->start >= old_map->end) ··· 1231 1232 while (!list_empty(&merged)) { 1232 1233 old_map = list_entry(merged.next, struct map, node); 1233 1234 list_del_init(&old_map->node); 1234 - map_groups__insert(kmaps, old_map); 1235 + maps__insert(kmaps, old_map); 1235 1236 map__put(old_map); 1236 1237 } 1237 1238 1238 1239 if (new_map) { 1239 - map_groups__insert(kmaps, new_map); 1240 + maps__insert(kmaps, new_map); 1240 1241 map__put(new_map); 1241 1242 } 1242 1243 return 0; ··· 1245 1246 static int dso__load_kcore(struct dso *dso, struct map *map, 1246 1247 const char *kallsyms_filename) 1247 1248 { 1248 - struct map_groups *kmaps = map__kmaps(map); 1249 + struct maps *kmaps = map__kmaps(map); 1249 1250 struct kcore_mapfn_data md; 1250 1251 struct map *old_map, *new_map, *replacement_map = NULL, *next; 1251 1252 struct machine *machine; ··· 1294 1295 } 1295 1296 1296 1297 /* Remove old maps */ 1297 - map_groups__for_each_entry_safe(kmaps, old_map, next) { 1298 + maps__for_each_entry_safe(kmaps, old_map, next) { 1298 1299 /* 1299 1300 * We need to preserve eBPF maps even if they are 1300 1301 * covered by kcore, because we need to access 1301 1302 * eBPF dso for source data. 
1302 1303 */ 1303 1304 if (old_map != map && !__map__is_bpf_prog(old_map)) 1304 - map_groups__remove(kmaps, old_map); 1305 + maps__remove(kmaps, old_map); 1305 1306 } 1306 1307 machine->trampolines_mapped = false; 1307 1308 ··· 1330 1331 map->unmap_ip = new_map->unmap_ip; 1331 1332 /* Ensure maps are correctly ordered */ 1332 1333 map__get(map); 1333 - map_groups__remove(kmaps, map); 1334 - map_groups__insert(kmaps, map); 1334 + maps__remove(kmaps, map); 1335 + maps__insert(kmaps, map); 1335 1336 map__put(map); 1336 1337 map__put(new_map); 1337 1338 } else { ··· 1340 1341 * and ensure that current maps (eBPF) 1341 1342 * stay intact. 1342 1343 */ 1343 - if (map_groups__merge_in(kmaps, new_map)) 1344 + if (maps__merge_in(kmaps, new_map)) 1344 1345 goto out_err; 1345 1346 } 1346 1347 } ··· 1432 1433 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; 1433 1434 1434 1435 if (!no_kcore && !dso__load_kcore(dso, map, filename)) 1435 - return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso); 1436 + return maps__split_kallsyms_for_kcore(kmap->kmaps, dso); 1436 1437 else 1437 - return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map); 1438 + return maps__split_kallsyms(kmap->kmaps, dso, delta, map); 1438 1439 } 1439 1440 1440 1441 int dso__load_kallsyms(struct dso *dso, const char *filename, ··· 1771 1772 return strcmp(name, map->dso->short_name); 1772 1773 } 1773 1774 1774 - void __map_groups__sort_by_name(struct map_groups *mg) 1775 + void __maps__sort_by_name(struct maps *mg) 1775 1776 { 1776 1777 qsort(mg->maps_by_name, mg->nr_maps, sizeof(struct map *), map__strcmp); 1777 1778 } 1778 1779 1779 - static int map__groups__sort_by_name_from_rbtree(struct map_groups *mg) 1780 + static int map__groups__sort_by_name_from_rbtree(struct maps *mg) 1780 1781 { 1781 1782 struct map *map; 1782 1783 struct map **maps_by_name = realloc(mg->maps_by_name, mg->nr_maps * sizeof(map)); ··· 1788 1789 mg->maps_by_name = maps_by_name; 1789 1790 mg->nr_maps_allocated = mg->nr_maps; 
1790 1791 1791 - maps__for_each_entry(&mg->maps, map) 1792 + maps__for_each_entry(mg, map) 1792 1793 maps_by_name[i++] = map; 1793 1794 1794 - __map_groups__sort_by_name(mg); 1795 + __maps__sort_by_name(mg); 1795 1796 return 0; 1796 1797 } 1797 1798 1798 - static struct map *__map_groups__find_by_name(struct map_groups *mg, const char *name) 1799 + static struct map *__maps__find_by_name(struct maps *mg, const char *name) 1799 1800 { 1800 1801 struct map **mapp; 1801 1802 ··· 1809 1810 return NULL; 1810 1811 } 1811 1812 1812 - struct map *map_groups__find_by_name(struct map_groups *mg, const char *name) 1813 + struct map *maps__find_by_name(struct maps *mg, const char *name) 1813 1814 { 1814 - struct maps *maps = &mg->maps; 1815 + struct maps *maps = mg; 1815 1816 struct map *map; 1816 1817 1817 1818 down_read(&maps->lock); ··· 1825 1826 * as mg->maps_by_name mirrors the rbtree when lookups by name are 1826 1827 * made. 1827 1828 */ 1828 - map = __map_groups__find_by_name(mg, name); 1829 + map = __maps__find_by_name(mg, name); 1829 1830 if (map || mg->maps_by_name != NULL) 1830 1831 goto out_unlock; 1831 1832
+3 -3
tools/perf/util/symbol.h
··· 21 21 22 22 struct dso; 23 23 struct map; 24 - struct map_groups; 24 + struct maps; 25 25 struct option; 26 26 27 27 /* ··· 108 108 109 109 struct addr_location { 110 110 struct thread *thread; 111 - struct map_groups *mg; 111 + struct maps *mg; 112 112 struct map *map; 113 113 struct symbol *sym; 114 114 const char *srcline; ··· 186 186 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym); 187 187 void symbols__fixup_duplicate(struct rb_root_cached *symbols); 188 188 void symbols__fixup_end(struct rb_root_cached *symbols); 189 - void map_groups__fixup_end(struct map_groups *mg); 189 + void maps__fixup_end(struct maps *mg); 190 190 191 191 typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); 192 192 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+1 -1
tools/perf/util/synthetic-events.c
··· 493 493 494 494 /* 495 495 * send mmap only for thread group leader 496 - * see thread__init_map_groups 496 + * see thread__init_maps() 497 497 */ 498 498 if (pid == tgid && 499 499 perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+11 -13
tools/perf/util/thread.c
··· 19 19 20 20 #include <api/fs/fs.h> 21 21 22 - int thread__init_map_groups(struct thread *thread, struct machine *machine) 22 + int thread__init_maps(struct thread *thread, struct machine *machine) 23 23 { 24 24 pid_t pid = thread->pid_; 25 25 26 26 if (pid == thread->tid || pid == -1) { 27 - thread->mg = map_groups__new(machine); 27 + thread->mg = maps__new(machine); 28 28 } else { 29 29 struct thread *leader = __machine__findnew_thread(machine, pid, pid); 30 30 if (leader) { 31 - thread->mg = map_groups__get(leader->mg); 31 + thread->mg = maps__get(leader->mg); 32 32 thread__put(leader); 33 33 } 34 34 } ··· 87 87 thread_stack__free(thread); 88 88 89 89 if (thread->mg) { 90 - map_groups__put(thread->mg); 90 + maps__put(thread->mg); 91 91 thread->mg = NULL; 92 92 } 93 93 down_write(&thread->namespaces_lock); ··· 324 324 size_t thread__fprintf(struct thread *thread, FILE *fp) 325 325 { 326 326 return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) + 327 - map_groups__fprintf(thread->mg, fp); 327 + maps__fprintf(thread->mg, fp); 328 328 } 329 329 330 330 int thread__insert_map(struct thread *thread, struct map *map) ··· 335 335 if (ret) 336 336 return ret; 337 337 338 - map_groups__fixup_overlappings(thread->mg, map, stderr); 339 - map_groups__insert(thread->mg, map); 338 + maps__fixup_overlappings(thread->mg, map, stderr); 339 + maps__insert(thread->mg, map); 340 340 341 341 return 0; 342 342 } ··· 345 345 { 346 346 bool initialized = false; 347 347 int err = 0; 348 - struct maps *maps = &thread->mg->maps; 348 + struct maps *maps = thread->mg; 349 349 struct map *map; 350 350 351 351 down_read(&maps->lock); ··· 371 371 return err; 372 372 } 373 373 374 - static int thread__clone_map_groups(struct thread *thread, 375 - struct thread *parent, 376 - bool do_maps_clone) 374 + static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone) 377 375 { 378 376 /* This is new thread, we share map groups for process. 
*/ 379 377 if (thread->pid_ == parent->pid_) ··· 383 385 return 0; 384 386 } 385 387 /* But this one is new process, copy maps. */ 386 - return do_maps_clone ? map_groups__clone(thread, parent->mg) : 0; 388 + return do_maps_clone ? maps__clone(thread, parent->mg) : 0; 387 389 } 388 390 389 391 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone) ··· 399 401 } 400 402 401 403 thread->ppid = parent->tid; 402 - return thread__clone_map_groups(thread, parent, do_maps_clone); 404 + return thread__clone_maps(thread, parent, do_maps_clone); 403 405 } 404 406 405 407 void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
+2 -2
tools/perf/util/thread.h
··· 25 25 struct rb_node rb_node; 26 26 struct list_head node; 27 27 }; 28 - struct map_groups *mg; 28 + struct maps *mg; 29 29 pid_t pid_; /* Not all tools update this */ 30 30 pid_t tid; 31 31 pid_t ppid; ··· 53 53 struct comm; 54 54 55 55 struct thread *thread__new(pid_t pid, pid_t tid); 56 - int thread__init_map_groups(struct thread *thread, struct machine *machine); 56 + int thread__init_maps(struct thread *thread, struct machine *machine); 57 57 void thread__delete(struct thread *thread); 58 58 59 59 struct thread *thread__get(struct thread *thread);
+3 -3
tools/perf/util/unwind-libunwind-local.c
··· 616 616 .get_proc_name = get_proc_name, 617 617 }; 618 618 619 - static int _unwind__prepare_access(struct map_groups *mg) 619 + static int _unwind__prepare_access(struct maps *mg) 620 620 { 621 621 mg->addr_space = unw_create_addr_space(&accessors, 0); 622 622 if (!mg->addr_space) { ··· 628 628 return 0; 629 629 } 630 630 631 - static void _unwind__flush_access(struct map_groups *mg) 631 + static void _unwind__flush_access(struct maps *mg) 632 632 { 633 633 unw_flush_cache(mg->addr_space, 0, 0); 634 634 } 635 635 636 - static void _unwind__finish_access(struct map_groups *mg) 636 + static void _unwind__finish_access(struct maps *mg) 637 637 { 638 638 unw_destroy_addr_space(mg->addr_space); 639 639 }
+4 -6
tools/perf/util/unwind-libunwind.c
··· 12 12 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; 13 13 struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; 14 14 15 - static void unwind__register_ops(struct map_groups *mg, 16 - struct unwind_libunwind_ops *ops) 15 + static void unwind__register_ops(struct maps *mg, struct unwind_libunwind_ops *ops) 17 16 { 18 17 mg->unwind_libunwind_ops = ops; 19 18 } 20 19 21 - int unwind__prepare_access(struct map_groups *mg, struct map *map, 22 - bool *initialized) 20 + int unwind__prepare_access(struct maps *mg, struct map *map, bool *initialized) 23 21 { 24 22 const char *arch; 25 23 enum dso_type dso_type; ··· 66 68 return err; 67 69 } 68 70 69 - void unwind__flush_access(struct map_groups *mg) 71 + void unwind__flush_access(struct maps *mg) 70 72 { 71 73 if (mg->unwind_libunwind_ops) 72 74 mg->unwind_libunwind_ops->flush_access(mg); 73 75 } 74 76 75 - void unwind__finish_access(struct map_groups *mg) 77 + void unwind__finish_access(struct maps *mg) 76 78 { 77 79 if (mg->unwind_libunwind_ops) 78 80 mg->unwind_libunwind_ops->finish_access(mg);
+13 -14
tools/perf/util/unwind.h
··· 6 6 #include <linux/types.h> 7 7 #include "util/map_symbol.h" 8 8 9 - struct map_groups; 9 + struct maps; 10 10 struct perf_sample; 11 11 struct thread; 12 12 ··· 18 18 typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg); 19 19 20 20 struct unwind_libunwind_ops { 21 - int (*prepare_access)(struct map_groups *mg); 22 - void (*flush_access)(struct map_groups *mg); 23 - void (*finish_access)(struct map_groups *mg); 21 + int (*prepare_access)(struct maps *maps); 22 + void (*flush_access)(struct maps *maps); 23 + void (*finish_access)(struct maps *maps); 24 24 int (*get_entries)(unwind_entry_cb_t cb, void *arg, 25 25 struct thread *thread, 26 26 struct perf_sample *data, int max_stack); ··· 45 45 #endif 46 46 47 47 int LIBUNWIND__ARCH_REG_ID(int regnum); 48 - int unwind__prepare_access(struct map_groups *mg, struct map *map, 49 - bool *initialized); 50 - void unwind__flush_access(struct map_groups *mg); 51 - void unwind__finish_access(struct map_groups *mg); 48 + int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized); 49 + void unwind__flush_access(struct maps *maps); 50 + void unwind__finish_access(struct maps *maps); 52 51 #else 53 - static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused, 52 + static inline int unwind__prepare_access(struct maps *maps __maybe_unused, 54 53 struct map *map __maybe_unused, 55 54 bool *initialized __maybe_unused) 56 55 { 57 56 return 0; 58 57 } 59 58 60 - static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {} 61 - static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {} 59 + static inline void unwind__flush_access(struct maps *maps __maybe_unused) {} 60 + static inline void unwind__finish_access(struct maps *maps __maybe_unused) {} 62 61 #endif 63 62 #else 64 63 static inline int ··· 70 71 return 0; 71 72 } 72 73 73 - static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused, 74 + static inline 
int unwind__prepare_access(struct maps *maps __maybe_unused, 74 75 struct map *map __maybe_unused, 75 76 bool *initialized __maybe_unused) 76 77 { 77 78 return 0; 78 79 } 79 80 80 - static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {} 81 - static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {} 81 + static inline void unwind__flush_access(struct maps *maps __maybe_unused) {} 82 + static inline void unwind__finish_access(struct maps *maps __maybe_unused) {} 82 83 #endif /* HAVE_DWARF_UNWIND_SUPPORT */ 83 84 #endif /* __UNWIND_H */
+1 -1
tools/perf/util/vdso.c
··· 144 144 enum dso_type dso_type = DSO__TYPE_UNKNOWN; 145 145 struct map *map; 146 146 147 - map_groups__for_each_entry(thread->mg, map) { 147 + maps__for_each_entry(thread->mg, map) { 148 148 struct dso *dso = map->dso; 149 149 if (!dso || dso->long_name[0] != '/') 150 150 continue;