Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf maps: Get map before returning in maps__find

Finding a map is done under a lock, but returning the map without a
reference count means it can be removed without notice, causing
use-after-free bugs. Grab a reference count on the map within the
locked region and return that. Fix up locations that need a map__put
following this.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: James Clark <james.clark@arm.com>
Cc: Vincent Whitchurch <vincent.whitchurch@axis.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Artem Savkov <asavkov@redhat.com>
Cc: bpf@vger.kernel.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240210031746.4057262-3-irogers@google.com

authored by

Ian Rogers and committed by
Namhyung Kim
42fd623b 659ad349

+26 -27
+1
tools/perf/arch/x86/tests/dwarf-unwind.c
··· 34 34 } 35 35 36 36 stack_size = map__end(map) - sp; 37 + map__put(map); 37 38 stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size; 38 39 39 40 memcpy(buf, (void *) sp, stack_size);
+2 -3
tools/perf/tests/vmlinux-kallsyms.c
··· 151 151 u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map)); 152 152 153 153 pair = maps__find(args->kallsyms.kmaps, mem_start); 154 - if (pair == NULL || map__priv(pair)) 155 - return 0; 156 154 157 - if (map__start(pair) == mem_start) { 155 + if (pair != NULL && !map__priv(pair) && map__start(pair) == mem_start) { 158 156 struct dso *dso = map__dso(map); 159 157 160 158 if (!args->header_printed) { ··· 168 170 pr_info(" %s\n", dso->name); 169 171 map__set_priv(pair, 1); 170 172 } 173 + map__put(pair); 171 174 return 0; 172 175 } 173 176
+1
tools/perf/util/bpf-event.c
··· 63 63 dso->bpf_prog.id = id; 64 64 dso->bpf_prog.sub_id = i; 65 65 dso->bpf_prog.env = env; 66 + map__put(map); 66 67 } 67 68 } 68 69 return 0;
+2 -2
tools/perf/util/event.c
··· 511 511 struct addr_location al; 512 512 513 513 addr_location__init(&al); 514 - al.map = map__get(maps__find(machine__kernel_maps(machine), tp->addr)); 514 + al.map = maps__find(machine__kernel_maps(machine), tp->addr); 515 515 if (al.map && map__load(al.map) >= 0) { 516 516 al.addr = map__map_ip(al.map, tp->addr); 517 517 al.sym = map__find_symbol(al.map, al.addr); ··· 641 641 return NULL; 642 642 } 643 643 al->maps = maps__get(maps); 644 - al->map = map__get(maps__find(maps, al->addr)); 644 + al->map = maps__find(maps, al->addr); 645 645 if (al->map != NULL) { 646 646 /* 647 647 * Kernel maps might be changed when loading symbols so loading
+8 -14
tools/perf/util/machine.c
··· 896 896 struct symbol *sym; 897 897 struct dso *dso; 898 898 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); 899 - bool put_map = false; 900 899 int err = 0; 901 900 902 901 if (!map) { ··· 912 913 err = -ENOMEM; 913 914 goto out; 914 915 } 915 - /* 916 - * The inserted map has a get on it, we need to put to release 917 - * the reference count here, but do it after all accesses are 918 - * done. 919 - */ 920 - put_map = true; 921 916 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) { 922 917 dso->binary_type = DSO_BINARY_TYPE__OOL; 923 918 dso->data.file_size = event->ksymbol.len; ··· 945 952 } 946 953 dso__insert_symbol(dso, sym); 947 954 out: 948 - if (put_map) 949 - map__put(map); 955 + map__put(map); 950 956 return err; 951 957 } 952 958 ··· 969 977 if (sym) 970 978 dso__delete_symbol(dso, sym); 971 979 } 972 - 980 + map__put(map); 973 981 return 0; 974 982 } 975 983 ··· 997 1005 perf_event__fprintf_text_poke(event, machine, stdout); 998 1006 999 1007 if (!event->text_poke.new_len) 1000 - return 0; 1008 + goto out; 1001 1009 1002 1010 if (cpumode != PERF_RECORD_MISC_KERNEL) { 1003 1011 pr_debug("%s: unsupported cpumode - ignoring\n", __func__); 1004 - return 0; 1012 + goto out; 1005 1013 } 1006 1014 1007 1015 if (dso) { ··· 1024 1032 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n", 1025 1033 event->text_poke.addr); 1026 1034 } 1027 - 1035 + out: 1036 + map__put(map); 1028 1037 return 0; 1029 1038 } 1030 1039 ··· 1293 1300 return 0; 1294 1301 1295 1302 dest_map = maps__find(args->kmaps, map__pgoff(map)); 1296 - if (dest_map != map) 1303 + if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map)) 1297 1304 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); 1298 1305 1306 + map__put(dest_map); 1299 1307 args->found = true; 1300 1308 return 0; 1301 1309 }
+10 -7
tools/perf/util/maps.c
··· 506 506 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) 507 507 { 508 508 struct map *map = maps__find(maps, addr); 509 + struct symbol *result = NULL; 509 510 510 511 /* Ensure map is loaded before using map->map_ip */ 511 512 if (map != NULL && map__load(map) >= 0) { 512 - if (mapp != NULL) 513 - *mapp = map; // TODO: map_put on else path when find returns a get. 514 - return map__find_symbol(map, map__map_ip(map, addr)); 515 - } 513 + if (mapp) 514 + *mapp = map; 516 515 517 - return NULL; 516 + result = map__find_symbol(map, map__map_ip(map, addr)); 517 + if (!mapp) 518 + map__put(map); 519 + } 520 + return result; 518 521 } 519 522 520 523 struct maps__find_symbol_by_name_args { ··· 561 558 if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) { 562 559 if (maps == NULL) 563 560 return -1; 564 - ams->ms.map = maps__find(maps, ams->addr); // TODO: map_get 561 + ams->ms.map = maps__find(maps, ams->addr); 565 562 if (ams->ms.map == NULL) 566 563 return -1; 567 564 } ··· 871 868 sizeof(*mapp), map__addr_cmp); 872 869 873 870 if (mapp) 874 - result = *mapp; // map__get(*mapp); 871 + result = map__get(*mapp); 875 872 done = true; 876 873 } 877 874 up_read(maps__lock(maps));
+2 -1
tools/perf/util/symbol.c
··· 757 757 758 758 static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) 759 759 { 760 - struct map *curr_map; 761 760 struct symbol *pos; 762 761 int count = 0; 763 762 struct rb_root_cached old_root = dso->symbols; ··· 769 770 *root = RB_ROOT_CACHED; 770 771 771 772 while (next) { 773 + struct map *curr_map; 772 774 struct dso *curr_map_dso; 773 775 char *module; 774 776 ··· 796 796 pos->end -= map__start(curr_map) - map__pgoff(curr_map); 797 797 symbols__insert(&curr_map_dso->symbols, pos); 798 798 ++count; 799 + map__put(curr_map); 799 800 } 800 801 801 802 /* Symbols have been adjusted */