Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf maps: Hide maps internals

Move the struct into the C file. Add maps__equal to work around
exposing the struct for reference count checking. Add accessors for
the unwind_libunwind_ops. Move maps_list_node to its only use in
symbol.c.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: James Clark <james.clark@arm.com>
Cc: Vincent Whitchurch <vincent.whitchurch@axis.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Artem Savkov <asavkov@redhat.com>
Cc: bpf@vger.kernel.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240210031746.4057262-6-irogers@google.com

Authored by Ian Rogers and committed by Namhyung Kim
ff0bd799 39a27325

+124 -102
+4 -4
tools/perf/tests/thread-maps-share.c
··· 46 46 TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4); 47 47 48 48 /* test the maps pointer is shared */ 49 - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t1))); 50 - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t2))); 51 - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t3))); 49 + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t1))); 50 + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t2))); 51 + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t3))); 52 52 53 53 /* 54 54 * Verify the other leader was created by previous call. ··· 73 73 other_maps = thread__maps(other); 74 74 TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2); 75 75 76 - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(other_maps, thread__maps(other_leader))); 76 + TEST_ASSERT_VAL("maps don't match", maps__equal(other_maps, thread__maps(other_leader))); 77 77 78 78 /* release thread group */ 79 79 thread__put(t3);
+1 -1
tools/perf/util/callchain.c
··· 1157 1157 if (al->map == NULL) 1158 1158 goto out; 1159 1159 } 1160 - if (RC_CHK_EQUAL(al->maps, machine__kernel_maps(machine))) { 1160 + if (maps__equal(al->maps, machine__kernel_maps(machine))) { 1161 1161 if (machine__is_host(machine)) { 1162 1162 al->cpumode = PERF_RECORD_MISC_KERNEL; 1163 1163 al->level = 'k';
+96
tools/perf/util/maps.c
··· 6 6 #include "dso.h" 7 7 #include "map.h" 8 8 #include "maps.h" 9 + #include "rwsem.h" 9 10 #include "thread.h" 10 11 #include "ui/ui.h" 11 12 #include "unwind.h" 13 + #include <internal/rc_check.h> 14 + 15 + /* 16 + * Locking/sorting note: 17 + * 18 + * Sorting is done with the write lock, iteration and binary searching happens 19 + * under the read lock requiring being sorted. There is a race between sorting 20 + * releasing the write lock and acquiring the read lock for iteration/searching 21 + * where another thread could insert and break the sorting of the maps. In 22 + * practice inserting maps should be rare meaning that the race shouldn't lead 23 + * to live lock. Removal of maps doesn't break being sorted. 24 + */ 25 + 26 + DECLARE_RC_STRUCT(maps) { 27 + struct rw_semaphore lock; 28 + /** 29 + * @maps_by_address: array of maps sorted by their starting address if 30 + * maps_by_address_sorted is true. 31 + */ 32 + struct map **maps_by_address; 33 + /** 34 + * @maps_by_name: optional array of maps sorted by their dso name if 35 + * maps_by_name_sorted is true. 36 + */ 37 + struct map **maps_by_name; 38 + struct machine *machine; 39 + #ifdef HAVE_LIBUNWIND_SUPPORT 40 + void *addr_space; 41 + const struct unwind_libunwind_ops *unwind_libunwind_ops; 42 + #endif 43 + refcount_t refcnt; 44 + /** 45 + * @nr_maps: number of maps_by_address, and possibly maps_by_name, 46 + * entries that contain maps. 47 + */ 48 + unsigned int nr_maps; 49 + /** 50 + * @nr_maps_allocated: number of entries in maps_by_address and possibly 51 + * maps_by_name. 52 + */ 53 + unsigned int nr_maps_allocated; 54 + /** 55 + * @last_search_by_name_idx: cache of last found by name entry's index 56 + * as frequent searches for the same dso name are common. 57 + */ 58 + unsigned int last_search_by_name_idx; 59 + /** @maps_by_address_sorted: is maps_by_address sorted. */ 60 + bool maps_by_address_sorted; 61 + /** @maps_by_name_sorted: is maps_by_name sorted. */ 62 + bool maps_by_name_sorted; 63 + /** @ends_broken: does the map contain a map where end values are unset/unsorted? */ 64 + bool ends_broken; 65 + }; 12 66 13 67 static void check_invariants(const struct maps *maps __maybe_unused) 14 68 { ··· 171 117 { 172 118 RC_CHK_ACCESS(maps)->maps_by_name_sorted = value; 173 119 } 120 + 121 + struct machine *maps__machine(const struct maps *maps) 122 + { 123 + return RC_CHK_ACCESS(maps)->machine; 124 + } 125 + 126 + unsigned int maps__nr_maps(const struct maps *maps) 127 + { 128 + return RC_CHK_ACCESS(maps)->nr_maps; 129 + } 130 + 131 + refcount_t *maps__refcnt(struct maps *maps) 132 + { 133 + return &RC_CHK_ACCESS(maps)->refcnt; 134 + } 135 + 136 + #ifdef HAVE_LIBUNWIND_SUPPORT 137 + void *maps__addr_space(const struct maps *maps) 138 + { 139 + return RC_CHK_ACCESS(maps)->addr_space; 140 + } 141 + 142 + void maps__set_addr_space(struct maps *maps, void *addr_space) 143 + { 144 + RC_CHK_ACCESS(maps)->addr_space = addr_space; 145 + } 146 + 147 + const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps) 148 + { 149 + return RC_CHK_ACCESS(maps)->unwind_libunwind_ops; 150 + } 151 + 152 + void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops) 153 + { 154 + RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops; 155 + } 156 + #endif 174 157 175 158 static struct rw_semaphore *maps__lock(struct maps *maps) 176 159 { ··· 542 451 bool maps__empty(struct maps *maps) 543 452 { 544 453 return maps__nr_maps(maps) == 0; 454 + } 455 + 456 + bool maps__equal(struct maps *a, struct maps *b) 457 + { 458 + return RC_CHK_EQUAL(a, b); 545 459 } 546 460 547 461 int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
+9 -88
tools/perf/util/maps.h
··· 3 3 #define __PERF_MAPS_H 4 4 5 5 #include <linux/refcount.h> 6 - #include <linux/rbtree.h> 7 6 #include <stdio.h> 8 7 #include <stdbool.h> 9 8 #include <linux/types.h> 10 - #include "rwsem.h" 11 - #include <internal/rc_check.h> 12 9 13 10 struct ref_reloc_sym; 14 11 struct machine; 15 12 struct map; 16 13 struct maps; 17 - 18 - struct map_list_node { 19 - struct list_head node; 20 - struct map *map; 21 - }; 22 - 23 - static inline struct map_list_node *map_list_node__new(void) 24 - { 25 - return malloc(sizeof(struct map_list_node)); 26 - } 27 - 28 - /* 29 - * Locking/sorting note: 30 - * 31 - * Sorting is done with the write lock, iteration and binary searching happens 32 - * under the read lock requiring being sorted. There is a race between sorting 33 - * releasing the write lock and acquiring the read lock for iteration/searching 34 - * where another thread could insert and break the sorting of the maps. In 35 - * practice inserting maps should be rare meaning that the race shouldn't lead 36 - * to live lock. Removal of maps doesn't break being sorted. 37 - */ 38 - 39 - DECLARE_RC_STRUCT(maps) { 40 - struct rw_semaphore lock; 41 - /** 42 - * @maps_by_address: array of maps sorted by their starting address if 43 - * maps_by_address_sorted is true. 44 - */ 45 - struct map **maps_by_address; 46 - /** 47 - * @maps_by_name: optional array of maps sorted by their dso name if 48 - * maps_by_name_sorted is true. 49 - */ 50 - struct map **maps_by_name; 51 - struct machine *machine; 52 - #ifdef HAVE_LIBUNWIND_SUPPORT 53 - void *addr_space; 54 - const struct unwind_libunwind_ops *unwind_libunwind_ops; 55 - #endif 56 - refcount_t refcnt; 57 - /** 58 - * @nr_maps: number of maps_by_address, and possibly maps_by_name, 59 - * entries that contain maps. 60 - */ 61 - unsigned int nr_maps; 62 - /** 63 - * @nr_maps_allocated: number of entries in maps_by_address and possibly 64 - * maps_by_name. 65 - */ 66 - unsigned int nr_maps_allocated; 67 - /** 68 - * @last_search_by_name_idx: cache of last found by name entry's index 69 - * as frequent searches for the same dso name are common. 70 - */ 71 - unsigned int last_search_by_name_idx; 72 - /** @maps_by_address_sorted: is maps_by_address sorted. */ 73 - bool maps_by_address_sorted; 74 - /** @maps_by_name_sorted: is maps_by_name sorted. */ 75 - bool maps_by_name_sorted; 76 - /** @ends_broken: does the map contain a map where end values are unset/unsorted? */ 77 - bool ends_broken; 78 - }; 79 14 80 15 #define KMAP_NAME_LEN 256 81 16 ··· 35 100 36 101 #define maps__zput(map) __maps__zput(&map) 37 102 103 + bool maps__equal(struct maps *a, struct maps *b); 104 + 38 105 /* Iterate over map calling cb for each entry. */ 39 106 int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data); 40 107 /* Iterate over map removing an entry if cb returns true. */ 41 108 void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data); 42 109 43 - static inline struct machine *maps__machine(struct maps *maps) 44 - { 45 - return RC_CHK_ACCESS(maps)->machine; 46 - } 47 - 48 - static inline unsigned int maps__nr_maps(const struct maps *maps) 49 - { 50 - return RC_CHK_ACCESS(maps)->nr_maps; 51 - } 52 - 53 - static inline refcount_t *maps__refcnt(struct maps *maps) 54 - { 55 - return &RC_CHK_ACCESS(maps)->refcnt; 56 - } 110 + struct machine *maps__machine(const struct maps *maps); 111 + unsigned int maps__nr_maps(const struct maps *maps); 112 + refcount_t *maps__refcnt(struct maps *maps); 57 113 58 114 #ifdef HAVE_LIBUNWIND_SUPPORT 59 - static inline void *maps__addr_space(struct maps *maps) 60 - { 61 - return RC_CHK_ACCESS(maps)->addr_space; 62 - } 63 - 64 - static inline const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps) 65 - { 66 - return RC_CHK_ACCESS(maps)->unwind_libunwind_ops; 67 - } 115 + void *maps__addr_space(const struct maps *maps); 116 + void maps__set_addr_space(struct maps *maps, void *addr_space); 117 + const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps); 118 + void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops); 68 119 #endif 69 120 70 121 size_t maps__fprintf(struct maps *maps, FILE *fp);
+10
tools/perf/util/symbol.c
··· 63 63 .res_sample = 0, 64 64 }; 65 65 66 + struct map_list_node { 67 + struct list_head node; 68 + struct map *map; 69 + }; 70 + 71 + static struct map_list_node *map_list_node__new(void) 72 + { 73 + return malloc(sizeof(struct map_list_node)); 74 + } 75 + 66 76 static enum dso_binary_type binary_type_symtab[] = { 67 77 DSO_BINARY_TYPE__KALLSYMS, 68 78 DSO_BINARY_TYPE__GUEST_KALLSYMS,
+1 -1
tools/perf/util/thread.c
··· 383 383 if (thread__pid(thread) == thread__pid(parent)) 384 384 return thread__prepare_access(thread); 385 385 386 - if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) { 386 + if (maps__equal(thread__maps(thread), thread__maps(parent))) { 387 387 pr_debug("broken map groups on thread %d/%d parent %d/%d\n", 388 388 thread__pid(thread), thread__tid(thread), 389 389 thread__pid(parent), thread__tid(parent));
+1 -1
tools/perf/util/unwind-libdw.c
··· 263 263 struct unwind_info *ui, ui_buf = { 264 264 .sample = data, 265 265 .thread = thread, 266 - .machine = RC_CHK_ACCESS(thread__maps(thread))->machine, 266 + .machine = maps__machine((thread__maps(thread))), 267 267 .cb = cb, 268 268 .arg = arg, 269 269 .max_stack = max_stack,
+1 -1
tools/perf/util/unwind-libunwind-local.c
··· 706 706 { 707 707 void *addr_space = unw_create_addr_space(&accessors, 0); 708 708 709 - RC_CHK_ACCESS(maps)->addr_space = addr_space; 709 + maps__set_addr_space(maps, addr_space); 710 710 if (!addr_space) { 711 711 pr_err("unwind: Can't create unwind address space.\n"); 712 712 return -ENOMEM;
+1 -6
tools/perf/util/unwind-libunwind.c
··· 12 12 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; 13 13 struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; 14 14 15 - static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops) 16 - { 17 - RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops; 18 - } 19 - 20 15 int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized) 21 16 { 22 17 const char *arch; ··· 55 60 return 0; 56 61 } 57 62 out_register: 58 - unwind__register_ops(maps, ops); 63 + maps__set_unwind_libunwind_ops(maps, ops); 59 64 60 65 err = maps__unwind_libunwind_ops(maps)->prepare_access(maps); 61 66 if (initialized)