Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf env: Avoid recursively taking env->bpf_progs.lock

Add variants of perf_env__insert_bpf_prog_info(), perf_env__insert_btf()
and perf_env__find_btf() prefixed with __ to indicate the
env->bpf_progs.lock is assumed held.

Call these variants when the lock is held to avoid recursively taking it
and potentially having a thread deadlock with itself.

Fixes: f8dfeae009effc0b ("perf bpf: Show more BPF program info in print_bpf_prog_info()")
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Song Liu <song@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20231207014655.1252484-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Ian Rogers and committed by
Arnaldo Carvalho de Melo
9c51f878 58824fa0

+50 -32
+4 -4
tools/perf/util/bpf-event.c
··· 545 545 return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env); 546 546 } 547 547 548 - void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, 549 - struct perf_env *env, 550 - FILE *fp) 548 + void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, 549 + struct perf_env *env, 550 + FILE *fp) 551 551 { 552 552 __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens); 553 553 __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms); ··· 563 563 if (info->btf_id) { 564 564 struct btf_node *node; 565 565 566 - node = perf_env__find_btf(env, info->btf_id); 566 + node = __perf_env__find_btf(env, info->btf_id); 567 567 if (node) 568 568 btf = btf__new((__u8 *)(node->data), 569 569 node->data_size);
+6 -6
tools/perf/util/bpf-event.h
··· 33 33 int machine__process_bpf(struct machine *machine, union perf_event *event, 34 34 struct perf_sample *sample); 35 35 int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env); 36 - void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, 37 - struct perf_env *env, 38 - FILE *fp); 36 + void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, 37 + struct perf_env *env, 38 + FILE *fp); 39 39 #else 40 40 static inline int machine__process_bpf(struct machine *machine __maybe_unused, 41 41 union perf_event *event __maybe_unused, ··· 50 50 return 0; 51 51 } 52 52 53 - static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused, 54 - struct perf_env *env __maybe_unused, 55 - FILE *fp __maybe_unused) 53 + static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused, 54 + struct perf_env *env __maybe_unused, 55 + FILE *fp __maybe_unused) 56 56 { 57 57 58 58 }
+32 -18
tools/perf/util/env.c
··· 25 25 void perf_env__insert_bpf_prog_info(struct perf_env *env, 26 26 struct bpf_prog_info_node *info_node) 27 27 { 28 + down_write(&env->bpf_progs.lock); 29 + __perf_env__insert_bpf_prog_info(env, info_node); 30 + up_write(&env->bpf_progs.lock); 31 + } 32 + 33 + void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node) 34 + { 28 35 __u32 prog_id = info_node->info_linear->info.id; 29 36 struct bpf_prog_info_node *node; 30 37 struct rb_node *parent = NULL; 31 38 struct rb_node **p; 32 39 33 - down_write(&env->bpf_progs.lock); 34 40 p = &env->bpf_progs.infos.rb_node; 35 41 36 42 while (*p != NULL) { ··· 48 42 p = &(*p)->rb_right; 49 43 } else { 50 44 pr_debug("duplicated bpf prog info %u\n", prog_id); 51 - goto out; 45 + return; 52 46 } 53 47 } 54 48 55 49 rb_link_node(&info_node->rb_node, parent, p); 56 50 rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos); 57 51 env->bpf_progs.infos_cnt++; 58 - out: 59 - up_write(&env->bpf_progs.lock); 60 52 } 61 53 62 54 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, ··· 84 80 85 81 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node) 86 82 { 83 + bool ret; 84 + 85 + down_write(&env->bpf_progs.lock); 86 + ret = __perf_env__insert_btf(env, btf_node); 87 + up_write(&env->bpf_progs.lock); 88 + return ret; 89 + } 90 + 91 + bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node) 92 + { 87 93 struct rb_node *parent = NULL; 88 94 __u32 btf_id = btf_node->id; 89 95 struct btf_node *node; 90 96 struct rb_node **p; 91 - bool ret = true; 92 97 93 - down_write(&env->bpf_progs.lock); 94 98 p = &env->bpf_progs.btfs.rb_node; 95 99 96 100 while (*p != NULL) { ··· 110 98 p = &(*p)->rb_right; 111 99 } else { 112 100 pr_debug("duplicated btf %u\n", btf_id); 113 - ret = false; 114 - goto out; 101 + return false; 115 102 } 116 103 } 117 104 118 105 rb_link_node(&btf_node->rb_node, parent, p); 119 106 
rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs); 120 107 env->bpf_progs.btfs_cnt++; 121 - out: 122 - up_write(&env->bpf_progs.lock); 123 - return ret; 108 + return true; 124 109 } 125 110 126 111 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id) 127 112 { 113 + struct btf_node *res; 114 + 115 + down_read(&env->bpf_progs.lock); 116 + res = __perf_env__find_btf(env, btf_id); 117 + up_read(&env->bpf_progs.lock); 118 + return res; 119 + } 120 + 121 + struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id) 122 + { 128 123 struct btf_node *node = NULL; 129 124 struct rb_node *n; 130 125 131 - down_read(&env->bpf_progs.lock); 132 126 n = env->bpf_progs.btfs.rb_node; 133 127 134 128 while (n) { ··· 144 126 else if (btf_id > node->id) 145 127 n = n->rb_right; 146 128 else 147 - goto out; 129 + return node; 148 130 } 149 - node = NULL; 150 - 151 - out: 152 - up_read(&env->bpf_progs.lock); 153 - return node; 131 + return NULL; 154 132 } 155 133 156 134 /* purge data in bpf_progs.infos tree */
+4
tools/perf/util/env.h
··· 175 175 int perf_env__nr_cpus_avail(struct perf_env *env); 176 176 177 177 void perf_env__init(struct perf_env *env); 178 + void __perf_env__insert_bpf_prog_info(struct perf_env *env, 179 + struct bpf_prog_info_node *info_node); 178 180 void perf_env__insert_bpf_prog_info(struct perf_env *env, 179 181 struct bpf_prog_info_node *info_node); 180 182 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, 181 183 __u32 prog_id); 182 184 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node); 185 + bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node); 183 186 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id); 187 + struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id); 184 188 185 189 int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu); 186 190 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
+4 -4
tools/perf/util/header.c
··· 1849 1849 node = rb_entry(next, struct bpf_prog_info_node, rb_node); 1850 1850 next = rb_next(&node->rb_node); 1851 1851 1852 - bpf_event__print_bpf_prog_info(&node->info_linear->info, 1853 - env, fp); 1852 + __bpf_event__print_bpf_prog_info(&node->info_linear->info, 1853 + env, fp); 1854 1854 } 1855 1855 1856 1856 up_read(&env->bpf_progs.lock); ··· 3188 3188 /* after reading from file, translate offset to address */ 3189 3189 bpil_offs_to_addr(info_linear); 3190 3190 info_node->info_linear = info_linear; 3191 - perf_env__insert_bpf_prog_info(env, info_node); 3191 + __perf_env__insert_bpf_prog_info(env, info_node); 3192 3192 } 3193 3193 3194 3194 up_write(&env->bpf_progs.lock); ··· 3235 3235 if (__do_read(ff, node->data, data_size)) 3236 3236 goto out; 3237 3237 3238 - perf_env__insert_btf(env, node); 3238 + __perf_env__insert_btf(env, node); 3239 3239 node = NULL; 3240 3240 } 3241 3241