Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf unwind: Call unwind__prepare_access for forked thread

Currently we call unwind__prepare_access for map event. In case we
report fork event the thread inherits its parent's maps and
unwind__prepare_access is never called for the thread.

This causes unwind__get_entries to see uninitialized
unwind_libunwind_ops and thus return no callchain.

Adding unwind__prepare_access calls for fork event processing.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1467634583-29147-5-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Jiri Olsa and committed by
Arnaldo Carvalho de Melo
6c502584 a2873325

+44 -4
+8 -1
tools/perf/util/map.c
··· 15 15 #include "debug.h" 16 16 #include "machine.h" 17 17 #include <linux/string.h> 18 + #include "unwind.h" 18 19 19 20 static void __maps__insert(struct maps *maps, struct map *map); 20 21 ··· 745 744 /* 746 745 * XXX This should not really _copy_ te maps, but refcount them. 747 746 */ 748 - int map_groups__clone(struct map_groups *mg, 747 + int map_groups__clone(struct thread *thread, 749 748 struct map_groups *parent, enum map_type type) 750 749 { 750 + struct map_groups *mg = thread->mg; 751 751 int err = -ENOMEM; 752 752 struct map *map; 753 753 struct maps *maps = &parent->maps[type]; ··· 759 757 struct map *new = map__clone(map); 760 758 if (new == NULL) 761 759 goto out_unlock; 760 + 761 + err = unwind__prepare_access(thread, new, NULL); 762 + if (err) 763 + goto out_unlock; 764 + 762 765 map_groups__insert(mg, new); 763 766 map__put(new); 764 767 }
+1 -1
tools/perf/util/map.h
··· 194 194 struct map **mapp, symbol_filter_t filter); 195 195 void map_groups__init(struct map_groups *mg, struct machine *machine); 196 196 void map_groups__exit(struct map_groups *mg); 197 - int map_groups__clone(struct map_groups *mg, 197 + int map_groups__clone(struct thread *thread, 198 198 struct map_groups *parent, enum map_type type); 199 199 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp); 200 200
+35 -2
tools/perf/util/thread.c
··· 212 212 return 0; 213 213 } 214 214 215 + static int __thread__prepare_access(struct thread *thread) 216 + { 217 + bool initialized = false; 218 + int i, err = 0; 219 + 220 + for (i = 0; i < MAP__NR_TYPES; ++i) { 221 + struct maps *maps = &thread->mg->maps[i]; 222 + struct map *map; 223 + 224 + pthread_rwlock_rdlock(&maps->lock); 225 + 226 + for (map = maps__first(maps); map; map = map__next(map)) { 227 + err = unwind__prepare_access(thread, map, &initialized); 228 + if (err || initialized) 229 + break; 230 + } 231 + 232 + pthread_rwlock_unlock(&maps->lock); 233 + } 234 + 235 + return err; 236 + } 237 + 238 + static int thread__prepare_access(struct thread *thread) 239 + { 240 + int err = 0; 241 + 242 + if (symbol_conf.use_callchain) 243 + err = __thread__prepare_access(thread); 244 + 245 + return err; 246 + } 247 + 215 248 static int thread__clone_map_groups(struct thread *thread, 216 249 struct thread *parent) 217 250 { ··· 252 219 253 220 /* This is new thread, we share map groups for process. */ 254 221 if (thread->pid_ == parent->pid_) 255 - return 0; 222 + return thread__prepare_access(thread); 256 223 257 224 if (thread->mg == parent->mg) { 258 225 pr_debug("broken map groups on thread %d/%d parent %d/%d\n", ··· 262 229 263 230 /* But this one is new process, copy maps. */ 264 231 for (i = 0; i < MAP__NR_TYPES; ++i) 265 - if (map_groups__clone(thread->mg, parent->mg, i) < 0) 232 + if (map_groups__clone(thread, parent->mg, i) < 0) 266 233 return -ENOMEM; 267 234 268 235 return 0;