Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

machine: Adopt is_lock_function() from builtin-lock.c

It is used in bpf_lock_contention.c and builtin-lock.c will be made
CONFIG_LIBTRACEEVENT=y conditional, so move it to machine.c, that is
always available.

This also moves those 4 global variables for the sched and lock text start
and end addresses into 'struct machine', as conceivably we can have that
info for several machine instances, say in some 'perf diff' like tool.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+48 -59
+2 -56
tools/perf/builtin-lock.c
··· 67 67 LOCK_AGGR_CALLER, 68 68 } aggr_mode = LOCK_AGGR_ADDR; 69 69 70 - static u64 sched_text_start; 71 - static u64 sched_text_end; 72 - static u64 lock_text_start; 73 - static u64 lock_text_end; 74 - 75 70 static struct thread_stat *thread_stat_find(u32 tid) 76 71 { 77 72 struct rb_node *node; ··· 849 854 return 0; 850 855 } 851 856 852 - bool is_lock_function(struct machine *machine, u64 addr) 853 - { 854 - if (!sched_text_start) { 855 - struct map *kmap; 856 - struct symbol *sym; 857 - 858 - sym = machine__find_kernel_symbol_by_name(machine, 859 - "__sched_text_start", 860 - &kmap); 861 - if (!sym) { 862 - /* to avoid retry */ 863 - sched_text_start = 1; 864 - return false; 865 - } 866 - 867 - sched_text_start = kmap->unmap_ip(kmap, sym->start); 868 - 869 - /* should not fail from here */ 870 - sym = machine__find_kernel_symbol_by_name(machine, 871 - "__sched_text_end", 872 - &kmap); 873 - sched_text_end = kmap->unmap_ip(kmap, sym->start); 874 - 875 - sym = machine__find_kernel_symbol_by_name(machine, 876 - "__lock_text_start", 877 - &kmap); 878 - lock_text_start = kmap->unmap_ip(kmap, sym->start); 879 - 880 - sym = machine__find_kernel_symbol_by_name(machine, 881 - "__lock_text_end", 882 - &kmap); 883 - lock_text_end = kmap->unmap_ip(kmap, sym->start); 884 - } 885 - 886 - /* failed to get kernel symbols */ 887 - if (sched_text_start == 1) 888 - return false; 889 - 890 - /* mutex and rwsem functions are in sched text section */ 891 - if (sched_text_start <= addr && addr < sched_text_end) 892 - return true; 893 - 894 - /* spinlock functions are in lock text section */ 895 - if (lock_text_start <= addr && addr < lock_text_end) 896 - return true; 897 - 898 - return false; 899 - } 900 - 901 857 static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip, 902 858 char *buf, int size) 903 859 { ··· 907 961 goto next; 908 962 909 963 sym = node->ms.sym; 910 - if (sym && !is_lock_function(machine, node->ip)) { 964 + if (sym && !machine__is_lock_function(machine, node->ip)) { 911 965 get_symbol_name_offset(node->ms.map, sym, node->ip, 912 966 buf, size); 913 967 return 0; ··· 953 1007 if (++skip <= stack_skip) 954 1008 goto next; 955 1009 956 - if (node->ms.sym && is_lock_function(machine, node->ip)) 1010 + if (node->ms.sym && machine__is_lock_function(machine, node->ip)) 957 1011 goto next; 958 1012 959 1013 hash ^= hash_long((unsigned long)node->ip, 64);
+1 -1
tools/perf/util/bpf_lock_contention.c
··· 153 153 bpf_map_lookup_elem(stack, &key, stack_trace); 154 154 155 155 /* skip lock internal functions */ 156 - while (is_lock_function(machine, stack_trace[idx]) && 156 + while (machine__is_lock_function(machine, stack_trace[idx]) && 157 157 idx < con->max_stack - 1) 158 158 idx++; 159 159
-2
tools/perf/util/lock-contention.h
··· 145 145 146 146 #endif /* HAVE_BPF_SKEL */ 147 147 148 - bool is_lock_function(struct machine *machine, u64 addr); 149 - 150 148 #endif /* PERF_LOCK_CONTENTION_H */
+40
tools/perf/util/machine.c
··· 3336 3336 } 3337 3337 return err; 3338 3338 } 3339 + 3340 + bool machine__is_lock_function(struct machine *machine, u64 addr) 3341 + { 3342 + if (!machine->sched.text_start) { 3343 + struct map *kmap; 3344 + struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap); 3345 + 3346 + if (!sym) { 3347 + /* to avoid retry */ 3348 + machine->sched.text_start = 1; 3349 + return false; 3350 + } 3351 + 3352 + machine->sched.text_start = kmap->unmap_ip(kmap, sym->start); 3353 + 3354 + /* should not fail from here */ 3355 + sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap); 3356 + machine->sched.text_end = kmap->unmap_ip(kmap, sym->start); 3357 + 3358 + sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap); 3359 + machine->lock.text_start = kmap->unmap_ip(kmap, sym->start); 3360 + 3361 + sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap); 3362 + machine->lock.text_end = kmap->unmap_ip(kmap, sym->start); 3363 + } 3364 + 3365 + /* failed to get kernel symbols */ 3366 + if (machine->sched.text_start == 1) 3367 + return false; 3368 + 3369 + /* mutex and rwsem functions are in sched text section */ 3370 + if (machine->sched.text_start <= addr && addr < machine->sched.text_end) 3371 + return true; 3372 + 3373 + /* spinlock functions are in lock text section */ 3374 + if (machine->lock.text_start <= addr && addr < machine->lock.text_end) 3375 + return true; 3376 + 3377 + return false; 3378 + }
+5
tools/perf/util/machine.h
··· 56 56 struct maps *kmaps; 57 57 struct map *vmlinux_map; 58 58 u64 kernel_start; 59 + struct { 60 + u64 text_start; 61 + u64 text_end; 62 + } sched, lock; 59 63 pid_t *current_tid; 60 64 size_t current_tid_sz; 61 65 union { /* Tool specific area */ ··· 216 212 return machine ? machine->pid == HOST_KERNEL_ID : false; 217 213 } 218 214 215 + bool machine__is_lock_function(struct machine *machine, u64 addr); 219 216 bool machine__is(struct machine *machine, const char *arch); 220 217 bool machine__normalized_is(struct machine *machine, const char *arch); 221 218 int machine__nr_cpus_avail(struct machine *machine);