Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

* Don't show scripts menu for 'perf top', fix from Feng Tang

* Add framework for automated perf_event_attr tests, where tools with
different command line options will be run from a 'perf test', via
python glue, and the perf syscall will be intercepted to verify that
the perf_event_attr fields set by the tool are those expected,
from Jiri Olsa

* Use normalized arch name for searching objdump path. This fixes cases
where the system's objdump (e.g. x86_64) supports the architecture in
the perf.data file (e.g. i686), but is not the same,
fix from Namhyung Kim.

* Postpone objdump check until annotation requested, from Namhyung Kim.

* Add a 'link' method for hists, so that we can have the leader with
buckets for all the entries in all the hists. This new method
is now used in the default 'diff' output, making the sum of the 'baseline'
column be 100%, eliminating blind spots. Now we need to use this
for 'diff' with > 2 perf.data files and for multi event 'report' and
'annotate'.

* libtraceevent fixes for compiler warnings when trying to make perf build
on some distros, like Fedora 14, 32-bit; some of the warnings really
pointed to real bugs.

* Remove temp dir on failure in 'perf test', fix from Jiri Olsa.

* Fixes for handling data, stack mmaps, from Namhyung Kim.

* Fix live annotation bug related to recent objdump lookup patches, from
Namhyung Kim.

* Don't try to follow jump target on PLT symbols in the annotation browser,
fix from Namhyung Kim.

* Fix leak on hist_entry delete, from Namhyung Kim.

* Fix a CPU_ALLOC related build error on builtin-test, from Zheng Liu.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+2118 -504
+12 -10
tools/lib/traceevent/event-parse.c
··· 174 174 return 0; 175 175 } 176 176 177 - static char *find_cmdline(struct pevent *pevent, int pid) 177 + static const char *find_cmdline(struct pevent *pevent, int pid) 178 178 { 179 179 const struct cmdline *comm; 180 180 struct cmdline key; ··· 2637 2637 struct print_arg *farg; 2638 2638 enum event_type type; 2639 2639 char *token; 2640 - char *test; 2640 + const char *test; 2641 2641 int i; 2642 2642 2643 2643 arg->type = PRINT_FUNC; ··· 3889 3889 struct event_format *event, struct print_arg *arg) 3890 3890 { 3891 3891 unsigned char *buf; 3892 - char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"; 3892 + const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"; 3893 3893 3894 3894 if (arg->type == PRINT_FUNC) { 3895 3895 process_defined_func(s, data, size, event, arg); ··· 3931 3931 return 1; 3932 3932 } 3933 3933 3934 - static void print_event_fields(struct trace_seq *s, void *data, int size, 3934 + static void print_event_fields(struct trace_seq *s, void *data, 3935 + int size __maybe_unused, 3935 3936 struct event_format *event) 3936 3937 { 3937 3938 struct format_field *field; ··· 4409 4408 void pevent_print_event(struct pevent *pevent, struct trace_seq *s, 4410 4409 struct pevent_record *record) 4411 4410 { 4412 - static char *spaces = " "; /* 20 spaces */ 4411 + static const char *spaces = " "; /* 20 spaces */ 4413 4412 struct event_format *event; 4414 4413 unsigned long secs; 4415 4414 unsigned long usecs; ··· 5071 5070 }; 5072 5071 #undef _PE 5073 5072 5074 - int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum, 5075 - char *buf, size_t buflen) 5073 + int pevent_strerror(struct pevent *pevent __maybe_unused, 5074 + enum pevent_errno errnum, char *buf, size_t buflen) 5076 5075 { 5077 5076 int idx; 5078 5077 const char *msg; ··· 5101 5100 case PEVENT_ERRNO__READ_FORMAT_FAILED: 5102 5101 case PEVENT_ERRNO__READ_PRINT_FAILED: 5103 5102 case PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED: 5103 + case PEVENT_ERRNO__INVALID_ARG_TYPE: 5104 5104 snprintf(buf, buflen, 
"%s", msg); 5105 5105 break; 5106 5106 ··· 5364 5362 if (type == PEVENT_FUNC_ARG_VOID) 5365 5363 break; 5366 5364 5367 - if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) { 5365 + if (type >= PEVENT_FUNC_ARG_MAX_TYPES) { 5368 5366 do_warning("Invalid argument type %d", type); 5369 5367 ret = PEVENT_ERRNO__INVALID_ARG_TYPE; 5370 5368 goto out_free; ··· 5562 5560 } 5563 5561 5564 5562 if (pevent->func_map) { 5565 - for (i = 0; i < pevent->func_count; i++) { 5563 + for (i = 0; i < (int)pevent->func_count; i++) { 5566 5564 free(pevent->func_map[i].func); 5567 5565 free(pevent->func_map[i].mod); 5568 5566 } ··· 5584 5582 } 5585 5583 5586 5584 if (pevent->printk_map) { 5587 - for (i = 0; i < pevent->printk_count; i++) 5585 + for (i = 0; i < (int)pevent->printk_count; i++) 5588 5586 free(pevent->printk_map[i].printk); 5589 5587 free(pevent->printk_map); 5590 5588 }
+4 -1
tools/perf/Documentation/android.txt
··· 48 48 II. Compile perf for Android 49 49 ------------------------------------------------ 50 50 You need to run make with the NDK toolchain and sysroot defined above: 51 - make CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}" 51 + For arm: 52 + make ARCH=arm CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}" 53 + For x86: 54 + make ARCH=x86 CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}" 52 55 53 56 III. Install perf 54 57 -----------------------------------------------
+27 -7
tools/perf/Makefile
··· 169 169 170 170 ### --- END CONFIGURATION SECTION --- 171 171 172 - BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 172 + BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -Iutil -I. -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 173 173 BASIC_LDFLAGS = 174 174 175 175 ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y) ··· 371 371 LIB_OBJS += $(OUTPUT)util/levenshtein.o 372 372 LIB_OBJS += $(OUTPUT)util/parse-options.o 373 373 LIB_OBJS += $(OUTPUT)util/parse-events.o 374 - LIB_OBJS += $(OUTPUT)util/parse-events-test.o 375 374 LIB_OBJS += $(OUTPUT)util/path.o 376 375 LIB_OBJS += $(OUTPUT)util/rbtree.o 377 376 LIB_OBJS += $(OUTPUT)util/bitmap.o ··· 388 389 LIB_OBJS += $(OUTPUT)util/dso.o 389 390 LIB_OBJS += $(OUTPUT)util/symbol.o 390 391 LIB_OBJS += $(OUTPUT)util/symbol-elf.o 391 - LIB_OBJS += $(OUTPUT)util/dso-test-data.o 392 392 LIB_OBJS += $(OUTPUT)util/color.o 393 393 LIB_OBJS += $(OUTPUT)util/pager.o 394 394 LIB_OBJS += $(OUTPUT)util/header.o ··· 428 430 429 431 LIB_OBJS += $(OUTPUT)arch/common.o 430 432 433 + LIB_OBJS += $(OUTPUT)tests/parse-events.o 434 + LIB_OBJS += $(OUTPUT)tests/dso-data.o 435 + LIB_OBJS += $(OUTPUT)tests/attr.o 436 + 431 437 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o 432 438 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o 433 439 # Benchmark modules ··· 461 459 BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o 462 460 BUILTIN_OBJS += $(OUTPUT)builtin-lock.o 463 461 BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o 464 - BUILTIN_OBJS += $(OUTPUT)builtin-test.o 465 462 BUILTIN_OBJS += $(OUTPUT)builtin-inject.o 463 + BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o 466 464 467 465 PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT) 468 466 ··· 492 490 LIBC_SUPPORT := 1 493 491 endif 494 492 ifeq ($(LIBC_SUPPORT),1) 493 + msg := $(warning No libelf found, disables 'probe' tool, please install 
elfutils-libelf-devel/libelf-dev); 494 + 495 495 NO_LIBELF := 1 496 496 NO_DWARF := 1 497 497 NO_DEMANGLE := 1 ··· 501 497 msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); 502 498 endif 503 499 else 504 - FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS) 500 + # for linking with debug library, run like: 501 + # make DEBUG=1 LIBDW_DIR=/opt/libdw/ 502 + ifdef LIBDW_DIR 503 + LIBDW_CFLAGS := -I$(LIBDW_DIR)/include 504 + LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib 505 + endif 506 + 507 + FLAGS_DWARF=$(ALL_CFLAGS) $(LIBDW_CFLAGS) -ldw -lelf $(LIBDW_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) 505 508 ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF),libdw),y) 506 509 msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev); 507 510 NO_DWARF := 1 ··· 563 552 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) 564 553 msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); 565 554 else 566 - BASIC_CFLAGS += -DDWARF_SUPPORT 555 + BASIC_CFLAGS := -DDWARF_SUPPORT $(LIBDW_CFLAGS) $(BASIC_CFLAGS) 556 + BASIC_LDFLAGS := $(LIBDW_LDFLAGS) $(BASIC_LDFLAGS) 567 557 EXTLIBS += -lelf -ldw 568 558 LIB_OBJS += $(OUTPUT)util/probe-finder.o 569 559 LIB_OBJS += $(OUTPUT)util/dwarf-aux.o ··· 903 891 $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS 904 892 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ 905 893 '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ 906 - '-DBINDIR="$(bindir_relative_SQ)"' \ 907 894 '-DPREFIX="$(prefix_SQ)"' \ 895 + $< 896 + 897 + $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS 898 + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ 899 + '-DBINDIR="$(bindir_SQ)"' \ 908 900 $< 909 901 910 902 $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS ··· 1075 1059 $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' 1076 1060 $(INSTALL) 
-d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d' 1077 1061 $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf' 1062 + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests' 1063 + $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests' 1064 + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 1065 + $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 1078 1066 1079 1067 install-python_ext: 1080 1068 $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
+40 -7
tools/perf/arch/common.c
··· 93 93 return -1; 94 94 } 95 95 96 + /* 97 + * Return architecture name in a normalized form. 98 + * The conversion logic comes from the Makefile. 99 + */ 100 + static const char *normalize_arch(char *arch) 101 + { 102 + if (!strcmp(arch, "x86_64")) 103 + return "x86"; 104 + if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6') 105 + return "x86"; 106 + if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5)) 107 + return "sparc"; 108 + if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) 109 + return "arm"; 110 + if (!strncmp(arch, "s390", 4)) 111 + return "s390"; 112 + if (!strncmp(arch, "parisc", 6)) 113 + return "parisc"; 114 + if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3)) 115 + return "powerpc"; 116 + if (!strncmp(arch, "mips", 4)) 117 + return "mips"; 118 + if (!strncmp(arch, "sh", 2) && isdigit(arch[2])) 119 + return "sh"; 120 + 121 + return arch; 122 + } 123 + 96 124 static int perf_session_env__lookup_binutils_path(struct perf_session_env *env, 97 125 const char *name, 98 126 const char **path) 99 127 { 100 128 int idx; 101 - char *arch, *cross_env; 129 + const char *arch, *cross_env; 102 130 struct utsname uts; 103 131 const char *const *path_list; 104 132 char *buf = NULL; 133 + 134 + arch = normalize_arch(env->arch); 105 135 106 136 if (uname(&uts) < 0) 107 137 goto out; ··· 140 110 * We don't need to try to find objdump path for native system. 141 111 * Just use default binutils path (e.g.: "objdump"). 
142 112 */ 143 - if (!strcmp(uts.machine, env->arch)) 113 + if (!strcmp(normalize_arch(uts.machine), arch)) 144 114 goto out; 145 115 146 116 cross_env = getenv("CROSS_COMPILE"); ··· 157 127 free(buf); 158 128 } 159 129 160 - arch = env->arch; 161 - 162 130 if (!strcmp(arch, "arm")) 163 131 path_list = arm_triplets; 164 132 else if (!strcmp(arch, "powerpc")) ··· 167 139 path_list = s390_triplets; 168 140 else if (!strcmp(arch, "sparc")) 169 141 path_list = sparc_triplets; 170 - else if (!strcmp(arch, "x86") || !strcmp(arch, "i386") || 171 - !strcmp(arch, "i486") || !strcmp(arch, "i586") || 172 - !strcmp(arch, "i686")) 142 + else if (!strcmp(arch, "x86")) 173 143 path_list = x86_triplets; 174 144 else if (!strcmp(arch, "mips")) 175 145 path_list = mips_triplets; ··· 199 173 200 174 int perf_session_env__lookup_objdump(struct perf_session_env *env) 201 175 { 176 + /* 177 + * For live mode, env->arch will be NULL and we can use 178 + * the native objdump tool. 179 + */ 180 + if (env->arch == NULL) 181 + return 0; 182 + 202 183 return perf_session_env__lookup_binutils_path(env, "objdump", 203 184 &objdump_path); 204 185 }
+1 -1
tools/perf/builtin-annotate.c
··· 139 139 } 140 140 141 141 if (use_browser > 0) { 142 - key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0); 142 + key = hist_entry__tui_annotate(he, evidx, NULL); 143 143 switch (key) { 144 144 case K_RIGHT: 145 145 next = rb_next(nd);
+10 -38
tools/perf/builtin-diff.c
··· 154 154 155 155 double perf_diff__compute_delta(struct hist_entry *he) 156 156 { 157 - struct hist_entry *pair = he->pair; 157 + struct hist_entry *pair = hist_entry__next_pair(he); 158 158 double new_percent = get_period_percent(he, he->stat.period); 159 159 double old_percent = pair ? get_period_percent(pair, pair->stat.period) : 0.0; 160 160 ··· 165 165 166 166 double perf_diff__compute_ratio(struct hist_entry *he) 167 167 { 168 - struct hist_entry *pair = he->pair; 168 + struct hist_entry *pair = hist_entry__next_pair(he); 169 169 double new_period = he->stat.period; 170 170 double old_period = pair ? pair->stat.period : 0; 171 171 ··· 176 176 177 177 s64 perf_diff__compute_wdiff(struct hist_entry *he) 178 178 { 179 - struct hist_entry *pair = he->pair; 179 + struct hist_entry *pair = hist_entry__next_pair(he); 180 180 u64 new_period = he->stat.period; 181 181 u64 old_period = pair ? pair->stat.period : 0; 182 182 ··· 193 193 194 194 static int formula_delta(struct hist_entry *he, char *buf, size_t size) 195 195 { 196 - struct hist_entry *pair = he->pair; 196 + struct hist_entry *pair = hist_entry__next_pair(he); 197 197 198 198 if (!pair) 199 199 return -1; ··· 207 207 208 208 static int formula_ratio(struct hist_entry *he, char *buf, size_t size) 209 209 { 210 - struct hist_entry *pair = he->pair; 210 + struct hist_entry *pair = hist_entry__next_pair(he); 211 211 double new_period = he->stat.period; 212 212 double old_period = pair ? pair->stat.period : 0; 213 213 ··· 219 219 220 220 static int formula_wdiff(struct hist_entry *he, char *buf, size_t size) 221 221 { 222 - struct hist_entry *pair = he->pair; 222 + struct hist_entry *pair = hist_entry__next_pair(he); 223 223 u64 new_period = he->stat.period; 224 224 u64 old_period = pair ? 
pair->stat.period : 0; 225 225 ··· 334 334 self->entries = tmp; 335 335 } 336 336 337 - static struct hist_entry *hists__find_entry(struct hists *self, 338 - struct hist_entry *he) 339 - { 340 - struct rb_node *n = self->entries.rb_node; 341 - 342 - while (n) { 343 - struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 344 - int64_t cmp = hist_entry__cmp(he, iter); 345 - 346 - if (cmp < 0) 347 - n = n->rb_left; 348 - else if (cmp > 0) 349 - n = n->rb_right; 350 - else 351 - return iter; 352 - } 353 - 354 - return NULL; 355 - } 356 - 357 - static void hists__match(struct hists *older, struct hists *newer) 358 - { 359 - struct rb_node *nd; 360 - 361 - for (nd = rb_first(&newer->entries); nd; nd = rb_next(nd)) { 362 - struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node); 363 - pos->pair = hists__find_entry(older, pos); 364 - } 365 - } 366 - 367 337 static struct perf_evsel *evsel_match(struct perf_evsel *evsel, 368 338 struct perf_evlist *evlist) 369 339 { ··· 372 402 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 373 403 374 404 next = rb_next(&he->rb_node); 375 - if (!he->pair) { 405 + if (!hist_entry__next_pair(he)) { 376 406 rb_erase(&he->rb_node, &hists->entries); 377 407 hist_entry__free(he); 378 408 } ··· 487 517 488 518 static void hists__process(struct hists *old, struct hists *new) 489 519 { 490 - hists__match(old, new); 520 + hists__match(new, old); 491 521 492 522 if (show_baseline_only) 493 523 hists__baseline_only(new); 524 + else 525 + hists__link(new, old); 494 526 495 527 if (sort_compute) { 496 528 hists__precompute(new);
+3 -8
tools/perf/builtin-report.c
··· 428 428 if (use_browser > 0) { 429 429 if (use_browser == 1) { 430 430 perf_evlist__tui_browse_hists(session->evlist, help, 431 - NULL, NULL, 0); 431 + NULL, 432 + &session->header.env); 432 433 } else if (use_browser == 2) { 433 434 perf_evlist__gtk_browse_hists(session->evlist, help, 434 - NULL, NULL, 0); 435 + NULL); 435 436 } 436 437 } else 437 438 perf_evlist__tty_browse_hists(session->evlist, rep, help); ··· 672 671 673 672 has_br_stack = perf_header__has_feat(&session->header, 674 673 HEADER_BRANCH_STACK); 675 - 676 - if (!objdump_path) { 677 - ret = perf_session_env__lookup_objdump(&session->header.env); 678 - if (ret) 679 - goto error; 680 - } 681 674 682 675 if (sort__branch_mode == -1 && has_br_stack) 683 676 sort__branch_mode = 1;
+21 -30
tools/perf/builtin-test.c tools/perf/tests/builtin-test.c
··· 10 10 #include "util/debug.h" 11 11 #include "util/debugfs.h" 12 12 #include "util/evlist.h" 13 + #include "util/machine.h" 13 14 #include "util/parse-options.h" 14 15 #include "util/parse-events.h" 15 16 #include "util/symbol.h" ··· 319 318 nr_open_calls, evsel->counts->cpu[0].val); 320 319 goto out_close_fd; 321 320 } 322 - 321 + 323 322 err = 0; 324 323 out_close_fd: 325 324 perf_evsel__close_fd(evsel, 1, threads->nr); ··· 605 604 #undef nsyscalls 606 605 } 607 606 608 - static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, 609 - size_t *sizep) 607 + static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) 610 608 { 611 - cpu_set_t *mask; 612 - size_t size; 613 609 int i, cpu = -1, nrcpus = 1024; 614 610 realloc: 615 - mask = CPU_ALLOC(nrcpus); 616 - size = CPU_ALLOC_SIZE(nrcpus); 617 - CPU_ZERO_S(size, mask); 611 + CPU_ZERO(maskp); 618 612 619 - if (sched_getaffinity(pid, size, mask) == -1) { 620 - CPU_FREE(mask); 613 + if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { 621 614 if (errno == EINVAL && nrcpus < (1024 << 8)) { 622 615 nrcpus = nrcpus << 2; 623 616 goto realloc; ··· 621 626 } 622 627 623 628 for (i = 0; i < nrcpus; i++) { 624 - if (CPU_ISSET_S(i, size, mask)) { 625 - if (cpu == -1) { 629 + if (CPU_ISSET(i, maskp)) { 630 + if (cpu == -1) 626 631 cpu = i; 627 - *maskp = mask; 628 - *sizep = size; 629 - } else 630 - CPU_CLR_S(i, size, mask); 632 + else 633 + CPU_CLR(i, maskp); 631 634 } 632 635 } 633 - 634 - if (cpu == -1) 635 - CPU_FREE(mask); 636 636 637 637 return cpu; 638 638 } ··· 643 653 .freq = 10, 644 654 .mmap_pages = 256, 645 655 }; 646 - cpu_set_t *cpu_mask = NULL; 647 - size_t cpu_mask_size = 0; 656 + cpu_set_t cpu_mask; 657 + size_t cpu_mask_size = sizeof(cpu_mask); 648 658 struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); 649 659 struct perf_evsel *evsel; 650 660 struct perf_sample sample; ··· 708 718 evsel->attr.sample_type |= PERF_SAMPLE_TIME; 709 719 perf_evlist__config_attrs(evlist, 
&opts); 710 720 711 - err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, 712 - &cpu_mask_size); 721 + err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); 713 722 if (err < 0) { 714 723 pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); 715 724 goto out_delete_evlist; ··· 719 730 /* 720 731 * So that we can check perf_sample.cpu on all the samples. 721 732 */ 722 - if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { 733 + if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { 723 734 pr_debug("sched_setaffinity: %s\n", strerror(errno)); 724 - goto out_free_cpu_mask; 735 + goto out_delete_evlist; 725 736 } 726 737 727 738 /* ··· 905 916 } 906 917 out_err: 907 918 perf_evlist__munmap(evlist); 908 - out_free_cpu_mask: 909 - CPU_FREE(cpu_mask); 910 919 out_delete_evlist: 911 920 perf_evlist__delete(evlist); 912 921 out: ··· 1331 1344 perf_evlist__enable(evlist); 1332 1345 1333 1346 /* 1334 - * Generate the event: 1335 - */ 1347 + * Generate the event: 1348 + */ 1336 1349 open(filename, flags); 1337 1350 1338 1351 while (1) { ··· 1442 1455 .func = test__syscall_open_tp_fields, 1443 1456 }, 1444 1457 { 1458 + .desc = "struct perf_event_attr setup", 1459 + .func = test_attr__run, 1460 + }, 1461 + { 1445 1462 .func = NULL, 1446 1463 }, 1447 1464 }; ··· 1486 1495 width = len; 1487 1496 ++i; 1488 1497 } 1489 - 1498 + 1490 1499 i = 0; 1491 1500 while (tests[i].func) { 1492 1501 int curr = i++, err;
+7 -3
tools/perf/builtin-top.c
··· 582 582 struct perf_evsel *pos; 583 583 struct perf_top *top = arg; 584 584 const char *help = "For a higher level overview, try: perf top --sort comm,dso"; 585 + struct hist_browser_timer hbt = { 586 + .timer = perf_top__sort_new_samples, 587 + .arg = top, 588 + .refresh = top->delay_secs, 589 + }; 585 590 586 591 perf_top__sort_new_samples(top); 587 592 ··· 598 593 list_for_each_entry(pos, &top->evlist->entries, node) 599 594 pos->hists.uid_filter_str = top->target.uid_str; 600 595 601 - perf_evlist__tui_browse_hists(top->evlist, help, 602 - perf_top__sort_new_samples, 603 - top, top->delay_secs); 596 + perf_evlist__tui_browse_hists(top->evlist, help, &hbt, 597 + &top->session->header.env); 604 598 605 599 exit_browser(0); 606 600 exit(0);
+2
tools/perf/perf.c
··· 484 484 } 485 485 cmd = argv[0]; 486 486 487 + test_attr__init(); 488 + 487 489 /* 488 490 * We use PATH to find perf commands, but we prepend some higher 489 491 * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
+15 -2
tools/perf/perf.h
··· 174 174 (void) (&_min1 == &_min2); \ 175 175 _min1 < _min2 ? _min1 : _min2; }) 176 176 177 + extern bool test_attr__enabled; 178 + void test_attr__init(void); 179 + void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, 180 + int fd, int group_fd, unsigned long flags); 181 + int test_attr__run(void); 182 + 177 183 static inline int 178 184 sys_perf_event_open(struct perf_event_attr *attr, 179 185 pid_t pid, int cpu, int group_fd, 180 186 unsigned long flags) 181 187 { 182 - return syscall(__NR_perf_event_open, attr, pid, cpu, 183 - group_fd, flags); 188 + int fd; 189 + 190 + fd = syscall(__NR_perf_event_open, attr, pid, cpu, 191 + group_fd, flags); 192 + 193 + if (unlikely(test_attr__enabled)) 194 + test_attr__open(attr, pid, cpu, fd, group_fd, flags); 195 + 196 + return fd; 184 197 } 185 198 186 199 #define MAX_COUNTERS 256
+174
tools/perf/tests/attr.c
··· 1 + 2 + /* 3 + * The struct perf_event_attr test support. 4 + * 5 + * This test is embedded inside into perf directly and is governed 6 + * by the PERF_TEST_ATTR environment variable and hook inside 7 + * sys_perf_event_open function. 8 + * 9 + * The general idea is to store 'struct perf_event_attr' details for 10 + * each event created within single perf command. Each event details 11 + * are stored into separate text file. Once perf command is finished 12 + * these files can be checked for values we expect for command. 13 + * 14 + * Besides 'struct perf_event_attr' values we also store 'fd' and 15 + * 'group_fd' values to allow checking for groups created. 16 + * 17 + * This all is triggered by setting PERF_TEST_ATTR environment variable. 18 + * It must contain name of existing directory with access and write 19 + * permissions. All the event text files are stored there. 20 + */ 21 + 22 + #include <stdlib.h> 23 + #include <stdio.h> 24 + #include <inttypes.h> 25 + #include <linux/types.h> 26 + #include <linux/kernel.h> 27 + #include "../perf.h" 28 + #include "util.h" 29 + #include "exec_cmd.h" 30 + 31 + #define ENV "PERF_TEST_ATTR" 32 + 33 + extern int verbose; 34 + 35 + bool test_attr__enabled; 36 + 37 + static char *dir; 38 + 39 + void test_attr__init(void) 40 + { 41 + dir = getenv(ENV); 42 + test_attr__enabled = (dir != NULL); 43 + } 44 + 45 + #define BUFSIZE 1024 46 + 47 + #define __WRITE_ASS(str, fmt, data) \ 48 + do { \ 49 + char buf[BUFSIZE]; \ 50 + size_t size; \ 51 + \ 52 + size = snprintf(buf, BUFSIZE, #str "=%"fmt "\n", data); \ 53 + if (1 != fwrite(buf, size, 1, file)) { \ 54 + perror("test attr - failed to write event file"); \ 55 + fclose(file); \ 56 + return -1; \ 57 + } \ 58 + \ 59 + } while (0) 60 + 61 + #define WRITE_ASS(field, fmt) __WRITE_ASS(field, fmt, attr->field) 62 + 63 + static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu, 64 + int fd, int group_fd, unsigned long flags) 65 + { 66 + FILE *file; 67 + char 
path[PATH_MAX]; 68 + 69 + snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir, 70 + attr->type, attr->config, fd); 71 + 72 + file = fopen(path, "w+"); 73 + if (!file) { 74 + perror("test attr - failed to open event file"); 75 + return -1; 76 + } 77 + 78 + if (fprintf(file, "[event-%d-%llu-%d]\n", 79 + attr->type, attr->config, fd) < 0) { 80 + perror("test attr - failed to write event file"); 81 + fclose(file); 82 + return -1; 83 + } 84 + 85 + /* syscall arguments */ 86 + __WRITE_ASS(fd, "d", fd); 87 + __WRITE_ASS(group_fd, "d", group_fd); 88 + __WRITE_ASS(cpu, "d", cpu); 89 + __WRITE_ASS(pid, "d", pid); 90 + __WRITE_ASS(flags, "lu", flags); 91 + 92 + /* struct perf_event_attr */ 93 + WRITE_ASS(type, PRIu32); 94 + WRITE_ASS(size, PRIu32); 95 + WRITE_ASS(config, "llu"); 96 + WRITE_ASS(sample_period, "llu"); 97 + WRITE_ASS(sample_type, "llu"); 98 + WRITE_ASS(read_format, "llu"); 99 + WRITE_ASS(disabled, "d"); 100 + WRITE_ASS(inherit, "d"); 101 + WRITE_ASS(pinned, "d"); 102 + WRITE_ASS(exclusive, "d"); 103 + WRITE_ASS(exclude_user, "d"); 104 + WRITE_ASS(exclude_kernel, "d"); 105 + WRITE_ASS(exclude_hv, "d"); 106 + WRITE_ASS(exclude_idle, "d"); 107 + WRITE_ASS(mmap, "d"); 108 + WRITE_ASS(comm, "d"); 109 + WRITE_ASS(freq, "d"); 110 + WRITE_ASS(inherit_stat, "d"); 111 + WRITE_ASS(enable_on_exec, "d"); 112 + WRITE_ASS(task, "d"); 113 + WRITE_ASS(watermark, "d"); 114 + WRITE_ASS(precise_ip, "d"); 115 + WRITE_ASS(mmap_data, "d"); 116 + WRITE_ASS(sample_id_all, "d"); 117 + WRITE_ASS(exclude_host, "d"); 118 + WRITE_ASS(exclude_guest, "d"); 119 + WRITE_ASS(exclude_callchain_kernel, "d"); 120 + WRITE_ASS(exclude_callchain_user, "d"); 121 + WRITE_ASS(wakeup_events, PRIu32); 122 + WRITE_ASS(bp_type, PRIu32); 123 + WRITE_ASS(config1, "llu"); 124 + WRITE_ASS(config2, "llu"); 125 + WRITE_ASS(branch_sample_type, "llu"); 126 + WRITE_ASS(sample_regs_user, "llu"); 127 + WRITE_ASS(sample_stack_user, PRIu32); 128 + 129 + fclose(file); 130 + return 0; 131 + } 132 + 133 + void 
test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, 134 + int fd, int group_fd, unsigned long flags) 135 + { 136 + int errno_saved = errno; 137 + 138 + if (store_event(attr, pid, cpu, fd, group_fd, flags)) 139 + die("test attr FAILED"); 140 + 141 + errno = errno_saved; 142 + } 143 + 144 + static int run_dir(const char *d, const char *perf) 145 + { 146 + char cmd[3*PATH_MAX]; 147 + 148 + snprintf(cmd, 3*PATH_MAX, "python %s/attr.py -d %s/attr/ -p %s %s", 149 + d, d, perf, verbose ? "-v" : ""); 150 + 151 + return system(cmd); 152 + } 153 + 154 + int test_attr__run(void) 155 + { 156 + struct stat st; 157 + char path_perf[PATH_MAX]; 158 + char path_dir[PATH_MAX]; 159 + 160 + /* First try developement tree tests. */ 161 + if (!lstat("./tests", &st)) 162 + return run_dir("./tests", "./perf"); 163 + 164 + /* Then installed path. */ 165 + snprintf(path_dir, PATH_MAX, "%s/tests", perf_exec_path()); 166 + snprintf(path_perf, PATH_MAX, "%s/perf", BINDIR); 167 + 168 + if (!lstat(path_dir, &st) && 169 + !lstat(path_perf, &st)) 170 + return run_dir(path_dir, path_perf); 171 + 172 + fprintf(stderr, " (ommitted)"); 173 + return 0; 174 + }
+322
tools/perf/tests/attr.py
··· 1 + #! /usr/bin/python 2 + 3 + import os 4 + import sys 5 + import glob 6 + import optparse 7 + import tempfile 8 + import logging 9 + import shutil 10 + import ConfigParser 11 + 12 + class Fail(Exception): 13 + def __init__(self, test, msg): 14 + self.msg = msg 15 + self.test = test 16 + def getMsg(self): 17 + return '\'%s\' - %s' % (self.test.path, self.msg) 18 + 19 + class Unsup(Exception): 20 + def __init__(self, test): 21 + self.test = test 22 + def getMsg(self): 23 + return '\'%s\'' % self.test.path 24 + 25 + class Event(dict): 26 + terms = [ 27 + 'flags', 28 + 'type', 29 + 'size', 30 + 'config', 31 + 'sample_period', 32 + 'sample_type', 33 + 'read_format', 34 + 'disabled', 35 + 'inherit', 36 + 'pinned', 37 + 'exclusive', 38 + 'exclude_user', 39 + 'exclude_kernel', 40 + 'exclude_hv', 41 + 'exclude_idle', 42 + 'mmap', 43 + 'comm', 44 + 'freq', 45 + 'inherit_stat', 46 + 'enable_on_exec', 47 + 'task', 48 + 'watermark', 49 + 'precise_ip', 50 + 'mmap_data', 51 + 'sample_id_all', 52 + 'exclude_host', 53 + 'exclude_guest', 54 + 'exclude_callchain_kernel', 55 + 'exclude_callchain_user', 56 + 'wakeup_events', 57 + 'bp_type', 58 + 'config1', 59 + 'config2', 60 + 'branch_sample_type', 61 + 'sample_regs_user', 62 + 'sample_stack_user', 63 + ] 64 + 65 + def add(self, data): 66 + for key, val in data: 67 + log.debug(" %s = %s" % (key, val)) 68 + self[key] = val 69 + 70 + def __init__(self, name, data, base): 71 + log.info(" Event %s" % name); 72 + self.name = name; 73 + self.group = '' 74 + self.add(base) 75 + self.add(data) 76 + 77 + def compare_data(self, a, b): 78 + # Allow multiple values in assignment separated by '|' 79 + a_list = a.split('|') 80 + b_list = b.split('|') 81 + 82 + for a_item in a_list: 83 + for b_item in b_list: 84 + if (a_item == b_item): 85 + return True 86 + elif (a_item == '*') or (b_item == '*'): 87 + return True 88 + 89 + return False 90 + 91 + def equal(self, other): 92 + for t in Event.terms: 93 + log.debug(" [%s] %s %s" % (t, self[t], 
other[t])); 94 + if not self.has_key(t) or not other.has_key(t): 95 + return False 96 + if not self.compare_data(self[t], other[t]): 97 + return False 98 + return True 99 + 100 + # Test file description needs to have following sections: 101 + # [config] 102 + # - just single instance in file 103 + # - needs to specify: 104 + # 'command' - perf command name 105 + # 'args' - special command arguments 106 + # 'ret' - expected command return value (0 by default) 107 + # 108 + # [eventX:base] 109 + # - one or multiple instances in file 110 + # - expected values assignments 111 + class Test(object): 112 + def __init__(self, path, options): 113 + parser = ConfigParser.SafeConfigParser() 114 + parser.read(path) 115 + 116 + log.warning("running '%s'" % path) 117 + 118 + self.path = path 119 + self.test_dir = options.test_dir 120 + self.perf = options.perf 121 + self.command = parser.get('config', 'command') 122 + self.args = parser.get('config', 'args') 123 + 124 + try: 125 + self.ret = parser.get('config', 'ret') 126 + except: 127 + self.ret = 0 128 + 129 + self.expect = {} 130 + self.result = {} 131 + log.info(" loading expected events"); 132 + self.load_events(path, self.expect) 133 + 134 + def is_event(self, name): 135 + if name.find("event") == -1: 136 + return False 137 + else: 138 + return True 139 + 140 + def load_events(self, path, events): 141 + parser_event = ConfigParser.SafeConfigParser() 142 + parser_event.read(path) 143 + 144 + # The event record section header contains 'event' word, 145 + # optionaly followed by ':' allowing to load 'parent 146 + # event' first as a base 147 + for section in filter(self.is_event, parser_event.sections()): 148 + 149 + parser_items = parser_event.items(section); 150 + base_items = {} 151 + 152 + # Read parent event if there's any 153 + if (':' in section): 154 + base = section[section.index(':') + 1:] 155 + parser_base = ConfigParser.SafeConfigParser() 156 + parser_base.read(self.test_dir + '/' + base) 157 + base_items = 
parser_base.items('event') 158 + 159 + e = Event(section, parser_items, base_items) 160 + events[section] = e 161 + 162 + def run_cmd(self, tempdir): 163 + cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir, 164 + self.perf, self.command, tempdir, self.args) 165 + ret = os.WEXITSTATUS(os.system(cmd)) 166 + 167 + log.info(" running '%s' ret %d " % (cmd, ret)) 168 + 169 + if ret != int(self.ret): 170 + raise Unsup(self) 171 + 172 + def compare(self, expect, result): 173 + match = {} 174 + 175 + log.info(" compare"); 176 + 177 + # For each expected event find all matching 178 + # events in result. Fail if there's not any. 179 + for exp_name, exp_event in expect.items(): 180 + exp_list = [] 181 + log.debug(" matching [%s]" % exp_name) 182 + for res_name, res_event in result.items(): 183 + log.debug(" to [%s]" % res_name) 184 + if (exp_event.equal(res_event)): 185 + exp_list.append(res_name) 186 + log.debug(" ->OK") 187 + else: 188 + log.debug(" ->FAIL"); 189 + 190 + log.info(" match: [%s] matches %s" % (exp_name, str(exp_list))) 191 + 192 + # we did not any matching event - fail 193 + if (not exp_list): 194 + raise Fail(self, 'match failure'); 195 + 196 + match[exp_name] = exp_list 197 + 198 + # For each defined group in the expected events 199 + # check we match the same group in the result. 
200 + for exp_name, exp_event in expect.items(): 201 + group = exp_event.group 202 + 203 + if (group == ''): 204 + continue 205 + 206 + for res_name in match[exp_name]: 207 + res_group = result[res_name].group 208 + if res_group not in match[group]: 209 + raise Fail(self, 'group failure') 210 + 211 + log.info(" group: [%s] matches group leader %s" % 212 + (exp_name, str(match[group]))) 213 + 214 + log.info(" matched") 215 + 216 + def resolve_groups(self, events): 217 + for name, event in events.items(): 218 + group_fd = event['group_fd']; 219 + if group_fd == '-1': 220 + continue; 221 + 222 + for iname, ievent in events.items(): 223 + if (ievent['fd'] == group_fd): 224 + event.group = iname 225 + log.debug('[%s] has group leader [%s]' % (name, iname)) 226 + break; 227 + 228 + def run(self): 229 + tempdir = tempfile.mkdtemp(); 230 + 231 + try: 232 + # run the test script 233 + self.run_cmd(tempdir); 234 + 235 + # load events expectation for the test 236 + log.info(" loading result events"); 237 + for f in glob.glob(tempdir + '/event*'): 238 + self.load_events(f, self.result); 239 + 240 + # resolve group_fd to event names 241 + self.resolve_groups(self.expect); 242 + self.resolve_groups(self.result); 243 + 244 + # do the expectation - results matching - both ways 245 + self.compare(self.expect, self.result) 246 + self.compare(self.result, self.expect) 247 + 248 + finally: 249 + # cleanup 250 + shutil.rmtree(tempdir) 251 + 252 + 253 + def run_tests(options): 254 + for f in glob.glob(options.test_dir + '/' + options.test): 255 + try: 256 + Test(f, options).run() 257 + except Unsup, obj: 258 + log.warning("unsupp %s" % obj.getMsg()) 259 + 260 + def setup_log(verbose): 261 + global log 262 + level = logging.CRITICAL 263 + 264 + if verbose == 1: 265 + level = logging.WARNING 266 + if verbose == 2: 267 + level = logging.INFO 268 + if verbose >= 3: 269 + level = logging.DEBUG 270 + 271 + log = logging.getLogger('test') 272 + log.setLevel(level) 273 + ch = 
logging.StreamHandler() 274 + ch.setLevel(level) 275 + formatter = logging.Formatter('%(message)s') 276 + ch.setFormatter(formatter) 277 + log.addHandler(ch) 278 + 279 + USAGE = '''%s [OPTIONS] 280 + -d dir # tests dir 281 + -p path # perf binary 282 + -t test # single test 283 + -v # verbose level 284 + ''' % sys.argv[0] 285 + 286 + def main(): 287 + parser = optparse.OptionParser(usage=USAGE) 288 + 289 + parser.add_option("-t", "--test", 290 + action="store", type="string", dest="test") 291 + parser.add_option("-d", "--test-dir", 292 + action="store", type="string", dest="test_dir") 293 + parser.add_option("-p", "--perf", 294 + action="store", type="string", dest="perf") 295 + parser.add_option("-v", "--verbose", 296 + action="count", dest="verbose") 297 + 298 + options, args = parser.parse_args() 299 + if args: 300 + parser.error('FAILED wrong arguments %s' % ' '.join(args)) 301 + return -1 302 + 303 + setup_log(options.verbose) 304 + 305 + if not options.test_dir: 306 + print 'FAILED no -d option specified' 307 + sys.exit(-1) 308 + 309 + if not options.test: 310 + options.test = 'test*' 311 + 312 + try: 313 + run_tests(options) 314 + 315 + except Fail, obj: 316 + print "FAILED %s" % obj.getMsg(); 317 + sys.exit(-1) 318 + 319 + sys.exit(0) 320 + 321 + if __name__ == '__main__': 322 + main()
+64
tools/perf/tests/attr/README
··· 1 + The struct perf_event_attr test (attr tests) support 2 + ==================================================== 3 + This testing support is embedded into perf directly and is governed 4 + by the PERF_TEST_ATTR environment variable and a hook inside the 5 + sys_perf_event_open function. 6 + 7 + The general idea is to store 'struct perf_event_attr' details for 8 + each event created within a single perf command. The details for each 9 + event are stored in a separate text file. Once the perf command is finished 10 + these files are checked for the values we expect for that command. 11 + 12 + The attr tests consist of the following parts: 13 + 14 + tests/attr.c 15 + ------------ 16 + This is the sys_perf_event_open hook implementation. The hook 17 + is triggered when the PERF_TEST_ATTR environment variable is 18 + defined. It must contain the name of an existing directory with access 19 + and write permissions. 20 + 21 + For each sys_perf_event_open call, event details are stored in a 22 + separate file. Besides 'struct perf_event_attr' values we also 23 + store 'fd' and 'group_fd' values to allow checking for groups. 24 + 25 + tests/attr.py 26 + ------------- 27 + This is the Python script that does all the hard work. It reads 28 + the test definition, executes it and checks results. 29 + 30 + tests/attr/ 31 + ----------- 32 + Directory containing all attr test definitions. 
33 + Following tests are defined (with perf commands): 34 + 35 + perf record kill (test-record-basic) 36 + perf record -b kill (test-record-branch-any) 37 + perf record -j any kill (test-record-branch-filter-any) 38 + perf record -j any_call kill (test-record-branch-filter-any_call) 39 + perf record -j any_ret kill (test-record-branch-filter-any_ret) 40 + perf record -j hv kill (test-record-branch-filter-hv) 41 + perf record -j ind_call kill (test-record-branch-filter-ind_call) 42 + perf record -j k kill (test-record-branch-filter-k) 43 + perf record -j u kill (test-record-branch-filter-u) 44 + perf record -c 123 kill (test-record-count) 45 + perf record -d kill (test-record-data) 46 + perf record -F 100 kill (test-record-freq) 47 + perf record -g -- kill (test-record-graph-default) 48 + perf record -g dwarf -- kill (test-record-graph-dwarf) 49 + perf record -g fp kill (test-record-graph-fp) 50 + perf record --group -e cycles,instructions kill (test-record-group) 51 + perf record -e '{cycles,instructions}' kill (test-record-group1) 52 + perf record -D kill (test-record-no-delay) 53 + perf record -i kill (test-record-no-inherit) 54 + perf record -n kill (test-record-no-samples) 55 + perf record -c 100 -P kill (test-record-period) 56 + perf record -R kill (test-record-raw) 57 + perf stat -e cycles kill (test-stat-basic) 58 + perf stat kill (test-stat-default) 59 + perf stat -d kill (test-stat-detailed-1) 60 + perf stat -dd kill (test-stat-detailed-2) 61 + perf stat -ddd kill (test-stat-detailed-3) 62 + perf stat --group -e cycles,instructions kill (test-stat-group) 63 + perf stat -e '{cycles,instructions}' kill (test-stat-group1) 64 + perf stat -i -e cycles kill (test-stat-no-inherit)
+39
tools/perf/tests/attr/base-record
··· 1 + [event] 2 + fd=1 3 + group_fd=-1 4 + flags=0 5 + type=0|1 6 + size=96 7 + config=0 8 + sample_period=4000 9 + sample_type=263 10 + read_format=7 11 + disabled=1 12 + inherit=1 13 + pinned=0 14 + exclusive=0 15 + exclude_user=0 16 + exclude_kernel=0 17 + exclude_hv=0 18 + exclude_idle=0 19 + mmap=1 20 + comm=1 21 + freq=1 22 + inherit_stat=0 23 + enable_on_exec=1 24 + task=0 25 + watermark=0 26 + precise_ip=0 27 + mmap_data=0 28 + sample_id_all=1 29 + exclude_host=0 30 + exclude_guest=1 31 + exclude_callchain_kernel=0 32 + exclude_callchain_user=0 33 + wakeup_events=0 34 + bp_type=0 35 + config1=0 36 + config2=0 37 + branch_sample_type=0 38 + sample_regs_user=0 39 + sample_stack_user=0
+39
tools/perf/tests/attr/base-stat
··· 1 + [event] 2 + fd=1 3 + group_fd=-1 4 + flags=0 5 + type=0 6 + size=96 7 + config=0 8 + sample_period=0 9 + sample_type=0 10 + read_format=3 11 + disabled=1 12 + inherit=1 13 + pinned=0 14 + exclusive=0 15 + exclude_user=0 16 + exclude_kernel=0 17 + exclude_hv=0 18 + exclude_idle=0 19 + mmap=0 20 + comm=0 21 + freq=0 22 + inherit_stat=0 23 + enable_on_exec=1 24 + task=0 25 + watermark=0 26 + precise_ip=0 27 + mmap_data=0 28 + sample_id_all=0 29 + exclude_host=0 30 + exclude_guest=1 31 + exclude_callchain_kernel=0 32 + exclude_callchain_user=0 33 + wakeup_events=0 34 + bp_type=0 35 + config1=0 36 + config2=0 37 + branch_sample_type=0 38 + sample_regs_user=0 39 + sample_stack_user=0
+5
tools/perf/tests/attr/test-record-basic
··· 1 + [config] 2 + command = record 3 + args = kill >/dev/null 2>&1 4 + 5 + [event:base-record]
+8
tools/perf/tests/attr/test-record-branch-any
··· 1 + [config] 2 + command = record 3 + args = -b kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=8
+8
tools/perf/tests/attr/test-record-branch-filter-any
··· 1 + [config] 2 + command = record 3 + args = -j any kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=8
+8
tools/perf/tests/attr/test-record-branch-filter-any_call
··· 1 + [config] 2 + command = record 3 + args = -j any_call kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=16
+8
tools/perf/tests/attr/test-record-branch-filter-any_ret
··· 1 + [config] 2 + command = record 3 + args = -j any_ret kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=32
+8
tools/perf/tests/attr/test-record-branch-filter-hv
··· 1 + [config] 2 + command = record 3 + args = -j hv kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=8
+8
tools/perf/tests/attr/test-record-branch-filter-ind_call
··· 1 + [config] 2 + command = record 3 + args = -j ind_call kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=64
+8
tools/perf/tests/attr/test-record-branch-filter-k
··· 1 + [config] 2 + command = record 3 + args = -j k kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=8
+8
tools/perf/tests/attr/test-record-branch-filter-u
··· 1 + [config] 2 + command = record 3 + args = -j u kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=2311 8 + branch_sample_type=8
+8
tools/perf/tests/attr/test-record-count
··· 1 + [config] 2 + command = record 3 + args = -c 123 kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=123 7 + sample_type=7 8 + freq=0
+8
tools/perf/tests/attr/test-record-data
··· 1 + [config] 2 + command = record 3 + args = -d kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=271 8 + mmap_data=1
+6
tools/perf/tests/attr/test-record-freq
··· 1 + [config] 2 + command = record 3 + args = -F 100 kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=100
+6
tools/perf/tests/attr/test-record-graph-default
··· 1 + [config] 2 + command = record 3 + args = -g -- kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_type=295
+10
tools/perf/tests/attr/test-record-graph-dwarf
··· 1 + [config] 2 + command = record 3 + args = -g dwarf -- kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_type=12583 7 + exclude_callchain_user=1 8 + sample_stack_user=8192 9 + # TODO different for each arch, no support for that now 10 + sample_regs_user=*
+6
tools/perf/tests/attr/test-record-graph-fp
··· 1 + [config] 2 + command = record 3 + args = -g fp kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_type=295
+17
tools/perf/tests/attr/test-record-group
··· 1 + [config] 2 + command = record 3 + args = --group -e cycles,instructions kill >/dev/null 2>&1 4 + 5 + [event-1:base-record] 6 + fd=1 7 + group_fd=-1 8 + sample_type=327 9 + 10 + [event-2:base-record] 11 + fd=2 12 + group_fd=1 13 + config=1 14 + sample_type=327 15 + mmap=0 16 + comm=0 17 + enable_on_exec=0
+20
tools/perf/tests/attr/test-record-group1
··· 1 + [config] 2 + command = record 3 + args = -e '{cycles,instructions}' kill >/dev/null 2>&1 4 + 5 + [event-1:base-record] 6 + fd=1 7 + group_fd=-1 8 + sample_type=327 9 + 10 + [event-2:base-record] 11 + fd=2 12 + group_fd=1 13 + type=0 14 + config=1 15 + sample_type=327 16 + mmap=0 17 + comm=0 18 + # TODO this is disabled for --group option, enabled otherwise 19 + # check why.. 20 + enable_on_exec=1
+9
tools/perf/tests/attr/test-record-no-delay
··· 1 + [config] 2 + command = record 3 + args = -D kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=263 8 + watermark=0 9 + wakeup_events=1
+7
tools/perf/tests/attr/test-record-no-inherit
··· 1 + [config] 2 + command = record 3 + args = -i kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_type=259 7 + inherit=0
+6
tools/perf/tests/attr/test-record-no-samples
··· 1 + [config] 2 + command = record 3 + args = -n kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=0
+7
tools/perf/tests/attr/test-record-period
··· 1 + [config] 2 + command = record 3 + args = -c 100 -P kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=100 7 + freq=0
+7
tools/perf/tests/attr/test-record-raw
··· 1 + [config] 2 + command = record 3 + args = -R kill >/dev/null 2>&1 4 + 5 + [event:base-record] 6 + sample_period=4000 7 + sample_type=1415
+6
tools/perf/tests/attr/test-stat-basic
··· 1 + [config] 2 + command = stat 3 + args = -e cycles kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + [event:base-stat]
+64
tools/perf/tests/attr/test-stat-default
··· 1 + [config] 2 + command = stat 3 + args = kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK 7 + [event1:base-stat] 8 + fd=1 9 + type=1 10 + config=1 11 + 12 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES 13 + [event2:base-stat] 14 + fd=2 15 + type=1 16 + config=3 17 + 18 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS 19 + [event3:base-stat] 20 + fd=3 21 + type=1 22 + config=4 23 + 24 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS 25 + [event4:base-stat] 26 + fd=4 27 + type=1 28 + config=2 29 + 30 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES 31 + [event5:base-stat] 32 + fd=5 33 + type=0 34 + config=0 35 + 36 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 37 + [event6:base-stat] 38 + fd=6 39 + type=0 40 + config=7 41 + 42 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND 43 + [event7:base-stat] 44 + fd=7 45 + type=0 46 + config=8 47 + 48 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS 49 + [event8:base-stat] 50 + fd=8 51 + type=0 52 + config=1 53 + 54 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 55 + [event9:base-stat] 56 + fd=9 57 + type=0 58 + config=4 59 + 60 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 61 + [event10:base-stat] 62 + fd=10 63 + type=0 64 + config=5
+101
tools/perf/tests/attr/test-stat-detailed-1
··· 1 + [config] 2 + command = stat 3 + args = -d kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + 7 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK 8 + [event1:base-stat] 9 + fd=1 10 + type=1 11 + config=1 12 + 13 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES 14 + [event2:base-stat] 15 + fd=2 16 + type=1 17 + config=3 18 + 19 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS 20 + [event3:base-stat] 21 + fd=3 22 + type=1 23 + config=4 24 + 25 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS 26 + [event4:base-stat] 27 + fd=4 28 + type=1 29 + config=2 30 + 31 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES 32 + [event5:base-stat] 33 + fd=5 34 + type=0 35 + config=0 36 + 37 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 38 + [event6:base-stat] 39 + fd=6 40 + type=0 41 + config=7 42 + 43 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND 44 + [event7:base-stat] 45 + fd=7 46 + type=0 47 + config=8 48 + 49 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS 50 + [event8:base-stat] 51 + fd=8 52 + type=0 53 + config=1 54 + 55 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 56 + [event9:base-stat] 57 + fd=9 58 + type=0 59 + config=4 60 + 61 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 62 + [event10:base-stat] 63 + fd=10 64 + type=0 65 + config=5 66 + 67 + # PERF_TYPE_HW_CACHE / 68 + # PERF_COUNT_HW_CACHE_L1D << 0 | 69 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 70 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 71 + [event11:base-stat] 72 + fd=11 73 + type=3 74 + config=0 75 + 76 + # PERF_TYPE_HW_CACHE / 77 + # PERF_COUNT_HW_CACHE_L1D << 0 | 78 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 79 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 80 + [event12:base-stat] 81 + fd=12 82 + type=3 83 + config=65536 84 + 85 + # PERF_TYPE_HW_CACHE / 86 + # PERF_COUNT_HW_CACHE_LL << 0 | 87 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 88 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 89 + [event13:base-stat] 90 + fd=13 91 + type=3 92 + config=2 93 + 
94 + # PERF_TYPE_HW_CACHE, 95 + # PERF_COUNT_HW_CACHE_LL << 0 | 96 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 97 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 98 + [event14:base-stat] 99 + fd=14 100 + type=3 101 + config=65538
+155
tools/perf/tests/attr/test-stat-detailed-2
··· 1 + [config] 2 + command = stat 3 + args = -dd kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + 7 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK 8 + [event1:base-stat] 9 + fd=1 10 + type=1 11 + config=1 12 + 13 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES 14 + [event2:base-stat] 15 + fd=2 16 + type=1 17 + config=3 18 + 19 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS 20 + [event3:base-stat] 21 + fd=3 22 + type=1 23 + config=4 24 + 25 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS 26 + [event4:base-stat] 27 + fd=4 28 + type=1 29 + config=2 30 + 31 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES 32 + [event5:base-stat] 33 + fd=5 34 + type=0 35 + config=0 36 + 37 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 38 + [event6:base-stat] 39 + fd=6 40 + type=0 41 + config=7 42 + 43 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND 44 + [event7:base-stat] 45 + fd=7 46 + type=0 47 + config=8 48 + 49 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS 50 + [event8:base-stat] 51 + fd=8 52 + type=0 53 + config=1 54 + 55 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 56 + [event9:base-stat] 57 + fd=9 58 + type=0 59 + config=4 60 + 61 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 62 + [event10:base-stat] 63 + fd=10 64 + type=0 65 + config=5 66 + 67 + # PERF_TYPE_HW_CACHE / 68 + # PERF_COUNT_HW_CACHE_L1D << 0 | 69 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 70 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 71 + [event11:base-stat] 72 + fd=11 73 + type=3 74 + config=0 75 + 76 + # PERF_TYPE_HW_CACHE / 77 + # PERF_COUNT_HW_CACHE_L1D << 0 | 78 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 79 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 80 + [event12:base-stat] 81 + fd=12 82 + type=3 83 + config=65536 84 + 85 + # PERF_TYPE_HW_CACHE / 86 + # PERF_COUNT_HW_CACHE_LL << 0 | 87 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 88 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 89 + [event13:base-stat] 90 + fd=13 91 + type=3 92 + config=2 93 + 
94 + # PERF_TYPE_HW_CACHE, 95 + # PERF_COUNT_HW_CACHE_LL << 0 | 96 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 97 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 98 + [event14:base-stat] 99 + fd=14 100 + type=3 101 + config=65538 102 + 103 + # PERF_TYPE_HW_CACHE, 104 + # PERF_COUNT_HW_CACHE_L1I << 0 | 105 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 106 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 107 + [event15:base-stat] 108 + fd=15 109 + type=3 110 + config=1 111 + 112 + # PERF_TYPE_HW_CACHE, 113 + # PERF_COUNT_HW_CACHE_L1I << 0 | 114 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 115 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 116 + [event16:base-stat] 117 + fd=16 118 + type=3 119 + config=65537 120 + 121 + # PERF_TYPE_HW_CACHE, 122 + # PERF_COUNT_HW_CACHE_DTLB << 0 | 123 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 124 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 125 + [event17:base-stat] 126 + fd=17 127 + type=3 128 + config=3 129 + 130 + # PERF_TYPE_HW_CACHE, 131 + # PERF_COUNT_HW_CACHE_DTLB << 0 | 132 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 133 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 134 + [event18:base-stat] 135 + fd=18 136 + type=3 137 + config=65539 138 + 139 + # PERF_TYPE_HW_CACHE, 140 + # PERF_COUNT_HW_CACHE_ITLB << 0 | 141 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 142 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 143 + [event19:base-stat] 144 + fd=19 145 + type=3 146 + config=4 147 + 148 + # PERF_TYPE_HW_CACHE, 149 + # PERF_COUNT_HW_CACHE_ITLB << 0 | 150 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 151 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 152 + [event20:base-stat] 153 + fd=20 154 + type=3 155 + config=65540
+173
tools/perf/tests/attr/test-stat-detailed-3
··· 1 + [config] 2 + command = stat 3 + args = -ddd kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + 7 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK 8 + [event1:base-stat] 9 + fd=1 10 + type=1 11 + config=1 12 + 13 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES 14 + [event2:base-stat] 15 + fd=2 16 + type=1 17 + config=3 18 + 19 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS 20 + [event3:base-stat] 21 + fd=3 22 + type=1 23 + config=4 24 + 25 + # PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS 26 + [event4:base-stat] 27 + fd=4 28 + type=1 29 + config=2 30 + 31 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES 32 + [event5:base-stat] 33 + fd=5 34 + type=0 35 + config=0 36 + 37 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 38 + [event6:base-stat] 39 + fd=6 40 + type=0 41 + config=7 42 + 43 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND 44 + [event7:base-stat] 45 + fd=7 46 + type=0 47 + config=8 48 + 49 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS 50 + [event8:base-stat] 51 + fd=8 52 + type=0 53 + config=1 54 + 55 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 56 + [event9:base-stat] 57 + fd=9 58 + type=0 59 + config=4 60 + 61 + # PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 62 + [event10:base-stat] 63 + fd=10 64 + type=0 65 + config=5 66 + 67 + # PERF_TYPE_HW_CACHE / 68 + # PERF_COUNT_HW_CACHE_L1D << 0 | 69 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 70 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 71 + [event11:base-stat] 72 + fd=11 73 + type=3 74 + config=0 75 + 76 + # PERF_TYPE_HW_CACHE / 77 + # PERF_COUNT_HW_CACHE_L1D << 0 | 78 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 79 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 80 + [event12:base-stat] 81 + fd=12 82 + type=3 83 + config=65536 84 + 85 + # PERF_TYPE_HW_CACHE / 86 + # PERF_COUNT_HW_CACHE_LL << 0 | 87 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 88 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 89 + [event13:base-stat] 90 + fd=13 91 + type=3 92 + config=2 93 
+ 94 + # PERF_TYPE_HW_CACHE, 95 + # PERF_COUNT_HW_CACHE_LL << 0 | 96 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 97 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 98 + [event14:base-stat] 99 + fd=14 100 + type=3 101 + config=65538 102 + 103 + # PERF_TYPE_HW_CACHE, 104 + # PERF_COUNT_HW_CACHE_L1I << 0 | 105 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 106 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 107 + [event15:base-stat] 108 + fd=15 109 + type=3 110 + config=1 111 + 112 + # PERF_TYPE_HW_CACHE, 113 + # PERF_COUNT_HW_CACHE_L1I << 0 | 114 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 115 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 116 + [event16:base-stat] 117 + fd=16 118 + type=3 119 + config=65537 120 + 121 + # PERF_TYPE_HW_CACHE, 122 + # PERF_COUNT_HW_CACHE_DTLB << 0 | 123 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 124 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 125 + [event17:base-stat] 126 + fd=17 127 + type=3 128 + config=3 129 + 130 + # PERF_TYPE_HW_CACHE, 131 + # PERF_COUNT_HW_CACHE_DTLB << 0 | 132 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 133 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 134 + [event18:base-stat] 135 + fd=18 136 + type=3 137 + config=65539 138 + 139 + # PERF_TYPE_HW_CACHE, 140 + # PERF_COUNT_HW_CACHE_ITLB << 0 | 141 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 142 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 143 + [event19:base-stat] 144 + fd=19 145 + type=3 146 + config=4 147 + 148 + # PERF_TYPE_HW_CACHE, 149 + # PERF_COUNT_HW_CACHE_ITLB << 0 | 150 + # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 151 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 152 + [event20:base-stat] 153 + fd=20 154 + type=3 155 + config=65540 156 + 157 + # PERF_TYPE_HW_CACHE, 158 + # PERF_COUNT_HW_CACHE_L1D << 0 | 159 + # (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 160 + # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 161 + [event21:base-stat] 162 + fd=21 163 + type=3 164 + config=512 165 + 166 + # PERF_TYPE_HW_CACHE, 167 + # PERF_COUNT_HW_CACHE_L1D << 0 | 168 + # (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) 
| 169 + # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 170 + [event22:base-stat] 171 + fd=22 172 + type=3 173 + config=66048
+15
tools/perf/tests/attr/test-stat-group
··· 1 + [config] 2 + command = stat 3 + args = --group -e cycles,instructions kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + [event-1:base-stat] 7 + fd=1 8 + group_fd=-1 9 + 10 + [event-2:base-stat] 11 + fd=2 12 + group_fd=1 13 + config=1 14 + disabled=0 15 + enable_on_exec=0
+17
tools/perf/tests/attr/test-stat-group1
··· 1 + [config] 2 + command = stat 3 + args = -e '{cycles,instructions}' kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + [event-1:base-stat] 7 + fd=1 8 + group_fd=-1 9 + 10 + [event-2:base-stat] 11 + fd=2 12 + group_fd=1 13 + config=1 14 + # TODO both disabled and enable_on_exec are disabled for --group option, 15 + # enabled otherwise, check why.. 16 + disabled=1 17 + enable_on_exec=1
+7
tools/perf/tests/attr/test-stat-no-inherit
··· 1 + [config] 2 + command = stat 3 + args = -i -e cycles kill >/dev/null 2>&1 4 + ret = 1 5 + 6 + [event:base-stat] 7 + inherit=0
+24 -15
tools/perf/ui/browsers/annotate.c
··· 188 188 struct disasm_line *cursor = ab->selection, *target; 189 189 struct browser_disasm_line *btarget, *bcursor; 190 190 unsigned int from, to; 191 + struct map_symbol *ms = ab->b.priv; 192 + struct symbol *sym = ms->sym; 193 + 194 + /* PLT symbols contain external offsets */ 195 + if (strstr(sym->name, "@plt")) 196 + return; 191 197 192 198 if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) || 193 199 !disasm_line__has_offset(cursor)) ··· 392 386 browser->b.nr_entries = browser->nr_asm_entries; 393 387 } 394 388 395 - static bool annotate_browser__callq(struct annotate_browser *browser, 396 - int evidx, void (*timer)(void *arg), 397 - void *arg, int delay_secs) 389 + static bool annotate_browser__callq(struct annotate_browser *browser, int evidx, 390 + struct hist_browser_timer *hbt) 398 391 { 399 392 struct map_symbol *ms = browser->b.priv; 400 393 struct disasm_line *dl = browser->selection; ··· 423 418 } 424 419 425 420 pthread_mutex_unlock(&notes->lock); 426 - symbol__tui_annotate(target, ms->map, evidx, timer, arg, delay_secs); 421 + symbol__tui_annotate(target, ms->map, evidx, hbt); 427 422 ui_browser__show_title(&browser->b, sym->name); 428 423 return true; 429 424 } ··· 607 602 } 608 603 609 604 static int annotate_browser__run(struct annotate_browser *browser, int evidx, 610 - void(*timer)(void *arg), 611 - void *arg, int delay_secs) 605 + struct hist_browser_timer *hbt) 612 606 { 613 607 struct rb_node *nd = NULL; 614 608 struct map_symbol *ms = browser->b.priv; 615 609 struct symbol *sym = ms->sym; 616 610 const char *help = "Press 'h' for help on key bindings"; 611 + int delay_secs = hbt ? 
hbt->refresh : 0; 617 612 int key; 618 613 619 614 if (ui_browser__show(&browser->b, sym->name, help) < 0) ··· 644 639 645 640 switch (key) { 646 641 case K_TIMER: 647 - if (timer != NULL) 648 - timer(arg); 642 + if (hbt) 643 + hbt->timer(hbt->arg); 649 644 650 645 if (delay_secs != 0) 651 646 symbol__annotate_decay_histogram(sym, evidx); ··· 745 740 goto show_sup_ins; 746 741 goto out; 747 742 } else if (!(annotate_browser__jump(browser) || 748 - annotate_browser__callq(browser, evidx, timer, arg, delay_secs))) { 743 + annotate_browser__callq(browser, evidx, hbt))) { 749 744 show_sup_ins: 750 745 ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions."); 751 746 } ··· 768 763 } 769 764 770 765 int hist_entry__tui_annotate(struct hist_entry *he, int evidx, 771 - void(*timer)(void *arg), void *arg, int delay_secs) 766 + struct hist_browser_timer *hbt) 772 767 { 773 - return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 774 - timer, arg, delay_secs); 768 + return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, hbt); 775 769 } 776 770 777 771 static void annotate_browser__mark_jump_targets(struct annotate_browser *browser, 778 772 size_t size) 779 773 { 780 774 u64 offset; 775 + struct map_symbol *ms = browser->b.priv; 776 + struct symbol *sym = ms->sym; 777 + 778 + /* PLT symbols contain external offsets */ 779 + if (strstr(sym->name, "@plt")) 780 + return; 781 781 782 782 for (offset = 0; offset < size; ++offset) { 783 783 struct disasm_line *dl = browser->offsets[offset], *dlt; ··· 826 816 } 827 817 828 818 int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, 829 - void(*timer)(void *arg), void *arg, 830 - int delay_secs) 819 + struct hist_browser_timer *hbt) 831 820 { 832 821 struct disasm_line *pos, *n; 833 822 struct annotation *notes; ··· 908 899 909 900 annotate_browser__update_addr_width(&browser); 910 901 911 - ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs); 902 + ret = 
annotate_browser__run(&browser, evidx, hbt); 912 903 list_for_each_entry_safe(pos, n, &notes->src->source, node) { 913 904 list_del(&pos->node); 914 905 disasm_line__free(pos);
+38 -25
tools/perf/ui/browsers/hists.c
··· 11 11 #include "../../util/pstack.h" 12 12 #include "../../util/sort.h" 13 13 #include "../../util/util.h" 14 + #include "../../arch/common.h" 14 15 15 16 #include "../browser.h" 16 17 #include "../helpline.h" ··· 311 310 } 312 311 313 312 static int hist_browser__run(struct hist_browser *browser, const char *ev_name, 314 - void(*timer)(void *arg), void *arg, int delay_secs) 313 + struct hist_browser_timer *hbt) 315 314 { 316 315 int key; 317 316 char title[160]; 317 + int delay_secs = hbt ? hbt->refresh : 0; 318 318 319 319 browser->b.entries = &browser->hists->entries; 320 320 browser->b.nr_entries = browser->hists->nr_entries; ··· 332 330 333 331 switch (key) { 334 332 case K_TIMER: 335 - timer(arg); 333 + hbt->timer(hbt->arg); 336 334 ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); 337 335 338 336 if (browser->hists->stats.nr_lost_warned != ··· 1129 1127 } 1130 1128 } 1131 1129 1130 + /* Check whether the browser is for 'top' or 'report' */ 1131 + static inline bool is_report_browser(void *timer) 1132 + { 1133 + return timer == NULL; 1134 + } 1135 + 1132 1136 static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, 1133 1137 const char *helpline, const char *ev_name, 1134 1138 bool left_exits, 1135 - void(*timer)(void *arg), void *arg, 1136 - int delay_secs) 1139 + struct hist_browser_timer *hbt, 1140 + struct perf_session_env *env) 1137 1141 { 1138 1142 struct hists *hists = &evsel->hists; 1139 1143 struct hist_browser *browser = hist_browser__new(hists); ··· 1150 1142 int key = -1; 1151 1143 char buf[64]; 1152 1144 char script_opt[64]; 1145 + int delay_secs = hbt ? 
hbt->refresh : 0; 1153 1146 1154 1147 if (browser == NULL) 1155 1148 return -1; ··· 1173 1164 1174 1165 nr_options = 0; 1175 1166 1176 - key = hist_browser__run(browser, ev_name, timer, arg, delay_secs); 1167 + key = hist_browser__run(browser, ev_name, hbt); 1177 1168 1178 1169 if (browser->he_selection != NULL) { 1179 1170 thread = hist_browser__selected_thread(browser); ··· 1223 1214 } 1224 1215 continue; 1225 1216 case 'r': 1226 - goto do_scripts; 1217 + if (is_report_browser(hbt)) 1218 + goto do_scripts; 1219 + continue; 1227 1220 case K_F1: 1228 1221 case 'h': 1229 1222 case '?': ··· 1244 1233 "E Expand all callchains\n" 1245 1234 "d Zoom into current DSO\n" 1246 1235 "t Zoom into current Thread\n" 1247 - "r Run available scripts\n" 1236 + "r Run available scripts('perf report' only)\n" 1248 1237 "P Print histograms to perf.hist.N\n" 1249 1238 "V Verbose (DSO names in callchains, etc)\n" 1250 1239 "/ Filter symbol by name"); ··· 1369 1358 struct hist_entry *he; 1370 1359 int err; 1371 1360 do_annotate: 1361 + if (!objdump_path && perf_session_env__lookup_objdump(env)) 1362 + continue; 1363 + 1372 1364 he = hist_browser__selected_entry(browser); 1373 1365 if (he == NULL) 1374 1366 continue; ··· 1394 1380 * Don't let this be freed, say, by hists__decay_entry. 
1395 1381 */ 1396 1382 he->used = true; 1397 - err = hist_entry__tui_annotate(he, evsel->idx, 1398 - timer, arg, delay_secs); 1383 + err = hist_entry__tui_annotate(he, evsel->idx, hbt); 1399 1384 he->used = false; 1400 1385 /* 1401 1386 * offer option to annotate the other branch source or target ··· 1475 1462 struct ui_browser b; 1476 1463 struct perf_evsel *selection; 1477 1464 bool lost_events, lost_events_warned; 1465 + struct perf_session_env *env; 1478 1466 }; 1479 1467 1480 1468 static void perf_evsel_menu__write(struct ui_browser *browser, ··· 1518 1504 1519 1505 static int perf_evsel_menu__run(struct perf_evsel_menu *menu, 1520 1506 int nr_events, const char *help, 1521 - void(*timer)(void *arg), void *arg, int delay_secs) 1507 + struct hist_browser_timer *hbt) 1522 1508 { 1523 1509 struct perf_evlist *evlist = menu->b.priv; 1524 1510 struct perf_evsel *pos; 1525 1511 const char *ev_name, *title = "Available samples"; 1512 + int delay_secs = hbt ? hbt->refresh : 0; 1526 1513 int key; 1527 1514 1528 1515 if (ui_browser__show(&menu->b, title, ··· 1535 1520 1536 1521 switch (key) { 1537 1522 case K_TIMER: 1538 - timer(arg); 1523 + hbt->timer(hbt->arg); 1539 1524 1540 1525 if (!menu->lost_events_warned && menu->lost_events) { 1541 1526 ui_browser__warn_lost_events(&menu->b); ··· 1553 1538 * Give the calling tool a chance to populate the non 1554 1539 * default evsel resorted hists tree. 
1555 1540 */ 1556 - if (timer) 1557 - timer(arg); 1541 + if (hbt) 1542 + hbt->timer(hbt->arg); 1558 1543 ev_name = perf_evsel__name(pos); 1559 1544 key = perf_evsel__hists_browse(pos, nr_events, help, 1560 - ev_name, true, timer, 1561 - arg, delay_secs); 1545 + ev_name, true, hbt, 1546 + menu->env); 1562 1547 ui_browser__show_title(&menu->b, title); 1563 1548 switch (key) { 1564 1549 case K_TAB: ··· 1606 1591 1607 1592 static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, 1608 1593 const char *help, 1609 - void(*timer)(void *arg), void *arg, 1610 - int delay_secs) 1594 + struct hist_browser_timer *hbt, 1595 + struct perf_session_env *env) 1611 1596 { 1612 1597 struct perf_evsel *pos; 1613 1598 struct perf_evsel_menu menu = { ··· 1619 1604 .nr_entries = evlist->nr_entries, 1620 1605 .priv = evlist, 1621 1606 }, 1607 + .env = env, 1622 1608 }; 1623 1609 1624 1610 ui_helpline__push("Press ESC to exit"); ··· 1632 1616 menu.b.width = line_len; 1633 1617 } 1634 1618 1635 - return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer, 1636 - arg, delay_secs); 1619 + return perf_evsel_menu__run(&menu, evlist->nr_entries, help, hbt); 1637 1620 } 1638 1621 1639 1622 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, 1640 - void(*timer)(void *arg), void *arg, 1641 - int delay_secs) 1623 + struct hist_browser_timer *hbt, 1624 + struct perf_session_env *env) 1642 1625 { 1643 1626 if (evlist->nr_entries == 1) { 1644 1627 struct perf_evsel *first = list_entry(evlist->entries.next, 1645 1628 struct perf_evsel, node); 1646 1629 const char *ev_name = perf_evsel__name(first); 1647 1630 return perf_evsel__hists_browse(first, evlist->nr_entries, help, 1648 - ev_name, false, timer, arg, 1649 - delay_secs); 1631 + ev_name, false, hbt, env); 1650 1632 } 1651 1633 1652 - return __perf_evlist__tui_browse_hists(evlist, help, 1653 - timer, arg, delay_secs); 1634 + return __perf_evlist__tui_browse_hists(evlist, help, hbt, env); 1654 1635 }
+1 -3
tools/perf/ui/gtk/browser.c
··· 237 237 238 238 int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, 239 239 const char *help, 240 - void (*timer) (void *arg)__maybe_unused, 241 - void *arg __maybe_unused, 242 - int delay_secs __maybe_unused) 240 + struct hist_browser_timer *hbt __maybe_unused) 243 241 { 244 242 struct perf_evsel *pos; 245 243 GtkWidget *vbox;
+5 -5
tools/perf/ui/hist.c
··· 161 161 162 162 static double baseline_percent(struct hist_entry *he) 163 163 { 164 - struct hist_entry *pair = he->pair; 164 + struct hist_entry *pair = hist_entry__next_pair(he); 165 165 struct hists *pair_hists = pair ? pair->hists : NULL; 166 166 double percent = 0.0; 167 167 ··· 179 179 { 180 180 double percent = baseline_percent(he); 181 181 182 - if (he->pair) 182 + if (hist_entry__has_pairs(he)) 183 183 return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); 184 184 else 185 185 return scnprintf(hpp->buf, hpp->size, " "); ··· 190 190 double percent = baseline_percent(he); 191 191 const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%"; 192 192 193 - if (he->pair || symbol_conf.field_sep) 193 + if (hist_entry__has_pairs(he) || symbol_conf.field_sep) 194 194 return scnprintf(hpp->buf, hpp->size, fmt, percent); 195 195 else 196 196 return scnprintf(hpp->buf, hpp->size, " "); ··· 248 248 249 249 static int hpp__entry_period_baseline(struct perf_hpp *hpp, struct hist_entry *he) 250 250 { 251 - struct hist_entry *pair = he->pair; 251 + struct hist_entry *pair = hist_entry__next_pair(he); 252 252 u64 period = pair ? pair->stat.period : 0; 253 253 const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64; 254 254 ··· 354 354 static int hpp__entry_displ(struct perf_hpp *hpp, 355 355 struct hist_entry *he) 356 356 { 357 - struct hist_entry *pair = he->pair; 357 + struct hist_entry *pair = hist_entry__next_pair(he); 358 358 long displacement = pair ? pair->position - he->position : 0; 359 359 const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s"; 360 360 char buf[32] = " ";
+5 -9
tools/perf/util/PERF-VERSION-GEN
··· 9 9 LF=' 10 10 ' 11 11 12 + # 12 13 # First check if there is a .git to get the version from git describe 13 - # otherwise try to get the version from the kernel makefile 14 + # otherwise try to get the version from the kernel Makefile 15 + # 14 16 if test -d ../../.git -o -f ../../.git && 15 - VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && 16 - case "$VN" in 17 - *$LF*) (exit 1) ;; 18 - v[0-9]*) 19 - git update-index -q --refresh 20 - test -z "$(git diff-index --name-only HEAD --)" || 21 - VN="$VN-dirty" ;; 22 - esac 17 + VN=$(git tag 2>/dev/null | tail -1 | grep -E "v[0-9].[0-9]*") 23 18 then 19 + VN=$(echo $VN"-g"$(git log -1 --abbrev=4 --pretty=format:"%h" HEAD)) 24 20 VN=$(echo "$VN" | sed -e 's/-/./g'); 25 21 else 26 22 VN=$(MAKEFLAGS= make -sC ../.. kernelversion)
+61 -10
tools/perf/util/annotate.c
··· 171 171 if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0) 172 172 goto out_free_ops; 173 173 174 - ops->locked.ins = ins__find(name); 175 - if (ops->locked.ins == NULL) 176 - goto out_free_ops; 174 + ops->locked.ins = ins__find(name); 175 + if (ops->locked.ins == NULL) 176 + goto out_free_ops; 177 177 178 - if (!ops->locked.ins->ops) 179 - return 0; 178 + if (!ops->locked.ins->ops) 179 + return 0; 180 180 181 - if (ops->locked.ins->ops->parse) 182 - ops->locked.ins->ops->parse(ops->locked.ops); 181 + if (ops->locked.ins->ops->parse) 182 + ops->locked.ins->ops->parse(ops->locked.ops); 183 183 184 184 return 0; 185 185 ··· 401 401 { .name = "testb", .ops = &mov_ops, }, 402 402 { .name = "testl", .ops = &mov_ops, }, 403 403 { .name = "xadd", .ops = &mov_ops, }, 404 + { .name = "xbeginl", .ops = &jump_ops, }, 405 + { .name = "xbeginq", .ops = &jump_ops, }, 404 406 }; 405 407 406 408 static int ins__cmp(const void *name, const void *insp) ··· 858 856 struct source_line *iter; 859 857 struct rb_node **p = &root->rb_node; 860 858 struct rb_node *parent = NULL; 859 + int ret; 861 860 862 861 while (*p != NULL) { 863 862 parent = *p; 864 863 iter = rb_entry(parent, struct source_line, node); 865 864 866 - if (src_line->percent > iter->percent) 865 + ret = strcmp(iter->path, src_line->path); 866 + if (ret == 0) { 867 + iter->percent_sum += src_line->percent; 868 + return; 869 + } 870 + 871 + if (ret < 0) 872 + p = &(*p)->rb_left; 873 + else 874 + p = &(*p)->rb_right; 875 + } 876 + 877 + src_line->percent_sum = src_line->percent; 878 + 879 + rb_link_node(&src_line->node, parent, p); 880 + rb_insert_color(&src_line->node, root); 881 + } 882 + 883 + static void __resort_source_line(struct rb_root *root, struct source_line *src_line) 884 + { 885 + struct source_line *iter; 886 + struct rb_node **p = &root->rb_node; 887 + struct rb_node *parent = NULL; 888 + 889 + while (*p != NULL) { 890 + parent = *p; 891 + iter = rb_entry(parent, struct source_line, 
node); 892 + 893 + if (src_line->percent_sum > iter->percent_sum) 867 894 p = &(*p)->rb_left; 868 895 else 869 896 p = &(*p)->rb_right; ··· 900 869 901 870 rb_link_node(&src_line->node, parent, p); 902 871 rb_insert_color(&src_line->node, root); 872 + } 873 + 874 + static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root) 875 + { 876 + struct source_line *src_line; 877 + struct rb_node *node; 878 + 879 + node = rb_first(src_root); 880 + while (node) { 881 + struct rb_node *next; 882 + 883 + src_line = rb_entry(node, struct source_line, node); 884 + next = rb_next(node); 885 + rb_erase(node, src_root); 886 + 887 + __resort_source_line(dest_root, src_line); 888 + node = next; 889 + } 903 890 } 904 891 905 892 static void symbol__free_source_line(struct symbol *sym, int len) ··· 944 895 struct source_line *src_line; 945 896 struct annotation *notes = symbol__annotation(sym); 946 897 struct sym_hist *h = annotation__histogram(notes, evidx); 898 + struct rb_root tmp_root = RB_ROOT; 947 899 948 900 if (!h->sum) 949 901 return 0; ··· 979 929 goto next; 980 930 981 931 strcpy(src_line[i].path, path); 982 - insert_source_line(root, &src_line[i]); 932 + insert_source_line(&tmp_root, &src_line[i]); 983 933 984 934 next: 985 935 pclose(fp); 986 936 } 987 937 938 + resort_source_line(root, &tmp_root); 988 939 return 0; 989 940 } 990 941 ··· 1009 958 char *path; 1010 959 1011 960 src_line = rb_entry(node, struct source_line, node); 1012 - percent = src_line->percent; 961 + percent = src_line->percent_sum; 1013 962 color = get_percent_color(percent); 1014 963 path = src_line->path; 1015 964
+5 -4
tools/perf/util/annotate.h
··· 5 5 #include <stdint.h> 6 6 #include "types.h" 7 7 #include "symbol.h" 8 + #include "hist.h" 8 9 #include <linux/list.h> 9 10 #include <linux/rbtree.h> 10 11 #include <pthread.h> ··· 76 75 struct source_line { 77 76 struct rb_node node; 78 77 double percent; 78 + double percent_sum; 79 79 char *path; 80 80 }; 81 81 ··· 142 140 143 141 #ifdef NEWT_SUPPORT 144 142 int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, 145 - void(*timer)(void *arg), void *arg, int delay_secs); 143 + struct hist_browser_timer *hbt); 146 144 #else 147 145 static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, 148 146 struct map *map __maybe_unused, 149 147 int evidx __maybe_unused, 150 - void(*timer)(void *arg) __maybe_unused, 151 - void *arg __maybe_unused, 152 - int delay_secs __maybe_unused) 148 + struct hist_browser_timer *hbt 149 + __maybe_unused) 153 150 { 154 151 return 0; 155 152 }
+1
tools/perf/util/dso-test-data.c tools/perf/tests/dso-data.c
··· 6 6 #include <fcntl.h> 7 7 #include <string.h> 8 8 9 + #include "machine.h" 9 10 #include "symbol.h" 10 11 11 12 #define TEST_ASSERT_VAL(text, cond) \
+1
tools/perf/util/dso.c
··· 1 1 #include "symbol.h" 2 2 #include "dso.h" 3 + #include "machine.h" 3 4 #include "util.h" 4 5 #include "debug.h" 5 6
+100
tools/perf/util/hist.c
··· 244 244 he->ms.map->referenced = true; 245 245 if (symbol_conf.use_callchain) 246 246 callchain_init(he->callchain); 247 + 248 + INIT_LIST_HEAD(&he->pairs.node); 247 249 } 248 250 249 251 return he; ··· 412 410 413 411 void hist_entry__free(struct hist_entry *he) 414 412 { 413 + free(he->branch_info); 415 414 free(he); 416 415 } 417 416 ··· 715 712 { 716 713 ++hists->stats.nr_events[0]; 717 714 ++hists->stats.nr_events[type]; 715 + } 716 + 717 + static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 718 + struct hist_entry *pair) 719 + { 720 + struct rb_node **p = &hists->entries.rb_node; 721 + struct rb_node *parent = NULL; 722 + struct hist_entry *he; 723 + int cmp; 724 + 725 + while (*p != NULL) { 726 + parent = *p; 727 + he = rb_entry(parent, struct hist_entry, rb_node); 728 + 729 + cmp = hist_entry__cmp(pair, he); 730 + 731 + if (!cmp) 732 + goto out; 733 + 734 + if (cmp < 0) 735 + p = &(*p)->rb_left; 736 + else 737 + p = &(*p)->rb_right; 738 + } 739 + 740 + he = hist_entry__new(pair); 741 + if (he) { 742 + he->stat.nr_events = 0; 743 + he->stat.period = 0; 744 + he->hists = hists; 745 + rb_link_node(&he->rb_node, parent, p); 746 + rb_insert_color(&he->rb_node, &hists->entries); 747 + hists__inc_nr_entries(hists, he); 748 + } 749 + out: 750 + return he; 751 + } 752 + 753 + static struct hist_entry *hists__find_entry(struct hists *hists, 754 + struct hist_entry *he) 755 + { 756 + struct rb_node *n = hists->entries.rb_node; 757 + 758 + while (n) { 759 + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 760 + int64_t cmp = hist_entry__cmp(he, iter); 761 + 762 + if (cmp < 0) 763 + n = n->rb_left; 764 + else if (cmp > 0) 765 + n = n->rb_right; 766 + else 767 + return iter; 768 + } 769 + 770 + return NULL; 771 + } 772 + 773 + /* 774 + * Look for pairs to link to the leader buckets (hist_entries): 775 + */ 776 + void hists__match(struct hists *leader, struct hists *other) 777 + { 778 + struct rb_node *nd; 779 + struct hist_entry 
*pos, *pair; 780 + 781 + for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) { 782 + pos = rb_entry(nd, struct hist_entry, rb_node); 783 + pair = hists__find_entry(other, pos); 784 + 785 + if (pair) 786 + hist__entry_add_pair(pos, pair); 787 + } 788 + } 789 + 790 + /* 791 + * Look for entries in the other hists that are not present in the leader, if 792 + * we find them, just add a dummy entry on the leader hists, with period=0, 793 + * nr_events=0, to serve as the list header. 794 + */ 795 + int hists__link(struct hists *leader, struct hists *other) 796 + { 797 + struct rb_node *nd; 798 + struct hist_entry *pos, *pair; 799 + 800 + for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) { 801 + pos = rb_entry(nd, struct hist_entry, rb_node); 802 + 803 + if (!hist_entry__has_pairs(pos)) { 804 + pair = hists__add_dummy_entry(leader, pos); 805 + if (pair == NULL) 806 + return -1; 807 + hist__entry_add_pair(pair, pos); 808 + } 809 + } 810 + 811 + return 0; 718 812 }
+19 -15
tools/perf/util/hist.h
··· 4 4 #include <linux/types.h> 5 5 #include <pthread.h> 6 6 #include "callchain.h" 7 + #include "header.h" 7 8 8 9 extern struct callchain_param callchain_param; 9 10 ··· 115 114 void hists__reset_col_len(struct hists *hists); 116 115 void hists__calc_col_len(struct hists *hists, struct hist_entry *he); 117 116 117 + void hists__match(struct hists *leader, struct hists *other); 118 + int hists__link(struct hists *leader, struct hists *other); 119 + 118 120 struct perf_hpp { 119 121 char *buf; 120 122 size_t size; ··· 161 157 162 158 struct perf_evlist; 163 159 160 + struct hist_browser_timer { 161 + void (*timer)(void *arg); 162 + void *arg; 163 + int refresh; 164 + }; 165 + 164 166 #ifdef NEWT_SUPPORT 165 167 #include "../ui/keysyms.h" 166 168 int hist_entry__tui_annotate(struct hist_entry *he, int evidx, 167 - void(*timer)(void *arg), void *arg, int delay_secs); 169 + struct hist_browser_timer *hbt); 168 170 169 171 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, 170 - void(*timer)(void *arg), void *arg, 171 - int refresh); 172 + struct hist_browser_timer *hbt, 173 + struct perf_session_env *env); 172 174 int script_browse(const char *script_opt); 173 175 #else 174 176 static inline 175 177 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, 176 178 const char *help __maybe_unused, 177 - void(*timer)(void *arg) __maybe_unused, 178 - void *arg __maybe_unused, 179 - int refresh __maybe_unused) 179 + struct hist_browser_timer *hbt __maybe_unused, 180 + struct perf_session_env *env __maybe_unused) 180 181 { 181 182 return 0; 182 183 } ··· 189 180 static inline int hist_entry__tui_annotate(struct hist_entry *self 190 181 __maybe_unused, 191 182 int evidx __maybe_unused, 192 - void(*timer)(void *arg) 193 - __maybe_unused, 194 - void *arg __maybe_unused, 195 - int delay_secs __maybe_unused) 183 + struct hist_browser_timer *hbt 184 + __maybe_unused) 196 185 { 197 186 return 0; 198 187 } ··· 206 199 207 200 #ifdef 
GTK2_SUPPORT 208 201 int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help, 209 - void(*timer)(void *arg), void *arg, 210 - int refresh); 202 + struct hist_browser_timer *hbt __maybe_unused); 211 203 #else 212 204 static inline 213 205 int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, 214 206 const char *help __maybe_unused, 215 - void(*timer)(void *arg) __maybe_unused, 216 - void *arg __maybe_unused, 217 - int refresh __maybe_unused) 207 + struct hist_browser_timer *hbt __maybe_unused) 218 208 { 219 209 return 0; 220 210 }
+196 -9
tools/perf/util/machine.c
··· 2 2 #include "event.h" 3 3 #include "machine.h" 4 4 #include "map.h" 5 + #include "strlist.h" 5 6 #include "thread.h" 6 7 #include <stdbool.h> 8 + 9 + int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 10 + { 11 + map_groups__init(&machine->kmaps); 12 + RB_CLEAR_NODE(&machine->rb_node); 13 + INIT_LIST_HEAD(&machine->user_dsos); 14 + INIT_LIST_HEAD(&machine->kernel_dsos); 15 + 16 + machine->threads = RB_ROOT; 17 + INIT_LIST_HEAD(&machine->dead_threads); 18 + machine->last_match = NULL; 19 + 20 + machine->kmaps.machine = machine; 21 + machine->pid = pid; 22 + 23 + machine->root_dir = strdup(root_dir); 24 + if (machine->root_dir == NULL) 25 + return -ENOMEM; 26 + 27 + if (pid != HOST_KERNEL_ID) { 28 + struct thread *thread = machine__findnew_thread(machine, pid); 29 + char comm[64]; 30 + 31 + if (thread == NULL) 32 + return -ENOMEM; 33 + 34 + snprintf(comm, sizeof(comm), "[guest/%d]", pid); 35 + thread__set_comm(thread, comm); 36 + } 37 + 38 + return 0; 39 + } 40 + 41 + static void dsos__delete(struct list_head *dsos) 42 + { 43 + struct dso *pos, *n; 44 + 45 + list_for_each_entry_safe(pos, n, dsos, node) { 46 + list_del(&pos->node); 47 + dso__delete(pos); 48 + } 49 + } 50 + 51 + void machine__exit(struct machine *machine) 52 + { 53 + map_groups__exit(&machine->kmaps); 54 + dsos__delete(&machine->user_dsos); 55 + dsos__delete(&machine->kernel_dsos); 56 + free(machine->root_dir); 57 + machine->root_dir = NULL; 58 + } 59 + 60 + void machine__delete(struct machine *machine) 61 + { 62 + machine__exit(machine); 63 + free(machine); 64 + } 65 + 66 + struct machine *machines__add(struct rb_root *machines, pid_t pid, 67 + const char *root_dir) 68 + { 69 + struct rb_node **p = &machines->rb_node; 70 + struct rb_node *parent = NULL; 71 + struct machine *pos, *machine = malloc(sizeof(*machine)); 72 + 73 + if (machine == NULL) 74 + return NULL; 75 + 76 + if (machine__init(machine, root_dir, pid) != 0) { 77 + free(machine); 78 + return NULL; 79 + } 80 + 
81 + while (*p != NULL) { 82 + parent = *p; 83 + pos = rb_entry(parent, struct machine, rb_node); 84 + if (pid < pos->pid) 85 + p = &(*p)->rb_left; 86 + else 87 + p = &(*p)->rb_right; 88 + } 89 + 90 + rb_link_node(&machine->rb_node, parent, p); 91 + rb_insert_color(&machine->rb_node, machines); 92 + 93 + return machine; 94 + } 95 + 96 + struct machine *machines__find(struct rb_root *machines, pid_t pid) 97 + { 98 + struct rb_node **p = &machines->rb_node; 99 + struct rb_node *parent = NULL; 100 + struct machine *machine; 101 + struct machine *default_machine = NULL; 102 + 103 + while (*p != NULL) { 104 + parent = *p; 105 + machine = rb_entry(parent, struct machine, rb_node); 106 + if (pid < machine->pid) 107 + p = &(*p)->rb_left; 108 + else if (pid > machine->pid) 109 + p = &(*p)->rb_right; 110 + else 111 + return machine; 112 + if (!machine->pid) 113 + default_machine = machine; 114 + } 115 + 116 + return default_machine; 117 + } 118 + 119 + struct machine *machines__findnew(struct rb_root *machines, pid_t pid) 120 + { 121 + char path[PATH_MAX]; 122 + const char *root_dir = ""; 123 + struct machine *machine = machines__find(machines, pid); 124 + 125 + if (machine && (machine->pid == pid)) 126 + goto out; 127 + 128 + if ((pid != HOST_KERNEL_ID) && 129 + (pid != DEFAULT_GUEST_KERNEL_ID) && 130 + (symbol_conf.guestmount)) { 131 + sprintf(path, "%s/%d", symbol_conf.guestmount, pid); 132 + if (access(path, R_OK)) { 133 + static struct strlist *seen; 134 + 135 + if (!seen) 136 + seen = strlist__new(true, NULL); 137 + 138 + if (!strlist__has_entry(seen, path)) { 139 + pr_err("Can't access file %s\n", path); 140 + strlist__add(seen, path); 141 + } 142 + machine = NULL; 143 + goto out; 144 + } 145 + root_dir = path; 146 + } 147 + 148 + machine = machines__add(machines, pid, root_dir); 149 + out: 150 + return machine; 151 + } 152 + 153 + void machines__process(struct rb_root *machines, 154 + machine__process_t process, void *data) 155 + { 156 + struct rb_node *nd; 157 + 158 
+ for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 159 + struct machine *pos = rb_entry(nd, struct machine, rb_node); 160 + process(pos, data); 161 + } 162 + } 163 + 164 + char *machine__mmap_name(struct machine *machine, char *bf, size_t size) 165 + { 166 + if (machine__is_host(machine)) 167 + snprintf(bf, size, "[%s]", "kernel.kallsyms"); 168 + else if (machine__is_default_guest(machine)) 169 + snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); 170 + else { 171 + snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", 172 + machine->pid); 173 + } 174 + 175 + return bf; 176 + } 177 + 178 + void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) 179 + { 180 + struct rb_node *node; 181 + struct machine *machine; 182 + 183 + for (node = rb_first(machines); node; node = rb_next(node)) { 184 + machine = rb_entry(node, struct machine, rb_node); 185 + machine->id_hdr_size = id_hdr_size; 186 + } 187 + 188 + return; 189 + } 7 190 8 191 static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, 9 192 bool create) ··· 267 84 static void machine__set_kernel_mmap_len(struct machine *machine, 268 85 union perf_event *event) 269 86 { 270 - machine->vmlinux_maps[MAP__FUNCTION]->start = event->mmap.start; 271 - machine->vmlinux_maps[MAP__FUNCTION]->end = (event->mmap.start + 272 - event->mmap.len); 273 - /* 274 - * Be a bit paranoid here, some perf.data file came with 275 - * a zero sized synthesized MMAP event for the kernel. 276 - */ 277 - if (machine->vmlinux_maps[MAP__FUNCTION]->end == 0) 278 - machine->vmlinux_maps[MAP__FUNCTION]->end = ~0ULL; 87 + int i; 88 + 89 + for (i = 0; i < MAP__NR_TYPES; i++) { 90 + machine->vmlinux_maps[i]->start = event->mmap.start; 91 + machine->vmlinux_maps[i]->end = (event->mmap.start + 92 + event->mmap.len); 93 + /* 94 + * Be a bit paranoid here, some perf.data file came with 95 + * a zero sized synthesized MMAP event for the kernel. 
96 + */ 97 + if (machine->vmlinux_maps[i]->end == 0) 98 + machine->vmlinux_maps[i]->end = ~0ULL; 99 + } 279 100 } 280 101 281 102 static int machine__process_kernel_mmap_event(struct machine *machine,
+130 -1
tools/perf/util/machine.h
··· 2 2 #define __PERF_MACHINE_H 3 3 4 4 #include <sys/types.h> 5 + #include <linux/rbtree.h> 6 + #include "map.h" 5 7 8 + struct branch_stack; 9 + struct perf_evsel; 10 + struct perf_sample; 11 + struct symbol; 6 12 struct thread; 7 - struct machine; 8 13 union perf_event; 14 + 15 + /* Native host kernel uses -1 as pid index in machine */ 16 + #define HOST_KERNEL_ID (-1) 17 + #define DEFAULT_GUEST_KERNEL_ID (0) 18 + 19 + struct machine { 20 + struct rb_node rb_node; 21 + pid_t pid; 22 + u16 id_hdr_size; 23 + char *root_dir; 24 + struct rb_root threads; 25 + struct list_head dead_threads; 26 + struct thread *last_match; 27 + struct list_head user_dsos; 28 + struct list_head kernel_dsos; 29 + struct map_groups kmaps; 30 + struct map *vmlinux_maps[MAP__NR_TYPES]; 31 + }; 32 + 33 + static inline 34 + struct map *machine__kernel_map(struct machine *machine, enum map_type type) 35 + { 36 + return machine->vmlinux_maps[type]; 37 + } 9 38 10 39 struct thread *machine__find_thread(struct machine *machine, pid_t pid); 11 40 ··· 44 15 int machine__process_lost_event(struct machine *machine, union perf_event *event); 45 16 int machine__process_mmap_event(struct machine *machine, union perf_event *event); 46 17 int machine__process_event(struct machine *machine, union perf_event *event); 18 + 19 + typedef void (*machine__process_t)(struct machine *machine, void *data); 20 + 21 + void machines__process(struct rb_root *machines, 22 + machine__process_t process, void *data); 23 + 24 + struct machine *machines__add(struct rb_root *machines, pid_t pid, 25 + const char *root_dir); 26 + struct machine *machines__find_host(struct rb_root *machines); 27 + struct machine *machines__find(struct rb_root *machines, pid_t pid); 28 + struct machine *machines__findnew(struct rb_root *machines, pid_t pid); 29 + 30 + void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size); 31 + char *machine__mmap_name(struct machine *machine, char *bf, size_t size); 32 + 33 + int 
machine__init(struct machine *machine, const char *root_dir, pid_t pid); 34 + void machine__exit(struct machine *machine); 35 + void machine__delete(struct machine *machine); 36 + 37 + 38 + struct branch_info *machine__resolve_bstack(struct machine *machine, 39 + struct thread *thread, 40 + struct branch_stack *bs); 41 + int machine__resolve_callchain(struct machine *machine, 42 + struct perf_evsel *evsel, 43 + struct thread *thread, 44 + struct perf_sample *sample, 45 + struct symbol **parent); 46 + 47 + /* 48 + * Default guest kernel is defined by parameter --guestkallsyms 49 + * and --guestmodules 50 + */ 51 + static inline bool machine__is_default_guest(struct machine *machine) 52 + { 53 + return machine ? machine->pid == DEFAULT_GUEST_KERNEL_ID : false; 54 + } 55 + 56 + static inline bool machine__is_host(struct machine *machine) 57 + { 58 + return machine ? machine->pid == HOST_KERNEL_ID : false; 59 + } 60 + 61 + struct thread *machine__findnew_thread(struct machine *machine, pid_t pid); 62 + void machine__remove_thread(struct machine *machine, struct thread *th); 63 + 64 + size_t machine__fprintf(struct machine *machine, FILE *fp); 65 + 66 + static inline 67 + struct symbol *machine__find_kernel_symbol(struct machine *machine, 68 + enum map_type type, u64 addr, 69 + struct map **mapp, 70 + symbol_filter_t filter) 71 + { 72 + return map_groups__find_symbol(&machine->kmaps, type, addr, 73 + mapp, filter); 74 + } 75 + 76 + static inline 77 + struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr, 78 + struct map **mapp, 79 + symbol_filter_t filter) 80 + { 81 + return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr, 82 + mapp, filter); 83 + } 84 + 85 + static inline 86 + struct symbol *machine__find_kernel_function_by_name(struct machine *machine, 87 + const char *name, 88 + struct map **mapp, 89 + symbol_filter_t filter) 90 + { 91 + return map_groups__find_function_by_name(&machine->kmaps, name, mapp, 92 + filter); 93 + } 94 + 
95 + struct map *machine__new_module(struct machine *machine, u64 start, 96 + const char *filename); 97 + 98 + int machine__load_kallsyms(struct machine *machine, const char *filename, 99 + enum map_type type, symbol_filter_t filter); 100 + int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 101 + symbol_filter_t filter); 102 + 103 + size_t machine__fprintf_dsos_buildid(struct machine *machine, 104 + FILE *fp, bool with_hits); 105 + size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); 106 + size_t machines__fprintf_dsos_buildid(struct rb_root *machines, 107 + FILE *fp, bool with_hits); 108 + 109 + void machine__destroy_kernel_maps(struct machine *machine); 110 + int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); 111 + int machine__create_kernel_maps(struct machine *machine); 112 + 113 + int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); 114 + int machines__create_guest_kernel_maps(struct rb_root *machines); 115 + void machines__destroy_guest_kernel_maps(struct rb_root *machines); 116 + 117 + size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); 47 118 48 119 #endif /* __PERF_MACHINE_H */
+1 -180
tools/perf/util/map.c
··· 24 24 25 25 static inline int is_no_dso_memory(const char *filename) 26 26 { 27 - return !strcmp(filename, "[stack]") || 27 + return !strncmp(filename, "[stack", 6) || 28 28 !strcmp(filename, "[heap]"); 29 29 } 30 30 ··· 589 589 } 590 590 591 591 return NULL; 592 - } 593 - 594 - int machine__init(struct machine *self, const char *root_dir, pid_t pid) 595 - { 596 - map_groups__init(&self->kmaps); 597 - RB_CLEAR_NODE(&self->rb_node); 598 - INIT_LIST_HEAD(&self->user_dsos); 599 - INIT_LIST_HEAD(&self->kernel_dsos); 600 - 601 - self->threads = RB_ROOT; 602 - INIT_LIST_HEAD(&self->dead_threads); 603 - self->last_match = NULL; 604 - 605 - self->kmaps.machine = self; 606 - self->pid = pid; 607 - self->root_dir = strdup(root_dir); 608 - if (self->root_dir == NULL) 609 - return -ENOMEM; 610 - 611 - if (pid != HOST_KERNEL_ID) { 612 - struct thread *thread = machine__findnew_thread(self, pid); 613 - char comm[64]; 614 - 615 - if (thread == NULL) 616 - return -ENOMEM; 617 - 618 - snprintf(comm, sizeof(comm), "[guest/%d]", pid); 619 - thread__set_comm(thread, comm); 620 - } 621 - 622 - return 0; 623 - } 624 - 625 - static void dsos__delete(struct list_head *self) 626 - { 627 - struct dso *pos, *n; 628 - 629 - list_for_each_entry_safe(pos, n, self, node) { 630 - list_del(&pos->node); 631 - dso__delete(pos); 632 - } 633 - } 634 - 635 - void machine__exit(struct machine *self) 636 - { 637 - map_groups__exit(&self->kmaps); 638 - dsos__delete(&self->user_dsos); 639 - dsos__delete(&self->kernel_dsos); 640 - free(self->root_dir); 641 - self->root_dir = NULL; 642 - } 643 - 644 - void machine__delete(struct machine *self) 645 - { 646 - machine__exit(self); 647 - free(self); 648 - } 649 - 650 - struct machine *machines__add(struct rb_root *self, pid_t pid, 651 - const char *root_dir) 652 - { 653 - struct rb_node **p = &self->rb_node; 654 - struct rb_node *parent = NULL; 655 - struct machine *pos, *machine = malloc(sizeof(*machine)); 656 - 657 - if (!machine) 658 - return NULL; 659 - 
660 - if (machine__init(machine, root_dir, pid) != 0) { 661 - free(machine); 662 - return NULL; 663 - } 664 - 665 - while (*p != NULL) { 666 - parent = *p; 667 - pos = rb_entry(parent, struct machine, rb_node); 668 - if (pid < pos->pid) 669 - p = &(*p)->rb_left; 670 - else 671 - p = &(*p)->rb_right; 672 - } 673 - 674 - rb_link_node(&machine->rb_node, parent, p); 675 - rb_insert_color(&machine->rb_node, self); 676 - 677 - return machine; 678 - } 679 - 680 - struct machine *machines__find(struct rb_root *self, pid_t pid) 681 - { 682 - struct rb_node **p = &self->rb_node; 683 - struct rb_node *parent = NULL; 684 - struct machine *machine; 685 - struct machine *default_machine = NULL; 686 - 687 - while (*p != NULL) { 688 - parent = *p; 689 - machine = rb_entry(parent, struct machine, rb_node); 690 - if (pid < machine->pid) 691 - p = &(*p)->rb_left; 692 - else if (pid > machine->pid) 693 - p = &(*p)->rb_right; 694 - else 695 - return machine; 696 - if (!machine->pid) 697 - default_machine = machine; 698 - } 699 - 700 - return default_machine; 701 - } 702 - 703 - struct machine *machines__findnew(struct rb_root *self, pid_t pid) 704 - { 705 - char path[PATH_MAX]; 706 - const char *root_dir = ""; 707 - struct machine *machine = machines__find(self, pid); 708 - 709 - if (machine && (machine->pid == pid)) 710 - goto out; 711 - 712 - if ((pid != HOST_KERNEL_ID) && 713 - (pid != DEFAULT_GUEST_KERNEL_ID) && 714 - (symbol_conf.guestmount)) { 715 - sprintf(path, "%s/%d", symbol_conf.guestmount, pid); 716 - if (access(path, R_OK)) { 717 - static struct strlist *seen; 718 - 719 - if (!seen) 720 - seen = strlist__new(true, NULL); 721 - 722 - if (!strlist__has_entry(seen, path)) { 723 - pr_err("Can't access file %s\n", path); 724 - strlist__add(seen, path); 725 - } 726 - machine = NULL; 727 - goto out; 728 - } 729 - root_dir = path; 730 - } 731 - 732 - machine = machines__add(self, pid, root_dir); 733 - 734 - out: 735 - return machine; 736 - } 737 - 738 - void 
machines__process(struct rb_root *self, machine__process_t process, void *data) 739 - { 740 - struct rb_node *nd; 741 - 742 - for (nd = rb_first(self); nd; nd = rb_next(nd)) { 743 - struct machine *pos = rb_entry(nd, struct machine, rb_node); 744 - process(pos, data); 745 - } 746 - } 747 - 748 - char *machine__mmap_name(struct machine *self, char *bf, size_t size) 749 - { 750 - if (machine__is_host(self)) 751 - snprintf(bf, size, "[%s]", "kernel.kallsyms"); 752 - else if (machine__is_default_guest(self)) 753 - snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); 754 - else 755 - snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid); 756 - 757 - return bf; 758 - } 759 - 760 - void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) 761 - { 762 - struct rb_node *node; 763 - struct machine *machine; 764 - 765 - for (node = rb_first(machines); node; node = rb_next(node)) { 766 - machine = rb_entry(node, struct machine, rb_node); 767 - machine->id_hdr_size = id_hdr_size; 768 - } 769 - 770 - return; 771 592 }
-93
tools/perf/util/map.h
··· 57 57 struct machine *machine; 58 58 }; 59 59 60 - /* Native host kernel uses -1 as pid index in machine */ 61 - #define HOST_KERNEL_ID (-1) 62 - #define DEFAULT_GUEST_KERNEL_ID (0) 63 - 64 - struct machine { 65 - struct rb_node rb_node; 66 - pid_t pid; 67 - u16 id_hdr_size; 68 - char *root_dir; 69 - struct rb_root threads; 70 - struct list_head dead_threads; 71 - struct thread *last_match; 72 - struct list_head user_dsos; 73 - struct list_head kernel_dsos; 74 - struct map_groups kmaps; 75 - struct map *vmlinux_maps[MAP__NR_TYPES]; 76 - }; 77 - 78 - static inline 79 - struct map *machine__kernel_map(struct machine *self, enum map_type type) 80 - { 81 - return self->vmlinux_maps[type]; 82 - } 83 - 84 60 static inline struct kmap *map__kmap(struct map *self) 85 61 { 86 62 return (struct kmap *)(self + 1); ··· 119 143 size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp); 120 144 size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp); 121 145 122 - typedef void (*machine__process_t)(struct machine *self, void *data); 123 - 124 - void machines__process(struct rb_root *self, machine__process_t process, void *data); 125 - struct machine *machines__add(struct rb_root *self, pid_t pid, 126 - const char *root_dir); 127 - struct machine *machines__find_host(struct rb_root *self); 128 - struct machine *machines__find(struct rb_root *self, pid_t pid); 129 - struct machine *machines__findnew(struct rb_root *self, pid_t pid); 130 - void machines__set_id_hdr_size(struct rb_root *self, u16 id_hdr_size); 131 - char *machine__mmap_name(struct machine *self, char *bf, size_t size); 132 - int machine__init(struct machine *self, const char *root_dir, pid_t pid); 133 - void machine__exit(struct machine *self); 134 - void machine__delete(struct machine *self); 135 - 136 - struct perf_evsel; 137 - struct perf_sample; 138 - int machine__resolve_callchain(struct machine *machine, 139 - struct perf_evsel *evsel, 140 - struct thread *thread, 141 - 
struct perf_sample *sample, 142 - struct symbol **parent); 143 146 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, 144 147 u64 addr); 145 - 146 - /* 147 - * Default guest kernel is defined by parameter --guestkallsyms 148 - * and --guestmodules 149 - */ 150 - static inline bool machine__is_default_guest(struct machine *self) 151 - { 152 - return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false; 153 - } 154 - 155 - static inline bool machine__is_host(struct machine *self) 156 - { 157 - return self ? self->pid == HOST_KERNEL_ID : false; 158 - } 159 148 160 149 static inline void map_groups__insert(struct map_groups *mg, struct map *map) 161 150 { ··· 150 209 struct map **mapp, 151 210 symbol_filter_t filter); 152 211 153 - 154 - struct thread *machine__findnew_thread(struct machine *machine, pid_t pid); 155 - void machine__remove_thread(struct machine *machine, struct thread *th); 156 - 157 - size_t machine__fprintf(struct machine *machine, FILE *fp); 158 - 159 - static inline 160 - struct symbol *machine__find_kernel_symbol(struct machine *self, 161 - enum map_type type, u64 addr, 162 - struct map **mapp, 163 - symbol_filter_t filter) 164 - { 165 - return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter); 166 - } 167 - 168 - static inline 169 - struct symbol *machine__find_kernel_function(struct machine *self, u64 addr, 170 - struct map **mapp, 171 - symbol_filter_t filter) 172 - { 173 - return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter); 174 - } 175 - 176 212 static inline 177 213 struct symbol *map_groups__find_function_by_name(struct map_groups *mg, 178 214 const char *name, struct map **mapp, ··· 158 240 return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp, filter); 159 241 } 160 242 161 - static inline 162 - struct symbol *machine__find_kernel_function_by_name(struct machine *self, 163 - const char *name, 164 - struct map **mapp, 165 - symbol_filter_t filter) 166 - { 167 
- return map_groups__find_function_by_name(&self->kmaps, name, mapp, 168 - filter); 169 - } 170 - 171 243 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, 172 244 int verbose, FILE *fp); 173 245 174 246 struct map *map_groups__find_by_name(struct map_groups *mg, 175 247 enum map_type type, const char *name); 176 - struct map *machine__new_module(struct machine *self, u64 start, const char *filename); 177 248 178 249 void map_groups__flush(struct map_groups *mg); 179 250
tools/perf/util/parse-events-test.c tools/perf/tests/parse-events.c
+2
tools/perf/util/parse-events.l
··· 81 81 num_hex 0x[a-fA-F0-9]+ 82 82 num_raw_hex [a-fA-F0-9]+ 83 83 name [a-zA-Z_*?][a-zA-Z0-9_*?]* 84 + name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* 84 85 modifier_event [ukhpGH]{1,8} 85 86 modifier_bp [rwx]{1,3} 86 87 ··· 169 168 branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } 170 169 , { return ','; } 171 170 "/" { BEGIN(INITIAL); return '/'; } 171 + {name_minus} { return str(yyscanner, PE_NAME); } 172 172 } 173 173 174 174 mem: { BEGIN(mem); return PE_PREFIX_MEM; }
+1 -4
tools/perf/util/session.h
··· 4 4 #include "hist.h" 5 5 #include "event.h" 6 6 #include "header.h" 7 + #include "machine.h" 7 8 #include "symbol.h" 8 9 #include "thread.h" 9 10 #include <linux/rbtree.h> ··· 68 67 struct thread *thread, 69 68 struct ip_callchain *chain, 70 69 struct symbol **parent); 71 - 72 - struct branch_info *machine__resolve_bstack(struct machine *self, 73 - struct thread *thread, 74 - struct branch_stack *bs); 75 70 76 71 bool perf_session__has_traces(struct perf_session *self, const char *msg); 77 72
+23 -4
tools/perf/util/sort.h
··· 77 77 struct hist_entry { 78 78 struct rb_node rb_node_in; 79 79 struct rb_node rb_node; 80 + union { 81 + struct list_head node; 82 + struct list_head head; 83 + } pairs; 80 84 struct he_stat stat; 81 85 struct map_symbol ms; 82 86 struct thread *thread; ··· 100 96 char *srcline; 101 97 struct symbol *parent; 102 98 unsigned long position; 103 - union { 104 - struct hist_entry *pair; 105 - struct rb_root sorted_chain; 106 - }; 99 + struct rb_root sorted_chain; 107 100 struct branch_info *branch_info; 108 101 struct hists *hists; 109 102 struct callchain_root callchain[0]; 110 103 }; 104 + 105 + static inline bool hist_entry__has_pairs(struct hist_entry *he) 106 + { 107 + return !list_empty(&he->pairs.node); 108 + } 109 + 110 + static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he) 111 + { 112 + if (hist_entry__has_pairs(he)) 113 + return list_entry(he->pairs.node.next, struct hist_entry, pairs.node); 114 + return NULL; 115 + } 116 + 117 + static inline void hist__entry_add_pair(struct hist_entry *he, 118 + struct hist_entry *pair) 119 + { 120 + list_add_tail(&he->pairs.head, &pair->pairs.node); 121 + } 111 122 112 123 enum sort_type { 113 124 SORT_PID,
+1
tools/perf/util/symbol.c
··· 12 12 #include "build-id.h" 13 13 #include "util.h" 14 14 #include "debug.h" 15 + #include "machine.h" 15 16 #include "symbol.h" 16 17 #include "strlist.h" 17 18
-20
tools/perf/util/symbol.h
··· 200 200 symbol_filter_t filter); 201 201 int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, 202 202 symbol_filter_t filter); 203 - int machine__load_kallsyms(struct machine *machine, const char *filename, 204 - enum map_type type, symbol_filter_t filter); 205 - int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 206 - symbol_filter_t filter); 207 - 208 - size_t machine__fprintf_dsos_buildid(struct machine *machine, 209 - FILE *fp, bool with_hits); 210 - size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); 211 - size_t machines__fprintf_dsos_buildid(struct rb_root *machines, 212 - FILE *fp, bool with_hits); 213 203 214 204 struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, 215 205 u64 addr); ··· 214 224 int filename__read_debuglink(const char *filename, char *debuglink, 215 225 size_t size); 216 226 217 - void machine__destroy_kernel_maps(struct machine *machine); 218 - int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); 219 - int machine__create_kernel_maps(struct machine *machine); 220 - 221 - int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); 222 - int machines__create_guest_kernel_maps(struct rb_root *machines); 223 - void machines__destroy_guest_kernel_maps(struct rb_root *machines); 224 - 225 227 int symbol__init(void); 226 228 void symbol__exit(void); 227 229 void symbol__elf_init(void); ··· 223 241 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); 224 242 size_t symbol__fprintf(struct symbol *sym, FILE *fp); 225 243 bool symbol_type__is_a(char symbol_type, enum map_type map_type); 226 - 227 - size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); 228 244 229 245 int dso__test_data(void); 230 246 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,