Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

. perf build-id cache now can show DSOs present in a perf.data file that are
not in the cache, to integrate with build-id servers being put in place by
organizations such as Fedora.

. perf buildid-list -i an-elf-file-instead-of-a-perf.data is back showing its
build-id.

. No need to do feature checks when doing a 'make tags'.

. Fix some 'perf test' errors and make them use the tracepoint evsel constructor.

. perf top now shares more of the evsel config/creation routines with 'record',
paving the way for further integration like 'top' snapshots, etc.

. perf top now supports DWARF callchains.

. perf evlist decodes sample_type and read_format, helping diagnose problems.

. Fix mmap limitations on 32-bit, fix from David Miller.

. perf diff fixes from Jiri Olsa.

. Ignore ABS symbols when loading data maps, fix from Namhyung Kim.

. Hists improvements from Namhyung Kim.

. Don't check configuration on make clean, from Namhyung Kim.

. Fix dso__fprintf() print statement, from Stephane Eranian.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+1484 -1520
+4
tools/perf/Documentation/Makefile
··· 222 222 #install-html: html 223 223 # '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) 224 224 225 + ifneq ($(MAKECMDGOALS),clean) 226 + ifneq ($(MAKECMDGOALS),tags) 225 227 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 226 228 $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE 227 229 228 230 -include $(OUTPUT)PERF-VERSION-FILE 231 + endif 232 + endif 229 233 230 234 # 231 235 # Determine "include::" file references in asciidoc files.
+3
tools/perf/Documentation/perf-buildid-cache.txt
··· 24 24 -r:: 25 25 --remove=:: 26 26 Remove specified file from the cache. 27 + -M:: 28 + --missing=:: 29 + List missing build ids in the cache for the specified file. 27 30 -v:: 28 31 --verbose:: 29 32 Be more verbose.
-4
tools/perf/Documentation/perf-diff.txt
··· 22 22 23 23 OPTIONS 24 24 ------- 25 - -M:: 26 - --displacement:: 27 - Show position displacement relative to baseline. 28 - 29 25 -D:: 30 26 --dump-raw-trace:: 31 27 Dump raw trace in ASCII.
+1 -1
tools/perf/Documentation/perf-top.txt
··· 60 60 61 61 -i:: 62 62 --inherit:: 63 - Child tasks inherit counters, only makes sens with -p option. 63 + Child tasks do not inherit counters. 64 64 65 65 -k <path>:: 66 66 --vmlinux=<path>::
+28 -23
tools/perf/Makefile
··· 153 153 # explicitly what architecture to check for. Fix this up for yours.. 154 154 SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ 155 155 156 + ifneq ($(MAKECMDGOALS),clean) 157 + ifneq ($(MAKECMDGOALS),tags) 156 158 -include config/feature-tests.mak 157 159 158 160 ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y) ··· 208 206 EXTLIBS := $(filter-out -lpthread,$(EXTLIBS)) 209 207 BASIC_CFLAGS += -I. 210 208 endif 209 + endif # MAKECMDGOALS != tags 210 + endif # MAKECMDGOALS != clean 211 211 212 212 # Guard against environment variables 213 213 BUILTIN_OBJS = ··· 234 230 LIBTRACEEVENT = $(TE_PATH)libtraceevent.a 235 231 TE_LIB := -L$(TE_PATH) -ltraceevent 236 232 233 + export LIBTRACEEVENT 234 + 235 + # python extension build directories 236 + PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ 237 + PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ 238 + PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ 239 + export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP 240 + 241 + python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so 242 + 237 243 PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) 238 244 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py 239 - 240 - export LIBTRACEEVENT 241 245 242 246 $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) 243 247 $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ ··· 390 378 LIB_H += util/intlist.h 391 379 LIB_H += util/perf_regs.h 392 380 LIB_H += util/unwind.h 393 - LIB_H += ui/helpline.h 394 381 LIB_H += util/vdso.h 382 + LIB_H += ui/helpline.h 383 + LIB_H += ui/progress.h 384 + LIB_H += ui/util.h 385 + LIB_H += ui/ui.h 395 386 396 387 LIB_OBJS += $(OUTPUT)util/abspath.o 397 388 LIB_OBJS += $(OUTPUT)util/alias.o ··· 468 453 LIB_OBJS += $(OUTPUT)ui/setup.o 469 454 LIB_OBJS += $(OUTPUT)ui/helpline.o 470 455 LIB_OBJS += $(OUTPUT)ui/progress.o 456 + LIB_OBJS += $(OUTPUT)ui/util.o 471 457 LIB_OBJS += $(OUTPUT)ui/hist.o 472 458 
LIB_OBJS += $(OUTPUT)ui/stdio/hist.o 473 459 ··· 487 471 LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o 488 472 LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o 489 473 LIB_OBJS += $(OUTPUT)tests/pmu.o 490 - LIB_OBJS += $(OUTPUT)tests/util.o 491 474 492 475 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o 493 476 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o ··· 525 510 # 526 511 # Platform specific tweaks 527 512 # 513 + ifneq ($(MAKECMDGOALS),clean) 514 + ifneq ($(MAKECMDGOALS),tags) 528 515 529 516 # We choose to avoid "if .. else if .. else .. endif endif" 530 517 # because maintaining the nesting to match is a pain. If ··· 663 646 LIB_OBJS += $(OUTPUT)ui/browsers/hists.o 664 647 LIB_OBJS += $(OUTPUT)ui/browsers/map.o 665 648 LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o 666 - LIB_OBJS += $(OUTPUT)ui/util.o 667 649 LIB_OBJS += $(OUTPUT)ui/tui/setup.o 668 650 LIB_OBJS += $(OUTPUT)ui/tui/util.o 669 651 LIB_OBJS += $(OUTPUT)ui/tui/helpline.o ··· 671 655 LIB_H += ui/browsers/map.h 672 656 LIB_H += ui/keysyms.h 673 657 LIB_H += ui/libslang.h 674 - LIB_H += ui/progress.h 675 - LIB_H += ui/util.h 676 - LIB_H += ui/ui.h 677 658 endif 678 659 endif 679 660 ··· 690 677 LIB_OBJS += $(OUTPUT)ui/gtk/util.o 691 678 LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o 692 679 LIB_OBJS += $(OUTPUT)ui/gtk/progress.o 693 - # Make sure that it'd be included only once. 
694 - ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),) 695 - LIB_OBJS += $(OUTPUT)ui/util.o 696 - endif 697 680 endif 698 681 endif 699 682 ··· 716 707 define disable-python_code 717 708 BASIC_CFLAGS += -DNO_LIBPYTHON 718 709 $(if $(1),$(warning No $(1) was found)) 719 - $(warning Python support won't be built) 710 + $(warning Python support will not be built) 720 711 endef 721 712 722 713 override PYTHON := \ ··· 724 715 725 716 ifndef PYTHON 726 717 $(call disable-python,python interpreter) 727 - python-clean := 728 718 else 729 719 730 720 PYTHON_WORD := $(call shell-wordify,$(PYTHON)) 731 - 732 - # python extension build directories 733 - PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ 734 - PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ 735 - PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ 736 - export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP 737 - 738 - python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so 739 721 740 722 ifdef NO_LIBPYTHON 741 723 $(call disable-python) ··· 842 842 ifdef ASCIIDOC8 843 843 export ASCIIDOC8 844 844 endif 845 + 846 + endif # MAKECMDGOALS != tags 847 + endif # MAKECMDGOALS != clean 845 848 846 849 # Shell quote (do not use $(call) to accommodate ancient setups); 847 850 ··· 1102 1099 endif 1103 1100 perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) 1104 1101 1105 - install: all try-install-man 1102 + install-bin: all 1106 1103 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' 1107 1104 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' 1108 1105 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' ··· 1122 1119 $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests' 1123 1120 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 1124 1121 $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 1122 + 1123 + install: install-bin try-install-man 1125 1124 1126 1125 install-python_ext: 1127 1126 $(PYTHON_WORD) 
util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
+46 -2
tools/perf/builtin-buildid-cache.c
··· 14 14 #include "util/parse-options.h" 15 15 #include "util/strlist.h" 16 16 #include "util/build-id.h" 17 + #include "util/session.h" 17 18 #include "util/symbol.h" 18 19 19 20 static int build_id_cache__add_file(const char *filename, const char *debugdir) ··· 59 58 return err; 60 59 } 61 60 61 + static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused) 62 + { 63 + char filename[PATH_MAX]; 64 + u8 build_id[BUILD_ID_SIZE]; 65 + 66 + if (dso__build_id_filename(dso, filename, sizeof(filename)) && 67 + filename__read_build_id(filename, build_id, 68 + sizeof(build_id)) != sizeof(build_id)) { 69 + if (errno == ENOENT) 70 + return false; 71 + 72 + pr_warning("Problems with %s file, consider removing it from the cache\n", 73 + filename); 74 + } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) { 75 + pr_warning("Problems with %s file, consider removing it from the cache\n", 76 + filename); 77 + } 78 + 79 + return true; 80 + } 81 + 82 + static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp) 83 + { 84 + struct perf_session *session = perf_session__new(filename, O_RDONLY, 85 + force, false, NULL); 86 + if (session == NULL) 87 + return -1; 88 + 89 + perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0); 90 + perf_session__delete(session); 91 + 92 + return 0; 93 + } 94 + 62 95 int cmd_buildid_cache(int argc, const char **argv, 63 96 const char *prefix __maybe_unused) 64 97 { 65 98 struct strlist *list; 66 99 struct str_node *pos; 100 + int ret = 0; 101 + bool force = false; 67 102 char debugdir[PATH_MAX]; 68 103 char const *add_name_list_str = NULL, 69 - *remove_name_list_str = NULL; 104 + *remove_name_list_str = NULL, 105 + *missing_filename = NULL; 70 106 const struct option buildid_cache_options[] = { 71 107 OPT_STRING('a', "add", &add_name_list_str, 72 108 "file list", "file(s) to add"), 73 109 OPT_STRING('r', "remove", &remove_name_list_str, "file list", 74 110 "file(s) to 
remove"), 111 + OPT_STRING('M', "missing", &missing_filename, "file", 112 + "to find missing build ids in the cache"), 113 + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 75 114 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 76 115 OPT_END() 77 116 }; ··· 166 125 } 167 126 } 168 127 169 - return 0; 128 + if (missing_filename) 129 + ret = build_id_cache__fprintf_missing(missing_filename, force, stdout); 130 + 131 + return ret; 170 132 }
+12 -9
tools/perf/builtin-buildid-list.c
··· 44 44 return fprintf(fp, "%s\n", sbuild_id); 45 45 } 46 46 47 + static bool dso__skip_buildid(struct dso *dso, int with_hits) 48 + { 49 + return with_hits && !dso->hit; 50 + } 51 + 47 52 static int perf_session__list_build_ids(bool force, bool with_hits) 48 53 { 49 54 struct perf_session *session; 50 55 51 56 symbol__elf_init(); 57 + /* 58 + * See if this is an ELF file first: 59 + */ 60 + if (filename__fprintf_build_id(input_name, stdout)) 61 + goto out; 52 62 53 63 session = perf_session__new(input_name, O_RDONLY, force, false, 54 64 &build_id__mark_dso_hit_ops); 55 65 if (session == NULL) 56 66 return -1; 57 - 58 - /* 59 - * See if this is an ELF file first: 60 - */ 61 - if (filename__fprintf_build_id(session->filename, stdout)) 62 - goto out; 63 - 64 67 /* 65 68 * in pipe-mode, the only way to get the buildids is to parse 66 69 * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID ··· 71 68 if (with_hits || session->fd_pipe) 72 69 perf_session__process_events(session, &build_id__mark_dso_hit_ops); 73 70 74 - perf_session__fprintf_dsos_buildid(session, stdout, with_hits); 75 - out: 71 + perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits); 76 72 perf_session__delete(session); 73 + out: 77 74 return 0; 78 75 } 79 76
+44 -77
tools/perf/builtin-diff.c
··· 23 23 *input_new = "perf.data"; 24 24 static char diff__default_sort_order[] = "dso,symbol"; 25 25 static bool force; 26 - static bool show_displacement; 27 26 static bool show_period; 28 27 static bool show_formula; 29 28 static bool show_baseline_only; ··· 145 146 return -EINVAL; 146 147 } 147 148 148 - static double get_period_percent(struct hist_entry *he, u64 period) 149 + double perf_diff__period_percent(struct hist_entry *he, u64 period) 149 150 { 150 151 u64 total = he->hists->stats.total_period; 151 152 return (period * 100.0) / total; 152 153 } 153 154 154 - double perf_diff__compute_delta(struct hist_entry *he) 155 + double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair) 155 156 { 156 - struct hist_entry *pair = hist_entry__next_pair(he); 157 - double new_percent = get_period_percent(he, he->stat.period); 158 - double old_percent = pair ? get_period_percent(pair, pair->stat.period) : 0.0; 157 + double new_percent = perf_diff__period_percent(he, he->stat.period); 158 + double old_percent = perf_diff__period_percent(pair, pair->stat.period); 159 159 160 160 he->diff.period_ratio_delta = new_percent - old_percent; 161 161 he->diff.computed = true; 162 162 return he->diff.period_ratio_delta; 163 163 } 164 164 165 - double perf_diff__compute_ratio(struct hist_entry *he) 165 + double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair) 166 166 { 167 - struct hist_entry *pair = hist_entry__next_pair(he); 168 167 double new_period = he->stat.period; 169 - double old_period = pair ? pair->stat.period : 0; 168 + double old_period = pair->stat.period; 170 169 171 170 he->diff.computed = true; 172 - he->diff.period_ratio = pair ? 
(new_period / old_period) : 0; 171 + he->diff.period_ratio = new_period / old_period; 173 172 return he->diff.period_ratio; 174 173 } 175 174 176 - s64 perf_diff__compute_wdiff(struct hist_entry *he) 175 + s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair) 177 176 { 178 - struct hist_entry *pair = hist_entry__next_pair(he); 179 177 u64 new_period = he->stat.period; 180 - u64 old_period = pair ? pair->stat.period : 0; 178 + u64 old_period = pair->stat.period; 181 179 182 180 he->diff.computed = true; 183 - 184 - if (!pair) 185 - he->diff.wdiff = 0; 186 - else 187 - he->diff.wdiff = new_period * compute_wdiff_w2 - 188 - old_period * compute_wdiff_w1; 181 + he->diff.wdiff = new_period * compute_wdiff_w2 - 182 + old_period * compute_wdiff_w1; 189 183 190 184 return he->diff.wdiff; 191 185 } 192 186 193 - static int formula_delta(struct hist_entry *he, char *buf, size_t size) 187 + static int formula_delta(struct hist_entry *he, struct hist_entry *pair, 188 + char *buf, size_t size) 194 189 { 195 - struct hist_entry *pair = hist_entry__next_pair(he); 196 - 197 - if (!pair) 198 - return -1; 199 - 200 190 return scnprintf(buf, size, 201 191 "(%" PRIu64 " * 100 / %" PRIu64 ") - " 202 192 "(%" PRIu64 " * 100 / %" PRIu64 ")", ··· 193 205 pair->stat.period, pair->hists->stats.total_period); 194 206 } 195 207 196 - static int formula_ratio(struct hist_entry *he, char *buf, size_t size) 208 + static int formula_ratio(struct hist_entry *he, struct hist_entry *pair, 209 + char *buf, size_t size) 197 210 { 198 - struct hist_entry *pair = hist_entry__next_pair(he); 199 211 double new_period = he->stat.period; 200 - double old_period = pair ? 
pair->stat.period : 0; 201 - 202 - if (!pair) 203 - return -1; 212 + double old_period = pair->stat.period; 204 213 205 214 return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period); 206 215 } 207 216 208 - static int formula_wdiff(struct hist_entry *he, char *buf, size_t size) 217 + static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair, 218 + char *buf, size_t size) 209 219 { 210 - struct hist_entry *pair = hist_entry__next_pair(he); 211 220 u64 new_period = he->stat.period; 212 - u64 old_period = pair ? pair->stat.period : 0; 213 - 214 - if (!pair) 215 - return -1; 221 + u64 old_period = pair->stat.period; 216 222 217 223 return scnprintf(buf, size, 218 224 "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")", 219 225 new_period, compute_wdiff_w2, old_period, compute_wdiff_w1); 220 226 } 221 227 222 - int perf_diff__formula(char *buf, size_t size, struct hist_entry *he) 228 + int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair, 229 + char *buf, size_t size) 223 230 { 224 231 switch (compute) { 225 232 case COMPUTE_DELTA: 226 - return formula_delta(he, buf, size); 233 + return formula_delta(he, pair, buf, size); 227 234 case COMPUTE_RATIO: 228 - return formula_ratio(he, buf, size); 235 + return formula_ratio(he, pair, buf, size); 229 236 case COMPUTE_WEIGHTED_DIFF: 230 - return formula_wdiff(he, buf, size); 237 + return formula_wdiff(he, pair, buf, size); 231 238 default: 232 239 BUG_ON(1); 233 240 } ··· 295 312 rb_insert_color(&he->rb_node, root); 296 313 } 297 314 298 - static void hists__name_resort(struct hists *self, bool sort) 315 + static void hists__name_resort(struct hists *self) 299 316 { 300 - unsigned long position = 1; 301 317 struct rb_root tmp = RB_ROOT; 302 318 struct rb_node *next = rb_first(&self->entries); 303 319 ··· 304 322 struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node); 305 323 306 324 next = rb_next(&n->rb_node); 307 - n->position = position++; 308 325 309 - if 
(sort) { 310 - rb_erase(&n->rb_node, &self->entries); 311 - insert_hist_entry_by_name(&tmp, n); 312 - } 326 + rb_erase(&n->rb_node, &self->entries); 327 + insert_hist_entry_by_name(&tmp, n); 313 328 } 314 329 315 - if (sort) 316 - self->entries = tmp; 330 + self->entries = tmp; 317 331 } 318 332 319 333 static struct perf_evsel *evsel_match(struct perf_evsel *evsel, ··· 333 355 334 356 hists__output_resort(hists); 335 357 336 - /* 337 - * The hists__name_resort only sets possition 338 - * if name is false. 339 - */ 340 - if (name || ((!name) && show_displacement)) 341 - hists__name_resort(hists, name); 358 + if (name) 359 + hists__name_resort(hists); 342 360 } 343 361 } 344 362 ··· 359 385 360 386 while (next != NULL) { 361 387 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 388 + struct hist_entry *pair = hist_entry__next_pair(he); 362 389 363 390 next = rb_next(&he->rb_node); 391 + if (!pair) 392 + continue; 364 393 365 394 switch (compute) { 366 395 case COMPUTE_DELTA: 367 - perf_diff__compute_delta(he); 396 + perf_diff__compute_delta(he, pair); 368 397 break; 369 398 case COMPUTE_RATIO: 370 - perf_diff__compute_ratio(he); 399 + perf_diff__compute_ratio(he, pair); 371 400 break; 372 401 case COMPUTE_WEIGHTED_DIFF: 373 - perf_diff__compute_wdiff(he); 402 + perf_diff__compute_wdiff(he, pair); 374 403 break; 375 404 default: 376 405 BUG_ON(1); ··· 539 562 static const struct option options[] = { 540 563 OPT_INCR('v', "verbose", &verbose, 541 564 "be more verbose (show symbol address, etc)"), 542 - OPT_BOOLEAN('M', "displacement", &show_displacement, 543 - "Show position displacement relative to baseline"), 544 565 OPT_BOOLEAN('b', "baseline-only", &show_baseline_only, 545 566 "Show only items with match in baseline"), 546 567 OPT_CALLBACK('c', "compute", &compute, ··· 572 597 573 598 static void ui_init(void) 574 599 { 575 - perf_hpp__init(); 576 - 577 - /* No overhead column. 
*/ 578 - perf_hpp__column_enable(PERF_HPP__OVERHEAD, false); 579 - 580 600 /* 581 - * Display baseline/delta/ratio/displacement/ 601 + * Display baseline/delta/ratio 582 602 * formula/periods columns. 583 603 */ 584 - perf_hpp__column_enable(PERF_HPP__BASELINE, true); 604 + perf_hpp__column_enable(PERF_HPP__BASELINE); 585 605 586 606 switch (compute) { 587 607 case COMPUTE_DELTA: 588 - perf_hpp__column_enable(PERF_HPP__DELTA, true); 608 + perf_hpp__column_enable(PERF_HPP__DELTA); 589 609 break; 590 610 case COMPUTE_RATIO: 591 - perf_hpp__column_enable(PERF_HPP__RATIO, true); 611 + perf_hpp__column_enable(PERF_HPP__RATIO); 592 612 break; 593 613 case COMPUTE_WEIGHTED_DIFF: 594 - perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF, true); 614 + perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF); 595 615 break; 596 616 default: 597 617 BUG_ON(1); 598 618 }; 599 619 600 - if (show_displacement) 601 - perf_hpp__column_enable(PERF_HPP__DISPL, true); 602 - 603 620 if (show_formula) 604 - perf_hpp__column_enable(PERF_HPP__FORMULA, true); 621 + perf_hpp__column_enable(PERF_HPP__FORMULA); 605 622 606 623 if (show_period) { 607 - perf_hpp__column_enable(PERF_HPP__PERIOD, true); 608 - perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE, true); 624 + perf_hpp__column_enable(PERF_HPP__PERIOD); 625 + perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE); 609 626 } 610 627 } 611 628
+2 -79
tools/perf/builtin-evlist.c
··· 15 15 #include "util/parse-options.h" 16 16 #include "util/session.h" 17 17 18 - struct perf_attr_details { 19 - bool freq; 20 - bool verbose; 21 - }; 22 - 23 - static int comma_printf(bool *first, const char *fmt, ...) 24 - { 25 - va_list args; 26 - int ret = 0; 27 - 28 - if (!*first) { 29 - ret += printf(","); 30 - } else { 31 - ret += printf(":"); 32 - *first = false; 33 - } 34 - 35 - va_start(args, fmt); 36 - ret += vprintf(fmt, args); 37 - va_end(args); 38 - return ret; 39 - } 40 - 41 - static int __if_print(bool *first, const char *field, u64 value) 42 - { 43 - if (value == 0) 44 - return 0; 45 - 46 - return comma_printf(first, " %s: %" PRIu64, field, value); 47 - } 48 - 49 - #define if_print(field) __if_print(&first, #field, pos->attr.field) 50 - 51 18 static int __cmd_evlist(const char *file_name, struct perf_attr_details *details) 52 19 { 53 20 struct perf_session *session; ··· 24 57 if (session == NULL) 25 58 return -ENOMEM; 26 59 27 - list_for_each_entry(pos, &session->evlist->entries, node) { 28 - bool first = true; 29 - 30 - printf("%s", perf_evsel__name(pos)); 31 - 32 - if (details->verbose || details->freq) { 33 - comma_printf(&first, " sample_freq=%" PRIu64, 34 - (u64)pos->attr.sample_freq); 35 - } 36 - 37 - if (details->verbose) { 38 - if_print(type); 39 - if_print(config); 40 - if_print(config1); 41 - if_print(config2); 42 - if_print(size); 43 - if_print(sample_type); 44 - if_print(read_format); 45 - if_print(disabled); 46 - if_print(inherit); 47 - if_print(pinned); 48 - if_print(exclusive); 49 - if_print(exclude_user); 50 - if_print(exclude_kernel); 51 - if_print(exclude_hv); 52 - if_print(exclude_idle); 53 - if_print(mmap); 54 - if_print(comm); 55 - if_print(freq); 56 - if_print(inherit_stat); 57 - if_print(enable_on_exec); 58 - if_print(task); 59 - if_print(watermark); 60 - if_print(precise_ip); 61 - if_print(mmap_data); 62 - if_print(sample_id_all); 63 - if_print(exclude_host); 64 - if_print(exclude_guest); 65 - if_print(__reserved_1); 66 
- if_print(wakeup_events); 67 - if_print(bp_type); 68 - if_print(branch_sample_type); 69 - } 70 - 71 - putchar('\n'); 72 - } 60 + list_for_each_entry(pos, &session->evlist->entries, node) 61 + perf_evsel__fprintf(pos, details, stdout); 73 62 74 63 perf_session__delete(session); 75 64 return 0;
+16 -24
tools/perf/builtin-record.c
··· 230 230 struct perf_record_opts *opts = &rec->opts; 231 231 int rc = 0; 232 232 233 - /* 234 - * Set the evsel leader links before we configure attributes, 235 - * since some might depend on this info. 236 - */ 237 - if (opts->group) 238 - perf_evlist__set_leader(evlist); 239 - 240 - perf_evlist__config_attrs(evlist, opts); 233 + perf_evlist__config(evlist, opts); 241 234 242 235 list_for_each_entry(pos, &evlist->entries, node) { 243 236 struct perf_event_attr *attr = &pos->attr; ··· 279 286 */ 280 287 opts->sample_id_all_missing = true; 281 288 if (!opts->sample_time && !opts->raw_samples && !time_needed) 282 - attr->sample_type &= ~PERF_SAMPLE_TIME; 289 + perf_evsel__reset_sample_bit(pos, TIME); 283 290 284 291 goto retry_sample_id; 285 292 } ··· 868 875 } 869 876 #endif /* LIBUNWIND_SUPPORT */ 870 877 871 - static int 872 - parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, 873 - int unset) 878 + int record_parse_callchain_opt(const struct option *opt, 879 + const char *arg, int unset) 874 880 { 875 - struct perf_record *rec = (struct perf_record *)opt->value; 881 + struct perf_record_opts *opts = opt->value; 876 882 char *tok, *name, *saveptr = NULL; 877 883 char *buf; 878 884 int ret = -1; ··· 897 905 /* Framepointer style */ 898 906 if (!strncmp(name, "fp", sizeof("fp"))) { 899 907 if (!strtok_r(NULL, ",", &saveptr)) { 900 - rec->opts.call_graph = CALLCHAIN_FP; 908 + opts->call_graph = CALLCHAIN_FP; 901 909 ret = 0; 902 910 } else 903 911 pr_err("callchain: No more arguments " ··· 910 918 const unsigned long default_stack_dump_size = 8192; 911 919 912 920 ret = 0; 913 - rec->opts.call_graph = CALLCHAIN_DWARF; 914 - rec->opts.stack_dump_size = default_stack_dump_size; 921 + opts->call_graph = CALLCHAIN_DWARF; 922 + opts->stack_dump_size = default_stack_dump_size; 915 923 916 924 tok = strtok_r(NULL, ",", &saveptr); 917 925 if (tok) { 918 926 unsigned long size = 0; 919 927 920 928 ret = get_stack_size(tok, &size); 921 - 
rec->opts.stack_dump_size = size; 929 + opts->stack_dump_size = size; 922 930 } 923 931 924 932 if (!ret) 925 933 pr_debug("callchain: stack dump size %d\n", 926 - rec->opts.stack_dump_size); 934 + opts->stack_dump_size); 927 935 #endif /* LIBUNWIND_SUPPORT */ 928 936 } else { 929 937 pr_err("callchain: Unknown -g option " ··· 936 944 free(buf); 937 945 938 946 if (!ret) 939 - pr_debug("callchain: type %d\n", rec->opts.call_graph); 947 + pr_debug("callchain: type %d\n", opts->call_graph); 940 948 941 949 return ret; 942 950 } ··· 974 982 #define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: " 975 983 976 984 #ifdef LIBUNWIND_SUPPORT 977 - static const char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; 985 + const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; 978 986 #else 979 - static const char callchain_help[] = CALLCHAIN_HELP "[fp]"; 987 + const char record_callchain_help[] = CALLCHAIN_HELP "[fp]"; 980 988 #endif 981 989 982 990 /* ··· 1020 1028 "number of mmap data pages"), 1021 1029 OPT_BOOLEAN(0, "group", &record.opts.group, 1022 1030 "put the counters into a counter group"), 1023 - OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]", 1024 - callchain_help, &parse_callchain_opt, 1025 - "fp"), 1031 + OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts, 1032 + "mode[,dump_size]", record_callchain_help, 1033 + &record_parse_callchain_opt, "fp"), 1026 1034 OPT_INCR('v', "verbose", &verbose, 1027 1035 "be more verbose (show counter open errors, etc)"), 1028 1036 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
+1
tools/perf/builtin-report.c
··· 692 692 setup_browser(true); 693 693 else { 694 694 use_browser = 0; 695 + perf_hpp__column_enable(PERF_HPP__OVERHEAD); 695 696 perf_hpp__init(); 696 697 } 697 698
+1 -1
tools/perf/builtin-stat.c
··· 153 153 } 154 154 155 155 if (!perf_target__has_task(&target) && 156 - !perf_evsel__is_group_member(evsel)) { 156 + perf_evsel__is_group_leader(evsel)) { 157 157 attr->disabled = 1; 158 158 attr->enable_on_exec = 1; 159 159 }
+66 -141
tools/perf/builtin-top.c
··· 596 596 * via --uid. 597 597 */ 598 598 list_for_each_entry(pos, &top->evlist->entries, node) 599 - pos->hists.uid_filter_str = top->target.uid_str; 599 + pos->hists.uid_filter_str = top->record_opts.target.uid_str; 600 600 601 601 perf_evlist__tui_browse_hists(top->evlist, help, &hbt, 602 602 &top->session->header.env); ··· 727 727 } 728 728 729 729 if (!machine) { 730 - pr_err("%u unprocessable samples recorded.", 730 + pr_err("%u unprocessable samples recorded.\n", 731 731 top->session->hists.stats.nr_unprocessable_samples++); 732 732 return; 733 733 } ··· 894 894 { 895 895 struct perf_evsel *counter; 896 896 struct perf_evlist *evlist = top->evlist; 897 + struct perf_record_opts *opts = &top->record_opts; 897 898 898 - if (top->group) 899 - perf_evlist__set_leader(evlist); 899 + perf_evlist__config(evlist, opts); 900 900 901 901 list_for_each_entry(counter, &evlist->entries, node) { 902 902 struct perf_event_attr *attr = &counter->attr; 903 903 904 - attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; 905 - 906 - if (top->freq) { 907 - attr->sample_type |= PERF_SAMPLE_PERIOD; 908 - attr->freq = 1; 909 - attr->sample_freq = top->freq; 910 - } 911 - 912 - if (evlist->nr_entries > 1) { 913 - attr->sample_type |= PERF_SAMPLE_ID; 914 - attr->read_format |= PERF_FORMAT_ID; 915 - } 916 - 917 - if (perf_target__has_cpu(&top->target)) 918 - attr->sample_type |= PERF_SAMPLE_CPU; 919 - 920 - if (symbol_conf.use_callchain) 921 - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; 922 - 923 - attr->mmap = 1; 924 - attr->comm = 1; 925 - attr->inherit = top->inherit; 926 904 fallback_missing_features: 927 905 if (top->exclude_guest_missing) 928 906 attr->exclude_guest = attr->exclude_host = 0; ··· 974 996 } 975 997 } 976 998 977 - if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) { 999 + if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { 978 1000 ui__error("Failed to mmap with %d (%s)\n", 979 1001 errno, strerror(errno)); 980 1002 goto out_err; ··· 994 1016 
ui__error("Selected -g but \"sym\" not present in --sort/-s."); 995 1017 return -EINVAL; 996 1018 } 997 - } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) { 1019 + } else if (callchain_param.mode != CHAIN_NONE) { 998 1020 if (callchain_register_param(&callchain_param) < 0) { 999 1021 ui__error("Can't register callchain params.\n"); 1000 1022 return -EINVAL; ··· 1006 1028 1007 1029 static int __cmd_top(struct perf_top *top) 1008 1030 { 1031 + struct perf_record_opts *opts = &top->record_opts; 1009 1032 pthread_t thread; 1010 1033 int ret; 1011 1034 /* ··· 1021 1042 if (ret) 1022 1043 goto out_delete; 1023 1044 1024 - if (perf_target__has_task(&top->target)) 1045 + if (perf_target__has_task(&opts->target)) 1025 1046 perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, 1026 1047 perf_event__process, 1027 1048 &top->session->host_machine); ··· 1031 1052 perf_top__start_counters(top); 1032 1053 top->session->evlist = top->evlist; 1033 1054 perf_session__set_id_hdr_size(top->session); 1055 + 1056 + /* 1057 + * When perf is starting the traced process, all the events (apart from 1058 + * group members) have enable_on_exec=1 set, so don't spoil it by 1059 + * prematurely enabling them. 1060 + * 1061 + * XXX 'top' still doesn't start workloads like record, trace, but should, 1062 + * so leave the check here. 
1063 + */ 1064 + if (!perf_target__none(&opts->target)) 1065 + perf_evlist__enable(top->evlist); 1034 1066 1035 1067 /* Wait for a minimal set of events before starting the snapshot */ 1036 1068 poll(top->evlist->pollfd, top->evlist->nr_fds, 100); ··· 1083 1093 static int 1084 1094 parse_callchain_opt(const struct option *opt, const char *arg, int unset) 1085 1095 { 1086 - struct perf_top *top = (struct perf_top *)opt->value; 1087 - char *tok, *tok2; 1088 - char *endptr; 1089 - 1090 1096 /* 1091 1097 * --no-call-graph 1092 1098 */ 1093 - if (unset) { 1094 - top->dont_use_callchains = true; 1099 + if (unset) 1095 1100 return 0; 1096 - } 1097 1101 1098 1102 symbol_conf.use_callchain = true; 1099 1103 1100 - if (!arg) 1101 - return 0; 1102 - 1103 - tok = strtok((char *)arg, ","); 1104 - if (!tok) 1105 - return -1; 1106 - 1107 - /* get the output mode */ 1108 - if (!strncmp(tok, "graph", strlen(arg))) 1109 - callchain_param.mode = CHAIN_GRAPH_ABS; 1110 - 1111 - else if (!strncmp(tok, "flat", strlen(arg))) 1112 - callchain_param.mode = CHAIN_FLAT; 1113 - 1114 - else if (!strncmp(tok, "fractal", strlen(arg))) 1115 - callchain_param.mode = CHAIN_GRAPH_REL; 1116 - 1117 - else if (!strncmp(tok, "none", strlen(arg))) { 1118 - callchain_param.mode = CHAIN_NONE; 1119 - symbol_conf.use_callchain = false; 1120 - 1121 - return 0; 1122 - } else 1123 - return -1; 1124 - 1125 - /* get the min percentage */ 1126 - tok = strtok(NULL, ","); 1127 - if (!tok) 1128 - goto setup; 1129 - 1130 - callchain_param.min_percent = strtod(tok, &endptr); 1131 - if (tok == endptr) 1132 - return -1; 1133 - 1134 - /* get the print limit */ 1135 - tok2 = strtok(NULL, ","); 1136 - if (!tok2) 1137 - goto setup; 1138 - 1139 - if (tok2[0] != 'c') { 1140 - callchain_param.print_limit = strtod(tok2, &endptr); 1141 - tok2 = strtok(NULL, ","); 1142 - if (!tok2) 1143 - goto setup; 1144 - } 1145 - 1146 - /* get the call chain order */ 1147 - if (!strcmp(tok2, "caller")) 1148 - callchain_param.order = 
ORDER_CALLER; 1149 - else if (!strcmp(tok2, "callee")) 1150 - callchain_param.order = ORDER_CALLEE; 1151 - else 1152 - return -1; 1153 - setup: 1154 - if (callchain_register_param(&callchain_param) < 0) { 1155 - fprintf(stderr, "Can't register callchain params\n"); 1156 - return -1; 1157 - } 1158 - return 0; 1104 + return record_parse_callchain_opt(opt, arg, unset); 1159 1105 } 1160 1106 1161 1107 int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) 1162 1108 { 1163 - struct perf_evsel *pos; 1164 1109 int status; 1165 1110 char errbuf[BUFSIZ]; 1166 1111 struct perf_top top = { 1167 1112 .count_filter = 5, 1168 1113 .delay_secs = 2, 1169 - .freq = 4000, /* 4 KHz */ 1170 - .mmap_pages = 128, 1171 - .sym_pcnt_filter = 5, 1172 - .target = { 1173 - .uses_mmap = true, 1114 + .record_opts = { 1115 + .mmap_pages = UINT_MAX, 1116 + .user_freq = UINT_MAX, 1117 + .user_interval = ULLONG_MAX, 1118 + .freq = 4000, /* 4 KHz */ 1119 + .target = { 1120 + .uses_mmap = true, 1121 + }, 1174 1122 }, 1123 + .sym_pcnt_filter = 5, 1175 1124 }; 1176 - char callchain_default_opt[] = "fractal,0.5,callee"; 1125 + struct perf_record_opts *opts = &top.record_opts; 1126 + struct perf_target *target = &opts->target; 1177 1127 const struct option options[] = { 1178 1128 OPT_CALLBACK('e', "event", &top.evlist, "event", 1179 1129 "event selector. 
use 'perf list' to list available events", 1180 1130 parse_events_option), 1181 - OPT_INTEGER('c', "count", &top.default_interval, 1182 - "event period to sample"), 1183 - OPT_STRING('p', "pid", &top.target.pid, "pid", 1131 + OPT_U64('c', "count", &opts->user_interval, "event period to sample"), 1132 + OPT_STRING('p', "pid", &target->pid, "pid", 1184 1133 "profile events on existing process id"), 1185 - OPT_STRING('t', "tid", &top.target.tid, "tid", 1134 + OPT_STRING('t', "tid", &target->tid, "tid", 1186 1135 "profile events on existing thread id"), 1187 - OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide, 1136 + OPT_BOOLEAN('a', "all-cpus", &target->system_wide, 1188 1137 "system-wide collection from all CPUs"), 1189 - OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu", 1138 + OPT_STRING('C', "cpu", &target->cpu_list, "cpu", 1190 1139 "list of cpus to monitor"), 1191 1140 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 1192 1141 "file", "vmlinux pathname"), 1193 1142 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, 1194 1143 "hide kernel symbols"), 1195 - OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"), 1144 + OPT_UINTEGER('m', "mmap-pages", &opts->mmap_pages, 1145 + "number of mmap data pages"), 1196 1146 OPT_INTEGER('r', "realtime", &top.realtime_prio, 1197 1147 "collect data with this RT SCHED_FIFO priority"), 1198 1148 OPT_INTEGER('d', "delay", &top.delay_secs, ··· 1141 1211 "dump the symbol table used for profiling"), 1142 1212 OPT_INTEGER('f', "count-filter", &top.count_filter, 1143 1213 "only display functions with more events than this"), 1144 - OPT_BOOLEAN('g', "group", &top.group, 1214 + OPT_BOOLEAN('g', "group", &opts->group, 1145 1215 "put the counters into a counter group"), 1146 - OPT_BOOLEAN('i', "inherit", &top.inherit, 1147 - "child tasks inherit counters"), 1216 + OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit, 1217 + "child tasks do not inherit counters"), 1148 1218 OPT_STRING(0, 
"sym-annotate", &top.sym_filter, "symbol name", 1149 1219 "symbol to annotate"), 1150 - OPT_BOOLEAN('z', "zero", &top.zero, 1151 - "zero history across updates"), 1152 - OPT_INTEGER('F', "freq", &top.freq, 1153 - "profile at this frequency"), 1220 + OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"), 1221 + OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"), 1154 1222 OPT_INTEGER('E', "entries", &top.print_entries, 1155 1223 "display this many functions"), 1156 1224 OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols, ··· 1161 1233 "sort by key(s): pid, comm, dso, symbol, parent"), 1162 1234 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 1163 1235 "Show a column with the number of samples"), 1164 - OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order", 1165 - "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. " 1166 - "Default: fractal,0.5,callee", &parse_callchain_opt, 1167 - callchain_default_opt), 1236 + OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts, 1237 + "mode[,dump_size]", record_callchain_help, 1238 + &parse_callchain_opt, "fp"), 1168 1239 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, 1169 1240 "Show a column with the sum of periods"), 1170 1241 OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", ··· 1178 1251 "Display raw encoding of assembly instructions (default)"), 1179 1252 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 1180 1253 "Specify disassembler style (e.g. 
-M intel for intel syntax)"), 1181 - OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"), 1254 + OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"), 1182 1255 OPT_END() 1183 1256 }; 1184 1257 const char * const top_usage[] = { ··· 1208 1281 1209 1282 setup_browser(false); 1210 1283 1211 - status = perf_target__validate(&top.target); 1284 + status = perf_target__validate(target); 1212 1285 if (status) { 1213 - perf_target__strerror(&top.target, status, errbuf, BUFSIZ); 1286 + perf_target__strerror(target, status, errbuf, BUFSIZ); 1214 1287 ui__warning("%s", errbuf); 1215 1288 } 1216 1289 1217 - status = perf_target__parse_uid(&top.target); 1290 + status = perf_target__parse_uid(target); 1218 1291 if (status) { 1219 1292 int saved_errno = errno; 1220 1293 1221 - perf_target__strerror(&top.target, status, errbuf, BUFSIZ); 1294 + perf_target__strerror(target, status, errbuf, BUFSIZ); 1222 1295 ui__error("%s", errbuf); 1223 1296 1224 1297 status = -saved_errno; 1225 1298 goto out_delete_evlist; 1226 1299 } 1227 1300 1228 - if (perf_target__none(&top.target)) 1229 - top.target.system_wide = true; 1301 + if (perf_target__none(target)) 1302 + target->system_wide = true; 1230 1303 1231 - if (perf_evlist__create_maps(top.evlist, &top.target) < 0) 1304 + if (perf_evlist__create_maps(top.evlist, target) < 0) 1232 1305 usage_with_options(top_usage, options); 1233 1306 1234 1307 if (!top.evlist->nr_entries && ··· 1242 1315 if (top.delay_secs < 1) 1243 1316 top.delay_secs = 1; 1244 1317 1318 + if (opts->user_interval != ULLONG_MAX) 1319 + opts->default_interval = opts->user_interval; 1320 + if (opts->user_freq != UINT_MAX) 1321 + opts->freq = opts->user_freq; 1322 + 1245 1323 /* 1246 1324 * User specified count overrides default frequency. 
1247 1325 */ 1248 - if (top.default_interval) 1249 - top.freq = 0; 1250 - else if (top.freq) { 1251 - top.default_interval = top.freq; 1326 + if (opts->default_interval) 1327 + opts->freq = 0; 1328 + else if (opts->freq) { 1329 + opts->default_interval = opts->freq; 1252 1330 } else { 1253 1331 ui__error("frequency and count are zero, aborting\n"); 1254 - exit(EXIT_FAILURE); 1255 - } 1256 - 1257 - list_for_each_entry(pos, &top.evlist->entries, node) { 1258 - /* 1259 - * Fill in the ones not specifically initialized via -c: 1260 - */ 1261 - if (!pos->attr.sample_period) 1262 - pos->attr.sample_period = top.default_interval; 1332 + status = -EINVAL; 1333 + goto out_delete_evlist; 1263 1334 } 1264 1335 1265 1336 top.sym_evsel = perf_evlist__first(top.evlist);
+1 -1
tools/perf/builtin-trace.c
··· 455 455 goto out_delete_evlist; 456 456 } 457 457 458 - perf_evlist__config_attrs(evlist, &trace->opts); 458 + perf_evlist__config(evlist, &trace->opts); 459 459 460 460 signal(SIGCHLD, sig_handler); 461 461 signal(SIGINT, sig_handler);
+1 -1
tools/perf/tests/attr/base-record
··· 7 7 config=0 8 8 sample_period=4000 9 9 sample_type=263 10 - read_format=7 10 + read_format=0 11 11 disabled=1 12 12 inherit=1 13 13 pinned=0
+2
tools/perf/tests/attr/test-record-group
··· 6 6 fd=1 7 7 group_fd=-1 8 8 sample_type=327 9 + read_format=4 9 10 10 11 [event-2:base-record] 11 12 fd=2 12 13 group_fd=1 13 14 config=1 14 15 sample_type=327 16 + read_format=4 15 17 mmap=0 16 18 comm=0 17 19 enable_on_exec=0
+2
tools/perf/tests/attr/test-record-group1
··· 6 6 fd=1 7 7 group_fd=-1 8 8 sample_type=327 9 + read_format=4 9 10 10 11 [event-2:base-record] 11 12 fd=2 ··· 14 13 type=0 15 14 config=1 16 15 sample_type=327 16 + read_format=4 17 17 mmap=0 18 18 comm=0 19 19 enable_on_exec=0
+13 -27
tools/perf/tests/mmap-basic.c
··· 22 22 struct thread_map *threads; 23 23 struct cpu_map *cpus; 24 24 struct perf_evlist *evlist; 25 - struct perf_event_attr attr = { 26 - .type = PERF_TYPE_TRACEPOINT, 27 - .read_format = PERF_FORMAT_ID, 28 - .sample_type = PERF_SAMPLE_ID, 29 - .watermark = 0, 30 - }; 31 25 cpu_set_t cpu_set; 32 26 const char *syscall_names[] = { "getsid", "getppid", "getpgrp", 33 27 "getpgid", }; 34 28 pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, 35 29 (void*)getpgid }; 36 30 #define nsyscalls ARRAY_SIZE(syscall_names) 37 - int ids[nsyscalls]; 38 31 unsigned int nr_events[nsyscalls], 39 32 expected_nr_events[nsyscalls], i, j; 40 33 struct perf_evsel *evsels[nsyscalls], *evsel; 41 - 42 - for (i = 0; i < nsyscalls; ++i) { 43 - char name[64]; 44 - 45 - snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 46 - ids[i] = trace_event__id(name); 47 - if (ids[i] < 0) { 48 - pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); 49 - return -1; 50 - } 51 - nr_events[i] = 0; 52 - expected_nr_events[i] = random() % 257; 53 - } 54 34 55 35 threads = thread_map__new(-1, getpid(), UINT_MAX); 56 36 if (threads == NULL) { ··· 59 79 goto out_free_cpus; 60 80 } 61 81 62 - /* anonymous union fields, can't be initialized above */ 63 - attr.wakeup_events = 1; 64 - attr.sample_period = 1; 65 - 66 82 for (i = 0; i < nsyscalls; ++i) { 67 - attr.config = ids[i]; 68 - evsels[i] = perf_evsel__new(&attr, i); 83 + char name[64]; 84 + 85 + snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 86 + evsels[i] = perf_evsel__newtp("syscalls", name, i); 69 87 if (evsels[i] == NULL) { 70 88 pr_debug("perf_evsel__new\n"); 71 89 goto out_free_evlist; 72 90 } 91 + 92 + evsels[i]->attr.wakeup_events = 1; 93 + perf_evsel__set_sample_id(evsels[i]); 73 94 74 95 perf_evlist__add(evlist, evsels[i]); 75 96 ··· 80 99 strerror(errno)); 81 100 goto out_close_fd; 82 101 } 102 + 103 + nr_events[i] = 0; 104 + expected_nr_events[i] = 1 + rand() % 127; 83 105 } 84 106 85 107 if 
(perf_evlist__mmap(evlist, 128, true) < 0) { ··· 112 128 goto out_munmap; 113 129 } 114 130 131 + err = -1; 115 132 evsel = perf_evlist__id2evsel(evlist, sample.id); 116 133 if (evsel == NULL) { 117 134 pr_debug("event with id %" PRIu64 ··· 122 137 nr_events[evsel->idx]++; 123 138 } 124 139 140 + err = 0; 125 141 list_for_each_entry(evsel, &evlist->entries, node) { 126 142 if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { 127 143 pr_debug("expected %d %s events, got %d\n", 128 144 expected_nr_events[evsel->idx], 129 145 perf_evsel__name(evsel), nr_events[evsel->idx]); 146 + err = -1; 130 147 goto out_munmap; 131 148 } 132 149 } 133 150 134 - err = 0; 135 151 out_munmap: 136 152 perf_evlist__munmap(evlist); 137 153 out_close_fd:
+3 -15
tools/perf/tests/open-syscall-all-cpus.c
··· 7 7 int test__open_syscall_event_on_all_cpus(void) 8 8 { 9 9 int err = -1, fd, cpu; 10 - struct thread_map *threads; 11 10 struct cpu_map *cpus; 12 11 struct perf_evsel *evsel; 13 - struct perf_event_attr attr; 14 12 unsigned int nr_open_calls = 111, i; 15 13 cpu_set_t cpu_set; 16 - int id = trace_event__id("sys_enter_open"); 14 + struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 17 15 18 - if (id < 0) { 19 - pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); 20 - return -1; 21 - } 22 - 23 - threads = thread_map__new(-1, getpid(), UINT_MAX); 24 16 if (threads == NULL) { 25 17 pr_debug("thread_map__new\n"); 26 18 return -1; ··· 24 32 goto out_thread_map_delete; 25 33 } 26 34 27 - 28 35 CPU_ZERO(&cpu_set); 29 36 30 - memset(&attr, 0, sizeof(attr)); 31 - attr.type = PERF_TYPE_TRACEPOINT; 32 - attr.config = id; 33 - evsel = perf_evsel__new(&attr, 0); 37 + evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); 34 38 if (evsel == NULL) { 35 - pr_debug("perf_evsel__new\n"); 39 + pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); 36 40 goto out_thread_map_delete; 37 41 } 38 42
+3 -14
tools/perf/tests/open-syscall.c
··· 6 6 int test__open_syscall_event(void) 7 7 { 8 8 int err = -1, fd; 9 - struct thread_map *threads; 10 9 struct perf_evsel *evsel; 11 - struct perf_event_attr attr; 12 10 unsigned int nr_open_calls = 111, i; 13 - int id = trace_event__id("sys_enter_open"); 11 + struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 14 12 15 - if (id < 0) { 16 - pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); 17 - return -1; 18 - } 19 - 20 - threads = thread_map__new(-1, getpid(), UINT_MAX); 21 13 if (threads == NULL) { 22 14 pr_debug("thread_map__new\n"); 23 15 return -1; 24 16 } 25 17 26 - memset(&attr, 0, sizeof(attr)); 27 - attr.type = PERF_TYPE_TRACEPOINT; 28 - attr.config = id; 29 - evsel = perf_evsel__new(&attr, 0); 18 + evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); 30 19 if (evsel == NULL) { 31 - pr_debug("perf_evsel__new\n"); 20 + pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); 32 21 goto out_thread_map_delete; 33 22 } 34 23
+10 -10
tools/perf/tests/parse-events.c
··· 521 521 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 522 522 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 523 523 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 524 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 524 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 525 525 526 526 /* cycles:upp */ 527 527 evsel = perf_evsel__next(evsel); ··· 557 557 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 558 558 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 559 559 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 560 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 560 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 561 561 562 562 /* cache-references + :u modifier */ 563 563 evsel = perf_evsel__next(evsel); ··· 583 583 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 584 584 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 585 585 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 586 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 586 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 587 587 588 588 return 0; 589 589 } ··· 606 606 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); 607 607 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 608 608 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 609 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 609 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 610 610 TEST_ASSERT_VAL("wrong group name", 611 611 !strcmp(leader->group_name, "group1")); 612 612 ··· 636 636 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 637 637 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 638 638 TEST_ASSERT_VAL("wrong precise_ip", 
!evsel->attr.precise_ip); 639 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 639 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 640 640 TEST_ASSERT_VAL("wrong group name", 641 641 !strcmp(leader->group_name, "group2")); 642 642 ··· 663 663 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 664 664 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 665 665 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 666 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 666 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 667 667 668 668 return 0; 669 669 } ··· 687 687 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 688 688 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); 689 689 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 690 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 690 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 691 691 692 692 /* instructions:kp + p */ 693 693 evsel = perf_evsel__next(evsel); ··· 724 724 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 725 725 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 726 726 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 727 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 727 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 728 728 729 729 /* instructions + G */ 730 730 evsel = perf_evsel__next(evsel); ··· 751 751 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 752 752 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 753 753 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 754 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 754 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 755 755 756 756 /* instructions:G */ 757 757 evsel = 
perf_evsel__next(evsel); ··· 777 777 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); 778 778 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 779 779 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 780 - TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 780 + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); 781 781 782 782 return 0; 783 783 }
+4 -4
tools/perf/tests/perf-record.c
··· 103 103 * Config the evsels, setting attr->comm on the first one, etc. 104 104 */ 105 105 evsel = perf_evlist__first(evlist); 106 - evsel->attr.sample_type |= PERF_SAMPLE_CPU; 107 - evsel->attr.sample_type |= PERF_SAMPLE_TID; 108 - evsel->attr.sample_type |= PERF_SAMPLE_TIME; 109 - perf_evlist__config_attrs(evlist, &opts); 106 + perf_evsel__set_sample_bit(evsel, CPU); 107 + perf_evsel__set_sample_bit(evsel, TID); 108 + perf_evsel__set_sample_bit(evsel, TIME); 109 + perf_evlist__config(evlist, &opts); 110 110 111 111 err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); 112 112 if (err < 0) {
-3
tools/perf/tests/tests.h
··· 16 16 int test__dso_data(void); 17 17 int test__parse_events(void); 18 18 19 - /* Util */ 20 - int trace_event__id(const char *evname); 21 - 22 19 #endif /* TESTS_H */
-30
tools/perf/tests/util.c
··· 1 - #include <stdio.h> 2 - #include <unistd.h> 3 - #include <stdlib.h> 4 - #include <sys/types.h> 5 - #include <sys/stat.h> 6 - #include <fcntl.h> 7 - #include "tests.h" 8 - #include "debugfs.h" 9 - 10 - int trace_event__id(const char *evname) 11 - { 12 - char *filename; 13 - int err = -1, fd; 14 - 15 - if (asprintf(&filename, 16 - "%s/syscalls/%s/id", 17 - tracing_events_path, evname) < 0) 18 - return -1; 19 - 20 - fd = open(filename, O_RDONLY); 21 - if (fd >= 0) { 22 - char id[16]; 23 - if (read(fd, id, sizeof(id)) > 0) 24 - err = atoi(id); 25 - close(fd); 26 - } 27 - 28 - free(filename); 29 - return err; 30 - }
+12 -8
tools/perf/ui/browsers/hists.c
··· 587 587 588 588 void hist_browser__init_hpp(void) 589 589 { 590 + perf_hpp__column_enable(PERF_HPP__OVERHEAD); 591 + 590 592 perf_hpp__init(); 591 593 592 594 perf_hpp__format[PERF_HPP__OVERHEAD].color = ··· 609 607 { 610 608 char s[256]; 611 609 double percent; 612 - int i, printed = 0; 610 + int printed = 0; 613 611 int width = browser->b.width; 614 612 char folded_sign = ' '; 615 613 bool current_entry = ui_browser__is_current_entry(&browser->b, row); 616 614 off_t row_offset = entry->row_offset; 617 615 bool first = true; 616 + struct perf_hpp_fmt *fmt; 618 617 619 618 if (current_entry) { 620 619 browser->he_selection = entry; ··· 632 629 .buf = s, 633 630 .size = sizeof(s), 634 631 }; 632 + int i = 0; 635 633 636 634 ui_browser__gotorc(&browser->b, row, 0); 637 635 638 - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 639 - if (!perf_hpp__format[i].cond) 640 - continue; 636 + perf_hpp__for_each_format(fmt) { 641 637 642 638 if (!first) { 643 639 slsmg_printf(" "); ··· 644 642 } 645 643 first = false; 646 644 647 - if (perf_hpp__format[i].color) { 645 + if (fmt->color) { 648 646 hpp.ptr = &percent; 649 647 /* It will set percent for us. See HPP__COLOR_FN above. */ 650 - width -= perf_hpp__format[i].color(&hpp, entry); 648 + width -= fmt->color(&hpp, entry); 651 649 652 650 ui_browser__set_percent_color(&browser->b, percent, current_entry); 653 651 654 - if (i == PERF_HPP__OVERHEAD && symbol_conf.use_callchain) { 652 + if (!i && symbol_conf.use_callchain) { 655 653 slsmg_printf("%c ", folded_sign); 656 654 width -= 2; 657 655 } ··· 661 659 if (!current_entry || !browser->b.navkeypressed) 662 660 ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); 663 661 } else { 664 - width -= perf_hpp__format[i].entry(&hpp, entry); 662 + width -= fmt->entry(&hpp, entry); 665 663 slsmg_printf("%s", s); 666 664 } 665 + 666 + i++; 667 667 } 668 668 669 669 /* The scroll bar isn't being used */
+11 -19
tools/perf/ui/gtk/browser.c
··· 74 74 75 75 void perf_gtk__init_hpp(void) 76 76 { 77 + perf_hpp__column_enable(PERF_HPP__OVERHEAD); 78 + 77 79 perf_hpp__init(); 78 80 79 81 perf_hpp__format[PERF_HPP__OVERHEAD].color = ··· 92 90 93 91 static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) 94 92 { 93 + struct perf_hpp_fmt *fmt; 95 94 GType col_types[MAX_COLUMNS]; 96 95 GtkCellRenderer *renderer; 97 96 struct sort_entry *se; 98 97 GtkListStore *store; 99 98 struct rb_node *nd; 100 99 GtkWidget *view; 101 - int i, col_idx; 100 + int col_idx; 102 101 int nr_cols; 103 102 char s[512]; 104 103 ··· 110 107 111 108 nr_cols = 0; 112 109 113 - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 114 - if (!perf_hpp__format[i].cond) 115 - continue; 116 - 110 + perf_hpp__for_each_format(fmt) 117 111 col_types[nr_cols++] = G_TYPE_STRING; 118 - } 119 112 120 113 list_for_each_entry(se, &hist_entry__sort_list, list) { 121 114 if (se->elide) ··· 128 129 129 130 col_idx = 0; 130 131 131 - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 132 - if (!perf_hpp__format[i].cond) 133 - continue; 134 - 135 - perf_hpp__format[i].header(&hpp); 136 - 132 + perf_hpp__for_each_format(fmt) { 133 + fmt->header(&hpp); 137 134 gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), 138 135 -1, s, 139 136 renderer, "markup", ··· 161 166 162 167 col_idx = 0; 163 168 164 - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 165 - if (!perf_hpp__format[i].cond) 166 - continue; 167 - 168 - if (perf_hpp__format[i].color) 169 - perf_hpp__format[i].color(&hpp, h); 169 + perf_hpp__for_each_format(fmt) { 170 + if (fmt->color) 171 + fmt->color(&hpp, h); 170 172 else 171 - perf_hpp__format[i].entry(&hpp, h); 173 + fmt->entry(&hpp, h); 172 174 173 175 gtk_list_store_set(store, &iter, col_idx++, s, -1); 174 176 }
+12 -11
tools/perf/ui/gtk/helpline.c
··· 24 24 pgctx->statbar_ctx_id, msg); 25 25 } 26 26 27 - static struct ui_helpline gtk_helpline_fns = { 28 - .pop = gtk_helpline_pop, 29 - .push = gtk_helpline_push, 30 - }; 31 - 32 - void perf_gtk__init_helpline(void) 33 - { 34 - helpline_fns = &gtk_helpline_fns; 35 - } 36 - 37 - int perf_gtk__show_helpline(const char *fmt, va_list ap) 27 + static int gtk_helpline_show(const char *fmt, va_list ap) 38 28 { 39 29 int ret; 40 30 char *ptr; ··· 43 53 } 44 54 45 55 return ret; 56 + } 57 + 58 + static struct ui_helpline gtk_helpline_fns = { 59 + .pop = gtk_helpline_pop, 60 + .push = gtk_helpline_push, 61 + .show = gtk_helpline_show, 62 + }; 63 + 64 + void perf_gtk__init_helpline(void) 65 + { 66 + helpline_fns = &gtk_helpline_fns; 46 67 }
+12
tools/perf/ui/helpline.c
··· 16 16 { 17 17 } 18 18 19 + static int nop_helpline__show(const char *fmt __maybe_unused, 20 + va_list ap __maybe_unused) 21 + { 22 + return 0; 23 + } 24 + 19 25 static struct ui_helpline default_helpline_fns = { 20 26 .pop = nop_helpline__pop, 21 27 .push = nop_helpline__push, 28 + .show = nop_helpline__show, 22 29 }; 23 30 24 31 struct ui_helpline *helpline_fns = &default_helpline_fns; ··· 65 58 { 66 59 ui_helpline__pop(); 67 60 ui_helpline__push(msg); 61 + } 62 + 63 + int ui_helpline__vshow(const char *fmt, va_list ap) 64 + { 65 + return helpline_fns->show(fmt, ap); 68 66 }
+2 -20
tools/perf/ui/helpline.h
··· 9 9 struct ui_helpline { 10 10 void (*pop)(void); 11 11 void (*push)(const char *msg); 12 + int (*show)(const char *fmt, va_list ap); 12 13 }; 13 14 14 15 extern struct ui_helpline *helpline_fns; ··· 21 20 void ui_helpline__vpush(const char *fmt, va_list ap); 22 21 void ui_helpline__fpush(const char *fmt, ...); 23 22 void ui_helpline__puts(const char *msg); 23 + int ui_helpline__vshow(const char *fmt, va_list ap); 24 24 25 25 extern char ui_helpline__current[512]; 26 - 27 - #ifdef NEWT_SUPPORT 28 26 extern char ui_helpline__last_msg[]; 29 - int ui_helpline__show_help(const char *format, va_list ap); 30 - #else 31 - static inline int ui_helpline__show_help(const char *format __maybe_unused, 32 - va_list ap __maybe_unused) 33 - { 34 - return 0; 35 - } 36 - #endif /* NEWT_SUPPORT */ 37 - 38 - #ifdef GTK2_SUPPORT 39 - int perf_gtk__show_helpline(const char *format, va_list ap); 40 - #else 41 - static inline int perf_gtk__show_helpline(const char *format __maybe_unused, 42 - va_list ap __maybe_unused) 43 - { 44 - return 0; 45 - } 46 - #endif /* GTK2_SUPPORT */ 47 27 48 28 #endif /* _PERF_UI_HELPLINE_H_ */
+85 -85
tools/perf/ui/hist.c
··· 268 268 269 269 static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he) 270 270 { 271 + struct hist_entry *pair = hist_entry__next_pair(he); 271 272 const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s"; 272 273 char buf[32] = " "; 273 - double diff; 274 + double diff = 0.0; 274 275 275 - if (he->diff.computed) 276 - diff = he->diff.period_ratio_delta; 277 - else 278 - diff = perf_diff__compute_delta(he); 276 + if (pair) { 277 + if (he->diff.computed) 278 + diff = he->diff.period_ratio_delta; 279 + else 280 + diff = perf_diff__compute_delta(he, pair); 281 + } else 282 + diff = perf_diff__period_percent(he, he->stat.period); 279 283 280 284 if (fabs(diff) >= 0.01) 281 285 scnprintf(buf, sizeof(buf), "%+4.2F%%", diff); ··· 301 297 302 298 static int hpp__entry_ratio(struct perf_hpp *hpp, struct hist_entry *he) 303 299 { 300 + struct hist_entry *pair = hist_entry__next_pair(he); 304 301 const char *fmt = symbol_conf.field_sep ? "%s" : "%14s"; 305 302 char buf[32] = " "; 306 - double ratio; 303 + double ratio = 0.0; 307 304 308 - if (he->diff.computed) 309 - ratio = he->diff.period_ratio; 310 - else 311 - ratio = perf_diff__compute_ratio(he); 305 + if (pair) { 306 + if (he->diff.computed) 307 + ratio = he->diff.period_ratio; 308 + else 309 + ratio = perf_diff__compute_ratio(he, pair); 310 + } 312 311 313 312 if (ratio > 0.0) 314 313 scnprintf(buf, sizeof(buf), "%+14.6F", ratio); ··· 333 326 334 327 static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he) 335 328 { 329 + struct hist_entry *pair = hist_entry__next_pair(he); 336 330 const char *fmt = symbol_conf.field_sep ? 
"%s" : "%14s"; 337 331 char buf[32] = " "; 338 - s64 wdiff; 332 + s64 wdiff = 0; 339 333 340 - if (he->diff.computed) 341 - wdiff = he->diff.wdiff; 342 - else 343 - wdiff = perf_diff__compute_wdiff(he); 334 + if (pair) { 335 + if (he->diff.computed) 336 + wdiff = he->diff.wdiff; 337 + else 338 + wdiff = perf_diff__compute_wdiff(he, pair); 339 + } 344 340 345 341 if (wdiff != 0) 346 342 scnprintf(buf, sizeof(buf), "%14ld", wdiff); 347 - 348 - return scnprintf(hpp->buf, hpp->size, fmt, buf); 349 - } 350 - 351 - static int hpp__header_displ(struct perf_hpp *hpp) 352 - { 353 - return scnprintf(hpp->buf, hpp->size, "Displ."); 354 - } 355 - 356 - static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused) 357 - { 358 - return 6; 359 - } 360 - 361 - static int hpp__entry_displ(struct perf_hpp *hpp, 362 - struct hist_entry *he) 363 - { 364 - struct hist_entry *pair = hist_entry__next_pair(he); 365 - long displacement = pair ? pair->position - he->position : 0; 366 - const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s"; 367 - char buf[32] = " "; 368 - 369 - if (displacement) 370 - scnprintf(buf, sizeof(buf), "%+4ld", displacement); 371 343 372 344 return scnprintf(hpp->buf, hpp->size, fmt, buf); 373 345 } ··· 365 379 366 380 static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he) 367 381 { 382 + struct hist_entry *pair = hist_entry__next_pair(he); 368 383 const char *fmt = symbol_conf.field_sep ? 
"%s" : "%-70s"; 369 384 char buf[96] = " "; 370 385 371 - perf_diff__formula(buf, sizeof(buf), he); 386 + if (pair) 387 + perf_diff__formula(he, pair, buf, sizeof(buf)); 388 + 372 389 return scnprintf(hpp->buf, hpp->size, fmt, buf); 373 390 } 374 391 375 - #define HPP__COLOR_PRINT_FNS(_name) \ 376 - .header = hpp__header_ ## _name, \ 377 - .width = hpp__width_ ## _name, \ 378 - .color = hpp__color_ ## _name, \ 379 - .entry = hpp__entry_ ## _name 392 + #define HPP__COLOR_PRINT_FNS(_name) \ 393 + { \ 394 + .header = hpp__header_ ## _name, \ 395 + .width = hpp__width_ ## _name, \ 396 + .color = hpp__color_ ## _name, \ 397 + .entry = hpp__entry_ ## _name \ 398 + } 380 399 381 - #define HPP__PRINT_FNS(_name) \ 382 - .header = hpp__header_ ## _name, \ 383 - .width = hpp__width_ ## _name, \ 384 - .entry = hpp__entry_ ## _name 400 + #define HPP__PRINT_FNS(_name) \ 401 + { \ 402 + .header = hpp__header_ ## _name, \ 403 + .width = hpp__width_ ## _name, \ 404 + .entry = hpp__entry_ ## _name \ 405 + } 385 406 386 407 struct perf_hpp_fmt perf_hpp__format[] = { 387 - { .cond = false, HPP__COLOR_PRINT_FNS(baseline) }, 388 - { .cond = true, HPP__COLOR_PRINT_FNS(overhead) }, 389 - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) }, 390 - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) }, 391 - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) }, 392 - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) }, 393 - { .cond = false, HPP__PRINT_FNS(samples) }, 394 - { .cond = false, HPP__PRINT_FNS(period) }, 395 - { .cond = false, HPP__PRINT_FNS(period_baseline) }, 396 - { .cond = false, HPP__PRINT_FNS(delta) }, 397 - { .cond = false, HPP__PRINT_FNS(ratio) }, 398 - { .cond = false, HPP__PRINT_FNS(wdiff) }, 399 - { .cond = false, HPP__PRINT_FNS(displ) }, 400 - { .cond = false, HPP__PRINT_FNS(formula) } 408 + HPP__COLOR_PRINT_FNS(baseline), 409 + HPP__COLOR_PRINT_FNS(overhead), 410 + HPP__COLOR_PRINT_FNS(overhead_sys), 411 + HPP__COLOR_PRINT_FNS(overhead_us), 412 + 
HPP__COLOR_PRINT_FNS(overhead_guest_sys), 413 + HPP__COLOR_PRINT_FNS(overhead_guest_us), 414 + HPP__PRINT_FNS(samples), 415 + HPP__PRINT_FNS(period), 416 + HPP__PRINT_FNS(period_baseline), 417 + HPP__PRINT_FNS(delta), 418 + HPP__PRINT_FNS(ratio), 419 + HPP__PRINT_FNS(wdiff), 420 + HPP__PRINT_FNS(formula) 401 421 }; 422 + 423 + LIST_HEAD(perf_hpp__list); 402 424 403 425 #undef HPP__COLOR_PRINT_FNS 404 426 #undef HPP__PRINT_FNS ··· 414 420 void perf_hpp__init(void) 415 421 { 416 422 if (symbol_conf.show_cpu_utilization) { 417 - perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true; 418 - perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true; 423 + perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS); 424 + perf_hpp__column_enable(PERF_HPP__OVERHEAD_US); 419 425 420 426 if (perf_guest) { 421 - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true; 422 - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true; 427 + perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS); 428 + perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US); 423 429 } 424 430 } 425 431 426 432 if (symbol_conf.show_nr_samples) 427 - perf_hpp__format[PERF_HPP__SAMPLES].cond = true; 433 + perf_hpp__column_enable(PERF_HPP__SAMPLES); 428 434 429 435 if (symbol_conf.show_total_period) 430 - perf_hpp__format[PERF_HPP__PERIOD].cond = true; 436 + perf_hpp__column_enable(PERF_HPP__PERIOD); 431 437 } 432 438 433 - void perf_hpp__column_enable(unsigned col, bool enable) 439 + void perf_hpp__column_register(struct perf_hpp_fmt *format) 440 + { 441 + list_add_tail(&format->list, &perf_hpp__list); 442 + } 443 + 444 + void perf_hpp__column_enable(unsigned col) 434 445 { 435 446 BUG_ON(col >= PERF_HPP__MAX_INDEX); 436 - perf_hpp__format[col].cond = enable; 447 + perf_hpp__column_register(&perf_hpp__format[col]); 437 448 } 438 449 439 450 static inline void advance_hpp(struct perf_hpp *hpp, int inc) ··· 451 452 bool color) 452 453 { 453 454 const char *sep = symbol_conf.field_sep; 455 + struct perf_hpp_fmt *fmt; 454 
456 char *start = hpp->buf; 455 - int i, ret; 457 + int ret; 456 458 bool first = true; 457 459 458 460 if (symbol_conf.exclude_other && !he->parent) 459 461 return 0; 460 462 461 - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 462 - if (!perf_hpp__format[i].cond) 463 - continue; 464 - 463 + perf_hpp__for_each_format(fmt) { 464 + /* 465 + * If there's no field_sep, we still need 466 + * to display initial ' '. 467 + */ 465 468 if (!sep || !first) { 466 469 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); 467 470 advance_hpp(hpp, ret); 471 + } else 468 472 first = false; 469 - } 470 473 471 - if (color && perf_hpp__format[i].color) 472 - ret = perf_hpp__format[i].color(hpp, he); 474 + if (color && fmt->color) 475 + ret = fmt->color(hpp, he); 473 476 else 474 - ret = perf_hpp__format[i].entry(hpp, he); 477 + ret = fmt->entry(hpp, he); 475 478 476 479 advance_hpp(hpp, ret); 477 480 } ··· 505 504 */ 506 505 unsigned int hists__sort_list_width(struct hists *hists) 507 506 { 507 + struct perf_hpp_fmt *fmt; 508 508 struct sort_entry *se; 509 - int i, ret = 0; 509 + int i = 0, ret = 0; 510 510 511 - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 512 - if (!perf_hpp__format[i].cond) 513 - continue; 511 + perf_hpp__for_each_format(fmt) { 514 512 if (i) 515 513 ret += 2; 516 514 517 - ret += perf_hpp__format[i].width(NULL); 515 + ret += fmt->width(NULL); 518 516 } 519 517 520 518 list_for_each_entry(se, &hist_entry__sort_list, list)
+1
tools/perf/ui/setup.c
··· 30 30 if (fallback_to_pager) 31 31 setup_pager(); 32 32 33 + perf_hpp__column_enable(PERF_HPP__OVERHEAD); 33 34 perf_hpp__init(); 34 35 break; 35 36 }
+7 -10
tools/perf/ui/stdio/hist.c
··· 335 335 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, 336 336 int max_cols, FILE *fp) 337 337 { 338 + struct perf_hpp_fmt *fmt; 338 339 struct sort_entry *se; 339 340 struct rb_node *nd; 340 341 size_t ret = 0; 341 342 unsigned int width; 342 343 const char *sep = symbol_conf.field_sep; 343 344 const char *col_width = symbol_conf.col_width_list_str; 344 - int idx, nr_rows = 0; 345 + int nr_rows = 0; 345 346 char bf[96]; 346 347 struct perf_hpp dummy_hpp = { 347 348 .buf = bf, ··· 356 355 goto print_entries; 357 356 358 357 fprintf(fp, "# "); 359 - for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { 360 - if (!perf_hpp__format[idx].cond) 361 - continue; 362 358 359 + perf_hpp__for_each_format(fmt) { 363 360 if (!first) 364 361 fprintf(fp, "%s", sep ?: " "); 365 362 else 366 363 first = false; 367 364 368 - perf_hpp__format[idx].header(&dummy_hpp); 365 + fmt->header(&dummy_hpp); 369 366 fprintf(fp, "%s", bf); 370 367 } 371 368 ··· 399 400 first = true; 400 401 401 402 fprintf(fp, "# "); 402 - for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { 403 - unsigned int i; 404 403 405 - if (!perf_hpp__format[idx].cond) 406 - continue; 404 + perf_hpp__for_each_format(fmt) { 405 + unsigned int i; 407 406 408 407 if (!first) 409 408 fprintf(fp, "%s", sep ?: " "); 410 409 else 411 410 first = false; 412 411 413 - width = perf_hpp__format[idx].width(&dummy_hpp); 412 + width = fmt->width(&dummy_hpp); 414 413 for (i = 0; i < width; i++) 415 414 fprintf(fp, "."); 416 415 }
+15 -14
tools/perf/ui/tui/helpline.c
··· 8 8 #include "../ui.h" 9 9 #include "../libslang.h" 10 10 11 + char ui_helpline__last_msg[1024]; 12 + 11 13 static void tui_helpline__pop(void) 12 14 { 13 15 } ··· 25 23 strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; 26 24 } 27 25 28 - struct ui_helpline tui_helpline_fns = { 29 - .pop = tui_helpline__pop, 30 - .push = tui_helpline__push, 31 - }; 32 - 33 - void ui_helpline__init(void) 34 - { 35 - helpline_fns = &tui_helpline_fns; 36 - ui_helpline__puts(" "); 37 - } 38 - 39 - char ui_helpline__last_msg[1024]; 40 - 41 - int ui_helpline__show_help(const char *format, va_list ap) 26 + static int tui_helpline__show(const char *format, va_list ap) 42 27 { 43 28 int ret; 44 29 static int backlog; ··· 43 54 pthread_mutex_unlock(&ui__lock); 44 55 45 56 return ret; 57 + } 58 + 59 + struct ui_helpline tui_helpline_fns = { 60 + .pop = tui_helpline__pop, 61 + .push = tui_helpline__push, 62 + .show = tui_helpline__show, 63 + }; 64 + 65 + void ui_helpline__init(void) 66 + { 67 + helpline_fns = &tui_helpline_fns; 68 + ui_helpline__puts(" "); 46 69 }
+10
tools/perf/ui/util.c
··· 52 52 return ret; 53 53 } 54 54 55 + int ui__error_paranoid(void) 56 + { 57 + return ui__error("Permission error - are you root?\n" 58 + "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" 59 + " -1 - Not paranoid at all\n" 60 + " 0 - Disallow raw tracepoint access for unpriv\n" 61 + " 1 - Disallow cpu events for unpriv\n" 62 + " 2 - Disallow kernel profiling for unpriv\n"); 63 + } 64 + 55 65 56 66 /** 57 67 * perf_error__register - Register error logging functions
+5
tools/perf/util/callchain.h
··· 143 143 cursor->curr = cursor->curr->next; 144 144 cursor->pos++; 145 145 } 146 + 147 + struct option; 148 + 149 + int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset); 150 + extern const char record_callchain_help[]; 146 151 #endif /* __PERF_CALLCHAIN_H */
+2 -26
tools/perf/util/debug.c
··· 23 23 24 24 if (verbose >= level) { 25 25 va_start(args, fmt); 26 - if (use_browser == 1) 27 - ret = ui_helpline__show_help(fmt, args); 28 - else if (use_browser == 2) 29 - ret = perf_gtk__show_helpline(fmt, args); 26 + if (use_browser >= 1) 27 + ui_helpline__vshow(fmt, args); 30 28 else 31 29 ret = vfprintf(stderr, fmt, args); 32 30 va_end(args); ··· 45 47 } 46 48 47 49 return ret; 48 - } 49 - 50 - #if !defined(NEWT_SUPPORT) && !defined(GTK2_SUPPORT) 51 - int ui__warning(const char *format, ...) 52 - { 53 - va_list args; 54 - 55 - va_start(args, format); 56 - vfprintf(stderr, format, args); 57 - va_end(args); 58 - return 0; 59 - } 60 - #endif 61 - 62 - int ui__error_paranoid(void) 63 - { 64 - return ui__error("Permission error - are you root?\n" 65 - "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" 66 - " -1 - Not paranoid at all\n" 67 - " 0 - Disallow raw tracepoint access for unpriv\n" 68 - " 1 - Disallow cpu events for unpriv\n" 69 - " 2 - Disallow kernel profiling for unpriv\n"); 70 50 } 71 51 72 52 void trace_event(union perf_event *event)
+2 -31
tools/perf/util/debug.h
··· 5 5 #include <stdbool.h> 6 6 #include "event.h" 7 7 #include "../ui/helpline.h" 8 + #include "../ui/progress.h" 9 + #include "../ui/util.h" 8 10 9 11 extern int verbose; 10 12 extern bool quiet, dump_trace; ··· 14 12 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); 15 13 void trace_event(union perf_event *event); 16 14 17 - struct ui_progress; 18 - struct perf_error_ops; 19 - 20 - #if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT) 21 - 22 - #include "../ui/progress.h" 23 15 int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); 24 - #include "../ui/util.h" 25 - 26 - #else 27 - 28 - static inline void ui_progress__update(u64 curr __maybe_unused, 29 - u64 total __maybe_unused, 30 - const char *title __maybe_unused) {} 31 - static inline void ui_progress__finish(void) {} 32 - 33 - #define ui__error(format, arg...) ui__warning(format, ##arg) 34 - 35 - static inline int 36 - perf_error__register(struct perf_error_ops *eops __maybe_unused) 37 - { 38 - return 0; 39 - } 40 - 41 - static inline int 42 - perf_error__unregister(struct perf_error_ops *eops __maybe_unused) 43 - { 44 - return 0; 45 - } 46 - 47 - #endif /* NEWT_SUPPORT || GTK2_SUPPORT */ 48 - 49 16 int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); 50 17 int ui__error_paranoid(void); 51 18
+3 -3
tools/perf/util/dso.c
··· 539 539 } 540 540 541 541 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 542 - bool with_hits) 542 + bool (skip)(struct dso *dso, int parm), int parm) 543 543 { 544 544 struct dso *pos; 545 545 size_t ret = 0; 546 546 547 547 list_for_each_entry(pos, head, node) { 548 - if (with_hits && !pos->hit) 548 + if (skip && skip(pos, parm)) 549 549 continue; 550 550 ret += dso__fprintf_buildid(pos, fp); 551 551 ret += fprintf(fp, " %s\n", pos->long_name); ··· 583 583 if (dso->short_name != dso->long_name) 584 584 ret += fprintf(fp, "%s, ", dso->long_name); 585 585 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], 586 - dso->loaded ? "" : "NOT "); 586 + dso__loaded(dso, type) ? "" : "NOT "); 587 587 ret += dso__fprintf_buildid(dso, fp); 588 588 ret += fprintf(fp, ")\n"); 589 589 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
+1 -1
tools/perf/util/dso.h
··· 138 138 bool __dsos__read_build_ids(struct list_head *head, bool with_hits); 139 139 140 140 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 141 - bool with_hits); 141 + bool (skip)(struct dso *dso, int parm), int parm); 142 142 size_t __dsos__fprintf(struct list_head *head, FILE *fp); 143 143 144 144 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
+11 -6
tools/perf/util/evlist.c
··· 49 49 return evlist; 50 50 } 51 51 52 - void perf_evlist__config_attrs(struct perf_evlist *evlist, 53 - struct perf_record_opts *opts) 52 + void perf_evlist__config(struct perf_evlist *evlist, 53 + struct perf_record_opts *opts) 54 54 { 55 55 struct perf_evsel *evsel; 56 + /* 57 + * Set the evsel leader links before we configure attributes, 58 + * since some might depend on this info. 59 + */ 60 + if (opts->group) 61 + perf_evlist__set_leader(evlist); 56 62 57 63 if (evlist->cpus->map[0] < 0) 58 64 opts->no_inherit = true; ··· 67 61 perf_evsel__config(evsel, opts); 68 62 69 63 if (evlist->nr_entries > 1) 70 - evsel->attr.sample_type |= PERF_SAMPLE_ID; 64 + perf_evsel__set_sample_id(evsel); 71 65 } 72 66 } 73 67 ··· 117 111 struct perf_evsel *evsel, *leader; 118 112 119 113 leader = list_entry(list->next, struct perf_evsel, node); 120 - leader->leader = NULL; 121 114 122 115 list_for_each_entry(evsel, list, node) { 123 116 if (evsel != leader) ··· 227 222 228 223 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 229 224 list_for_each_entry(pos, &evlist->entries, node) { 230 - if (perf_evsel__is_group_member(pos)) 225 + if (!perf_evsel__is_group_leader(pos)) 231 226 continue; 232 227 for (thread = 0; thread < evlist->threads->nr; thread++) 233 228 ioctl(FD(pos, cpu, thread), ··· 243 238 244 239 for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { 245 240 list_for_each_entry(pos, &evlist->entries, node) { 246 - if (perf_evsel__is_group_member(pos)) 241 + if (!perf_evsel__is_group_leader(pos)) 247 242 continue; 248 243 for (thread = 0; thread < evlist->threads->nr; thread++) 249 244 ioctl(FD(pos, cpu, thread),
+2 -2
tools/perf/util/evlist.h
··· 76 76 77 77 int perf_evlist__open(struct perf_evlist *evlist); 78 78 79 - void perf_evlist__config_attrs(struct perf_evlist *evlist, 80 - struct perf_record_opts *opts); 79 + void perf_evlist__config(struct perf_evlist *evlist, 80 + struct perf_record_opts *opts); 81 81 82 82 int perf_evlist__prepare_workload(struct perf_evlist *evlist, 83 83 struct perf_record_opts *opts,
+167 -19
tools/perf/util/evsel.c
··· 50 50 pthread_mutex_init(&hists->lock, NULL); 51 51 } 52 52 53 + void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, 54 + enum perf_event_sample_format bit) 55 + { 56 + if (!(evsel->attr.sample_type & bit)) { 57 + evsel->attr.sample_type |= bit; 58 + evsel->sample_size += sizeof(u64); 59 + } 60 + } 61 + 62 + void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, 63 + enum perf_event_sample_format bit) 64 + { 65 + if (evsel->attr.sample_type & bit) { 66 + evsel->attr.sample_type &= ~bit; 67 + evsel->sample_size -= sizeof(u64); 68 + } 69 + } 70 + 71 + void perf_evsel__set_sample_id(struct perf_evsel *evsel) 72 + { 73 + perf_evsel__set_sample_bit(evsel, ID); 74 + evsel->attr.read_format |= PERF_FORMAT_ID; 75 + } 76 + 53 77 void perf_evsel__init(struct perf_evsel *evsel, 54 78 struct perf_event_attr *attr, int idx) 55 79 { 56 80 evsel->idx = idx; 57 81 evsel->attr = *attr; 82 + evsel->leader = evsel; 58 83 INIT_LIST_HEAD(&evsel->node); 59 84 hists__init(&evsel->hists); 60 85 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); ··· 465 440 466 441 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; 467 442 attr->inherit = !opts->no_inherit; 468 - attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 469 - PERF_FORMAT_TOTAL_TIME_RUNNING | 470 - PERF_FORMAT_ID; 471 443 472 - attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; 444 + perf_evsel__set_sample_bit(evsel, IP); 445 + perf_evsel__set_sample_bit(evsel, TID); 473 446 474 447 /* 475 448 * We default some events to a 1 default interval. 
But keep ··· 476 453 if (!attr->sample_period || (opts->user_freq != UINT_MAX && 477 454 opts->user_interval != ULLONG_MAX)) { 478 455 if (opts->freq) { 479 - attr->sample_type |= PERF_SAMPLE_PERIOD; 456 + perf_evsel__set_sample_bit(evsel, PERIOD); 480 457 attr->freq = 1; 481 458 attr->sample_freq = opts->freq; 482 459 } else { ··· 491 468 attr->inherit_stat = 1; 492 469 493 470 if (opts->sample_address) { 494 - attr->sample_type |= PERF_SAMPLE_ADDR; 471 + perf_evsel__set_sample_bit(evsel, ADDR); 495 472 attr->mmap_data = track; 496 473 } 497 474 498 475 if (opts->call_graph) { 499 - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; 476 + perf_evsel__set_sample_bit(evsel, CALLCHAIN); 500 477 501 478 if (opts->call_graph == CALLCHAIN_DWARF) { 502 - attr->sample_type |= PERF_SAMPLE_REGS_USER | 503 - PERF_SAMPLE_STACK_USER; 479 + perf_evsel__set_sample_bit(evsel, REGS_USER); 480 + perf_evsel__set_sample_bit(evsel, STACK_USER); 504 481 attr->sample_regs_user = PERF_REGS_MASK; 505 482 attr->sample_stack_user = opts->stack_dump_size; 506 483 attr->exclude_callchain_user = 1; ··· 508 485 } 509 486 510 487 if (perf_target__has_cpu(&opts->target)) 511 - attr->sample_type |= PERF_SAMPLE_CPU; 488 + perf_evsel__set_sample_bit(evsel, CPU); 512 489 513 490 if (opts->period) 514 - attr->sample_type |= PERF_SAMPLE_PERIOD; 491 + perf_evsel__set_sample_bit(evsel, PERIOD); 515 492 516 493 if (!opts->sample_id_all_missing && 517 494 (opts->sample_time || !opts->no_inherit || 518 495 perf_target__has_cpu(&opts->target))) 519 - attr->sample_type |= PERF_SAMPLE_TIME; 496 + perf_evsel__set_sample_bit(evsel, TIME); 520 497 521 498 if (opts->raw_samples) { 522 - attr->sample_type |= PERF_SAMPLE_TIME; 523 - attr->sample_type |= PERF_SAMPLE_RAW; 524 - attr->sample_type |= PERF_SAMPLE_CPU; 499 + perf_evsel__set_sample_bit(evsel, TIME); 500 + perf_evsel__set_sample_bit(evsel, RAW); 501 + perf_evsel__set_sample_bit(evsel, CPU); 525 502 } 526 503 527 504 if (opts->no_delay) { ··· 529 506 
attr->wakeup_events = 1; 530 507 } 531 508 if (opts->branch_stack) { 532 - attr->sample_type |= PERF_SAMPLE_BRANCH_STACK; 509 + perf_evsel__set_sample_bit(evsel, BRANCH_STACK); 533 510 attr->branch_sample_type = opts->branch_stack; 534 511 } 535 512 ··· 542 519 * Disabling only independent events or group leaders, 543 520 * keeping group members enabled. 544 521 */ 545 - if (!perf_evsel__is_group_member(evsel)) 522 + if (perf_evsel__is_group_leader(evsel)) 546 523 attr->disabled = 1; 547 524 548 525 /* 549 526 * Setting enable_on_exec for independent events and 550 527 * group leaders for traced executed by perf. 551 528 */ 552 - if (perf_target__none(&opts->target) && !perf_evsel__is_group_member(evsel)) 529 + if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel)) 553 530 attr->enable_on_exec = 1; 554 531 } 555 532 ··· 730 707 struct perf_evsel *leader = evsel->leader; 731 708 int fd; 732 709 733 - if (!perf_evsel__is_group_member(evsel)) 710 + if (perf_evsel__is_group_leader(evsel)) 734 711 return -1; 735 712 736 713 /* ··· 1227 1204 } 1228 1205 1229 1206 return 0; 1207 + } 1208 + 1209 + static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) 
1210 + { 1211 + va_list args; 1212 + int ret = 0; 1213 + 1214 + if (!*first) { 1215 + ret += fprintf(fp, ","); 1216 + } else { 1217 + ret += fprintf(fp, ":"); 1218 + *first = false; 1219 + } 1220 + 1221 + va_start(args, fmt); 1222 + ret += vfprintf(fp, fmt, args); 1223 + va_end(args); 1224 + return ret; 1225 + } 1226 + 1227 + static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value) 1228 + { 1229 + if (value == 0) 1230 + return 0; 1231 + 1232 + return comma_fprintf(fp, first, " %s: %" PRIu64, field, value); 1233 + } 1234 + 1235 + #define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field) 1236 + 1237 + struct bit_names { 1238 + int bit; 1239 + const char *name; 1240 + }; 1241 + 1242 + static int bits__fprintf(FILE *fp, const char *field, u64 value, 1243 + struct bit_names *bits, bool *first) 1244 + { 1245 + int i = 0, printed = comma_fprintf(fp, first, " %s: ", field); 1246 + bool first_bit = true; 1247 + 1248 + do { 1249 + if (value & bits[i].bit) { 1250 + printed += fprintf(fp, "%s%s", first_bit ? 
"" : "|", bits[i].name); 1251 + first_bit = false; 1252 + } 1253 + } while (bits[++i].name != NULL); 1254 + 1255 + return printed; 1256 + } 1257 + 1258 + static int sample_type__fprintf(FILE *fp, bool *first, u64 value) 1259 + { 1260 + #define bit_name(n) { PERF_SAMPLE_##n, #n } 1261 + struct bit_names bits[] = { 1262 + bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), 1263 + bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), 1264 + bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), 1265 + bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), 1266 + { .name = NULL, } 1267 + }; 1268 + #undef bit_name 1269 + return bits__fprintf(fp, "sample_type", value, bits, first); 1270 + } 1271 + 1272 + static int read_format__fprintf(FILE *fp, bool *first, u64 value) 1273 + { 1274 + #define bit_name(n) { PERF_FORMAT_##n, #n } 1275 + struct bit_names bits[] = { 1276 + bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), 1277 + bit_name(ID), bit_name(GROUP), 1278 + { .name = NULL, } 1279 + }; 1280 + #undef bit_name 1281 + return bits__fprintf(fp, "read_format", value, bits, first); 1282 + } 1283 + 1284 + int perf_evsel__fprintf(struct perf_evsel *evsel, 1285 + struct perf_attr_details *details, FILE *fp) 1286 + { 1287 + bool first = true; 1288 + int printed = fprintf(fp, "%s", perf_evsel__name(evsel)); 1289 + 1290 + if (details->verbose || details->freq) { 1291 + printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, 1292 + (u64)evsel->attr.sample_freq); 1293 + } 1294 + 1295 + if (details->verbose) { 1296 + if_print(type); 1297 + if_print(config); 1298 + if_print(config1); 1299 + if_print(config2); 1300 + if_print(size); 1301 + printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type); 1302 + if (evsel->attr.read_format) 1303 + printed += read_format__fprintf(fp, &first, evsel->attr.read_format); 1304 + if_print(disabled); 1305 + if_print(inherit); 1306 + if_print(pinned); 1307 + if_print(exclusive); 1308 + 
if_print(exclude_user); 1309 + if_print(exclude_kernel); 1310 + if_print(exclude_hv); 1311 + if_print(exclude_idle); 1312 + if_print(mmap); 1313 + if_print(comm); 1314 + if_print(freq); 1315 + if_print(inherit_stat); 1316 + if_print(enable_on_exec); 1317 + if_print(task); 1318 + if_print(watermark); 1319 + if_print(precise_ip); 1320 + if_print(mmap_data); 1321 + if_print(sample_id_all); 1322 + if_print(exclude_host); 1323 + if_print(exclude_guest); 1324 + if_print(__reserved_1); 1325 + if_print(wakeup_events); 1326 + if_print(bp_type); 1327 + if_print(branch_sample_type); 1328 + } 1329 + 1330 + fputc('\n', fp); 1331 + return ++printed; 1230 1332 }
+23 -2
tools/perf/util/evsel.h
··· 118 118 void perf_evsel__free_id(struct perf_evsel *evsel); 119 119 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 120 120 121 + void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, 122 + enum perf_event_sample_format bit); 123 + void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, 124 + enum perf_event_sample_format bit); 125 + 126 + #define perf_evsel__set_sample_bit(evsel, bit) \ 127 + __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit) 128 + 129 + #define perf_evsel__reset_sample_bit(evsel, bit) \ 130 + __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit) 131 + 132 + void perf_evsel__set_sample_id(struct perf_evsel *evsel); 133 + 121 134 int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, 122 135 const char *filter); 123 136 ··· 239 226 return list_entry(evsel->node.next, struct perf_evsel, node); 240 227 } 241 228 242 - static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel) 229 + static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel) 243 230 { 244 - return evsel->leader != NULL; 231 + return evsel->leader == evsel; 245 232 } 233 + 234 + struct perf_attr_details { 235 + bool freq; 236 + bool verbose; 237 + }; 238 + 239 + int perf_evsel__fprintf(struct perf_evsel *evsel, 240 + struct perf_attr_details *details, FILE *fp); 246 241 #endif /* __PERF_EVSEL_H */
+2 -2
tools/perf/util/hist.c
··· 785 785 pair = hists__find_entry(other, pos); 786 786 787 787 if (pair) 788 - hist__entry_add_pair(pos, pair); 788 + hist_entry__add_pair(pair, pos); 789 789 } 790 790 } 791 791 ··· 806 806 pair = hists__add_dummy_entry(leader, pos); 807 807 if (pair == NULL) 808 808 return -1; 809 - hist__entry_add_pair(pair, pos); 809 + hist_entry__add_pair(pos, pair); 810 810 } 811 811 } 812 812
+15 -7
tools/perf/util/hist.h
··· 126 126 }; 127 127 128 128 struct perf_hpp_fmt { 129 - bool cond; 130 129 int (*header)(struct perf_hpp *hpp); 131 130 int (*width)(struct perf_hpp *hpp); 132 131 int (*color)(struct perf_hpp *hpp, struct hist_entry *he); 133 132 int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); 133 + 134 + struct list_head list; 134 135 }; 136 + 137 + extern struct list_head perf_hpp__list; 138 + 139 + #define perf_hpp__for_each_format(format) \ 140 + list_for_each_entry(format, &perf_hpp__list, list) 135 141 136 142 extern struct perf_hpp_fmt perf_hpp__format[]; 137 143 ··· 154 148 PERF_HPP__DELTA, 155 149 PERF_HPP__RATIO, 156 150 PERF_HPP__WEIGHTED_DIFF, 157 - PERF_HPP__DISPL, 158 151 PERF_HPP__FORMULA, 159 152 160 153 PERF_HPP__MAX_INDEX 161 154 }; 162 155 163 156 void perf_hpp__init(void); 164 - void perf_hpp__column_enable(unsigned col, bool enable); 157 + void perf_hpp__column_register(struct perf_hpp_fmt *format); 158 + void perf_hpp__column_enable(unsigned col); 165 159 int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, 166 160 bool color); 167 161 ··· 225 219 226 220 unsigned int hists__sort_list_width(struct hists *self); 227 221 228 - double perf_diff__compute_delta(struct hist_entry *he); 229 - double perf_diff__compute_ratio(struct hist_entry *he); 230 - s64 perf_diff__compute_wdiff(struct hist_entry *he); 231 - int perf_diff__formula(char *buf, size_t size, struct hist_entry *he); 222 + double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair); 223 + double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair); 224 + s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair); 225 + int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair, 226 + char *buf, size_t size); 227 + double perf_diff__period_percent(struct hist_entry *he, u64 period); 232 228 #endif /* __PERF_HIST_H */
+742
tools/perf/util/machine.c
··· 1 + #include "callchain.h" 1 2 #include "debug.h" 2 3 #include "event.h" 4 + #include "evsel.h" 5 + #include "hist.h" 3 6 #include "machine.h" 4 7 #include "map.h" 8 + #include "sort.h" 5 9 #include "strlist.h" 6 10 #include "thread.h" 7 11 #include <stdbool.h> 12 + #include "unwind.h" 8 13 9 14 int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 10 15 { ··· 50 45 list_for_each_entry_safe(pos, n, dsos, node) { 51 46 list_del(&pos->node); 52 47 dso__delete(pos); 48 + } 49 + } 50 + 51 + void machine__delete_dead_threads(struct machine *machine) 52 + { 53 + struct thread *n, *t; 54 + 55 + list_for_each_entry_safe(t, n, &machine->dead_threads, node) { 56 + list_del(&t->node); 57 + thread__delete(t); 58 + } 59 + } 60 + 61 + void machine__delete_threads(struct machine *machine) 62 + { 63 + struct rb_node *nd = rb_first(&machine->threads); 64 + 65 + while (nd) { 66 + struct thread *t = rb_entry(nd, struct thread, rb_node); 67 + 68 + rb_erase(&t->rb_node, &machine->threads); 69 + nd = rb_next(nd); 70 + thread__delete(t); 53 71 } 54 72 } 55 73 ··· 292 264 return 0; 293 265 } 294 266 267 + struct map *machine__new_module(struct machine *machine, u64 start, 268 + const char *filename) 269 + { 270 + struct map *map; 271 + struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); 272 + 273 + if (dso == NULL) 274 + return NULL; 275 + 276 + map = map__new2(start, dso, MAP__FUNCTION); 277 + if (map == NULL) 278 + return NULL; 279 + 280 + if (machine__is_host(machine)) 281 + dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 282 + else 283 + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 284 + map_groups__insert(&machine->kmaps, map); 285 + return map; 286 + } 287 + 288 + size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) 289 + { 290 + struct rb_node *nd; 291 + size_t ret = 0; 292 + 293 + for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 294 + struct machine *pos = rb_entry(nd, struct machine, rb_node); 295 + ret += 
__dsos__fprintf(&pos->kernel_dsos, fp); 296 + ret += __dsos__fprintf(&pos->user_dsos, fp); 297 + } 298 + 299 + return ret; 300 + } 301 + 302 + size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, 303 + bool (skip)(struct dso *dso, int parm), int parm) 304 + { 305 + return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) + 306 + __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm); 307 + } 308 + 309 + size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, 310 + bool (skip)(struct dso *dso, int parm), int parm) 311 + { 312 + struct rb_node *nd; 313 + size_t ret = 0; 314 + 315 + for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 316 + struct machine *pos = rb_entry(nd, struct machine, rb_node); 317 + ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 318 + } 319 + return ret; 320 + } 321 + 322 + size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) 323 + { 324 + int i; 325 + size_t printed = 0; 326 + struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; 327 + 328 + if (kdso->has_build_id) { 329 + char filename[PATH_MAX]; 330 + if (dso__build_id_filename(kdso, filename, sizeof(filename))) 331 + printed += fprintf(fp, "[0] %s\n", filename); 332 + } 333 + 334 + for (i = 0; i < vmlinux_path__nr_entries; ++i) 335 + printed += fprintf(fp, "[%d] %s\n", 336 + i + kdso->has_build_id, vmlinux_path[i]); 337 + 338 + return printed; 339 + } 340 + 341 + size_t machine__fprintf(struct machine *machine, FILE *fp) 342 + { 343 + size_t ret = 0; 344 + struct rb_node *nd; 345 + 346 + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { 347 + struct thread *pos = rb_entry(nd, struct thread, rb_node); 348 + 349 + ret += thread__fprintf(pos, fp); 350 + } 351 + 352 + return ret; 353 + } 354 + 355 + static struct dso *machine__get_kernel(struct machine *machine) 356 + { 357 + const char *vmlinux_name = NULL; 358 + struct dso *kernel; 359 + 360 + if (machine__is_host(machine)) { 361 + 
vmlinux_name = symbol_conf.vmlinux_name; 362 + if (!vmlinux_name) 363 + vmlinux_name = "[kernel.kallsyms]"; 364 + 365 + kernel = dso__kernel_findnew(machine, vmlinux_name, 366 + "[kernel]", 367 + DSO_TYPE_KERNEL); 368 + } else { 369 + char bf[PATH_MAX]; 370 + 371 + if (machine__is_default_guest(machine)) 372 + vmlinux_name = symbol_conf.default_guest_vmlinux_name; 373 + if (!vmlinux_name) 374 + vmlinux_name = machine__mmap_name(machine, bf, 375 + sizeof(bf)); 376 + 377 + kernel = dso__kernel_findnew(machine, vmlinux_name, 378 + "[guest.kernel]", 379 + DSO_TYPE_GUEST_KERNEL); 380 + } 381 + 382 + if (kernel != NULL && (!kernel->has_build_id)) 383 + dso__read_running_kernel_build_id(kernel, machine); 384 + 385 + return kernel; 386 + } 387 + 388 + struct process_args { 389 + u64 start; 390 + }; 391 + 392 + static int symbol__in_kernel(void *arg, const char *name, 393 + char type __maybe_unused, u64 start) 394 + { 395 + struct process_args *args = arg; 396 + 397 + if (strchr(name, '[')) 398 + return 0; 399 + 400 + args->start = start; 401 + return 1; 402 + } 403 + 404 + /* Figure out the start address of kernel map from /proc/kallsyms */ 405 + static u64 machine__get_kernel_start_addr(struct machine *machine) 406 + { 407 + const char *filename; 408 + char path[PATH_MAX]; 409 + struct process_args args; 410 + 411 + if (machine__is_host(machine)) { 412 + filename = "/proc/kallsyms"; 413 + } else { 414 + if (machine__is_default_guest(machine)) 415 + filename = (char *)symbol_conf.default_guest_kallsyms; 416 + else { 417 + sprintf(path, "%s/proc/kallsyms", machine->root_dir); 418 + filename = path; 419 + } 420 + } 421 + 422 + if (symbol__restricted_filename(filename, "/proc/kallsyms")) 423 + return 0; 424 + 425 + if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) 426 + return 0; 427 + 428 + return args.start; 429 + } 430 + 431 + int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 432 + { 433 + enum map_type type; 434 + u64 start = 
machine__get_kernel_start_addr(machine); 435 + 436 + for (type = 0; type < MAP__NR_TYPES; ++type) { 437 + struct kmap *kmap; 438 + 439 + machine->vmlinux_maps[type] = map__new2(start, kernel, type); 440 + if (machine->vmlinux_maps[type] == NULL) 441 + return -1; 442 + 443 + machine->vmlinux_maps[type]->map_ip = 444 + machine->vmlinux_maps[type]->unmap_ip = 445 + identity__map_ip; 446 + kmap = map__kmap(machine->vmlinux_maps[type]); 447 + kmap->kmaps = &machine->kmaps; 448 + map_groups__insert(&machine->kmaps, 449 + machine->vmlinux_maps[type]); 450 + } 451 + 452 + return 0; 453 + } 454 + 455 + void machine__destroy_kernel_maps(struct machine *machine) 456 + { 457 + enum map_type type; 458 + 459 + for (type = 0; type < MAP__NR_TYPES; ++type) { 460 + struct kmap *kmap; 461 + 462 + if (machine->vmlinux_maps[type] == NULL) 463 + continue; 464 + 465 + kmap = map__kmap(machine->vmlinux_maps[type]); 466 + map_groups__remove(&machine->kmaps, 467 + machine->vmlinux_maps[type]); 468 + if (kmap->ref_reloc_sym) { 469 + /* 470 + * ref_reloc_sym is shared among all maps, so free just 471 + * on one of them. 
472 + */ 473 + if (type == MAP__FUNCTION) { 474 + free((char *)kmap->ref_reloc_sym->name); 475 + kmap->ref_reloc_sym->name = NULL; 476 + free(kmap->ref_reloc_sym); 477 + } 478 + kmap->ref_reloc_sym = NULL; 479 + } 480 + 481 + map__delete(machine->vmlinux_maps[type]); 482 + machine->vmlinux_maps[type] = NULL; 483 + } 484 + } 485 + 486 + int machines__create_guest_kernel_maps(struct rb_root *machines) 487 + { 488 + int ret = 0; 489 + struct dirent **namelist = NULL; 490 + int i, items = 0; 491 + char path[PATH_MAX]; 492 + pid_t pid; 493 + char *endp; 494 + 495 + if (symbol_conf.default_guest_vmlinux_name || 496 + symbol_conf.default_guest_modules || 497 + symbol_conf.default_guest_kallsyms) { 498 + machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); 499 + } 500 + 501 + if (symbol_conf.guestmount) { 502 + items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 503 + if (items <= 0) 504 + return -ENOENT; 505 + for (i = 0; i < items; i++) { 506 + if (!isdigit(namelist[i]->d_name[0])) { 507 + /* Filter out . and .. */ 508 + continue; 509 + } 510 + pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); 511 + if ((*endp != '\0') || 512 + (endp == namelist[i]->d_name) || 513 + (errno == ERANGE)) { 514 + pr_debug("invalid directory (%s). 
Skipping.\n", 515 + namelist[i]->d_name); 516 + continue; 517 + } 518 + sprintf(path, "%s/%s/proc/kallsyms", 519 + symbol_conf.guestmount, 520 + namelist[i]->d_name); 521 + ret = access(path, R_OK); 522 + if (ret) { 523 + pr_debug("Can't access file %s\n", path); 524 + goto failure; 525 + } 526 + machines__create_kernel_maps(machines, pid); 527 + } 528 + failure: 529 + free(namelist); 530 + } 531 + 532 + return ret; 533 + } 534 + 535 + void machines__destroy_guest_kernel_maps(struct rb_root *machines) 536 + { 537 + struct rb_node *next = rb_first(machines); 538 + 539 + while (next) { 540 + struct machine *pos = rb_entry(next, struct machine, rb_node); 541 + 542 + next = rb_next(&pos->rb_node); 543 + rb_erase(&pos->rb_node, machines); 544 + machine__delete(pos); 545 + } 546 + } 547 + 548 + int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) 549 + { 550 + struct machine *machine = machines__findnew(machines, pid); 551 + 552 + if (machine == NULL) 553 + return -1; 554 + 555 + return machine__create_kernel_maps(machine); 556 + } 557 + 558 + int machine__load_kallsyms(struct machine *machine, const char *filename, 559 + enum map_type type, symbol_filter_t filter) 560 + { 561 + struct map *map = machine->vmlinux_maps[type]; 562 + int ret = dso__load_kallsyms(map->dso, filename, map, filter); 563 + 564 + if (ret > 0) { 565 + dso__set_loaded(map->dso, type); 566 + /* 567 + * Since /proc/kallsyms will have multiple sessions for the 568 + * kernel, with modules between them, fixup the end of all 569 + * sections. 
570 + */ 571 + __map_groups__fixup_end(&machine->kmaps, type); 572 + } 573 + 574 + return ret; 575 + } 576 + 577 + int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 578 + symbol_filter_t filter) 579 + { 580 + struct map *map = machine->vmlinux_maps[type]; 581 + int ret = dso__load_vmlinux_path(map->dso, map, filter); 582 + 583 + if (ret > 0) { 584 + dso__set_loaded(map->dso, type); 585 + map__reloc_vmlinux(map); 586 + } 587 + 588 + return ret; 589 + } 590 + 591 + static void map_groups__fixup_end(struct map_groups *mg) 592 + { 593 + int i; 594 + for (i = 0; i < MAP__NR_TYPES; ++i) 595 + __map_groups__fixup_end(mg, i); 596 + } 597 + 598 + static char *get_kernel_version(const char *root_dir) 599 + { 600 + char version[PATH_MAX]; 601 + FILE *file; 602 + char *name, *tmp; 603 + const char *prefix = "Linux version "; 604 + 605 + sprintf(version, "%s/proc/version", root_dir); 606 + file = fopen(version, "r"); 607 + if (!file) 608 + return NULL; 609 + 610 + version[0] = '\0'; 611 + tmp = fgets(version, sizeof(version), file); 612 + fclose(file); 613 + 614 + name = strstr(version, prefix); 615 + if (!name) 616 + return NULL; 617 + name += strlen(prefix); 618 + tmp = strchr(name, ' '); 619 + if (tmp) 620 + *tmp = '\0'; 621 + 622 + return strdup(name); 623 + } 624 + 625 + static int map_groups__set_modules_path_dir(struct map_groups *mg, 626 + const char *dir_name) 627 + { 628 + struct dirent *dent; 629 + DIR *dir = opendir(dir_name); 630 + int ret = 0; 631 + 632 + if (!dir) { 633 + pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 634 + return -1; 635 + } 636 + 637 + while ((dent = readdir(dir)) != NULL) { 638 + char path[PATH_MAX]; 639 + struct stat st; 640 + 641 + /*sshfs might return bad dent->d_type, so we have to stat*/ 642 + snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); 643 + if (stat(path, &st)) 644 + continue; 645 + 646 + if (S_ISDIR(st.st_mode)) { 647 + if (!strcmp(dent->d_name, ".") || 648 + !strcmp(dent->d_name, 
"..")) 649 + continue; 650 + 651 + ret = map_groups__set_modules_path_dir(mg, path); 652 + if (ret < 0) 653 + goto out; 654 + } else { 655 + char *dot = strrchr(dent->d_name, '.'), 656 + dso_name[PATH_MAX]; 657 + struct map *map; 658 + char *long_name; 659 + 660 + if (dot == NULL || strcmp(dot, ".ko")) 661 + continue; 662 + snprintf(dso_name, sizeof(dso_name), "[%.*s]", 663 + (int)(dot - dent->d_name), dent->d_name); 664 + 665 + strxfrchar(dso_name, '-', '_'); 666 + map = map_groups__find_by_name(mg, MAP__FUNCTION, 667 + dso_name); 668 + if (map == NULL) 669 + continue; 670 + 671 + long_name = strdup(path); 672 + if (long_name == NULL) { 673 + ret = -1; 674 + goto out; 675 + } 676 + dso__set_long_name(map->dso, long_name); 677 + map->dso->lname_alloc = 1; 678 + dso__kernel_module_get_build_id(map->dso, ""); 679 + } 680 + } 681 + 682 + out: 683 + closedir(dir); 684 + return ret; 685 + } 686 + 687 + static int machine__set_modules_path(struct machine *machine) 688 + { 689 + char *version; 690 + char modules_path[PATH_MAX]; 691 + 692 + version = get_kernel_version(machine->root_dir); 693 + if (!version) 694 + return -1; 695 + 696 + snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", 697 + machine->root_dir, version); 698 + free(version); 699 + 700 + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); 701 + } 702 + 703 + static int machine__create_modules(struct machine *machine) 704 + { 705 + char *line = NULL; 706 + size_t n; 707 + FILE *file; 708 + struct map *map; 709 + const char *modules; 710 + char path[PATH_MAX]; 711 + 712 + if (machine__is_default_guest(machine)) 713 + modules = symbol_conf.default_guest_modules; 714 + else { 715 + sprintf(path, "%s/proc/modules", machine->root_dir); 716 + modules = path; 717 + } 718 + 719 + if (symbol__restricted_filename(path, "/proc/modules")) 720 + return -1; 721 + 722 + file = fopen(modules, "r"); 723 + if (file == NULL) 724 + return -1; 725 + 726 + while (!feof(file)) { 727 + char 
name[PATH_MAX]; 728 + u64 start; 729 + char *sep; 730 + int line_len; 731 + 732 + line_len = getline(&line, &n, file); 733 + if (line_len < 0) 734 + break; 735 + 736 + if (!line) 737 + goto out_failure; 738 + 739 + line[--line_len] = '\0'; /* \n */ 740 + 741 + sep = strrchr(line, 'x'); 742 + if (sep == NULL) 743 + continue; 744 + 745 + hex2u64(sep + 1, &start); 746 + 747 + sep = strchr(line, ' '); 748 + if (sep == NULL) 749 + continue; 750 + 751 + *sep = '\0'; 752 + 753 + snprintf(name, sizeof(name), "[%s]", line); 754 + map = machine__new_module(machine, start, name); 755 + if (map == NULL) 756 + goto out_delete_line; 757 + dso__kernel_module_get_build_id(map->dso, machine->root_dir); 758 + } 759 + 760 + free(line); 761 + fclose(file); 762 + 763 + return machine__set_modules_path(machine); 764 + 765 + out_delete_line: 766 + free(line); 767 + out_failure: 768 + return -1; 769 + } 770 + 771 + int machine__create_kernel_maps(struct machine *machine) 772 + { 773 + struct dso *kernel = machine__get_kernel(machine); 774 + 775 + if (kernel == NULL || 776 + __machine__create_kernel_maps(machine, kernel) < 0) 777 + return -1; 778 + 779 + if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 780 + if (machine__is_host(machine)) 781 + pr_debug("Problems creating module maps, " 782 + "continuing anyway...\n"); 783 + else 784 + pr_debug("Problems creating module maps for guest %d, " 785 + "continuing anyway...\n", machine->pid); 786 + } 787 + 788 + /* 789 + * Now that we have all the maps created, just set the ->end of them: 790 + */ 791 + map_groups__fixup_end(&machine->kmaps); 792 + return 0; 793 + } 794 + 295 795 static void machine__set_kernel_mmap_len(struct machine *machine, 296 796 union perf_event *event) 297 797 { ··· 1017 461 } 1018 462 1019 463 return ret; 464 + } 465 + 466 + void machine__remove_thread(struct machine *machine, struct thread *th) 467 + { 468 + machine->last_match = NULL; 469 + rb_erase(&th->rb_node, &machine->threads); 470 + /* 471 
+ * We may have references to this thread, for instance in some hist_entry 472 + * instances, so just move them to a separate list. 473 + */ 474 + list_add_tail(&th->node, &machine->dead_threads); 475 + } 476 + 477 + static bool symbol__match_parent_regex(struct symbol *sym) 478 + { 479 + if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 480 + return 1; 481 + 482 + return 0; 483 + } 484 + 485 + static const u8 cpumodes[] = { 486 + PERF_RECORD_MISC_USER, 487 + PERF_RECORD_MISC_KERNEL, 488 + PERF_RECORD_MISC_GUEST_USER, 489 + PERF_RECORD_MISC_GUEST_KERNEL 490 + }; 491 + #define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) 492 + 493 + static void ip__resolve_ams(struct machine *machine, struct thread *thread, 494 + struct addr_map_symbol *ams, 495 + u64 ip) 496 + { 497 + struct addr_location al; 498 + size_t i; 499 + u8 m; 500 + 501 + memset(&al, 0, sizeof(al)); 502 + 503 + for (i = 0; i < NCPUMODES; i++) { 504 + m = cpumodes[i]; 505 + /* 506 + * We cannot use the header.misc hint to determine whether a 507 + * branch stack address is user, kernel, guest, hypervisor. 508 + * Branches may straddle the kernel/user/hypervisor boundaries. 
509 + * Thus, we have to try consecutively until we find a match 510 + * or else, the symbol is unknown 511 + */ 512 + thread__find_addr_location(thread, machine, m, MAP__FUNCTION, 513 + ip, &al, NULL); 514 + if (al.sym) 515 + goto found; 516 + } 517 + found: 518 + ams->addr = ip; 519 + ams->al_addr = al.addr; 520 + ams->sym = al.sym; 521 + ams->map = al.map; 522 + } 523 + 524 + struct branch_info *machine__resolve_bstack(struct machine *machine, 525 + struct thread *thr, 526 + struct branch_stack *bs) 527 + { 528 + struct branch_info *bi; 529 + unsigned int i; 530 + 531 + bi = calloc(bs->nr, sizeof(struct branch_info)); 532 + if (!bi) 533 + return NULL; 534 + 535 + for (i = 0; i < bs->nr; i++) { 536 + ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to); 537 + ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from); 538 + bi[i].flags = bs->entries[i].flags; 539 + } 540 + return bi; 541 + } 542 + 543 + static int machine__resolve_callchain_sample(struct machine *machine, 544 + struct thread *thread, 545 + struct ip_callchain *chain, 546 + struct symbol **parent) 547 + 548 + { 549 + u8 cpumode = PERF_RECORD_MISC_USER; 550 + unsigned int i; 551 + int err; 552 + 553 + callchain_cursor_reset(&callchain_cursor); 554 + 555 + if (chain->nr > PERF_MAX_STACK_DEPTH) { 556 + pr_warning("corrupted callchain. 
skipping...\n"); 557 + return 0; 558 + } 559 + 560 + for (i = 0; i < chain->nr; i++) { 561 + u64 ip; 562 + struct addr_location al; 563 + 564 + if (callchain_param.order == ORDER_CALLEE) 565 + ip = chain->ips[i]; 566 + else 567 + ip = chain->ips[chain->nr - i - 1]; 568 + 569 + if (ip >= PERF_CONTEXT_MAX) { 570 + switch (ip) { 571 + case PERF_CONTEXT_HV: 572 + cpumode = PERF_RECORD_MISC_HYPERVISOR; 573 + break; 574 + case PERF_CONTEXT_KERNEL: 575 + cpumode = PERF_RECORD_MISC_KERNEL; 576 + break; 577 + case PERF_CONTEXT_USER: 578 + cpumode = PERF_RECORD_MISC_USER; 579 + break; 580 + default: 581 + pr_debug("invalid callchain context: " 582 + "%"PRId64"\n", (s64) ip); 583 + /* 584 + * It seems the callchain is corrupted. 585 + * Discard all. 586 + */ 587 + callchain_cursor_reset(&callchain_cursor); 588 + return 0; 589 + } 590 + continue; 591 + } 592 + 593 + al.filtered = false; 594 + thread__find_addr_location(thread, machine, cpumode, 595 + MAP__FUNCTION, ip, &al, NULL); 596 + if (al.sym != NULL) { 597 + if (sort__has_parent && !*parent && 598 + symbol__match_parent_regex(al.sym)) 599 + *parent = al.sym; 600 + if (!symbol_conf.use_callchain) 601 + break; 602 + } 603 + 604 + err = callchain_cursor_append(&callchain_cursor, 605 + ip, al.map, al.sym); 606 + if (err) 607 + return err; 608 + } 609 + 610 + return 0; 611 + } 612 + 613 + static int unwind_entry(struct unwind_entry *entry, void *arg) 614 + { 615 + struct callchain_cursor *cursor = arg; 616 + return callchain_cursor_append(cursor, entry->ip, 617 + entry->map, entry->sym); 618 + } 619 + 620 + int machine__resolve_callchain(struct machine *machine, 621 + struct perf_evsel *evsel, 622 + struct thread *thread, 623 + struct perf_sample *sample, 624 + struct symbol **parent) 625 + 626 + { 627 + int ret; 628 + 629 + callchain_cursor_reset(&callchain_cursor); 630 + 631 + ret = machine__resolve_callchain_sample(machine, thread, 632 + sample->callchain, parent); 633 + if (ret) 634 + return ret; 635 + 636 + /* Can we do 
dwarf post unwind? */ 637 + if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && 638 + (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) 639 + return 0; 640 + 641 + /* Bail out if nothing was captured. */ 642 + if ((!sample->user_regs.regs) || 643 + (!sample->user_stack.size)) 644 + return 0; 645 + 646 + return unwind__get_entries(unwind_entry, &callchain_cursor, machine, 647 + thread, evsel->attr.sample_regs_user, 648 + sample); 649 + 1020 650 }
+6 -5
tools/perf/util/machine.h
··· 61 61 62 62 int machine__init(struct machine *machine, const char *root_dir, pid_t pid); 63 63 void machine__exit(struct machine *machine); 64 + void machine__delete_dead_threads(struct machine *machine); 65 + void machine__delete_threads(struct machine *machine); 64 66 void machine__delete(struct machine *machine); 65 - 66 67 67 68 struct branch_info *machine__resolve_bstack(struct machine *machine, 68 69 struct thread *thread, ··· 130 129 int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 131 130 symbol_filter_t filter); 132 131 133 - size_t machine__fprintf_dsos_buildid(struct machine *machine, 134 - FILE *fp, bool with_hits); 132 + size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, 133 + bool (skip)(struct dso *dso, int parm), int parm); 135 134 size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); 136 - size_t machines__fprintf_dsos_buildid(struct rb_root *machines, 137 - FILE *fp, bool with_hits); 135 + size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, 136 + bool (skip)(struct dso *dso, int parm), int parm); 138 137 139 138 void machine__destroy_kernel_maps(struct machine *machine); 140 139 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
+34 -224
tools/perf/util/session.c
··· 16 16 #include "cpumap.h" 17 17 #include "event-parse.h" 18 18 #include "perf_regs.h" 19 - #include "unwind.h" 20 19 #include "vdso.h" 21 20 22 21 static int perf_session__open(struct perf_session *self, bool force) ··· 127 128 goto out; 128 129 129 130 memcpy(self->filename, filename, len); 130 - /* 131 - * On 64bit we can mmap the data file in one go. No need for tiny mmap 132 - * slices. On 32bit we use 32MB. 133 - */ 134 - #if BITS_PER_LONG == 64 135 - self->mmap_window = ULLONG_MAX; 136 - #else 137 - self->mmap_window = 32 * 1024 * 1024ULL; 138 - #endif 139 131 self->machines = RB_ROOT; 140 132 self->repipe = repipe; 141 133 INIT_LIST_HEAD(&self->ordered_samples.samples); ··· 161 171 return NULL; 162 172 } 163 173 164 - static void machine__delete_dead_threads(struct machine *machine) 165 - { 166 - struct thread *n, *t; 167 - 168 - list_for_each_entry_safe(t, n, &machine->dead_threads, node) { 169 - list_del(&t->node); 170 - thread__delete(t); 171 - } 172 - } 173 - 174 174 static void perf_session__delete_dead_threads(struct perf_session *session) 175 175 { 176 176 machine__delete_dead_threads(&session->host_machine); 177 - } 178 - 179 - static void machine__delete_threads(struct machine *self) 180 - { 181 - struct rb_node *nd = rb_first(&self->threads); 182 - 183 - while (nd) { 184 - struct thread *t = rb_entry(nd, struct thread, rb_node); 185 - 186 - rb_erase(&t->rb_node, &self->threads); 187 - nd = rb_next(nd); 188 - thread__delete(t); 189 - } 190 177 } 191 178 192 179 static void perf_session__delete_threads(struct perf_session *session) ··· 171 204 machine__delete_threads(&session->host_machine); 172 205 } 173 206 207 + static void perf_session_env__delete(struct perf_session_env *env) 208 + { 209 + free(env->hostname); 210 + free(env->os_release); 211 + free(env->version); 212 + free(env->arch); 213 + free(env->cpu_desc); 214 + free(env->cpuid); 215 + 216 + free(env->cmdline); 217 + free(env->sibling_cores); 218 + free(env->sibling_threads); 219 + 
free(env->numa_nodes); 220 + free(env->pmu_mappings); 221 + } 222 + 174 223 void perf_session__delete(struct perf_session *self) 175 224 { 176 225 perf_session__destroy_kernel_maps(self); 177 226 perf_session__delete_dead_threads(self); 178 227 perf_session__delete_threads(self); 228 + perf_session_env__delete(&self->header.env); 179 229 machine__exit(&self->host_machine); 180 230 close(self->fd); 181 231 free(self); 182 232 vdso__exit(); 183 - } 184 - 185 - void machine__remove_thread(struct machine *self, struct thread *th) 186 - { 187 - self->last_match = NULL; 188 - rb_erase(&th->rb_node, &self->threads); 189 - /* 190 - * We may have references to this thread, for instance in some hist_entry 191 - * instances, so just move them to a separate list. 192 - */ 193 - list_add_tail(&th->node, &self->dead_threads); 194 - } 195 - 196 - static bool symbol__match_parent_regex(struct symbol *sym) 197 - { 198 - if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 199 - return 1; 200 - 201 - return 0; 202 - } 203 - 204 - static const u8 cpumodes[] = { 205 - PERF_RECORD_MISC_USER, 206 - PERF_RECORD_MISC_KERNEL, 207 - PERF_RECORD_MISC_GUEST_USER, 208 - PERF_RECORD_MISC_GUEST_KERNEL 209 - }; 210 - #define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) 211 - 212 - static void ip__resolve_ams(struct machine *self, struct thread *thread, 213 - struct addr_map_symbol *ams, 214 - u64 ip) 215 - { 216 - struct addr_location al; 217 - size_t i; 218 - u8 m; 219 - 220 - memset(&al, 0, sizeof(al)); 221 - 222 - for (i = 0; i < NCPUMODES; i++) { 223 - m = cpumodes[i]; 224 - /* 225 - * We cannot use the header.misc hint to determine whether a 226 - * branch stack address is user, kernel, guest, hypervisor. 227 - * Branches may straddle the kernel/user/hypervisor boundaries. 
228 - * Thus, we have to try consecutively until we find a match 229 - * or else, the symbol is unknown 230 - */ 231 - thread__find_addr_location(thread, self, m, MAP__FUNCTION, 232 - ip, &al, NULL); 233 - if (al.sym) 234 - goto found; 235 - } 236 - found: 237 - ams->addr = ip; 238 - ams->al_addr = al.addr; 239 - ams->sym = al.sym; 240 - ams->map = al.map; 241 - } 242 - 243 - struct branch_info *machine__resolve_bstack(struct machine *self, 244 - struct thread *thr, 245 - struct branch_stack *bs) 246 - { 247 - struct branch_info *bi; 248 - unsigned int i; 249 - 250 - bi = calloc(bs->nr, sizeof(struct branch_info)); 251 - if (!bi) 252 - return NULL; 253 - 254 - for (i = 0; i < bs->nr; i++) { 255 - ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to); 256 - ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from); 257 - bi[i].flags = bs->entries[i].flags; 258 - } 259 - return bi; 260 - } 261 - 262 - static int machine__resolve_callchain_sample(struct machine *machine, 263 - struct thread *thread, 264 - struct ip_callchain *chain, 265 - struct symbol **parent) 266 - 267 - { 268 - u8 cpumode = PERF_RECORD_MISC_USER; 269 - unsigned int i; 270 - int err; 271 - 272 - callchain_cursor_reset(&callchain_cursor); 273 - 274 - if (chain->nr > PERF_MAX_STACK_DEPTH) { 275 - pr_warning("corrupted callchain. 
skipping...\n"); 276 - return 0; 277 - } 278 - 279 - for (i = 0; i < chain->nr; i++) { 280 - u64 ip; 281 - struct addr_location al; 282 - 283 - if (callchain_param.order == ORDER_CALLEE) 284 - ip = chain->ips[i]; 285 - else 286 - ip = chain->ips[chain->nr - i - 1]; 287 - 288 - if (ip >= PERF_CONTEXT_MAX) { 289 - switch (ip) { 290 - case PERF_CONTEXT_HV: 291 - cpumode = PERF_RECORD_MISC_HYPERVISOR; 292 - break; 293 - case PERF_CONTEXT_KERNEL: 294 - cpumode = PERF_RECORD_MISC_KERNEL; 295 - break; 296 - case PERF_CONTEXT_USER: 297 - cpumode = PERF_RECORD_MISC_USER; 298 - break; 299 - default: 300 - pr_debug("invalid callchain context: " 301 - "%"PRId64"\n", (s64) ip); 302 - /* 303 - * It seems the callchain is corrupted. 304 - * Discard all. 305 - */ 306 - callchain_cursor_reset(&callchain_cursor); 307 - return 0; 308 - } 309 - continue; 310 - } 311 - 312 - al.filtered = false; 313 - thread__find_addr_location(thread, machine, cpumode, 314 - MAP__FUNCTION, ip, &al, NULL); 315 - if (al.sym != NULL) { 316 - if (sort__has_parent && !*parent && 317 - symbol__match_parent_regex(al.sym)) 318 - *parent = al.sym; 319 - if (!symbol_conf.use_callchain) 320 - break; 321 - } 322 - 323 - err = callchain_cursor_append(&callchain_cursor, 324 - ip, al.map, al.sym); 325 - if (err) 326 - return err; 327 - } 328 - 329 - return 0; 330 - } 331 - 332 - static int unwind_entry(struct unwind_entry *entry, void *arg) 333 - { 334 - struct callchain_cursor *cursor = arg; 335 - return callchain_cursor_append(cursor, entry->ip, 336 - entry->map, entry->sym); 337 - } 338 - 339 - int machine__resolve_callchain(struct machine *machine, 340 - struct perf_evsel *evsel, 341 - struct thread *thread, 342 - struct perf_sample *sample, 343 - struct symbol **parent) 344 - 345 - { 346 - int ret; 347 - 348 - callchain_cursor_reset(&callchain_cursor); 349 - 350 - ret = machine__resolve_callchain_sample(machine, thread, 351 - sample->callchain, parent); 352 - if (ret) 353 - return ret; 354 - 355 - /* Can we do 
dwarf post unwind? */ 356 - if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && 357 - (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) 358 - return 0; 359 - 360 - /* Bail out if nothing was captured. */ 361 - if ((!sample->user_regs.regs) || 362 - (!sample->user_stack.size)) 363 - return 0; 364 - 365 - return unwind__get_entries(unwind_entry, &callchain_cursor, machine, 366 - thread, evsel->attr.sample_regs_user, 367 - sample); 368 - 369 233 } 370 234 371 235 static int process_event_synth_tracing_data_stub(union perf_event *event ··· 1167 1369 return event; 1168 1370 } 1169 1371 1372 + /* 1373 + * On 64bit we can mmap the data file in one go. No need for tiny mmap 1374 + * slices. On 32bit we use 32MB. 1375 + */ 1376 + #if BITS_PER_LONG == 64 1377 + #define MMAP_SIZE ULLONG_MAX 1378 + #define NUM_MMAPS 1 1379 + #else 1380 + #define MMAP_SIZE (32 * 1024 * 1024ULL) 1381 + #define NUM_MMAPS 128 1382 + #endif 1383 + 1170 1384 int __perf_session__process_events(struct perf_session *session, 1171 1385 u64 data_offset, u64 data_size, 1172 1386 u64 file_size, struct perf_tool *tool) ··· 1186 1376 u64 head, page_offset, file_offset, file_pos, progress_next; 1187 1377 int err, mmap_prot, mmap_flags, map_idx = 0; 1188 1378 size_t mmap_size; 1189 - char *buf, *mmaps[8]; 1379 + char *buf, *mmaps[NUM_MMAPS]; 1190 1380 union perf_event *event; 1191 1381 uint32_t size; 1192 1382 ··· 1201 1391 1202 1392 progress_next = file_size / 16; 1203 1393 1204 - mmap_size = session->mmap_window; 1394 + mmap_size = MMAP_SIZE; 1205 1395 if (mmap_size > file_size) 1206 1396 mmap_size = file_size; 1207 1397 ··· 1342 1532 } 1343 1533 1344 1534 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1345 - bool with_hits) 1535 + bool (skip)(struct dso *dso, int parm), int parm) 1346 1536 { 1347 - size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); 1348 - return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); 1537 + 
size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm); 1538 + return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); 1349 1539 } 1350 1540 1351 1541 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
+2 -3
tools/perf/util/session.h
··· 30 30 struct perf_session { 31 31 struct perf_header header; 32 32 unsigned long size; 33 - unsigned long mmap_window; 34 33 struct machine host_machine; 35 34 struct rb_root machines; 36 35 struct perf_evlist *evlist; ··· 115 116 116 117 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); 117 118 118 - size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, 119 - FILE *fp, bool with_hits); 119 + size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, 120 + bool (fn)(struct dso *dso, int parm), int parm); 120 121 121 122 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); 122 123
+1 -4
tools/perf/util/sort.h
··· 55 55 struct hist_entry_diff { 56 56 bool computed; 57 57 58 - /* PERF_HPP__DISPL */ 59 - int displacement; 60 - 61 58 /* PERF_HPP__DELTA */ 62 59 double period_ratio_delta; 63 60 ··· 115 118 return NULL; 116 119 } 117 120 118 - static inline void hist__entry_add_pair(struct hist_entry *he, 121 + static inline void hist_entry__add_pair(struct hist_entry *he, 119 122 struct hist_entry *pair) 120 123 { 121 124 list_add_tail(&he->pairs.head, &pair->pairs.node);
+11
tools/perf/util/symbol-elf.c
··· 718 718 sym.st_value); 719 719 used_opd = true; 720 720 } 721 + /* 722 + * When loading symbols in a data mapping, ABS symbols (which 723 + * has a value of SHN_ABS in its st_shndx) failed at 724 + * elf_getscn(). And it marks the loading as a failure so 725 + * already loaded symbols cannot be fixed up. 726 + * 727 + * I'm not sure what should be done. Just ignore them for now. 728 + * - Namhyung Kim 729 + */ 730 + if (sym.st_shndx == SHN_ABS) 731 + continue; 721 732 722 733 sec = elf_getscn(runtime_ss->elf, sym.st_shndx); 723 734 if (!sec)
+4 -518
tools/perf/util/symbol.c
··· 28 28 symbol_filter_t filter); 29 29 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, 30 30 symbol_filter_t filter); 31 - static int vmlinux_path__nr_entries; 32 - static char **vmlinux_path; 31 + int vmlinux_path__nr_entries; 32 + char **vmlinux_path; 33 33 34 34 struct symbol_conf symbol_conf = { 35 35 .exclude_other = true, ··· 200 200 * last map final address. 201 201 */ 202 202 curr->end = ~0ULL; 203 - } 204 - 205 - static void map_groups__fixup_end(struct map_groups *mg) 206 - { 207 - int i; 208 - for (i = 0; i < MAP__NR_TYPES; ++i) 209 - __map_groups__fixup_end(mg, i); 210 203 } 211 204 212 205 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) ··· 645 652 return count + moved; 646 653 } 647 654 648 - static bool symbol__restricted_filename(const char *filename, 649 - const char *restricted_filename) 655 + bool symbol__restricted_filename(const char *filename, 656 + const char *restricted_filename) 650 657 { 651 658 bool restricted = false; 652 659 ··· 880 887 return NULL; 881 888 } 882 889 883 - static int map_groups__set_modules_path_dir(struct map_groups *mg, 884 - const char *dir_name) 885 - { 886 - struct dirent *dent; 887 - DIR *dir = opendir(dir_name); 888 - int ret = 0; 889 - 890 - if (!dir) { 891 - pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 892 - return -1; 893 - } 894 - 895 - while ((dent = readdir(dir)) != NULL) { 896 - char path[PATH_MAX]; 897 - struct stat st; 898 - 899 - /*sshfs might return bad dent->d_type, so we have to stat*/ 900 - snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); 901 - if (stat(path, &st)) 902 - continue; 903 - 904 - if (S_ISDIR(st.st_mode)) { 905 - if (!strcmp(dent->d_name, ".") || 906 - !strcmp(dent->d_name, "..")) 907 - continue; 908 - 909 - ret = map_groups__set_modules_path_dir(mg, path); 910 - if (ret < 0) 911 - goto out; 912 - } else { 913 - char *dot = strrchr(dent->d_name, '.'), 914 - dso_name[PATH_MAX]; 915 - struct map *map; 916 - 
char *long_name; 917 - 918 - if (dot == NULL || strcmp(dot, ".ko")) 919 - continue; 920 - snprintf(dso_name, sizeof(dso_name), "[%.*s]", 921 - (int)(dot - dent->d_name), dent->d_name); 922 - 923 - strxfrchar(dso_name, '-', '_'); 924 - map = map_groups__find_by_name(mg, MAP__FUNCTION, 925 - dso_name); 926 - if (map == NULL) 927 - continue; 928 - 929 - long_name = strdup(path); 930 - if (long_name == NULL) { 931 - ret = -1; 932 - goto out; 933 - } 934 - dso__set_long_name(map->dso, long_name); 935 - map->dso->lname_alloc = 1; 936 - dso__kernel_module_get_build_id(map->dso, ""); 937 - } 938 - } 939 - 940 - out: 941 - closedir(dir); 942 - return ret; 943 - } 944 - 945 - static char *get_kernel_version(const char *root_dir) 946 - { 947 - char version[PATH_MAX]; 948 - FILE *file; 949 - char *name, *tmp; 950 - const char *prefix = "Linux version "; 951 - 952 - sprintf(version, "%s/proc/version", root_dir); 953 - file = fopen(version, "r"); 954 - if (!file) 955 - return NULL; 956 - 957 - version[0] = '\0'; 958 - tmp = fgets(version, sizeof(version), file); 959 - fclose(file); 960 - 961 - name = strstr(version, prefix); 962 - if (!name) 963 - return NULL; 964 - name += strlen(prefix); 965 - tmp = strchr(name, ' '); 966 - if (tmp) 967 - *tmp = '\0'; 968 - 969 - return strdup(name); 970 - } 971 - 972 - static int machine__set_modules_path(struct machine *machine) 973 - { 974 - char *version; 975 - char modules_path[PATH_MAX]; 976 - 977 - version = get_kernel_version(machine->root_dir); 978 - if (!version) 979 - return -1; 980 - 981 - snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", 982 - machine->root_dir, version); 983 - free(version); 984 - 985 - return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); 986 - } 987 - 988 - struct map *machine__new_module(struct machine *machine, u64 start, 989 - const char *filename) 990 - { 991 - struct map *map; 992 - struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); 993 - 994 - if 
(dso == NULL) 995 - return NULL; 996 - 997 - map = map__new2(start, dso, MAP__FUNCTION); 998 - if (map == NULL) 999 - return NULL; 1000 - 1001 - if (machine__is_host(machine)) 1002 - dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 1003 - else 1004 - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 1005 - map_groups__insert(&machine->kmaps, map); 1006 - return map; 1007 - } 1008 - 1009 - static int machine__create_modules(struct machine *machine) 1010 - { 1011 - char *line = NULL; 1012 - size_t n; 1013 - FILE *file; 1014 - struct map *map; 1015 - const char *modules; 1016 - char path[PATH_MAX]; 1017 - 1018 - if (machine__is_default_guest(machine)) 1019 - modules = symbol_conf.default_guest_modules; 1020 - else { 1021 - sprintf(path, "%s/proc/modules", machine->root_dir); 1022 - modules = path; 1023 - } 1024 - 1025 - if (symbol__restricted_filename(path, "/proc/modules")) 1026 - return -1; 1027 - 1028 - file = fopen(modules, "r"); 1029 - if (file == NULL) 1030 - return -1; 1031 - 1032 - while (!feof(file)) { 1033 - char name[PATH_MAX]; 1034 - u64 start; 1035 - char *sep; 1036 - int line_len; 1037 - 1038 - line_len = getline(&line, &n, file); 1039 - if (line_len < 0) 1040 - break; 1041 - 1042 - if (!line) 1043 - goto out_failure; 1044 - 1045 - line[--line_len] = '\0'; /* \n */ 1046 - 1047 - sep = strrchr(line, 'x'); 1048 - if (sep == NULL) 1049 - continue; 1050 - 1051 - hex2u64(sep + 1, &start); 1052 - 1053 - sep = strchr(line, ' '); 1054 - if (sep == NULL) 1055 - continue; 1056 - 1057 - *sep = '\0'; 1058 - 1059 - snprintf(name, sizeof(name), "[%s]", line); 1060 - map = machine__new_module(machine, start, name); 1061 - if (map == NULL) 1062 - goto out_delete_line; 1063 - dso__kernel_module_get_build_id(map->dso, machine->root_dir); 1064 - } 1065 - 1066 - free(line); 1067 - fclose(file); 1068 - 1069 - return machine__set_modules_path(machine); 1070 - 1071 - out_delete_line: 1072 - free(line); 1073 - out_failure: 1074 - return -1; 1075 - } 1076 - 1077 890 int 
dso__load_vmlinux(struct dso *dso, struct map *map, 1078 891 const char *vmlinux, symbol_filter_t filter) 1079 892 { ··· 1099 1300 return err; 1100 1301 } 1101 1302 1102 - size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) 1103 - { 1104 - struct rb_node *nd; 1105 - size_t ret = 0; 1106 - 1107 - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 1108 - struct machine *pos = rb_entry(nd, struct machine, rb_node); 1109 - ret += __dsos__fprintf(&pos->kernel_dsos, fp); 1110 - ret += __dsos__fprintf(&pos->user_dsos, fp); 1111 - } 1112 - 1113 - return ret; 1114 - } 1115 - 1116 - size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, 1117 - bool with_hits) 1118 - { 1119 - return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + 1120 - __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); 1121 - } 1122 - 1123 - size_t machines__fprintf_dsos_buildid(struct rb_root *machines, 1124 - FILE *fp, bool with_hits) 1125 - { 1126 - struct rb_node *nd; 1127 - size_t ret = 0; 1128 - 1129 - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 1130 - struct machine *pos = rb_entry(nd, struct machine, rb_node); 1131 - ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); 1132 - } 1133 - return ret; 1134 - } 1135 - 1136 - static struct dso *machine__get_kernel(struct machine *machine) 1137 - { 1138 - const char *vmlinux_name = NULL; 1139 - struct dso *kernel; 1140 - 1141 - if (machine__is_host(machine)) { 1142 - vmlinux_name = symbol_conf.vmlinux_name; 1143 - if (!vmlinux_name) 1144 - vmlinux_name = "[kernel.kallsyms]"; 1145 - 1146 - kernel = dso__kernel_findnew(machine, vmlinux_name, 1147 - "[kernel]", 1148 - DSO_TYPE_KERNEL); 1149 - } else { 1150 - char bf[PATH_MAX]; 1151 - 1152 - if (machine__is_default_guest(machine)) 1153 - vmlinux_name = symbol_conf.default_guest_vmlinux_name; 1154 - if (!vmlinux_name) 1155 - vmlinux_name = machine__mmap_name(machine, bf, 1156 - sizeof(bf)); 1157 - 1158 - kernel = 
dso__kernel_findnew(machine, vmlinux_name, 1159 - "[guest.kernel]", 1160 - DSO_TYPE_GUEST_KERNEL); 1161 - } 1162 - 1163 - if (kernel != NULL && (!kernel->has_build_id)) 1164 - dso__read_running_kernel_build_id(kernel, machine); 1165 - 1166 - return kernel; 1167 - } 1168 - 1169 - struct process_args { 1170 - u64 start; 1171 - }; 1172 - 1173 - static int symbol__in_kernel(void *arg, const char *name, 1174 - char type __maybe_unused, u64 start) 1175 - { 1176 - struct process_args *args = arg; 1177 - 1178 - if (strchr(name, '[')) 1179 - return 0; 1180 - 1181 - args->start = start; 1182 - return 1; 1183 - } 1184 - 1185 - /* Figure out the start address of kernel map from /proc/kallsyms */ 1186 - static u64 machine__get_kernel_start_addr(struct machine *machine) 1187 - { 1188 - const char *filename; 1189 - char path[PATH_MAX]; 1190 - struct process_args args; 1191 - 1192 - if (machine__is_host(machine)) { 1193 - filename = "/proc/kallsyms"; 1194 - } else { 1195 - if (machine__is_default_guest(machine)) 1196 - filename = (char *)symbol_conf.default_guest_kallsyms; 1197 - else { 1198 - sprintf(path, "%s/proc/kallsyms", machine->root_dir); 1199 - filename = path; 1200 - } 1201 - } 1202 - 1203 - if (symbol__restricted_filename(filename, "/proc/kallsyms")) 1204 - return 0; 1205 - 1206 - if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) 1207 - return 0; 1208 - 1209 - return args.start; 1210 - } 1211 - 1212 - int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 1213 - { 1214 - enum map_type type; 1215 - u64 start = machine__get_kernel_start_addr(machine); 1216 - 1217 - for (type = 0; type < MAP__NR_TYPES; ++type) { 1218 - struct kmap *kmap; 1219 - 1220 - machine->vmlinux_maps[type] = map__new2(start, kernel, type); 1221 - if (machine->vmlinux_maps[type] == NULL) 1222 - return -1; 1223 - 1224 - machine->vmlinux_maps[type]->map_ip = 1225 - machine->vmlinux_maps[type]->unmap_ip = 1226 - identity__map_ip; 1227 - kmap = 
map__kmap(machine->vmlinux_maps[type]); 1228 - kmap->kmaps = &machine->kmaps; 1229 - map_groups__insert(&machine->kmaps, 1230 - machine->vmlinux_maps[type]); 1231 - } 1232 - 1233 - return 0; 1234 - } 1235 - 1236 - void machine__destroy_kernel_maps(struct machine *machine) 1237 - { 1238 - enum map_type type; 1239 - 1240 - for (type = 0; type < MAP__NR_TYPES; ++type) { 1241 - struct kmap *kmap; 1242 - 1243 - if (machine->vmlinux_maps[type] == NULL) 1244 - continue; 1245 - 1246 - kmap = map__kmap(machine->vmlinux_maps[type]); 1247 - map_groups__remove(&machine->kmaps, 1248 - machine->vmlinux_maps[type]); 1249 - if (kmap->ref_reloc_sym) { 1250 - /* 1251 - * ref_reloc_sym is shared among all maps, so free just 1252 - * on one of them. 1253 - */ 1254 - if (type == MAP__FUNCTION) { 1255 - free((char *)kmap->ref_reloc_sym->name); 1256 - kmap->ref_reloc_sym->name = NULL; 1257 - free(kmap->ref_reloc_sym); 1258 - } 1259 - kmap->ref_reloc_sym = NULL; 1260 - } 1261 - 1262 - map__delete(machine->vmlinux_maps[type]); 1263 - machine->vmlinux_maps[type] = NULL; 1264 - } 1265 - } 1266 - 1267 - int machine__create_kernel_maps(struct machine *machine) 1268 - { 1269 - struct dso *kernel = machine__get_kernel(machine); 1270 - 1271 - if (kernel == NULL || 1272 - __machine__create_kernel_maps(machine, kernel) < 0) 1273 - return -1; 1274 - 1275 - if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 1276 - if (machine__is_host(machine)) 1277 - pr_debug("Problems creating module maps, " 1278 - "continuing anyway...\n"); 1279 - else 1280 - pr_debug("Problems creating module maps for guest %d, " 1281 - "continuing anyway...\n", machine->pid); 1282 - } 1283 - 1284 - /* 1285 - * Now that we have all the maps created, just set the ->end of them: 1286 - */ 1287 - map_groups__fixup_end(&machine->kmaps); 1288 - return 0; 1289 - } 1290 - 1291 1303 static void vmlinux_path__exit(void) 1292 1304 { 1293 1305 while (--vmlinux_path__nr_entries >= 0) { ··· 1157 1547 out_fail: 1158 1548 
vmlinux_path__exit(); 1159 1549 return -1; 1160 - } 1161 - 1162 - size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) 1163 - { 1164 - int i; 1165 - size_t printed = 0; 1166 - struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; 1167 - 1168 - if (kdso->has_build_id) { 1169 - char filename[PATH_MAX]; 1170 - if (dso__build_id_filename(kdso, filename, sizeof(filename))) 1171 - printed += fprintf(fp, "[0] %s\n", filename); 1172 - } 1173 - 1174 - for (i = 0; i < vmlinux_path__nr_entries; ++i) 1175 - printed += fprintf(fp, "[%d] %s\n", 1176 - i + kdso->has_build_id, vmlinux_path[i]); 1177 - 1178 - return printed; 1179 1550 } 1180 1551 1181 1552 static int setup_list(struct strlist **list, const char *list_str, ··· 1261 1670 vmlinux_path__exit(); 1262 1671 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 1263 1672 symbol_conf.initialized = false; 1264 - } 1265 - 1266 - int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) 1267 - { 1268 - struct machine *machine = machines__findnew(machines, pid); 1269 - 1270 - if (machine == NULL) 1271 - return -1; 1272 - 1273 - return machine__create_kernel_maps(machine); 1274 - } 1275 - 1276 - int machines__create_guest_kernel_maps(struct rb_root *machines) 1277 - { 1278 - int ret = 0; 1279 - struct dirent **namelist = NULL; 1280 - int i, items = 0; 1281 - char path[PATH_MAX]; 1282 - pid_t pid; 1283 - char *endp; 1284 - 1285 - if (symbol_conf.default_guest_vmlinux_name || 1286 - symbol_conf.default_guest_modules || 1287 - symbol_conf.default_guest_kallsyms) { 1288 - machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); 1289 - } 1290 - 1291 - if (symbol_conf.guestmount) { 1292 - items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 1293 - if (items <= 0) 1294 - return -ENOENT; 1295 - for (i = 0; i < items; i++) { 1296 - if (!isdigit(namelist[i]->d_name[0])) { 1297 - /* Filter out . and .. 
*/ 1298 - continue; 1299 - } 1300 - pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); 1301 - if ((*endp != '\0') || 1302 - (endp == namelist[i]->d_name) || 1303 - (errno == ERANGE)) { 1304 - pr_debug("invalid directory (%s). Skipping.\n", 1305 - namelist[i]->d_name); 1306 - continue; 1307 - } 1308 - sprintf(path, "%s/%s/proc/kallsyms", 1309 - symbol_conf.guestmount, 1310 - namelist[i]->d_name); 1311 - ret = access(path, R_OK); 1312 - if (ret) { 1313 - pr_debug("Can't access file %s\n", path); 1314 - goto failure; 1315 - } 1316 - machines__create_kernel_maps(machines, pid); 1317 - } 1318 - failure: 1319 - free(namelist); 1320 - } 1321 - 1322 - return ret; 1323 - } 1324 - 1325 - void machines__destroy_guest_kernel_maps(struct rb_root *machines) 1326 - { 1327 - struct rb_node *next = rb_first(machines); 1328 - 1329 - while (next) { 1330 - struct machine *pos = rb_entry(next, struct machine, rb_node); 1331 - 1332 - next = rb_next(&pos->rb_node); 1333 - rb_erase(&pos->rb_node, machines); 1334 - machine__delete(pos); 1335 - } 1336 - } 1337 - 1338 - int machine__load_kallsyms(struct machine *machine, const char *filename, 1339 - enum map_type type, symbol_filter_t filter) 1340 - { 1341 - struct map *map = machine->vmlinux_maps[type]; 1342 - int ret = dso__load_kallsyms(map->dso, filename, map, filter); 1343 - 1344 - if (ret > 0) { 1345 - dso__set_loaded(map->dso, type); 1346 - /* 1347 - * Since /proc/kallsyms will have multiple sessions for the 1348 - * kernel, with modules between them, fixup the end of all 1349 - * sections. 
1350 - */ 1351 - __map_groups__fixup_end(&machine->kmaps, type); 1352 - } 1353 - 1354 - return ret; 1355 - } 1356 - 1357 - int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 1358 - symbol_filter_t filter) 1359 - { 1360 - struct map *map = machine->vmlinux_maps[type]; 1361 - int ret = dso__load_vmlinux_path(map->dso, map, filter); 1362 - 1363 - if (ret > 0) { 1364 - dso__set_loaded(map->dso, type); 1365 - map__reloc_vmlinux(map); 1366 - } 1367 - 1368 - return ret; 1369 1673 }
+4
tools/perf/util/symbol.h
··· 120 120 }; 121 121 122 122 extern struct symbol_conf symbol_conf; 123 + extern int vmlinux_path__nr_entries; 124 + extern char **vmlinux_path; 123 125 124 126 static inline void *symbol__priv(struct symbol *sym) 125 127 { ··· 225 223 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); 226 224 size_t symbol__fprintf(struct symbol *sym, FILE *fp); 227 225 bool symbol_type__is_a(char symbol_type, enum map_type map_type); 226 + bool symbol__restricted_filename(const char *filename, 227 + const char *restricted_filename); 228 228 229 229 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 230 230 struct symsrc *runtime_ss, symbol_filter_t filter,
+3 -17
tools/perf/util/thread.c
··· 54 54 return self->comm_len; 55 55 } 56 56 57 - static size_t thread__fprintf(struct thread *self, FILE *fp) 57 + size_t thread__fprintf(struct thread *thread, FILE *fp) 58 58 { 59 - return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) + 60 - map_groups__fprintf(&self->mg, verbose, fp); 59 + return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) + 60 + map_groups__fprintf(&thread->mg, verbose, fp); 61 61 } 62 62 63 63 void thread__insert_map(struct thread *self, struct map *map) ··· 83 83 if (map_groups__clone(&self->mg, &parent->mg, i) < 0) 84 84 return -ENOMEM; 85 85 return 0; 86 - } 87 - 88 - size_t machine__fprintf(struct machine *machine, FILE *fp) 89 - { 90 - size_t ret = 0; 91 - struct rb_node *nd; 92 - 93 - for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { 94 - struct thread *pos = rb_entry(nd, struct thread, rb_node); 95 - 96 - ret += thread__fprintf(pos, fp); 97 - } 98 - 99 - return ret; 100 86 }
+1
tools/perf/util/thread.h
··· 30 30 int thread__comm_len(struct thread *self); 31 31 void thread__insert_map(struct thread *self, struct map *map); 32 32 int thread__fork(struct thread *self, struct thread *parent); 33 + size_t thread__fprintf(struct thread *thread, FILE *fp); 33 34 34 35 static inline struct map *thread__find_map(struct thread *self, 35 36 enum map_type type, u64 addr)
+12 -10
tools/perf/util/top.c
··· 26 26 float samples_per_sec = top->samples / top->delay_secs; 27 27 float ksamples_per_sec = top->kernel_samples / top->delay_secs; 28 28 float esamples_percent = (100.0 * top->exact_samples) / top->samples; 29 + struct perf_record_opts *opts = &top->record_opts; 30 + struct perf_target *target = &opts->target; 29 31 size_t ret = 0; 30 32 31 33 if (!perf_guest) { ··· 63 61 struct perf_evsel *first = perf_evlist__first(top->evlist); 64 62 ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", 65 63 (uint64_t)first->attr.sample_period, 66 - top->freq ? "Hz" : ""); 64 + opts->freq ? "Hz" : ""); 67 65 } 68 66 69 67 ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel)); 70 68 71 69 ret += SNPRINTF(bf + ret, size - ret, "], "); 72 70 73 - if (top->target.pid) 71 + if (target->pid) 74 72 ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", 75 - top->target.pid); 76 - else if (top->target.tid) 73 + target->pid); 74 + else if (target->tid) 77 75 ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", 78 - top->target.tid); 79 - else if (top->target.uid_str != NULL) 76 + target->tid); 77 + else if (target->uid_str != NULL) 80 78 ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", 81 - top->target.uid_str); 79 + target->uid_str); 82 80 else 83 81 ret += SNPRINTF(bf + ret, size - ret, " (all"); 84 82 85 - if (top->target.cpu_list) 83 + if (target->cpu_list) 86 84 ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", 87 85 top->evlist->cpus->nr > 1 ? "s" : "", 88 - top->target.cpu_list); 86 + target->cpu_list); 89 87 else { 90 - if (top->target.tid) 88 + if (target->tid) 91 89 ret += SNPRINTF(bf + ret, size - ret, ")"); 92 90 else 93 91 ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
+1 -7
tools/perf/util/top.h
··· 14 14 struct perf_top { 15 15 struct perf_tool tool; 16 16 struct perf_evlist *evlist; 17 - struct perf_target target; 17 + struct perf_record_opts record_opts; 18 18 /* 19 19 * Symbols will be added here in perf_event__process_sample and will 20 20 * get out after decayed. ··· 24 24 u64 exact_samples; 25 25 u64 guest_us_samples, guest_kernel_samples; 26 26 int print_entries, count_filter, delay_secs; 27 - int freq; 28 27 bool hide_kernel_symbols, hide_user_symbols, zero; 29 28 bool use_tui, use_stdio; 30 29 bool sort_has_symbols; 31 - bool dont_use_callchains; 32 30 bool kptr_restrict_warned; 33 31 bool vmlinux_warned; 34 - bool inherit; 35 - bool group; 36 32 bool sample_id_all_missing; 37 33 bool exclude_guest_missing; 38 34 bool dump_symtab; ··· 36 40 struct perf_evsel *sym_evsel; 37 41 struct perf_session *session; 38 42 struct winsize winsize; 39 - unsigned int mmap_pages; 40 - int default_interval; 41 43 int realtime_prio; 42 44 int sym_pcnt_filter; 43 45 const char *sym_filter;