Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

. Allow skipping problematic entries in 'perf test'.

. Fix some namespace problems in the event parsing routines.

. Add 'perf test' entry to make sure the python binding doesn't have
linking problems.

. Adjust 'perf test' attr tests verbosity levels.

. Make tools/perf build with GNU make v3.80, fix from Al Cooper.

. Do missing feature fallbacks in just one place, removing duplicated
code in multiple tools.

. Fix some memory leaks, from David Ahern.

. Fix segfault when drawing out-of-bounds jumps, from Frederik Deweerdt.

. Allow casting an array of char to string in 'perf probe', from
Hyeoncheol Lee.

. Add support for wildcard in tracepoint system name, from Jiri Olsa.

. Update FSF postal addresses to be URLs, from Jon Stanley.

. Add anonymous huge page recognition, from Joshua Zhu.

. Remove some needless feature test checks, from Namhyung Kim.

. Multiple improvements to the sort routines, from Namhyung Kim.

. Fix warning on '>=' operator in libtraceevent, from Namhyung Kim.

. Use ARRAY_SIZE instead of reinventing it in 'perf script' and 'perf kmem',
from Sasha Levin.

. Remove some redundant checks, from Sasha Levin.

. Test correct variable after allocation in libtraceevent, fix from Sasha Levin.

. Mark branch_info maps as referenced, fix from Stephane Eranian.

. Fix PMU format parsing test failure, from Sukadev Bhattiprolu.

. Fix possible (unlikely) buffer overflow, from Thomas Jarosch.

. Multiple 'perf script' fixes, from Tom Zanussi.

. Add missing field in PERF_RECORD_SAMPLE documentation, from Vince Weaver.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+1817 -1203
+2 -1
include/uapi/linux/perf_event.h
··· 579 579 * { u32 size; 580 580 * char data[size];}&& PERF_SAMPLE_RAW 581 581 * 582 - * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK 582 + * { u64 nr; 583 + * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK 583 584 * 584 585 * { u64 abi; # enum perf_sample_regs_abi 585 586 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+1 -2
kernel/events/uprobes.c
··· 901 901 } 902 902 903 903 mutex_unlock(uprobes_hash(inode)); 904 - if (uprobe) 905 - put_uprobe(uprobe); 904 + put_uprobe(uprobe); 906 905 } 907 906 908 907 static struct rb_node *
+6 -4
tools/lib/traceevent/event-parse.c
··· 13 13 * GNU Lesser General Public License for more details. 14 14 * 15 15 * You should have received a copy of the GNU Lesser General Public 16 - * License along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 16 + * License along with this program; if not, see <http://www.gnu.org/licenses> 18 17 * 19 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 19 * ··· 1462 1463 if (read_expect_type(EVENT_ITEM, &token)) 1463 1464 goto fail; 1464 1465 1465 - /* add signed type */ 1466 + if (strtoul(token, NULL, 0)) 1467 + field->flags |= FIELD_IS_SIGNED; 1466 1468 1467 1469 free_token(token); 1468 1470 if (read_expected(EVENT_OP, ";") < 0) ··· 1785 1785 strcmp(token, "/") == 0 || 1786 1786 strcmp(token, "<") == 0 || 1787 1787 strcmp(token, ">") == 0 || 1788 + strcmp(token, "<=") == 0 || 1789 + strcmp(token, ">=") == 0 || 1788 1790 strcmp(token, "==") == 0 || 1789 1791 strcmp(token, "!=") == 0) { 1790 1792 ··· 2483 2481 2484 2482 free_token(token); 2485 2483 arg = alloc_arg(); 2486 - if (!field) { 2484 + if (!arg) { 2487 2485 do_warning("%s: not enough memory!", __func__); 2488 2486 *tok = NULL; 2489 2487 return EVENT_ERROR;
+1 -2
tools/lib/traceevent/event-parse.h
··· 13 13 * GNU Lesser General Public License for more details. 14 14 * 15 15 * You should have received a copy of the GNU Lesser General Public 16 - * License along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 16 + * License along with this program; if not, see <http://www.gnu.org/licenses> 18 17 * 19 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 19 */
+1 -2
tools/lib/traceevent/event-utils.h
··· 13 13 * GNU Lesser General Public License for more details. 14 14 * 15 15 * You should have received a copy of the GNU Lesser General Public 16 - * License along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 16 + * License along with this program; if not, see <http://www.gnu.org/licenses> 18 17 * 19 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 19 */
+1 -2
tools/lib/traceevent/parse-filter.c
··· 13 13 * GNU Lesser General Public License for more details. 14 14 * 15 15 * You should have received a copy of the GNU Lesser General Public 16 - * License along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 16 + * License along with this program; if not, see <http://www.gnu.org/licenses> 18 17 * 19 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 19 */
+19
tools/lib/traceevent/parse-utils.c
··· 1 + /* 2 + * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> 3 + * 4 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU Lesser General Public 7 + * License as published by the Free Software Foundation; 8 + * version 2.1 of the License (not later!) 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU Lesser General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU Lesser General Public 16 + * License along with this program; if not, see <http://www.gnu.org/licenses> 17 + * 18 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 + */ 1 20 #include <stdio.h> 2 21 #include <stdlib.h> 3 22 #include <string.h>
+1 -2
tools/lib/traceevent/trace-seq.c
··· 13 13 * GNU Lesser General Public License for more details. 14 14 * 15 15 * You should have received a copy of the GNU Lesser General Public 16 - * License along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 16 + * License along with this program; if not, see <http://www.gnu.org/licenses> 18 17 * 19 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 19 */
+35 -3
tools/perf/Documentation/perf-report.txt
··· 57 57 58 58 -s:: 59 59 --sort=:: 60 - Sort by key(s): pid, comm, dso, symbol, parent, srcline. 60 + Sort histogram entries by given key(s) - multiple keys can be specified 61 + in CSV format. Following sort keys are available: 62 + pid, comm, dso, symbol, parent, cpu, srcline. 63 + 64 + Each key has following meaning: 65 + 66 + - comm: command (name) of the task which can be read via /proc/<pid>/comm 67 + - pid: command and tid of the task 68 + - dso: name of library or module executed at the time of sample 69 + - symbol: name of function executed at the time of sample 70 + - parent: name of function matched to the parent regex filter. Unmatched 71 + entries are displayed as "[other]". 72 + - cpu: cpu number the task ran at the time of sample 73 + - srcline: filename and line number executed at the time of sample. The 74 + DWARF debuggin info must be provided. 75 + 76 + By default, comm, dso and symbol keys are used. 77 + (i.e. --sort comm,dso,symbol) 78 + 79 + If --branch-stack option is used, following sort keys are also 80 + available: 81 + dso_from, dso_to, symbol_from, symbol_to, mispredict. 82 + 83 + - dso_from: name of library or module branched from 84 + - dso_to: name of library or module branched to 85 + - symbol_from: name of function branched from 86 + - symbol_to: name of function branched to 87 + - mispredict: "N" for predicted branch, "Y" for mispredicted branch 88 + 89 + And default sort keys are changed to comm, dso_from, symbol_from, dso_to 90 + and symbol_to, see '--branch-stack'. 61 91 62 92 -p:: 63 93 --parent=<regex>:: 64 - regex filter to identify parent, see: '--sort parent' 94 + A regex filter to identify parent. The parent is a caller of this 95 + function and searched through the callchain, thus it requires callchain 96 + information recorded. The pattern is in the exteneded regex format and 97 + defaults to "\^sys_|^do_page_fault", see '--sort parent'. 
65 98 66 99 -x:: 67 100 --exclude-other:: ··· 107 74 108 75 -t:: 109 76 --field-separator=:: 110 - 111 77 Use a special separator character and don't pad with spaces, replacing 112 78 all occurrences of this separator in symbol names (and other output) 113 79 with a '.' character, that thus it's the only non valid separator.
-2
tools/perf/Documentation/perf-script-python.txt
··· 336 336 ---- 337 337 root@tropicana:~# perf script -l 338 338 List of available trace scripts: 339 - workqueue-stats workqueue stats (ins/exe/create/destroy) 340 339 wakeup-latency system-wide min/max/avg wakeup latency 341 340 rw-by-file <comm> r/w activity for a program, by file 342 341 rw-by-pid system-wide r/w activity ··· 401 402 ---- 402 403 root@tropicana:~# perf script -l 403 404 List of available trace scripts: 404 - workqueue-stats workqueue stats (ins/exe/create/destroy) 405 405 wakeup-latency system-wide min/max/avg wakeup latency 406 406 rw-by-file <comm> r/w activity for a program, by file 407 407 rw-by-pid system-wide r/w activity
+4
tools/perf/Documentation/perf-test.txt
··· 23 23 24 24 OPTIONS 25 25 ------- 26 + -s:: 27 + --skip:: 28 + Tests to skip (comma separated numeric list). 29 + 26 30 -v:: 27 31 --verbose:: 28 32 Be more verbose.
+11 -6
tools/perf/Makefile
··· 50 50 51 51 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 52 52 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) 53 - -include $(OUTPUT)PERF-VERSION-FILE 54 53 55 54 uname_M := $(shell uname -m 2>/dev/null || echo not) 56 55 ··· 486 487 LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o 487 488 LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o 488 489 LIB_OBJS += $(OUTPUT)tests/pmu.o 490 + LIB_OBJS += $(OUTPUT)tests/hists_link.o 491 + LIB_OBJS += $(OUTPUT)tests/python-use.o 489 492 490 493 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o 491 494 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o ··· 532 531 # We choose to avoid "if .. else if .. else .. endif endif" 533 532 # because maintaining the nesting to match is a pain. If 534 533 # we had "elif" things would have been much nicer... 535 - 536 - -include config.mak.autogen 537 - -include config.mak 538 534 539 535 ifdef NO_LIBELF 540 536 NO_DWARF := 1 ··· 684 686 BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null) 685 687 EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null) 686 688 LIB_OBJS += $(OUTPUT)ui/gtk/browser.o 689 + LIB_OBJS += $(OUTPUT)ui/gtk/hists.o 687 690 LIB_OBJS += $(OUTPUT)ui/gtk/setup.o 688 691 LIB_OBJS += $(OUTPUT)ui/gtk/util.o 689 692 LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o ··· 886 887 $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf 887 888 888 889 $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS 889 - $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ 890 + $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \ 890 891 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ 891 892 $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@ 892 893 ··· 950 951 951 952 $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS 952 953 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ 953 - '-DBINDIR="$(bindir_SQ)"' \ 954 + '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \ 955 + $< 956 + 957 + $(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS 958 + $(QUIET_CC)$(CC) -o $@ -c 
$(ALL_CFLAGS) \ 959 + -DPYTHONPATH='"$(OUTPUT)python"' \ 960 + -DPYTHON='"$(PYTHON_WORD)"' \ 954 961 $< 955 962 956 963 $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
+2
tools/perf/builtin-bench.c
··· 159 159 printf("# Running %s/%s benchmark...\n", 160 160 subsys->name, 161 161 suites[i].name); 162 + fflush(stdout); 162 163 163 164 argv[1] = suites[i].name; 164 165 suites[i].fn(1, argv, NULL); ··· 226 225 printf("# Running %s/%s benchmark...\n", 227 226 subsystems[i].name, 228 227 subsystems[i].suites[j].name); 228 + fflush(stdout); 229 229 status = subsystems[i].suites[j].fn(argc - 1, 230 230 argv + 1, prefix); 231 231 goto end;
+36 -56
tools/perf/builtin-diff.c
··· 275 275 .ordering_requires_timestamps = true, 276 276 }; 277 277 278 - static void insert_hist_entry_by_name(struct rb_root *root, 279 - struct hist_entry *he) 280 - { 281 - struct rb_node **p = &root->rb_node; 282 - struct rb_node *parent = NULL; 283 - struct hist_entry *iter; 284 - 285 - while (*p != NULL) { 286 - parent = *p; 287 - iter = rb_entry(parent, struct hist_entry, rb_node); 288 - if (hist_entry__cmp(he, iter) < 0) 289 - p = &(*p)->rb_left; 290 - else 291 - p = &(*p)->rb_right; 292 - } 293 - 294 - rb_link_node(&he->rb_node, parent, p); 295 - rb_insert_color(&he->rb_node, root); 296 - } 297 - 298 - static void hists__name_resort(struct hists *self) 299 - { 300 - struct rb_root tmp = RB_ROOT; 301 - struct rb_node *next = rb_first(&self->entries); 302 - 303 - while (next != NULL) { 304 - struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node); 305 - 306 - next = rb_next(&n->rb_node); 307 - 308 - rb_erase(&n->rb_node, &self->entries); 309 - insert_hist_entry_by_name(&tmp, n); 310 - } 311 - 312 - self->entries = tmp; 313 - } 314 - 315 278 static struct perf_evsel *evsel_match(struct perf_evsel *evsel, 316 279 struct perf_evlist *evlist) 317 280 { ··· 287 324 return NULL; 288 325 } 289 326 290 - static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name) 327 + static void perf_evlist__collapse_resort(struct perf_evlist *evlist) 291 328 { 292 329 struct perf_evsel *evsel; 293 330 294 331 list_for_each_entry(evsel, &evlist->entries, node) { 295 332 struct hists *hists = &evsel->hists; 296 333 297 - hists__output_resort(hists); 298 - 299 - if (name) 300 - hists__name_resort(hists); 334 + hists__collapse_resort(hists); 301 335 } 302 336 } 303 337 304 338 static void hists__baseline_only(struct hists *hists) 305 339 { 306 - struct rb_node *next = rb_first(&hists->entries); 340 + struct rb_root *root; 341 + struct rb_node *next; 307 342 343 + if (sort__need_collapse) 344 + root = &hists->entries_collapsed; 345 + else 346 + root = 
hists->entries_in; 347 + 348 + next = rb_first(root); 308 349 while (next != NULL) { 309 - struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 350 + struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); 310 351 311 - next = rb_next(&he->rb_node); 352 + next = rb_next(&he->rb_node_in); 312 353 if (!hist_entry__next_pair(he)) { 313 - rb_erase(&he->rb_node, &hists->entries); 354 + rb_erase(&he->rb_node_in, root); 314 355 hist_entry__free(he); 315 356 } 316 357 } ··· 414 447 415 448 static void hists__compute_resort(struct hists *hists) 416 449 { 417 - struct rb_root tmp = RB_ROOT; 418 - struct rb_node *next = rb_first(&hists->entries); 450 + struct rb_root *root; 451 + struct rb_node *next; 452 + 453 + if (sort__need_collapse) 454 + root = &hists->entries_collapsed; 455 + else 456 + root = hists->entries_in; 457 + 458 + hists->entries = RB_ROOT; 459 + next = rb_first(root); 460 + 461 + hists->nr_entries = 0; 462 + hists->stats.total_period = 0; 463 + hists__reset_col_len(hists); 419 464 420 465 while (next != NULL) { 421 - struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 466 + struct hist_entry *he; 422 467 423 - next = rb_next(&he->rb_node); 468 + he = rb_entry(next, struct hist_entry, rb_node_in); 469 + next = rb_next(&he->rb_node_in); 424 470 425 - rb_erase(&he->rb_node, &hists->entries); 426 - insert_hist_entry_by_compute(&tmp, he, compute); 471 + insert_hist_entry_by_compute(&hists->entries, he, compute); 472 + hists__inc_nr_entries(hists, he); 427 473 } 428 - 429 - hists->entries = tmp; 430 474 } 431 475 432 476 static void hists__process(struct hists *old, struct hists *new) ··· 452 474 if (sort_compute) { 453 475 hists__precompute(new); 454 476 hists__compute_resort(new); 477 + } else { 478 + hists__output_resort(new); 455 479 } 456 480 457 481 hists__fprintf(new, true, 0, 0, stdout); ··· 485 505 evlist_old = older->evlist; 486 506 evlist_new = newer->evlist; 487 507 488 - 
perf_evlist__resort_hists(evlist_old, true); 489 - perf_evlist__resort_hists(evlist_new, false); 508 + perf_evlist__collapse_resort(evlist_old); 509 + perf_evlist__collapse_resort(evlist_new); 490 510 491 511 list_for_each_entry(evsel, &evlist_new->entries, node) { 492 512 struct perf_evsel *evsel_old;
+2 -8
tools/perf/builtin-kmem.c
··· 340 340 int n_lines, int is_caller) 341 341 { 342 342 struct rb_node *next; 343 - struct machine *machine; 343 + struct machine *machine = &session->machines.host; 344 344 345 345 printf("%.102s\n", graph_dotted_line); 346 346 printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr"); ··· 349 349 350 350 next = rb_first(root); 351 351 352 - machine = perf_session__find_host_machine(session); 353 - if (!machine) { 354 - pr_err("__print_result: couldn't find kernel information\n"); 355 - return; 356 - } 357 352 while (next && n_lines--) { 358 353 struct alloc_stat *data = rb_entry(next, struct alloc_stat, 359 354 node); ··· 609 614 &pingpong_sort_dimension, 610 615 }; 611 616 612 - #define NUM_AVAIL_SORTS \ 613 - (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *)) 617 + #define NUM_AVAIL_SORTS ((int)ARRAY_SIZE(avail_sorts)) 614 618 615 619 static int sort_dimension__add(const char *tok, struct list_head *list) 616 620 {
+1 -2
tools/perf/builtin-kvm.c
··· 973 973 974 974 int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) 975 975 { 976 - const char *file_name; 977 - 976 + const char *file_name = NULL; 978 977 const struct option kvm_options[] = { 979 978 OPT_STRING('i', "input", &file_name, "file", 980 979 "Input file name"),
+12 -115
tools/perf/builtin-record.c
··· 224 224 225 225 static int perf_record__open(struct perf_record *rec) 226 226 { 227 + char msg[512]; 227 228 struct perf_evsel *pos; 228 229 struct perf_evlist *evlist = rec->evlist; 229 230 struct perf_session *session = rec->session; ··· 234 233 perf_evlist__config(evlist, opts); 235 234 236 235 list_for_each_entry(pos, &evlist->entries, node) { 237 - struct perf_event_attr *attr = &pos->attr; 238 - /* 239 - * Check if parse_single_tracepoint_event has already asked for 240 - * PERF_SAMPLE_TIME. 241 - * 242 - * XXX this is kludgy but short term fix for problems introduced by 243 - * eac23d1c that broke 'perf script' by having different sample_types 244 - * when using multiple tracepoint events when we use a perf binary 245 - * that tries to use sample_id_all on an older kernel. 246 - * 247 - * We need to move counter creation to perf_session, support 248 - * different sample_types, etc. 249 - */ 250 - bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; 251 - 252 - fallback_missing_features: 253 - if (opts->exclude_guest_missing) 254 - attr->exclude_guest = attr->exclude_host = 0; 255 - retry_sample_id: 256 - attr->sample_id_all = opts->sample_id_all_missing ? 
0 : 1; 257 236 try_again: 258 237 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { 259 - int err = errno; 260 - 261 - if (err == EPERM || err == EACCES) { 262 - ui__error_paranoid(); 263 - rc = -err; 264 - goto out; 265 - } else if (err == ENODEV && opts->target.cpu_list) { 266 - pr_err("No such device - did you specify" 267 - " an out-of-range profile CPU?\n"); 268 - rc = -err; 269 - goto out; 270 - } else if (err == EINVAL) { 271 - if (!opts->exclude_guest_missing && 272 - (attr->exclude_guest || attr->exclude_host)) { 273 - pr_debug("Old kernel, cannot exclude " 274 - "guest or host samples.\n"); 275 - opts->exclude_guest_missing = true; 276 - goto fallback_missing_features; 277 - } else if (!opts->sample_id_all_missing) { 278 - /* 279 - * Old kernel, no attr->sample_id_type_all field 280 - */ 281 - opts->sample_id_all_missing = true; 282 - if (!opts->sample_time && !opts->raw_samples && !time_needed) 283 - perf_evsel__reset_sample_bit(pos, TIME); 284 - 285 - goto retry_sample_id; 286 - } 287 - } 288 - 289 - /* 290 - * If it's cycles then fall back to hrtimer 291 - * based cpu-clock-tick sw counter, which 292 - * is always available even if no PMU support. 293 - * 294 - * PPC returns ENXIO until 2.6.37 (behavior changed 295 - * with commit b0a873e). 
296 - */ 297 - if ((err == ENOENT || err == ENXIO) 298 - && attr->type == PERF_TYPE_HARDWARE 299 - && attr->config == PERF_COUNT_HW_CPU_CYCLES) { 300 - 238 + if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) { 301 239 if (verbose) 302 - ui__warning("The cycles event is not supported, " 303 - "trying to fall back to cpu-clock-ticks\n"); 304 - attr->type = PERF_TYPE_SOFTWARE; 305 - attr->config = PERF_COUNT_SW_CPU_CLOCK; 306 - if (pos->name) { 307 - free(pos->name); 308 - pos->name = NULL; 309 - } 240 + ui__warning("%s\n", msg); 310 241 goto try_again; 311 242 } 312 243 313 - if (err == ENOENT) { 314 - ui__error("The %s event is not supported.\n", 315 - perf_evsel__name(pos)); 316 - rc = -err; 317 - goto out; 318 - } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) { 319 - ui__error("\'precise\' request may not be supported. " 320 - "Try removing 'p' modifier\n"); 321 - rc = -err; 322 - goto out; 323 - } 324 - 325 - printf("\n"); 326 - error("sys_perf_event_open() syscall returned with %d " 327 - "(%s) for event %s. /bin/dmesg may provide " 328 - "additional information.\n", 329 - err, strerror(err), perf_evsel__name(pos)); 330 - 331 - #if defined(__i386__) || defined(__x86_64__) 332 - if (attr->type == PERF_TYPE_HARDWARE && 333 - err == EOPNOTSUPP) { 334 - pr_err("No hardware sampling interrupt available." 335 - " No APIC? 
If so then you can boot the kernel" 336 - " with the \"lapic\" boot parameter to" 337 - " force-enable it.\n"); 338 - rc = -err; 339 - goto out; 340 - } 341 - #endif 342 - 343 - pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); 344 - rc = -err; 244 + rc = -errno; 245 + perf_evsel__open_strerror(pos, &opts->target, 246 + errno, msg, sizeof(msg)); 247 + ui__error("%s\n", msg); 345 248 goto out; 346 249 } 347 250 } ··· 328 423 { 329 424 int err; 330 425 struct perf_tool *tool = data; 331 - 332 - if (machine__is_host(machine)) 333 - return; 334 - 335 426 /* 336 427 *As for guest kernel when processing subcommand record&report, 337 428 *we arrange module mmap prior to guest kernel mmap and trigger ··· 512 611 513 612 rec->post_processing_offset = lseek(output, 0, SEEK_CUR); 514 613 515 - machine = perf_session__find_host_machine(session); 516 - if (!machine) { 517 - pr_err("Couldn't find native kernel information.\n"); 518 - err = -1; 519 - goto out_delete_session; 520 - } 614 + machine = &session->machines.host; 521 615 522 616 if (opts->pipe_output) { 523 617 err = perf_event__synthesize_attrs(tool, session, ··· 565 669 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 566 670 "Check /proc/modules permission or run as root.\n"); 567 671 568 - if (perf_guest) 569 - perf_session__process_machines(session, tool, 570 - perf_event__synthesize_guest_os); 672 + if (perf_guest) { 673 + machines__process_guests(&session->machines, 674 + perf_event__synthesize_guest_os, tool); 675 + } 571 676 572 677 if (!opts->target.system_wide) 573 678 err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
+3 -3
tools/perf/builtin-report.c
··· 372 372 if (ret) 373 373 goto out_delete; 374 374 375 - kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION]; 375 + kernel_map = session->machines.host.vmlinux_maps[MAP__FUNCTION]; 376 376 kernel_kmap = map__kmap(kernel_map); 377 377 if (kernel_map == NULL || 378 378 (kernel_map->dso->hit && ··· 595 595 OPT_BOOLEAN(0, "stdio", &report.use_stdio, 596 596 "Use the stdio interface"), 597 597 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 598 - "sort by key(s): pid, comm, dso, symbol, parent, dso_to," 599 - " dso_from, symbol_to, symbol_from, mispredict"), 598 + "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline," 599 + " dso_to, dso_from, symbol_to, symbol_from, mispredict"), 600 600 OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, 601 601 "Show sample percentage for different cpu modes"), 602 602 OPT_STRING('p', "parent", &parent_pattern, "regex",
+3 -3
tools/perf/builtin-sched.c
··· 1475 1475 goto out_delete; 1476 1476 } 1477 1477 1478 - sched->nr_events = session->hists.stats.nr_events[0]; 1479 - sched->nr_lost_events = session->hists.stats.total_lost; 1480 - sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; 1478 + sched->nr_events = session->stats.nr_events[0]; 1479 + sched->nr_lost_events = session->stats.total_lost; 1480 + sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST]; 1481 1481 } 1482 1482 1483 1483 if (destroy)
+3 -14
tools/perf/builtin-script.c
··· 692 692 const char *arg, int unset __maybe_unused) 693 693 { 694 694 char *tok; 695 - int i, imax = sizeof(all_output_options) / sizeof(struct output_option); 695 + int i, imax = ARRAY_SIZE(all_output_options); 696 696 int j; 697 697 int rc = 0; 698 698 char *str = strdup(arg); ··· 907 907 } 908 908 909 909 return NULL; 910 - } 911 - 912 - static char *ltrim(char *str) 913 - { 914 - int len = strlen(str); 915 - 916 - while (len && isspace(*str)) { 917 - len--; 918 - str++; 919 - } 920 - 921 - return str; 922 910 } 923 911 924 912 static int read_script_info(struct script_desc *desc, const char *filename) ··· 1475 1487 return -1; 1476 1488 } 1477 1489 1478 - perf_session__fprintf_info(session, stdout, show_full_info); 1490 + if (!script_name && !generate_script_lang) 1491 + perf_session__fprintf_info(session, stdout, show_full_info); 1479 1492 1480 1493 if (!no_callchain) 1481 1494 symbol_conf.use_callchain = true;
+8 -38
tools/perf/builtin-stat.c
··· 132 132 static int create_perf_stat_counter(struct perf_evsel *evsel) 133 133 { 134 134 struct perf_event_attr *attr = &evsel->attr; 135 - bool exclude_guest_missing = false; 136 - int ret; 137 135 138 136 if (scale) 139 137 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | ··· 139 141 140 142 attr->inherit = !no_inherit; 141 143 142 - retry: 143 - if (exclude_guest_missing) 144 - evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; 145 - 146 - if (perf_target__has_cpu(&target)) { 147 - ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); 148 - if (ret) 149 - goto check_ret; 150 - return 0; 151 - } 144 + if (perf_target__has_cpu(&target)) 145 + return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); 152 146 153 147 if (!perf_target__has_task(&target) && 154 148 perf_evsel__is_group_leader(evsel)) { ··· 148 158 attr->enable_on_exec = 1; 149 159 } 150 160 151 - ret = perf_evsel__open_per_thread(evsel, evsel_list->threads); 152 - if (!ret) 153 - return 0; 154 - /* fall through */ 155 - check_ret: 156 - if (ret && errno == EINVAL) { 157 - if (!exclude_guest_missing && 158 - (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { 159 - pr_debug("Old kernel, cannot exclude " 160 - "guest or host samples.\n"); 161 - exclude_guest_missing = true; 162 - goto retry; 163 - } 164 - } 165 - return ret; 161 + return perf_evsel__open_per_thread(evsel, evsel_list->threads); 166 162 } 167 163 168 164 /* ··· 247 271 248 272 static int __run_perf_stat(int argc __maybe_unused, const char **argv) 249 273 { 274 + char msg[512]; 250 275 unsigned long long t0, t1; 251 276 struct perf_evsel *counter; 252 277 int status = 0; ··· 325 348 continue; 326 349 } 327 350 328 - if (errno == EPERM || errno == EACCES) { 329 - error("You may not have permission to collect %sstats.\n" 330 - "\t Consider tweaking" 331 - " /proc/sys/kernel/perf_event_paranoid or running as root.", 332 - target.system_wide ? 
"system-wide " : ""); 333 - } else { 334 - error("open_counter returned with %d (%s). " 335 - "/bin/dmesg may provide additional information.\n", 336 - errno, strerror(errno)); 337 - } 351 + perf_evsel__open_strerror(counter, &target, 352 + errno, msg, sizeof(msg)); 353 + ui__error("%s\n", msg); 354 + 338 355 if (child_pid != -1) 339 356 kill(child_pid, SIGTERM); 340 357 341 - pr_err("Not all events could be opened.\n"); 342 358 return -1; 343 359 } 344 360 counter->supported = true;
+13 -93
tools/perf/builtin-top.c
··· 68 68 #include <linux/unistd.h> 69 69 #include <linux/types.h> 70 70 71 - void get_term_dimensions(struct winsize *ws) 72 - { 73 - char *s = getenv("LINES"); 74 - 75 - if (s != NULL) { 76 - ws->ws_row = atoi(s); 77 - s = getenv("COLUMNS"); 78 - if (s != NULL) { 79 - ws->ws_col = atoi(s); 80 - if (ws->ws_row && ws->ws_col) 81 - return; 82 - } 83 - } 84 - #ifdef TIOCGWINSZ 85 - if (ioctl(1, TIOCGWINSZ, ws) == 0 && 86 - ws->ws_row && ws->ws_col) 87 - return; 88 - #endif 89 - ws->ws_row = 25; 90 - ws->ws_col = 80; 91 - } 92 - 93 71 static void perf_top__update_print_entries(struct perf_top *top) 94 72 { 95 73 if (top->print_entries > 9) ··· 694 716 static struct intlist *seen; 695 717 696 718 if (!seen) 697 - seen = intlist__new(); 719 + seen = intlist__new(NULL); 698 720 699 721 if (!intlist__has_entry(seen, event->ip.pid)) { 700 722 pr_err("Can't find guest [%d]'s kernel information\n", ··· 706 728 707 729 if (!machine) { 708 730 pr_err("%u unprocessable samples recorded.\n", 709 - top->session->hists.stats.nr_unprocessable_samples++); 731 + top->session->stats.nr_unprocessable_samples++); 710 732 return; 711 733 } 712 734 ··· 825 847 ++top->us_samples; 826 848 if (top->hide_user_symbols) 827 849 continue; 828 - machine = perf_session__find_host_machine(session); 850 + machine = &session->machines.host; 829 851 break; 830 852 case PERF_RECORD_MISC_KERNEL: 831 853 ++top->kernel_samples; 832 854 if (top->hide_kernel_symbols) 833 855 continue; 834 - machine = perf_session__find_host_machine(session); 856 + machine = &session->machines.host; 835 857 break; 836 858 case PERF_RECORD_MISC_GUEST_KERNEL: 837 859 ++top->guest_kernel_samples; ··· 856 878 hists__inc_nr_events(&evsel->hists, event->header.type); 857 879 machine__process_event(machine, event); 858 880 } else 859 - ++session->hists.stats.nr_unknown_events; 881 + ++session->stats.nr_unknown_events; 860 882 } 861 883 } 862 884 ··· 870 892 871 893 static void perf_top__start_counters(struct perf_top *top) 872 894 
{ 895 + char msg[512]; 873 896 struct perf_evsel *counter; 874 897 struct perf_evlist *evlist = top->evlist; 875 898 struct perf_record_opts *opts = &top->record_opts; ··· 878 899 perf_evlist__config(evlist, opts); 879 900 880 901 list_for_each_entry(counter, &evlist->entries, node) { 881 - struct perf_event_attr *attr = &counter->attr; 882 - 883 - fallback_missing_features: 884 - if (top->exclude_guest_missing) 885 - attr->exclude_guest = attr->exclude_host = 0; 886 - retry_sample_id: 887 - attr->sample_id_all = top->sample_id_all_missing ? 0 : 1; 888 902 try_again: 889 903 if (perf_evsel__open(counter, top->evlist->cpus, 890 904 top->evlist->threads) < 0) { 891 - int err = errno; 892 - 893 - if (err == EPERM || err == EACCES) { 894 - ui__error_paranoid(); 895 - goto out_err; 896 - } else if (err == EINVAL) { 897 - if (!top->exclude_guest_missing && 898 - (attr->exclude_guest || attr->exclude_host)) { 899 - pr_debug("Old kernel, cannot exclude " 900 - "guest or host samples.\n"); 901 - top->exclude_guest_missing = true; 902 - goto fallback_missing_features; 903 - } else if (!top->sample_id_all_missing) { 904 - /* 905 - * Old kernel, no attr->sample_id_type_all field 906 - */ 907 - top->sample_id_all_missing = true; 908 - goto retry_sample_id; 909 - } 910 - } 911 - /* 912 - * If it's cycles then fall back to hrtimer 913 - * based cpu-clock-tick sw counter, which 914 - * is always available even if no PMU support: 915 - */ 916 - if ((err == ENOENT || err == ENXIO) && 917 - (attr->type == PERF_TYPE_HARDWARE) && 918 - (attr->config == PERF_COUNT_HW_CPU_CYCLES)) { 919 - 905 + if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { 920 906 if (verbose) 921 - ui__warning("Cycles event not supported,\n" 922 - "trying to fall back to cpu-clock-ticks\n"); 923 - 924 - attr->type = PERF_TYPE_SOFTWARE; 925 - attr->config = PERF_COUNT_SW_CPU_CLOCK; 926 - if (counter->name) { 927 - free(counter->name); 928 - counter->name = NULL; 929 - } 907 + ui__warning("%s\n", msg); 930 
908 goto try_again; 931 909 } 932 910 933 - if (err == ENOENT) { 934 - ui__error("The %s event is not supported.\n", 935 - perf_evsel__name(counter)); 936 - goto out_err; 937 - } else if (err == EMFILE) { 938 - ui__error("Too many events are opened.\n" 939 - "Try again after reducing the number of events\n"); 940 - goto out_err; 941 - } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) { 942 - ui__error("\'precise\' request may not be supported. " 943 - "Try removing 'p' modifier\n"); 944 - goto out_err; 945 - } 946 - 947 - ui__error("The sys_perf_event_open() syscall " 948 - "returned with %d (%s). /bin/dmesg " 949 - "may provide additional information.\n" 950 - "No CONFIG_PERF_EVENTS=y kernel support " 951 - "configured?\n", err, strerror(err)); 911 + perf_evsel__open_strerror(counter, &opts->target, 912 + errno, msg, sizeof(msg)); 913 + ui__error("%s\n", msg); 952 914 goto out_err; 953 915 } 954 916 } ··· 944 1024 if (perf_target__has_task(&opts->target)) 945 1025 perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, 946 1026 perf_event__process, 947 - &top->session->host_machine); 1027 + &top->session->machines.host); 948 1028 else 949 1029 perf_event__synthesize_threads(&top->tool, perf_event__process, 950 - &top->session->host_machine); 1030 + &top->session->machines.host); 951 1031 perf_top__start_counters(top); 952 1032 top->session->evlist = top->evlist; 953 1033 perf_session__set_id_hdr_size(top->session);
+3 -3
tools/perf/config/utilities.mak
··· 13 13 # what should replace a newline when escaping 14 14 # newlines; the default is a bizarre string. 15 15 # 16 - nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) 16 + nl-escape = $(if $(1),$(1),m822df3020w6a44id34bt574ctac44eb9f4n) 17 17 18 18 # escape-nl 19 19 # ··· 173 173 # Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default) 174 174 # 175 175 define get-executable-or-default 176 - $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) 176 + $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1))) 177 177 endef 178 - _ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) 178 + _ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2))) 179 179 _gea_warn = $(warning The path '$(1)' is not executable.) 180 180 _gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) 181 181
+21 -11
tools/perf/perf.c
··· 328 328 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) 329 329 return 0; 330 330 331 + status = 1; 331 332 /* Check for ENOSPC and EIO errors.. */ 332 - if (fflush(stdout)) 333 - die("write failure on standard output: %s", strerror(errno)); 334 - if (ferror(stdout)) 335 - die("unknown write failure on standard output"); 336 - if (fclose(stdout)) 337 - die("close failed on standard output: %s", strerror(errno)); 338 - return 0; 333 + if (fflush(stdout)) { 334 + fprintf(stderr, "write failure on standard output: %s", strerror(errno)); 335 + goto out; 336 + } 337 + if (ferror(stdout)) { 338 + fprintf(stderr, "unknown write failure on standard output"); 339 + goto out; 340 + } 341 + if (fclose(stdout)) { 342 + fprintf(stderr, "close failed on standard output: %s", strerror(errno)); 343 + goto out; 344 + } 345 + status = 0; 346 + out: 347 + return status; 339 348 } 340 349 341 350 static void handle_internal_command(int argc, const char **argv) ··· 476 467 cmd += 5; 477 468 argv[0] = cmd; 478 469 handle_internal_command(argc, argv); 479 - die("cannot handle %s internally", cmd); 470 + fprintf(stderr, "cannot handle %s internally", cmd); 471 + goto out; 480 472 } 481 473 482 474 /* Look for flags.. */ ··· 495 485 printf("\n usage: %s\n\n", perf_usage_string); 496 486 list_common_cmds_help(); 497 487 printf("\n %s\n\n", perf_more_info_string); 498 - exit(1); 488 + goto out; 499 489 } 500 490 cmd = argv[0]; 501 491 ··· 527 517 fprintf(stderr, "Expansion of alias '%s' failed; " 528 518 "'%s' is not a perf-command\n", 529 519 cmd, argv[0]); 530 - exit(1); 520 + goto out; 531 521 } 532 522 if (!done_help) { 533 523 cmd = argv[0] = help_unknown_cmd(cmd); ··· 538 528 539 529 fprintf(stderr, "Failed to run command '%s': %s\n", 540 530 cmd, strerror(errno)); 541 - 531 + out: 542 532 return 1; 543 533 }
-6
tools/perf/perf.h
··· 1 1 #ifndef _PERF_PERF_H 2 2 #define _PERF_PERF_H 3 3 4 - struct winsize; 5 - 6 - void get_term_dimensions(struct winsize *ws); 7 - 8 4 #include <asm/unistd.h> 9 5 10 6 #if defined(__i386__) ··· 233 237 bool raw_samples; 234 238 bool sample_address; 235 239 bool sample_time; 236 - bool sample_id_all_missing; 237 - bool exclude_guest_missing; 238 240 bool period; 239 241 unsigned int freq; 240 242 unsigned int mmap_pages;
-2
tools/perf/scripts/perl/bin/workqueue-stats-record
··· 1 - #!/bin/bash 2 - perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
-3
tools/perf/scripts/perl/bin/workqueue-stats-report
··· 1 - #!/bin/bash 2 - # description: workqueue stats (ins/exe/create/destroy) 3 - perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl
-129
tools/perf/scripts/perl/workqueue-stats.pl
··· 1 - #!/usr/bin/perl -w 2 - # (c) 2009, Tom Zanussi <tzanussi@gmail.com> 3 - # Licensed under the terms of the GNU GPL License version 2 4 - 5 - # Displays workqueue stats 6 - # 7 - # Usage: 8 - # 9 - # perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e 10 - # workqueue:workqueue_destruction -e workqueue:workqueue_execution 11 - # -e workqueue:workqueue_insertion 12 - # 13 - # perf script -p -s tools/perf/scripts/perl/workqueue-stats.pl 14 - 15 - use 5.010000; 16 - use strict; 17 - use warnings; 18 - 19 - use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; 20 - use lib "./Perf-Trace-Util/lib"; 21 - use Perf::Trace::Core; 22 - use Perf::Trace::Util; 23 - 24 - my @cpus; 25 - 26 - sub workqueue::workqueue_destruction 27 - { 28 - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 29 - $common_pid, $common_comm, 30 - $thread_comm, $thread_pid) = @_; 31 - 32 - $cpus[$common_cpu]{$thread_pid}{destroyed}++; 33 - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; 34 - } 35 - 36 - sub workqueue::workqueue_creation 37 - { 38 - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 39 - $common_pid, $common_comm, 40 - $thread_comm, $thread_pid, $cpu) = @_; 41 - 42 - $cpus[$common_cpu]{$thread_pid}{created}++; 43 - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; 44 - } 45 - 46 - sub workqueue::workqueue_execution 47 - { 48 - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 49 - $common_pid, $common_comm, 50 - $thread_comm, $thread_pid, $func) = @_; 51 - 52 - $cpus[$common_cpu]{$thread_pid}{executed}++; 53 - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; 54 - } 55 - 56 - sub workqueue::workqueue_insertion 57 - { 58 - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 59 - $common_pid, $common_comm, 60 - $thread_comm, $thread_pid, $func) = @_; 61 - 62 - $cpus[$common_cpu]{$thread_pid}{inserted}++; 63 - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; 64 - } 65 - 
66 - sub trace_end 67 - { 68 - print "workqueue work stats:\n\n"; 69 - my $cpu = 0; 70 - printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name"); 71 - printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----"); 72 - foreach my $pidhash (@cpus) { 73 - while ((my $pid, my $wqhash) = each %$pidhash) { 74 - my $ins = $$wqhash{'inserted'} || 0; 75 - my $exe = $$wqhash{'executed'} || 0; 76 - my $comm = $$wqhash{'comm'} || ""; 77 - if ($ins || $exe) { 78 - printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm); 79 - } 80 - } 81 - $cpu++; 82 - } 83 - 84 - $cpu = 0; 85 - print "\nworkqueue lifecycle stats:\n\n"; 86 - printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name"); 87 - printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----"); 88 - foreach my $pidhash (@cpus) { 89 - while ((my $pid, my $wqhash) = each %$pidhash) { 90 - my $created = $$wqhash{'created'} || 0; 91 - my $destroyed = $$wqhash{'destroyed'} || 0; 92 - my $comm = $$wqhash{'comm'} || ""; 93 - if ($created || $destroyed) { 94 - printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed, 95 - $comm); 96 - } 97 - } 98 - $cpu++; 99 - } 100 - 101 - print_unhandled(); 102 - } 103 - 104 - my %unhandled; 105 - 106 - sub print_unhandled 107 - { 108 - if ((scalar keys %unhandled) == 0) { 109 - return; 110 - } 111 - 112 - print "\nunhandled events:\n\n"; 113 - 114 - printf("%-40s %10s\n", "event", "count"); 115 - printf("%-40s %10s\n", "----------------------------------------", 116 - "-----------"); 117 - 118 - foreach my $event_name (keys %unhandled) { 119 - printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); 120 - } 121 - } 122 - 123 - sub trace_unhandled 124 - { 125 - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 126 - $common_pid, $common_comm) = @_; 127 - 128 - $unhandled{$event_name}++; 129 - }
+1 -3
tools/perf/tests/attr.c
··· 33 33 34 34 extern int verbose; 35 35 36 - bool test_attr__enabled; 37 - 38 36 static char *dir; 39 37 40 38 void test_attr__init(void) ··· 144 146 { 145 147 char cmd[3*PATH_MAX]; 146 148 147 - snprintf(cmd, 3*PATH_MAX, "python %s/attr.py -d %s/attr/ -p %s %s", 149 + snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %s", 148 150 d, d, perf, verbose ? "-v" : ""); 149 151 150 152 return system(cmd);
+18 -9
tools/perf/tests/attr.py
··· 68 68 self[key] = val 69 69 70 70 def __init__(self, name, data, base): 71 - log.info(" Event %s" % name); 71 + log.debug(" Event %s" % name); 72 72 self.name = name; 73 73 self.group = '' 74 74 self.add(base) ··· 97 97 return False 98 98 return True 99 99 100 + def diff(self, other): 101 + for t in Event.terms: 102 + if not self.has_key(t) or not other.has_key(t): 103 + continue 104 + if not self.compare_data(self[t], other[t]): 105 + log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) 106 + 107 + 100 108 # Test file description needs to have following sections: 101 109 # [config] 102 110 # - just single instance in file ··· 121 113 parser = ConfigParser.SafeConfigParser() 122 114 parser.read(path) 123 115 124 - log.warning("running '%s'" % path) 116 + log.debug("running '%s'" % path) 125 117 126 118 self.path = path 127 119 self.test_dir = options.test_dir ··· 136 128 137 129 self.expect = {} 138 130 self.result = {} 139 - log.info(" loading expected events"); 131 + log.debug(" loading expected events"); 140 132 self.load_events(path, self.expect) 141 133 142 134 def is_event(self, name): ··· 172 164 self.perf, self.command, tempdir, self.args) 173 165 ret = os.WEXITSTATUS(os.system(cmd)) 174 166 175 - log.info(" running '%s' ret %d " % (cmd, ret)) 167 + log.warning(" running '%s' ret %d " % (cmd, ret)) 176 168 177 169 if ret != int(self.ret): 178 170 raise Unsup(self) ··· 180 172 def compare(self, expect, result): 181 173 match = {} 182 174 183 - log.info(" compare"); 175 + log.debug(" compare"); 184 176 185 177 # For each expected event find all matching 186 178 # events in result. Fail if there's not any. 
··· 195 187 else: 196 188 log.debug(" ->FAIL"); 197 189 198 - log.info(" match: [%s] matches %s" % (exp_name, str(exp_list))) 190 + log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list))) 199 191 200 192 # we did not any matching event - fail 201 193 if (not exp_list): 194 + exp_event.diff(res_event) 202 195 raise Fail(self, 'match failure'); 203 196 204 197 match[exp_name] = exp_list ··· 217 208 if res_group not in match[group]: 218 209 raise Fail(self, 'group failure') 219 210 220 - log.info(" group: [%s] matches group leader %s" % 211 + log.debug(" group: [%s] matches group leader %s" % 221 212 (exp_name, str(match[group]))) 222 213 223 - log.info(" matched") 214 + log.debug(" matched") 224 215 225 216 def resolve_groups(self, events): 226 217 for name, event in events.items(): ··· 242 233 self.run_cmd(tempdir); 243 234 244 235 # load events expectation for the test 245 - log.info(" loading result events"); 236 + log.debug(" loading result events"); 246 237 for f in glob.glob(tempdir + '/event*'): 247 238 self.load_events(f, self.result); 248 239
+1 -1
tools/perf/tests/attr/test-record-group1
··· 1 1 [config] 2 2 command = record 3 - args = -e '{cycles,instructions}' kill >/tmp/krava 2>&1 3 + args = -e '{cycles,instructions}' kill >/dev/null 2>&1 4 4 5 5 [event-1:base-record] 6 6 fd=1
+35 -5
tools/perf/tests/builtin-test.c
··· 4 4 * Builtin regression testing command: ever growing number of sanity tests 5 5 */ 6 6 #include "builtin.h" 7 + #include "intlist.h" 7 8 #include "tests.h" 8 9 #include "debug.h" 9 10 #include "color.h" ··· 70 69 .func = test__attr, 71 70 }, 72 71 { 72 + .desc = "Test matching and linking mutliple hists", 73 + .func = test__hists_link, 74 + }, 75 + { 76 + .desc = "Try 'use perf' in python, checking link problems", 77 + .func = test__python_use, 78 + }, 79 + { 73 80 .func = NULL, 74 81 }, 75 82 }; ··· 106 97 return false; 107 98 } 108 99 109 - static int __cmd_test(int argc, const char *argv[]) 100 + static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) 110 101 { 111 102 int i = 0; 112 103 int width = 0; ··· 127 118 continue; 128 119 129 120 pr_info("%2d: %-*s:", i, width, tests[curr].desc); 121 + 122 + if (intlist__find(skiplist, i)) { 123 + color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); 124 + continue; 125 + } 126 + 130 127 pr_debug("\n--- start ---\n"); 131 128 err = tests[curr].func(); 132 129 pr_debug("---- end ----\n%s:", tests[curr].desc); 133 - if (err) 134 - color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); 135 - else 130 + 131 + switch (err) { 132 + case TEST_OK: 136 133 pr_info(" Ok\n"); 134 + break; 135 + case TEST_SKIP: 136 + color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n"); 137 + break; 138 + case TEST_FAIL: 139 + default: 140 + color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); 141 + break; 142 + } 137 143 } 138 144 139 145 return 0; ··· 176 152 "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", 177 153 NULL, 178 154 }; 155 + const char *skip = NULL; 179 156 const struct option test_options[] = { 157 + OPT_STRING('s', "skip", &skip, "tests", "tests to skip"), 180 158 OPT_INCR('v', "verbose", &verbose, 181 159 "be more verbose (show symbol address, etc)"), 182 160 OPT_END() 183 161 }; 162 + struct intlist *skiplist = NULL; 184 163 185 164 argc = 
parse_options(argc, argv, test_options, test_usage, 0); 186 165 if (argc >= 1 && !strcmp(argv[0], "list")) ··· 196 169 if (symbol__init() < 0) 197 170 return -1; 198 171 199 - return __cmd_test(argc, argv); 172 + if (skip != NULL) 173 + skiplist = intlist__new(skip); 174 + 175 + return __cmd_test(argc, argv, skiplist); 200 176 }
+2 -2
tools/perf/tests/evsel-roundtrip-name.c
··· 22 22 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 23 23 __perf_evsel__hw_cache_type_op_res_name(type, op, i, 24 24 name, sizeof(name)); 25 - err = parse_events(evlist, name, 0); 25 + err = parse_events(evlist, name); 26 26 if (err) 27 27 ret = err; 28 28 } ··· 70 70 return -ENOMEM; 71 71 72 72 for (i = 0; i < nr_names; ++i) { 73 - err = parse_events(evlist, names[i], 0); 73 + err = parse_events(evlist, names[i]); 74 74 if (err) { 75 75 pr_debug("failed to parse event '%s', err %d\n", 76 76 names[i], err);
+499
tools/perf/tests/hists_link.c
··· 1 + #include "perf.h" 2 + #include "tests.h" 3 + #include "debug.h" 4 + #include "symbol.h" 5 + #include "sort.h" 6 + #include "evsel.h" 7 + #include "evlist.h" 8 + #include "machine.h" 9 + #include "thread.h" 10 + #include "parse-events.h" 11 + 12 + static struct { 13 + u32 pid; 14 + const char *comm; 15 + } fake_threads[] = { 16 + { 100, "perf" }, 17 + { 200, "perf" }, 18 + { 300, "bash" }, 19 + }; 20 + 21 + static struct { 22 + u32 pid; 23 + u64 start; 24 + const char *filename; 25 + } fake_mmap_info[] = { 26 + { 100, 0x40000, "perf" }, 27 + { 100, 0x50000, "libc" }, 28 + { 100, 0xf0000, "[kernel]" }, 29 + { 200, 0x40000, "perf" }, 30 + { 200, 0x50000, "libc" }, 31 + { 200, 0xf0000, "[kernel]" }, 32 + { 300, 0x40000, "bash" }, 33 + { 300, 0x50000, "libc" }, 34 + { 300, 0xf0000, "[kernel]" }, 35 + }; 36 + 37 + struct fake_sym { 38 + u64 start; 39 + u64 length; 40 + const char *name; 41 + }; 42 + 43 + static struct fake_sym perf_syms[] = { 44 + { 700, 100, "main" }, 45 + { 800, 100, "run_command" }, 46 + { 900, 100, "cmd_record" }, 47 + }; 48 + 49 + static struct fake_sym bash_syms[] = { 50 + { 700, 100, "main" }, 51 + { 800, 100, "xmalloc" }, 52 + { 900, 100, "xfree" }, 53 + }; 54 + 55 + static struct fake_sym libc_syms[] = { 56 + { 700, 100, "malloc" }, 57 + { 800, 100, "free" }, 58 + { 900, 100, "realloc" }, 59 + }; 60 + 61 + static struct fake_sym kernel_syms[] = { 62 + { 700, 100, "schedule" }, 63 + { 800, 100, "page_fault" }, 64 + { 900, 100, "sys_perf_event_open" }, 65 + }; 66 + 67 + static struct { 68 + const char *dso_name; 69 + struct fake_sym *syms; 70 + size_t nr_syms; 71 + } fake_symbols[] = { 72 + { "perf", perf_syms, ARRAY_SIZE(perf_syms) }, 73 + { "bash", bash_syms, ARRAY_SIZE(bash_syms) }, 74 + { "libc", libc_syms, ARRAY_SIZE(libc_syms) }, 75 + { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) }, 76 + }; 77 + 78 + static struct machine *setup_fake_machine(struct machines *machines) 79 + { 80 + struct machine *machine = 
machines__find(machines, HOST_KERNEL_ID); 81 + size_t i; 82 + 83 + if (machine == NULL) { 84 + pr_debug("Not enough memory for machine setup\n"); 85 + return NULL; 86 + } 87 + 88 + for (i = 0; i < ARRAY_SIZE(fake_threads); i++) { 89 + struct thread *thread; 90 + 91 + thread = machine__findnew_thread(machine, fake_threads[i].pid); 92 + if (thread == NULL) 93 + goto out; 94 + 95 + thread__set_comm(thread, fake_threads[i].comm); 96 + } 97 + 98 + for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) { 99 + union perf_event fake_mmap_event = { 100 + .mmap = { 101 + .header = { .misc = PERF_RECORD_MISC_USER, }, 102 + .pid = fake_mmap_info[i].pid, 103 + .start = fake_mmap_info[i].start, 104 + .len = 0x1000ULL, 105 + .pgoff = 0ULL, 106 + }, 107 + }; 108 + 109 + strcpy(fake_mmap_event.mmap.filename, 110 + fake_mmap_info[i].filename); 111 + 112 + machine__process_mmap_event(machine, &fake_mmap_event); 113 + } 114 + 115 + for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) { 116 + size_t k; 117 + struct dso *dso; 118 + 119 + dso = __dsos__findnew(&machine->user_dsos, 120 + fake_symbols[i].dso_name); 121 + if (dso == NULL) 122 + goto out; 123 + 124 + /* emulate dso__load() */ 125 + dso__set_loaded(dso, MAP__FUNCTION); 126 + 127 + for (k = 0; k < fake_symbols[i].nr_syms; k++) { 128 + struct symbol *sym; 129 + struct fake_sym *fsym = &fake_symbols[i].syms[k]; 130 + 131 + sym = symbol__new(fsym->start, fsym->length, 132 + STB_GLOBAL, fsym->name); 133 + if (sym == NULL) 134 + goto out; 135 + 136 + symbols__insert(&dso->symbols[MAP__FUNCTION], sym); 137 + } 138 + } 139 + 140 + return machine; 141 + 142 + out: 143 + pr_debug("Not enough memory for machine setup\n"); 144 + machine__delete_threads(machine); 145 + machine__delete(machine); 146 + return NULL; 147 + } 148 + 149 + struct sample { 150 + u32 pid; 151 + u64 ip; 152 + struct thread *thread; 153 + struct map *map; 154 + struct symbol *sym; 155 + }; 156 + 157 + static struct sample fake_common_samples[] = { 158 + /* perf [kernel] 
schedule() */ 159 + { .pid = 100, .ip = 0xf0000 + 700, }, 160 + /* perf [perf] main() */ 161 + { .pid = 200, .ip = 0x40000 + 700, }, 162 + /* perf [perf] cmd_record() */ 163 + { .pid = 200, .ip = 0x40000 + 900, }, 164 + /* bash [bash] xmalloc() */ 165 + { .pid = 300, .ip = 0x40000 + 800, }, 166 + /* bash [libc] malloc() */ 167 + { .pid = 300, .ip = 0x50000 + 700, }, 168 + }; 169 + 170 + static struct sample fake_samples[][5] = { 171 + { 172 + /* perf [perf] run_command() */ 173 + { .pid = 100, .ip = 0x40000 + 800, }, 174 + /* perf [libc] malloc() */ 175 + { .pid = 100, .ip = 0x50000 + 700, }, 176 + /* perf [kernel] page_fault() */ 177 + { .pid = 100, .ip = 0xf0000 + 800, }, 178 + /* perf [kernel] sys_perf_event_open() */ 179 + { .pid = 200, .ip = 0xf0000 + 900, }, 180 + /* bash [libc] free() */ 181 + { .pid = 300, .ip = 0x50000 + 800, }, 182 + }, 183 + { 184 + /* perf [libc] free() */ 185 + { .pid = 200, .ip = 0x50000 + 800, }, 186 + /* bash [libc] malloc() */ 187 + { .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */ 188 + /* bash [bash] xfee() */ 189 + { .pid = 300, .ip = 0x40000 + 900, }, 190 + /* bash [libc] realloc() */ 191 + { .pid = 300, .ip = 0x50000 + 900, }, 192 + /* bash [kernel] page_fault() */ 193 + { .pid = 300, .ip = 0xf0000 + 800, }, 194 + }, 195 + }; 196 + 197 + static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) 198 + { 199 + struct perf_evsel *evsel; 200 + struct addr_location al; 201 + struct hist_entry *he; 202 + struct perf_sample sample = { .cpu = 0, }; 203 + size_t i = 0, k; 204 + 205 + /* 206 + * each evsel will have 10 samples - 5 common and 5 distinct. 207 + * However the second evsel also has a collapsed entry for 208 + * "bash [libc] malloc" so total 9 entries will be in the tree. 
209 + */ 210 + list_for_each_entry(evsel, &evlist->entries, node) { 211 + for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) { 212 + const union perf_event event = { 213 + .ip = { 214 + .header = { 215 + .misc = PERF_RECORD_MISC_USER, 216 + }, 217 + .pid = fake_common_samples[k].pid, 218 + .ip = fake_common_samples[k].ip, 219 + }, 220 + }; 221 + 222 + if (perf_event__preprocess_sample(&event, machine, &al, 223 + &sample, 0) < 0) 224 + goto out; 225 + 226 + he = __hists__add_entry(&evsel->hists, &al, NULL, 1); 227 + if (he == NULL) 228 + goto out; 229 + 230 + fake_common_samples[k].thread = al.thread; 231 + fake_common_samples[k].map = al.map; 232 + fake_common_samples[k].sym = al.sym; 233 + } 234 + 235 + for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) { 236 + const union perf_event event = { 237 + .ip = { 238 + .header = { 239 + .misc = PERF_RECORD_MISC_USER, 240 + }, 241 + .pid = fake_samples[i][k].pid, 242 + .ip = fake_samples[i][k].ip, 243 + }, 244 + }; 245 + 246 + if (perf_event__preprocess_sample(&event, machine, &al, 247 + &sample, 0) < 0) 248 + goto out; 249 + 250 + he = __hists__add_entry(&evsel->hists, &al, NULL, 1); 251 + if (he == NULL) 252 + goto out; 253 + 254 + fake_samples[i][k].thread = al.thread; 255 + fake_samples[i][k].map = al.map; 256 + fake_samples[i][k].sym = al.sym; 257 + } 258 + i++; 259 + } 260 + 261 + return 0; 262 + 263 + out: 264 + pr_debug("Not enough memory for adding a hist entry\n"); 265 + return -1; 266 + } 267 + 268 + static int find_sample(struct sample *samples, size_t nr_samples, 269 + struct thread *t, struct map *m, struct symbol *s) 270 + { 271 + while (nr_samples--) { 272 + if (samples->thread == t && samples->map == m && 273 + samples->sym == s) 274 + return 1; 275 + samples++; 276 + } 277 + return 0; 278 + } 279 + 280 + static int __validate_match(struct hists *hists) 281 + { 282 + size_t count = 0; 283 + struct rb_root *root; 284 + struct rb_node *node; 285 + 286 + /* 287 + * Only entries from fake_common_samples 
should have a pair. 288 + */ 289 + if (sort__need_collapse) 290 + root = &hists->entries_collapsed; 291 + else 292 + root = hists->entries_in; 293 + 294 + node = rb_first(root); 295 + while (node) { 296 + struct hist_entry *he; 297 + 298 + he = rb_entry(node, struct hist_entry, rb_node_in); 299 + 300 + if (hist_entry__has_pairs(he)) { 301 + if (find_sample(fake_common_samples, 302 + ARRAY_SIZE(fake_common_samples), 303 + he->thread, he->ms.map, he->ms.sym)) { 304 + count++; 305 + } else { 306 + pr_debug("Can't find the matched entry\n"); 307 + return -1; 308 + } 309 + } 310 + 311 + node = rb_next(node); 312 + } 313 + 314 + if (count != ARRAY_SIZE(fake_common_samples)) { 315 + pr_debug("Invalid count for matched entries: %zd of %zd\n", 316 + count, ARRAY_SIZE(fake_common_samples)); 317 + return -1; 318 + } 319 + 320 + return 0; 321 + } 322 + 323 + static int validate_match(struct hists *leader, struct hists *other) 324 + { 325 + return __validate_match(leader) || __validate_match(other); 326 + } 327 + 328 + static int __validate_link(struct hists *hists, int idx) 329 + { 330 + size_t count = 0; 331 + size_t count_pair = 0; 332 + size_t count_dummy = 0; 333 + struct rb_root *root; 334 + struct rb_node *node; 335 + 336 + /* 337 + * Leader hists (idx = 0) will have dummy entries from other, 338 + * and some entries will have no pair. However every entry 339 + * in other hists should have (dummy) pair. 
340 + */ 341 + if (sort__need_collapse) 342 + root = &hists->entries_collapsed; 343 + else 344 + root = hists->entries_in; 345 + 346 + node = rb_first(root); 347 + while (node) { 348 + struct hist_entry *he; 349 + 350 + he = rb_entry(node, struct hist_entry, rb_node_in); 351 + 352 + if (hist_entry__has_pairs(he)) { 353 + if (!find_sample(fake_common_samples, 354 + ARRAY_SIZE(fake_common_samples), 355 + he->thread, he->ms.map, he->ms.sym) && 356 + !find_sample(fake_samples[idx], 357 + ARRAY_SIZE(fake_samples[idx]), 358 + he->thread, he->ms.map, he->ms.sym)) { 359 + count_dummy++; 360 + } 361 + count_pair++; 362 + } else if (idx) { 363 + pr_debug("A entry from the other hists should have pair\n"); 364 + return -1; 365 + } 366 + 367 + count++; 368 + node = rb_next(node); 369 + } 370 + 371 + /* 372 + * Note that we have a entry collapsed in the other (idx = 1) hists. 373 + */ 374 + if (idx == 0) { 375 + if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) { 376 + pr_debug("Invalid count of dummy entries: %zd of %zd\n", 377 + count_dummy, ARRAY_SIZE(fake_samples[1]) - 1); 378 + return -1; 379 + } 380 + if (count != count_pair + ARRAY_SIZE(fake_samples[0])) { 381 + pr_debug("Invalid count of total leader entries: %zd of %zd\n", 382 + count, count_pair + ARRAY_SIZE(fake_samples[0])); 383 + return -1; 384 + } 385 + } else { 386 + if (count != count_pair) { 387 + pr_debug("Invalid count of total other entries: %zd of %zd\n", 388 + count, count_pair); 389 + return -1; 390 + } 391 + if (count_dummy > 0) { 392 + pr_debug("Other hists should not have dummy entries: %zd\n", 393 + count_dummy); 394 + return -1; 395 + } 396 + } 397 + 398 + return 0; 399 + } 400 + 401 + static int validate_link(struct hists *leader, struct hists *other) 402 + { 403 + return __validate_link(leader, 0) || __validate_link(other, 1); 404 + } 405 + 406 + static void print_hists(struct hists *hists) 407 + { 408 + int i = 0; 409 + struct rb_root *root; 410 + struct rb_node *node; 411 + 412 + if 
(sort__need_collapse) 413 + root = &hists->entries_collapsed; 414 + else 415 + root = hists->entries_in; 416 + 417 + pr_info("----- %s --------\n", __func__); 418 + node = rb_first(root); 419 + while (node) { 420 + struct hist_entry *he; 421 + 422 + he = rb_entry(node, struct hist_entry, rb_node_in); 423 + 424 + pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n", 425 + i, he->thread->comm, he->ms.map->dso->short_name, 426 + he->ms.sym->name, he->stat.period); 427 + 428 + i++; 429 + node = rb_next(node); 430 + } 431 + } 432 + 433 + int test__hists_link(void) 434 + { 435 + int err = -1; 436 + struct machines machines; 437 + struct machine *machine = NULL; 438 + struct perf_evsel *evsel, *first; 439 + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); 440 + 441 + if (evlist == NULL) 442 + return -ENOMEM; 443 + 444 + err = parse_events(evlist, "cpu-clock"); 445 + if (err) 446 + goto out; 447 + err = parse_events(evlist, "task-clock"); 448 + if (err) 449 + goto out; 450 + 451 + /* default sort order (comm,dso,sym) will be used */ 452 + setup_sorting(NULL, NULL); 453 + 454 + machines__init(&machines); 455 + 456 + /* setup threads/dso/map/symbols also */ 457 + machine = setup_fake_machine(&machines); 458 + if (!machine) 459 + goto out; 460 + 461 + if (verbose > 1) 462 + machine__fprintf(machine, stderr); 463 + 464 + /* process sample events */ 465 + err = add_hist_entries(evlist, machine); 466 + if (err < 0) 467 + goto out; 468 + 469 + list_for_each_entry(evsel, &evlist->entries, node) { 470 + hists__collapse_resort(&evsel->hists); 471 + 472 + if (verbose > 2) 473 + print_hists(&evsel->hists); 474 + } 475 + 476 + first = perf_evlist__first(evlist); 477 + evsel = perf_evlist__last(evlist); 478 + 479 + /* match common entries */ 480 + hists__match(&first->hists, &evsel->hists); 481 + err = validate_match(&first->hists, &evsel->hists); 482 + if (err) 483 + goto out; 484 + 485 + /* link common and/or dummy entries */ 486 + hists__link(&first->hists, 
&evsel->hists); 487 + err = validate_link(&first->hists, &evsel->hists); 488 + if (err) 489 + goto out; 490 + 491 + err = 0; 492 + 493 + out: 494 + /* tear down everything */ 495 + perf_evlist__delete(evlist); 496 + machines__exit(&machines); 497 + 498 + return err; 499 + }
+80 -18
tools/perf/tests/parse-events.c
··· 3 3 #include "evsel.h" 4 4 #include "evlist.h" 5 5 #include "sysfs.h" 6 + #include "debugfs.h" 6 7 #include "tests.h" 7 8 #include <linux/hw_breakpoint.h> 8 9 ··· 464 463 465 464 static int test__checkterms_simple(struct list_head *terms) 466 465 { 467 - struct parse_events__term *term; 466 + struct parse_events_term *term; 468 467 469 468 /* config=10 */ 470 - term = list_entry(terms->next, struct parse_events__term, list); 469 + term = list_entry(terms->next, struct parse_events_term, list); 471 470 TEST_ASSERT_VAL("wrong type term", 472 471 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG); 473 472 TEST_ASSERT_VAL("wrong type val", ··· 476 475 TEST_ASSERT_VAL("wrong config", !term->config); 477 476 478 477 /* config1 */ 479 - term = list_entry(term->list.next, struct parse_events__term, list); 478 + term = list_entry(term->list.next, struct parse_events_term, list); 480 479 TEST_ASSERT_VAL("wrong type term", 481 480 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1); 482 481 TEST_ASSERT_VAL("wrong type val", ··· 485 484 TEST_ASSERT_VAL("wrong config", !term->config); 486 485 487 486 /* config2=3 */ 488 - term = list_entry(term->list.next, struct parse_events__term, list); 487 + term = list_entry(term->list.next, struct parse_events_term, list); 489 488 TEST_ASSERT_VAL("wrong type term", 490 489 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2); 491 490 TEST_ASSERT_VAL("wrong type val", ··· 494 493 TEST_ASSERT_VAL("wrong config", !term->config); 495 494 496 495 /* umask=1*/ 497 - term = list_entry(term->list.next, struct parse_events__term, list); 496 + term = list_entry(term->list.next, struct parse_events_term, list); 498 497 TEST_ASSERT_VAL("wrong type term", 499 498 term->type_term == PARSE_EVENTS__TERM_TYPE_USER); 500 499 TEST_ASSERT_VAL("wrong type val", ··· 783 782 return 0; 784 783 } 785 784 786 - struct test__event_st { 785 + static int count_tracepoints(void) 786 + { 787 + char events_path[PATH_MAX]; 788 + struct dirent *events_ent; 789 + DIR 
*events_dir; 790 + int cnt = 0; 791 + 792 + scnprintf(events_path, PATH_MAX, "%s/tracing/events", 793 + debugfs_find_mountpoint()); 794 + 795 + events_dir = opendir(events_path); 796 + 797 + TEST_ASSERT_VAL("Can't open events dir", events_dir); 798 + 799 + while ((events_ent = readdir(events_dir))) { 800 + char sys_path[PATH_MAX]; 801 + struct dirent *sys_ent; 802 + DIR *sys_dir; 803 + 804 + if (!strcmp(events_ent->d_name, ".") 805 + || !strcmp(events_ent->d_name, "..") 806 + || !strcmp(events_ent->d_name, "enable") 807 + || !strcmp(events_ent->d_name, "header_event") 808 + || !strcmp(events_ent->d_name, "header_page")) 809 + continue; 810 + 811 + scnprintf(sys_path, PATH_MAX, "%s/%s", 812 + events_path, events_ent->d_name); 813 + 814 + sys_dir = opendir(sys_path); 815 + TEST_ASSERT_VAL("Can't open sys dir", sys_dir); 816 + 817 + while ((sys_ent = readdir(sys_dir))) { 818 + if (!strcmp(sys_ent->d_name, ".") 819 + || !strcmp(sys_ent->d_name, "..") 820 + || !strcmp(sys_ent->d_name, "enable") 821 + || !strcmp(sys_ent->d_name, "filter")) 822 + continue; 823 + 824 + cnt++; 825 + } 826 + 827 + closedir(sys_dir); 828 + } 829 + 830 + closedir(events_dir); 831 + return cnt; 832 + } 833 + 834 + static int test__all_tracepoints(struct perf_evlist *evlist) 835 + { 836 + TEST_ASSERT_VAL("wrong events count", 837 + count_tracepoints() == evlist->nr_entries); 838 + 839 + return test__checkevent_tracepoint_multi(evlist); 840 + } 841 + 842 + struct evlist_test { 787 843 const char *name; 788 844 __u32 type; 789 845 int (*check)(struct perf_evlist *evlist); 790 846 }; 791 847 792 - static struct test__event_st test__events[] = { 848 + static struct evlist_test test__events[] = { 793 849 [0] = { 794 850 .name = "syscalls:sys_enter_open", 795 851 .check = test__checkevent_tracepoint, ··· 979 921 .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles", 980 922 .check = test__group5, 981 923 }, 924 + [33] = { 925 + .name = "*:*", 926 + .check = test__all_tracepoints, 927 + }, 
982 928 }; 983 929 984 - static struct test__event_st test__events_pmu[] = { 930 + static struct evlist_test test__events_pmu[] = { 985 931 [0] = { 986 932 .name = "cpu/config=10,config1,config2=3,period=1000/u", 987 933 .check = test__checkevent_pmu, ··· 996 934 }, 997 935 }; 998 936 999 - struct test__term { 937 + struct terms_test { 1000 938 const char *str; 1001 939 __u32 type; 1002 940 int (*check)(struct list_head *terms); 1003 941 }; 1004 942 1005 - static struct test__term test__terms[] = { 943 + static struct terms_test test__terms[] = { 1006 944 [0] = { 1007 945 .str = "config=10,config1,config2=3,umask=1", 1008 946 .check = test__checkterms_simple, 1009 947 }, 1010 948 }; 1011 949 1012 - static int test_event(struct test__event_st *e) 950 + static int test_event(struct evlist_test *e) 1013 951 { 1014 952 struct perf_evlist *evlist; 1015 953 int ret; ··· 1018 956 if (evlist == NULL) 1019 957 return -ENOMEM; 1020 958 1021 - ret = parse_events(evlist, e->name, 0); 959 + ret = parse_events(evlist, e->name); 1022 960 if (ret) { 1023 961 pr_debug("failed to parse event '%s', err %d\n", 1024 962 e->name, ret); ··· 1031 969 return ret; 1032 970 } 1033 971 1034 - static int test_events(struct test__event_st *events, unsigned cnt) 972 + static int test_events(struct evlist_test *events, unsigned cnt) 1035 973 { 1036 974 int ret1, ret2 = 0; 1037 975 unsigned i; 1038 976 1039 977 for (i = 0; i < cnt; i++) { 1040 - struct test__event_st *e = &events[i]; 978 + struct evlist_test *e = &events[i]; 1041 979 1042 980 pr_debug("running test %d '%s'\n", i, e->name); 1043 981 ret1 = test_event(e); ··· 1048 986 return ret2; 1049 987 } 1050 988 1051 - static int test_term(struct test__term *t) 989 + static int test_term(struct terms_test *t) 1052 990 { 1053 991 struct list_head *terms; 1054 992 int ret; ··· 1072 1010 return ret; 1073 1011 } 1074 1012 1075 - static int test_terms(struct test__term *terms, unsigned cnt) 1013 + static int test_terms(struct terms_test *terms, 
unsigned cnt) 1076 1014 { 1077 1015 int ret = 0; 1078 1016 unsigned i; 1079 1017 1080 1018 for (i = 0; i < cnt; i++) { 1081 - struct test__term *t = &terms[i]; 1019 + struct terms_test *t = &terms[i]; 1082 1020 1083 1021 pr_debug("running test %d '%s'\n", i, t->str); 1084 1022 ret = test_term(t); ··· 1129 1067 1130 1068 while (!ret && (ent = readdir(dir))) { 1131 1069 #define MAX_NAME 100 1132 - struct test__event_st e; 1070 + struct evlist_test e; 1133 1071 char name[MAX_NAME]; 1134 1072 1135 1073 if (!strcmp(ent->d_name, ".") ||
+3 -8
tools/perf/tests/pmu.c
··· 19 19 { "krava23", "config2:28-29,38\n", }, 20 20 }; 21 21 22 - #define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format)) 23 - 24 22 /* Simulated users input. */ 25 - static struct parse_events__term test_terms[] = { 23 + static struct parse_events_term test_terms[] = { 26 24 { 27 25 .config = (char *) "krava01", 28 26 .val.num = 15, ··· 76 78 .type_term = PARSE_EVENTS__TERM_TYPE_USER, 77 79 }, 78 80 }; 79 - #define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term)) 80 81 81 82 /* 82 83 * Prepare format directory data, exported by kernel ··· 90 93 if (!mkdtemp(dir)) 91 94 return NULL; 92 95 93 - for (i = 0; i < TEST_FORMATS_CNT; i++) { 96 + for (i = 0; i < ARRAY_SIZE(test_formats); i++) { 94 97 static char name[PATH_MAX]; 95 98 struct test_format *format = &test_formats[i]; 96 99 FILE *file; ··· 127 130 static LIST_HEAD(terms); 128 131 unsigned int i; 129 132 130 - for (i = 0; i < TERMS_CNT; i++) 133 + for (i = 0; i < ARRAY_SIZE(test_terms); i++) 131 134 list_add_tail(&test_terms[i].list, &terms); 132 135 133 136 return &terms; 134 137 } 135 - 136 - #undef TERMS_CNT 137 138 138 139 int test__pmu(void) 139 140 {
+23
tools/perf/tests/python-use.c
··· 1 + /* 2 + * Just test if we can load the python binding. 3 + */ 4 + 5 + #include <stdio.h> 6 + #include <stdlib.h> 7 + #include "tests.h" 8 + 9 + extern int verbose; 10 + 11 + int test__python_use(void) 12 + { 13 + char *cmd; 14 + int ret; 15 + 16 + if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s", 17 + PYTHONPATH, PYTHON, verbose ? "" : "2> /dev/null") < 0) 18 + return -1; 19 + 20 + ret = system(cmd) ? -1 : 0; 21 + free(cmd); 22 + return ret; 23 + }
+8
tools/perf/tests/tests.h
··· 1 1 #ifndef TESTS_H 2 2 #define TESTS_H 3 3 4 + enum { 5 + TEST_OK = 0, 6 + TEST_FAIL = -1, 7 + TEST_SKIP = -2, 8 + }; 9 + 4 10 /* Tests */ 5 11 int test__vmlinux_matches_kallsyms(void); 6 12 int test__open_syscall_event(void); ··· 21 15 int test__attr(void); 22 16 int test__dso_data(void); 23 17 int test__parse_events(void); 18 + int test__hists_link(void); 19 + int test__python_use(void); 24 20 25 21 #endif /* TESTS_H */
+2 -1
tools/perf/tests/vmlinux-kallsyms.c
··· 101 101 */ 102 102 if (machine__load_vmlinux_path(&vmlinux, type, 103 103 vmlinux_matches_kallsyms_filter) <= 0) { 104 - pr_debug("machine__load_vmlinux_path "); 104 + pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n"); 105 + err = TEST_SKIP; 105 106 goto out; 106 107 } 107 108
+2 -2
tools/perf/ui/browser.c
··· 471 471 return row; 472 472 } 473 473 474 - static struct ui_browser__colorset { 474 + static struct ui_browser_colorset { 475 475 const char *name, *fg, *bg; 476 476 int colorset; 477 477 } ui_browser__colorsets[] = { ··· 706 706 perf_config(ui_browser__color_config, NULL); 707 707 708 708 while (ui_browser__colorsets[i].name) { 709 - struct ui_browser__colorset *c = &ui_browser__colorsets[i++]; 709 + struct ui_browser_colorset *c = &ui_browser__colorsets[i++]; 710 710 sltt_set_color(c->colorset, c->name, c->fg, c->bg); 711 711 } 712 712
+17 -16
tools/perf/ui/browsers/annotate.c
··· 182 182 ab->selection = dl; 183 183 } 184 184 185 + static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym) 186 + { 187 + if (!dl || !dl->ins || !ins__is_jump(dl->ins) 188 + || !disasm_line__has_offset(dl) 189 + || dl->ops.target.offset >= symbol__size(sym)) 190 + return false; 191 + 192 + return true; 193 + } 194 + 185 195 static void annotate_browser__draw_current_jump(struct ui_browser *browser) 186 196 { 187 197 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); ··· 205 195 if (strstr(sym->name, "@plt")) 206 196 return; 207 197 208 - if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) || 209 - !disasm_line__has_offset(cursor)) 198 + if (!disasm_line__is_valid_jump(cursor, sym)) 210 199 return; 211 200 212 201 target = ab->offsets[cursor->ops.target.offset]; ··· 797 788 struct disasm_line *dl = browser->offsets[offset], *dlt; 798 789 struct browser_disasm_line *bdlt; 799 790 800 - if (!dl || !dl->ins || !ins__is_jump(dl->ins) || 801 - !disasm_line__has_offset(dl)) 791 + if (!disasm_line__is_valid_jump(dl, sym)) 802 792 continue; 803 - 804 - if (dl->ops.target.offset >= size) { 805 - ui__error("jump to after symbol!\n" 806 - "size: %zx, jump target: %" PRIx64, 807 - size, dl->ops.target.offset); 808 - continue; 809 - } 810 793 811 794 dlt = browser->offsets[dl->ops.target.offset]; 812 795 /* ··· 922 921 923 922 #define ANNOTATE_CFG(n) \ 924 923 { .name = #n, .value = &annotate_browser__opts.n, } 925 - 924 + 926 925 /* 927 926 * Keep the entries sorted, they are bsearch'ed 928 927 */ 929 - static struct annotate__config { 928 + static struct annotate_config { 930 929 const char *name; 931 930 bool *value; 932 931 } annotate__configs[] = { ··· 940 939 941 940 static int annotate_config__cmp(const void *name, const void *cfgp) 942 941 { 943 - const struct annotate__config *cfg = cfgp; 942 + const struct annotate_config *cfg = cfgp; 944 943 945 944 return strcmp(name, cfg->name); 946 945 } ··· 948 947 
static int annotate__config(const char *var, const char *value, 949 948 void *data __maybe_unused) 950 949 { 951 - struct annotate__config *cfg; 950 + struct annotate_config *cfg; 952 951 const char *name; 953 952 954 953 if (prefixcmp(var, "annotate.") != 0) ··· 956 955 957 956 name = var + 9; 958 957 cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs), 959 - sizeof(struct annotate__config), annotate_config__cmp); 958 + sizeof(struct annotate_config), annotate_config__cmp); 960 959 961 960 if (cfg == NULL) 962 961 return -1;
+5 -222
tools/perf/ui/gtk/browser.c
··· 8 8 9 9 #include <signal.h> 10 10 11 - #define MAX_COLUMNS 32 12 - 13 - static void perf_gtk__signal(int sig) 11 + void perf_gtk__signal(int sig) 14 12 { 15 13 perf_gtk__exit(false); 16 14 psignal(sig, "perf"); 17 15 } 18 16 19 - static void perf_gtk__resize_window(GtkWidget *window) 17 + void perf_gtk__resize_window(GtkWidget *window) 20 18 { 21 19 GdkRectangle rect; 22 20 GdkScreen *screen; ··· 34 36 gtk_window_resize(GTK_WINDOW(window), width, height); 35 37 } 36 38 37 - static const char *perf_gtk__get_percent_color(double percent) 39 + const char *perf_gtk__get_percent_color(double percent) 38 40 { 39 41 if (percent >= MIN_RED) 40 42 return "<span fgcolor='red'>"; ··· 43 45 return NULL; 44 46 } 45 47 46 - #define HPP__COLOR_FN(_name, _field) \ 47 - static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ 48 - struct hist_entry *he) \ 49 - { \ 50 - struct hists *hists = he->hists; \ 51 - double percent = 100.0 * he->stat._field / hists->stats.total_period; \ 52 - const char *markup; \ 53 - int ret = 0; \ 54 - \ 55 - markup = perf_gtk__get_percent_color(percent); \ 56 - if (markup) \ 57 - ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ 58 - ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ 59 - if (markup) \ 60 - ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \ 61 - \ 62 - return ret; \ 63 - } 64 - 65 - HPP__COLOR_FN(overhead, period) 66 - HPP__COLOR_FN(overhead_sys, period_sys) 67 - HPP__COLOR_FN(overhead_us, period_us) 68 - HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) 69 - HPP__COLOR_FN(overhead_guest_us, period_guest_us) 70 - 71 - #undef HPP__COLOR_FN 72 - 73 - void perf_gtk__init_hpp(void) 74 - { 75 - perf_hpp__column_enable(PERF_HPP__OVERHEAD); 76 - 77 - perf_hpp__init(); 78 - 79 - perf_hpp__format[PERF_HPP__OVERHEAD].color = 80 - perf_gtk__hpp_color_overhead; 81 - perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = 82 - perf_gtk__hpp_color_overhead_sys; 83 - 
perf_hpp__format[PERF_HPP__OVERHEAD_US].color = 84 - perf_gtk__hpp_color_overhead_us; 85 - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = 86 - perf_gtk__hpp_color_overhead_guest_sys; 87 - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = 88 - perf_gtk__hpp_color_overhead_guest_us; 89 - } 90 - 91 - static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) 92 - { 93 - struct perf_hpp_fmt *fmt; 94 - GType col_types[MAX_COLUMNS]; 95 - GtkCellRenderer *renderer; 96 - struct sort_entry *se; 97 - GtkListStore *store; 98 - struct rb_node *nd; 99 - GtkWidget *view; 100 - int col_idx; 101 - int nr_cols; 102 - char s[512]; 103 - 104 - struct perf_hpp hpp = { 105 - .buf = s, 106 - .size = sizeof(s), 107 - }; 108 - 109 - nr_cols = 0; 110 - 111 - perf_hpp__for_each_format(fmt) 112 - col_types[nr_cols++] = G_TYPE_STRING; 113 - 114 - list_for_each_entry(se, &hist_entry__sort_list, list) { 115 - if (se->elide) 116 - continue; 117 - 118 - col_types[nr_cols++] = G_TYPE_STRING; 119 - } 120 - 121 - store = gtk_list_store_newv(nr_cols, col_types); 122 - 123 - view = gtk_tree_view_new(); 124 - 125 - renderer = gtk_cell_renderer_text_new(); 126 - 127 - col_idx = 0; 128 - 129 - perf_hpp__for_each_format(fmt) { 130 - fmt->header(&hpp); 131 - gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), 132 - -1, s, 133 - renderer, "markup", 134 - col_idx++, NULL); 135 - } 136 - 137 - list_for_each_entry(se, &hist_entry__sort_list, list) { 138 - if (se->elide) 139 - continue; 140 - 141 - gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), 142 - -1, se->se_header, 143 - renderer, "text", 144 - col_idx++, NULL); 145 - } 146 - 147 - gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); 148 - 149 - g_object_unref(GTK_TREE_MODEL(store)); 150 - 151 - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 152 - struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 153 - GtkTreeIter iter; 154 - 155 - if (h->filtered) 156 - 
continue; 157 - 158 - gtk_list_store_append(store, &iter); 159 - 160 - col_idx = 0; 161 - 162 - perf_hpp__for_each_format(fmt) { 163 - if (fmt->color) 164 - fmt->color(&hpp, h); 165 - else 166 - fmt->entry(&hpp, h); 167 - 168 - gtk_list_store_set(store, &iter, col_idx++, s, -1); 169 - } 170 - 171 - list_for_each_entry(se, &hist_entry__sort_list, list) { 172 - if (se->elide) 173 - continue; 174 - 175 - se->se_snprintf(h, s, ARRAY_SIZE(s), 176 - hists__col_len(hists, se->se_width_idx)); 177 - 178 - gtk_list_store_set(store, &iter, col_idx++, s, -1); 179 - } 180 - } 181 - 182 - gtk_container_add(GTK_CONTAINER(window), view); 183 - } 184 - 185 48 #ifdef HAVE_GTK_INFO_BAR 186 - static GtkWidget *perf_gtk__setup_info_bar(void) 49 + GtkWidget *perf_gtk__setup_info_bar(void) 187 50 { 188 51 GtkWidget *info_bar; 189 52 GtkWidget *label; ··· 71 212 } 72 213 #endif 73 214 74 - static GtkWidget *perf_gtk__setup_statusbar(void) 215 + GtkWidget *perf_gtk__setup_statusbar(void) 75 216 { 76 217 GtkWidget *stbar; 77 218 unsigned ctxid; ··· 84 225 pgctx->statbar_ctx_id = ctxid; 85 226 86 227 return stbar; 87 - } 88 - 89 - int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, 90 - const char *help, 91 - struct hist_browser_timer *hbt __maybe_unused) 92 - { 93 - struct perf_evsel *pos; 94 - GtkWidget *vbox; 95 - GtkWidget *notebook; 96 - GtkWidget *info_bar; 97 - GtkWidget *statbar; 98 - GtkWidget *window; 99 - 100 - signal(SIGSEGV, perf_gtk__signal); 101 - signal(SIGFPE, perf_gtk__signal); 102 - signal(SIGINT, perf_gtk__signal); 103 - signal(SIGQUIT, perf_gtk__signal); 104 - signal(SIGTERM, perf_gtk__signal); 105 - 106 - window = gtk_window_new(GTK_WINDOW_TOPLEVEL); 107 - 108 - gtk_window_set_title(GTK_WINDOW(window), "perf report"); 109 - 110 - g_signal_connect(window, "delete_event", gtk_main_quit, NULL); 111 - 112 - pgctx = perf_gtk__activate_context(window); 113 - if (!pgctx) 114 - return -1; 115 - 116 - vbox = gtk_vbox_new(FALSE, 0); 117 - 118 - notebook = 
gtk_notebook_new(); 119 - 120 - list_for_each_entry(pos, &evlist->entries, node) { 121 - struct hists *hists = &pos->hists; 122 - const char *evname = perf_evsel__name(pos); 123 - GtkWidget *scrolled_window; 124 - GtkWidget *tab_label; 125 - 126 - scrolled_window = gtk_scrolled_window_new(NULL, NULL); 127 - 128 - gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), 129 - GTK_POLICY_AUTOMATIC, 130 - GTK_POLICY_AUTOMATIC); 131 - 132 - perf_gtk__show_hists(scrolled_window, hists); 133 - 134 - tab_label = gtk_label_new(evname); 135 - 136 - gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); 137 - } 138 - 139 - gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); 140 - 141 - info_bar = perf_gtk__setup_info_bar(); 142 - if (info_bar) 143 - gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); 144 - 145 - statbar = perf_gtk__setup_statusbar(); 146 - gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); 147 - 148 - gtk_container_add(GTK_CONTAINER(window), vbox); 149 - 150 - gtk_widget_show_all(window); 151 - 152 - perf_gtk__resize_window(window); 153 - 154 - gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); 155 - 156 - ui_helpline__push(help); 157 - 158 - gtk_main(); 159 - 160 - perf_gtk__deactivate_context(&pgctx); 161 - 162 - return 0; 163 228 }
+8 -1
tools/perf/ui/gtk/gtk.h
··· 33 33 void perf_gtk__init_progress(void); 34 34 void perf_gtk__init_hpp(void); 35 35 36 - #ifndef HAVE_GTK_INFO_BAR 36 + void perf_gtk__signal(int sig); 37 + void perf_gtk__resize_window(GtkWidget *window); 38 + const char *perf_gtk__get_percent_color(double percent); 39 + GtkWidget *perf_gtk__setup_statusbar(void); 40 + 41 + #ifdef HAVE_GTK_INFO_BAR 42 + GtkWidget *perf_gtk__setup_info_bar(void); 43 + #else 37 44 static inline GtkWidget *perf_gtk__setup_info_bar(void) 38 45 { 39 46 return NULL;
+226
tools/perf/ui/gtk/hists.c
··· 1 + #include "../evlist.h" 2 + #include "../cache.h" 3 + #include "../evsel.h" 4 + #include "../sort.h" 5 + #include "../hist.h" 6 + #include "../helpline.h" 7 + #include "gtk.h" 8 + 9 + #define MAX_COLUMNS 32 10 + 11 + #define HPP__COLOR_FN(_name, _field) \ 12 + static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ 13 + struct hist_entry *he) \ 14 + { \ 15 + struct hists *hists = he->hists; \ 16 + double percent = 100.0 * he->stat._field / hists->stats.total_period; \ 17 + const char *markup; \ 18 + int ret = 0; \ 19 + \ 20 + markup = perf_gtk__get_percent_color(percent); \ 21 + if (markup) \ 22 + ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ 23 + ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ 24 + if (markup) \ 25 + ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \ 26 + \ 27 + return ret; \ 28 + } 29 + 30 + HPP__COLOR_FN(overhead, period) 31 + HPP__COLOR_FN(overhead_sys, period_sys) 32 + HPP__COLOR_FN(overhead_us, period_us) 33 + HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) 34 + HPP__COLOR_FN(overhead_guest_us, period_guest_us) 35 + 36 + #undef HPP__COLOR_FN 37 + 38 + 39 + void perf_gtk__init_hpp(void) 40 + { 41 + perf_hpp__column_enable(PERF_HPP__OVERHEAD); 42 + 43 + perf_hpp__init(); 44 + 45 + perf_hpp__format[PERF_HPP__OVERHEAD].color = 46 + perf_gtk__hpp_color_overhead; 47 + perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = 48 + perf_gtk__hpp_color_overhead_sys; 49 + perf_hpp__format[PERF_HPP__OVERHEAD_US].color = 50 + perf_gtk__hpp_color_overhead_us; 51 + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = 52 + perf_gtk__hpp_color_overhead_guest_sys; 53 + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = 54 + perf_gtk__hpp_color_overhead_guest_us; 55 + } 56 + 57 + static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) 58 + { 59 + struct perf_hpp_fmt *fmt; 60 + GType col_types[MAX_COLUMNS]; 61 + GtkCellRenderer *renderer; 62 + struct sort_entry *se; 63 + 
GtkListStore *store; 64 + struct rb_node *nd; 65 + GtkWidget *view; 66 + int col_idx; 67 + int nr_cols; 68 + char s[512]; 69 + 70 + struct perf_hpp hpp = { 71 + .buf = s, 72 + .size = sizeof(s), 73 + }; 74 + 75 + nr_cols = 0; 76 + 77 + perf_hpp__for_each_format(fmt) 78 + col_types[nr_cols++] = G_TYPE_STRING; 79 + 80 + list_for_each_entry(se, &hist_entry__sort_list, list) { 81 + if (se->elide) 82 + continue; 83 + 84 + col_types[nr_cols++] = G_TYPE_STRING; 85 + } 86 + 87 + store = gtk_list_store_newv(nr_cols, col_types); 88 + 89 + view = gtk_tree_view_new(); 90 + 91 + renderer = gtk_cell_renderer_text_new(); 92 + 93 + col_idx = 0; 94 + 95 + perf_hpp__for_each_format(fmt) { 96 + fmt->header(&hpp); 97 + 98 + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), 99 + -1, s, 100 + renderer, "markup", 101 + col_idx++, NULL); 102 + } 103 + 104 + list_for_each_entry(se, &hist_entry__sort_list, list) { 105 + if (se->elide) 106 + continue; 107 + 108 + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), 109 + -1, se->se_header, 110 + renderer, "text", 111 + col_idx++, NULL); 112 + } 113 + 114 + gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); 115 + 116 + g_object_unref(GTK_TREE_MODEL(store)); 117 + 118 + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 119 + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 120 + GtkTreeIter iter; 121 + 122 + if (h->filtered) 123 + continue; 124 + 125 + gtk_list_store_append(store, &iter); 126 + 127 + col_idx = 0; 128 + 129 + perf_hpp__for_each_format(fmt) { 130 + if (fmt->color) 131 + fmt->color(&hpp, h); 132 + else 133 + fmt->entry(&hpp, h); 134 + 135 + gtk_list_store_set(store, &iter, col_idx++, s, -1); 136 + } 137 + 138 + list_for_each_entry(se, &hist_entry__sort_list, list) { 139 + if (se->elide) 140 + continue; 141 + 142 + se->se_snprintf(h, s, ARRAY_SIZE(s), 143 + hists__col_len(hists, se->se_width_idx)); 144 + 145 + gtk_list_store_set(store, &iter, col_idx++, s, 
-1); 146 + } 147 + } 148 + 149 + gtk_container_add(GTK_CONTAINER(window), view); 150 + } 151 + 152 + int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, 153 + const char *help, 154 + struct hist_browser_timer *hbt __maybe_unused) 155 + { 156 + struct perf_evsel *pos; 157 + GtkWidget *vbox; 158 + GtkWidget *notebook; 159 + GtkWidget *info_bar; 160 + GtkWidget *statbar; 161 + GtkWidget *window; 162 + 163 + signal(SIGSEGV, perf_gtk__signal); 164 + signal(SIGFPE, perf_gtk__signal); 165 + signal(SIGINT, perf_gtk__signal); 166 + signal(SIGQUIT, perf_gtk__signal); 167 + signal(SIGTERM, perf_gtk__signal); 168 + 169 + window = gtk_window_new(GTK_WINDOW_TOPLEVEL); 170 + 171 + gtk_window_set_title(GTK_WINDOW(window), "perf report"); 172 + 173 + g_signal_connect(window, "delete_event", gtk_main_quit, NULL); 174 + 175 + pgctx = perf_gtk__activate_context(window); 176 + if (!pgctx) 177 + return -1; 178 + 179 + vbox = gtk_vbox_new(FALSE, 0); 180 + 181 + notebook = gtk_notebook_new(); 182 + 183 + gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); 184 + 185 + info_bar = perf_gtk__setup_info_bar(); 186 + if (info_bar) 187 + gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); 188 + 189 + statbar = perf_gtk__setup_statusbar(); 190 + gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); 191 + 192 + gtk_container_add(GTK_CONTAINER(window), vbox); 193 + 194 + list_for_each_entry(pos, &evlist->entries, node) { 195 + struct hists *hists = &pos->hists; 196 + const char *evname = perf_evsel__name(pos); 197 + GtkWidget *scrolled_window; 198 + GtkWidget *tab_label; 199 + 200 + scrolled_window = gtk_scrolled_window_new(NULL, NULL); 201 + 202 + gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), 203 + GTK_POLICY_AUTOMATIC, 204 + GTK_POLICY_AUTOMATIC); 205 + 206 + perf_gtk__show_hists(scrolled_window, hists); 207 + 208 + tab_label = gtk_label_new(evname); 209 + 210 + gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); 
211 + } 212 + 213 + gtk_widget_show_all(window); 214 + 215 + perf_gtk__resize_window(window); 216 + 217 + gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); 218 + 219 + ui_helpline__push(help); 220 + 221 + gtk_main(); 222 + 223 + perf_gtk__deactivate_context(&pgctx); 224 + 225 + return 0; 226 + }
+3 -3
tools/perf/ui/stdio/hist.c
··· 459 459 return ret; 460 460 } 461 461 462 - size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) 462 + size_t events_stats__fprintf(struct events_stats *stats, FILE *fp) 463 463 { 464 464 int i; 465 465 size_t ret = 0; ··· 467 467 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { 468 468 const char *name; 469 469 470 - if (hists->stats.nr_events[i] == 0) 470 + if (stats->nr_events[i] == 0) 471 471 continue; 472 472 473 473 name = perf_event__name(i); ··· 475 475 continue; 476 476 477 477 ret += fprintf(fp, "%16s events: %10d\n", name, 478 - hists->stats.nr_events[i]); 478 + stats->nr_events[i]); 479 479 } 480 480 481 481 return ret;
-11
tools/perf/ui/util.c
··· 52 52 return ret; 53 53 } 54 54 55 - int ui__error_paranoid(void) 56 - { 57 - return ui__error("Permission error - are you root?\n" 58 - "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" 59 - " -1 - Not paranoid at all\n" 60 - " 0 - Disallow raw tracepoint access for unpriv\n" 61 - " 1 - Disallow cpu events for unpriv\n" 62 - " 2 - Disallow kernel profiling for unpriv\n"); 63 - } 64 - 65 - 66 55 /** 67 56 * perf_error__register - Register error logging functions 68 57 * @eops: The pointer to error logging function struct
+2 -2
tools/perf/util/PERF-VERSION-GEN
··· 26 26 27 27 if test -r $GVF 28 28 then 29 - VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) 29 + VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF) 30 30 else 31 31 VC=unset 32 32 fi 33 33 test "$VN" = "$VC" || { 34 34 echo >&2 "PERF_VERSION = $VN" 35 - echo "PERF_VERSION = $VN" >$GVF 35 + echo "#define PERF_VERSION \"$VN\"" >$GVF 36 36 } 37 37 38 38
-1
tools/perf/util/debug.h
··· 16 16 17 17 int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); 18 18 int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); 19 - int ui__error_paranoid(void); 20 19 21 20 #endif /* __PERF_DEBUG_H */
+105 -3
tools/perf/util/evsel.c
··· 22 22 #include <linux/perf_event.h> 23 23 #include "perf_regs.h" 24 24 25 + static struct { 26 + bool sample_id_all; 27 + bool exclude_guest; 28 + } perf_missing_features; 29 + 25 30 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 26 31 27 32 static int __perf_evsel__sample_size(u64 sample_type) ··· 468 463 struct perf_event_attr *attr = &evsel->attr; 469 464 int track = !evsel->idx; /* only the first counter needs these */ 470 465 471 - attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; 466 + attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; 472 467 attr->inherit = !opts->no_inherit; 473 468 474 469 perf_evsel__set_sample_bit(evsel, IP); ··· 518 513 if (opts->period) 519 514 perf_evsel__set_sample_bit(evsel, PERIOD); 520 515 521 - if (!opts->sample_id_all_missing && 516 + if (!perf_missing_features.sample_id_all && 522 517 (opts->sample_time || !opts->no_inherit || 523 518 perf_target__has_cpu(&opts->target))) 524 519 perf_evsel__set_sample_bit(evsel, TIME); ··· 766 761 pid = evsel->cgrp->fd; 767 762 } 768 763 764 + fallback_missing_features: 765 + if (perf_missing_features.exclude_guest) 766 + evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; 767 + retry_sample_id: 768 + if (perf_missing_features.sample_id_all) 769 + evsel->attr.sample_id_all = 0; 770 + 769 771 for (cpu = 0; cpu < cpus->nr; cpu++) { 770 772 771 773 for (thread = 0; thread < threads->nr; thread++) { ··· 789 777 group_fd, flags); 790 778 if (FD(evsel, cpu, thread) < 0) { 791 779 err = -errno; 792 - goto out_close; 780 + goto try_fallback; 793 781 } 794 782 } 795 783 } 796 784 797 785 return 0; 786 + 787 + try_fallback: 788 + if (err != -EINVAL || cpu > 0 || thread > 0) 789 + goto out_close; 790 + 791 + if (!perf_missing_features.exclude_guest && 792 + (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { 793 + perf_missing_features.exclude_guest = true; 794 + goto fallback_missing_features; 795 + } else if (!perf_missing_features.sample_id_all) { 
796 + perf_missing_features.sample_id_all = true; 797 + goto retry_sample_id; 798 + } 798 799 799 800 out_close: 800 801 do { ··· 1377 1352 1378 1353 fputc('\n', fp); 1379 1354 return ++printed; 1355 + } 1356 + 1357 + bool perf_evsel__fallback(struct perf_evsel *evsel, int err, 1358 + char *msg, size_t msgsize) 1359 + { 1360 + if ((err == ENOENT || err == ENXIO) && 1361 + evsel->attr.type == PERF_TYPE_HARDWARE && 1362 + evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { 1363 + /* 1364 + * If it's cycles then fall back to hrtimer based 1365 + * cpu-clock-tick sw counter, which is always available even if 1366 + * no PMU support. 1367 + * 1368 + * PPC returns ENXIO until 2.6.37 (behavior changed with commit 1369 + * b0a873e). 1370 + */ 1371 + scnprintf(msg, msgsize, "%s", 1372 + "The cycles event is not supported, trying to fall back to cpu-clock-ticks"); 1373 + 1374 + evsel->attr.type = PERF_TYPE_SOFTWARE; 1375 + evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK; 1376 + 1377 + free(evsel->name); 1378 + evsel->name = NULL; 1379 + return true; 1380 + } 1381 + 1382 + return false; 1383 + } 1384 + 1385 + int perf_evsel__open_strerror(struct perf_evsel *evsel, 1386 + struct perf_target *target, 1387 + int err, char *msg, size_t size) 1388 + { 1389 + switch (err) { 1390 + case EPERM: 1391 + case EACCES: 1392 + return scnprintf(msg, size, "%s", 1393 + "You may not have permission to collect %sstats.\n" 1394 + "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" 1395 + " -1 - Not paranoid at all\n" 1396 + " 0 - Disallow raw tracepoint access for unpriv\n" 1397 + " 1 - Disallow cpu events for unpriv\n" 1398 + " 2 - Disallow kernel profiling for unpriv", 1399 + target->system_wide ? 
"system-wide " : ""); 1400 + case ENOENT: 1401 + return scnprintf(msg, size, "The %s event is not supported.", 1402 + perf_evsel__name(evsel)); 1403 + case EMFILE: 1404 + return scnprintf(msg, size, "%s", 1405 + "Too many events are opened.\n" 1406 + "Try again after reducing the number of events."); 1407 + case ENODEV: 1408 + if (target->cpu_list) 1409 + return scnprintf(msg, size, "%s", 1410 + "No such device - did you specify an out-of-range profile CPU?\n"); 1411 + break; 1412 + case EOPNOTSUPP: 1413 + if (evsel->attr.precise_ip) 1414 + return scnprintf(msg, size, "%s", 1415 + "\'precise\' request may not be supported. Try removing 'p' modifier."); 1416 + #if defined(__i386__) || defined(__x86_64__) 1417 + if (evsel->attr.type == PERF_TYPE_HARDWARE) 1418 + return scnprintf(msg, size, "%s", 1419 + "No hardware sampling interrupt available.\n" 1420 + "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); 1421 + #endif 1422 + break; 1423 + default: 1424 + break; 1425 + } 1426 + 1427 + return scnprintf(msg, size, 1428 + "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n" 1429 + "/bin/dmesg may provide additional information.\n" 1430 + "No CONFIG_PERF_EVENTS=y kernel support configured?\n", 1431 + err, strerror(err), perf_evsel__name(evsel)); 1380 1432 }
+6
tools/perf/util/evsel.h
··· 251 251 252 252 int perf_evsel__fprintf(struct perf_evsel *evsel, 253 253 struct perf_attr_details *details, FILE *fp); 254 + 255 + bool perf_evsel__fallback(struct perf_evsel *evsel, int err, 256 + char *msg, size_t msgsize); 257 + int perf_evsel__open_strerror(struct perf_evsel *evsel, 258 + struct perf_target *target, 259 + int err, char *msg, size_t size); 254 260 #endif /* __PERF_EVSEL_H */
+42 -33
tools/perf/util/header.c
··· 148 148 u32 len; 149 149 char *buf; 150 150 151 - sz = read(fd, &len, sizeof(len)); 151 + sz = readn(fd, &len, sizeof(len)); 152 152 if (sz < (ssize_t)sizeof(len)) 153 153 return NULL; 154 154 ··· 159 159 if (!buf) 160 160 return NULL; 161 161 162 - ret = read(fd, buf, len); 162 + ret = readn(fd, buf, len); 163 163 if (ret == (ssize_t)len) { 164 164 /* 165 165 * strings are padded by zeroes ··· 287 287 struct perf_session *session = container_of(header, 288 288 struct perf_session, header); 289 289 struct rb_node *nd; 290 - int err = machine__write_buildid_table(&session->host_machine, fd); 290 + int err = machine__write_buildid_table(&session->machines.host, fd); 291 291 292 292 if (err) 293 293 return err; 294 294 295 - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 295 + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { 296 296 struct machine *pos = rb_entry(nd, struct machine, rb_node); 297 297 err = machine__write_buildid_table(pos, fd); 298 298 if (err) ··· 448 448 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) 449 449 return -1; 450 450 451 - ret = machine__cache_build_ids(&session->host_machine, debugdir); 451 + ret = machine__cache_build_ids(&session->machines.host, debugdir); 452 452 453 - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 453 + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { 454 454 struct machine *pos = rb_entry(nd, struct machine, rb_node); 455 455 ret |= machine__cache_build_ids(pos, debugdir); 456 456 } ··· 467 467 static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) 468 468 { 469 469 struct rb_node *nd; 470 - bool ret = machine__read_build_ids(&session->host_machine, with_hits); 470 + bool ret = machine__read_build_ids(&session->machines.host, with_hits); 471 471 472 - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 472 + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { 473 473 
struct machine *pos = rb_entry(nd, struct machine, rb_node); 474 474 ret |= machine__read_build_ids(pos, with_hits); 475 475 } ··· 1051 1051 struct perf_pmu *pmu = NULL; 1052 1052 off_t offset = lseek(fd, 0, SEEK_CUR); 1053 1053 __u32 pmu_num = 0; 1054 + int ret; 1054 1055 1055 1056 /* write real pmu_num later */ 1056 - do_write(fd, &pmu_num, sizeof(pmu_num)); 1057 + ret = do_write(fd, &pmu_num, sizeof(pmu_num)); 1058 + if (ret < 0) 1059 + return ret; 1057 1060 1058 1061 while ((pmu = perf_pmu__scan(pmu))) { 1059 1062 if (!pmu->name) 1060 1063 continue; 1061 1064 pmu_num++; 1062 - do_write(fd, &pmu->type, sizeof(pmu->type)); 1063 - do_write_string(fd, pmu->name); 1065 + 1066 + ret = do_write(fd, &pmu->type, sizeof(pmu->type)); 1067 + if (ret < 0) 1068 + return ret; 1069 + 1070 + ret = do_write_string(fd, pmu->name); 1071 + if (ret < 0) 1072 + return ret; 1064 1073 } 1065 1074 1066 1075 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { ··· 1218 1209 size_t msz; 1219 1210 1220 1211 /* number of events */ 1221 - ret = read(fd, &nre, sizeof(nre)); 1212 + ret = readn(fd, &nre, sizeof(nre)); 1222 1213 if (ret != (ssize_t)sizeof(nre)) 1223 1214 goto error; 1224 1215 1225 1216 if (ph->needs_swap) 1226 1217 nre = bswap_32(nre); 1227 1218 1228 - ret = read(fd, &sz, sizeof(sz)); 1219 + ret = readn(fd, &sz, sizeof(sz)); 1229 1220 if (ret != (ssize_t)sizeof(sz)) 1230 1221 goto error; 1231 1222 ··· 1253 1244 * must read entire on-file attr struct to 1254 1245 * sync up with layout. 
1255 1246 */ 1256 - ret = read(fd, buf, sz); 1247 + ret = readn(fd, buf, sz); 1257 1248 if (ret != (ssize_t)sz) 1258 1249 goto error; 1259 1250 ··· 1262 1253 1263 1254 memcpy(&evsel->attr, buf, msz); 1264 1255 1265 - ret = read(fd, &nr, sizeof(nr)); 1256 + ret = readn(fd, &nr, sizeof(nr)); 1266 1257 if (ret != (ssize_t)sizeof(nr)) 1267 1258 goto error; 1268 1259 ··· 1283 1274 evsel->id = id; 1284 1275 1285 1276 for (j = 0 ; j < nr; j++) { 1286 - ret = read(fd, id, sizeof(*id)); 1277 + ret = readn(fd, id, sizeof(*id)); 1287 1278 if (ret != (ssize_t)sizeof(*id)) 1288 1279 goto error; 1289 1280 if (ph->needs_swap) ··· 1515 1506 while (offset < limit) { 1516 1507 ssize_t len; 1517 1508 1518 - if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1509 + if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1519 1510 return -1; 1520 1511 1521 1512 if (header->needs_swap) 1522 1513 perf_event_header__bswap(&old_bev.header); 1523 1514 1524 1515 len = old_bev.header.size - sizeof(old_bev); 1525 - if (read(input, filename, len) != len) 1516 + if (readn(input, filename, len) != len) 1526 1517 return -1; 1527 1518 1528 1519 bev.header = old_bev.header; ··· 1557 1548 while (offset < limit) { 1558 1549 ssize_t len; 1559 1550 1560 - if (read(input, &bev, sizeof(bev)) != sizeof(bev)) 1551 + if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) 1561 1552 goto out; 1562 1553 1563 1554 if (header->needs_swap) 1564 1555 perf_event_header__bswap(&bev.header); 1565 1556 1566 1557 len = bev.header.size - sizeof(bev); 1567 - if (read(input, filename, len) != len) 1558 + if (readn(input, filename, len) != len) 1568 1559 goto out; 1569 1560 /* 1570 1561 * The a1645ce1 changeset: ··· 1650 1641 size_t ret; 1651 1642 u32 nr; 1652 1643 1653 - ret = read(fd, &nr, sizeof(nr)); 1644 + ret = readn(fd, &nr, sizeof(nr)); 1654 1645 if (ret != sizeof(nr)) 1655 1646 return -1; 1656 1647 ··· 1659 1650 1660 1651 ph->env.nr_cpus_online = nr; 1661 1652 1662 - ret = read(fd, &nr, 
sizeof(nr)); 1653 + ret = readn(fd, &nr, sizeof(nr)); 1663 1654 if (ret != sizeof(nr)) 1664 1655 return -1; 1665 1656 ··· 1693 1684 uint64_t mem; 1694 1685 size_t ret; 1695 1686 1696 - ret = read(fd, &mem, sizeof(mem)); 1687 + ret = readn(fd, &mem, sizeof(mem)); 1697 1688 if (ret != sizeof(mem)) 1698 1689 return -1; 1699 1690 ··· 1765 1756 u32 nr, i; 1766 1757 struct strbuf sb; 1767 1758 1768 - ret = read(fd, &nr, sizeof(nr)); 1759 + ret = readn(fd, &nr, sizeof(nr)); 1769 1760 if (ret != sizeof(nr)) 1770 1761 return -1; 1771 1762 ··· 1801 1792 char *str; 1802 1793 struct strbuf sb; 1803 1794 1804 - ret = read(fd, &nr, sizeof(nr)); 1795 + ret = readn(fd, &nr, sizeof(nr)); 1805 1796 if (ret != sizeof(nr)) 1806 1797 return -1; 1807 1798 ··· 1822 1813 } 1823 1814 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 1824 1815 1825 - ret = read(fd, &nr, sizeof(nr)); 1816 + ret = readn(fd, &nr, sizeof(nr)); 1826 1817 if (ret != sizeof(nr)) 1827 1818 return -1; 1828 1819 ··· 1859 1850 struct strbuf sb; 1860 1851 1861 1852 /* nr nodes */ 1862 - ret = read(fd, &nr, sizeof(nr)); 1853 + ret = readn(fd, &nr, sizeof(nr)); 1863 1854 if (ret != sizeof(nr)) 1864 1855 goto error; 1865 1856 ··· 1871 1862 1872 1863 for (i = 0; i < nr; i++) { 1873 1864 /* node number */ 1874 - ret = read(fd, &node, sizeof(node)); 1865 + ret = readn(fd, &node, sizeof(node)); 1875 1866 if (ret != sizeof(node)) 1876 1867 goto error; 1877 1868 1878 - ret = read(fd, &mem_total, sizeof(u64)); 1869 + ret = readn(fd, &mem_total, sizeof(u64)); 1879 1870 if (ret != sizeof(u64)) 1880 1871 goto error; 1881 1872 1882 - ret = read(fd, &mem_free, sizeof(u64)); 1873 + ret = readn(fd, &mem_free, sizeof(u64)); 1883 1874 if (ret != sizeof(u64)) 1884 1875 goto error; 1885 1876 ··· 1918 1909 u32 type; 1919 1910 struct strbuf sb; 1920 1911 1921 - ret = read(fd, &pmu_num, sizeof(pmu_num)); 1912 + ret = readn(fd, &pmu_num, sizeof(pmu_num)); 1922 1913 if (ret != sizeof(pmu_num)) 1923 1914 return -1; 1924 1915 ··· 1934 1925 
strbuf_init(&sb, 128); 1935 1926 1936 1927 while (pmu_num) { 1937 - if (read(fd, &type, sizeof(type)) != sizeof(type)) 1928 + if (readn(fd, &type, sizeof(type)) != sizeof(type)) 1938 1929 goto error; 1939 1930 if (ph->needs_swap) 1940 1931 type = bswap_32(type); ··· 2921 2912 session->repipe); 2922 2913 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 2923 2914 2924 - if (read(session->fd, buf, padding) < 0) 2915 + if (readn(session->fd, buf, padding) < 0) 2925 2916 die("reading input file"); 2926 2917 if (session->repipe) { 2927 2918 int retw = write(STDOUT_FILENO, buf, padding);
+63 -16
tools/perf/util/hist.c
··· 82 82 hists__new_col_len(hists, HISTC_DSO, len); 83 83 } 84 84 85 + if (h->parent) 86 + hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); 87 + 85 88 if (h->branch_info) { 86 89 int symlen; 87 90 /* ··· 245 242 246 243 if (he->ms.map) 247 244 he->ms.map->referenced = true; 245 + 246 + if (he->branch_info) { 247 + if (he->branch_info->from.map) 248 + he->branch_info->from.map->referenced = true; 249 + if (he->branch_info->to.map) 250 + he->branch_info->to.map->referenced = true; 251 + } 252 + 248 253 if (symbol_conf.use_callchain) 249 254 callchain_init(he->callchain); 250 255 ··· 262 251 return he; 263 252 } 264 253 265 - static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) 254 + void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) 266 255 { 267 256 if (!h->filtered) { 268 257 hists__calc_col_len(hists, h); ··· 296 285 parent = *p; 297 286 he = rb_entry(parent, struct hist_entry, rb_node_in); 298 287 299 - cmp = hist_entry__cmp(entry, he); 288 + /* 289 + * Make sure that it receives arguments in a same order as 290 + * hist_entry__collapse() so that we can use an appropriate 291 + * function when searching an entry regardless which sort 292 + * keys were used. 
293 + */ 294 + cmp = hist_entry__cmp(he, entry); 300 295 301 296 if (!cmp) { 302 297 he_stat__add_period(&he->stat, period); ··· 728 711 return symbol__annotate(he->ms.sym, he->ms.map, privsize); 729 712 } 730 713 714 + void events_stats__inc(struct events_stats *stats, u32 type) 715 + { 716 + ++stats->nr_events[0]; 717 + ++stats->nr_events[type]; 718 + } 719 + 731 720 void hists__inc_nr_events(struct hists *hists, u32 type) 732 721 { 733 - ++hists->stats.nr_events[0]; 734 - ++hists->stats.nr_events[type]; 722 + events_stats__inc(&hists->stats, type); 735 723 } 736 724 737 725 static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 738 726 struct hist_entry *pair) 739 727 { 740 - struct rb_node **p = &hists->entries.rb_node; 728 + struct rb_root *root; 729 + struct rb_node **p; 741 730 struct rb_node *parent = NULL; 742 731 struct hist_entry *he; 743 732 int cmp; 744 733 734 + if (sort__need_collapse) 735 + root = &hists->entries_collapsed; 736 + else 737 + root = hists->entries_in; 738 + 739 + p = &root->rb_node; 740 + 745 741 while (*p != NULL) { 746 742 parent = *p; 747 - he = rb_entry(parent, struct hist_entry, rb_node); 743 + he = rb_entry(parent, struct hist_entry, rb_node_in); 748 744 749 - cmp = hist_entry__cmp(pair, he); 745 + cmp = hist_entry__collapse(he, pair); 750 746 751 747 if (!cmp) 752 748 goto out; ··· 774 744 if (he) { 775 745 memset(&he->stat, 0, sizeof(he->stat)); 776 746 he->hists = hists; 777 - rb_link_node(&he->rb_node, parent, p); 778 - rb_insert_color(&he->rb_node, &hists->entries); 747 + rb_link_node(&he->rb_node_in, parent, p); 748 + rb_insert_color(&he->rb_node_in, root); 779 749 hists__inc_nr_entries(hists, he); 780 750 } 781 751 out: ··· 785 755 static struct hist_entry *hists__find_entry(struct hists *hists, 786 756 struct hist_entry *he) 787 757 { 788 - struct rb_node *n = hists->entries.rb_node; 758 + struct rb_node *n; 759 + 760 + if (sort__need_collapse) 761 + n = hists->entries_collapsed.rb_node; 762 + else 763 + n 
= hists->entries_in->rb_node; 789 764 790 765 while (n) { 791 - struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 792 - int64_t cmp = hist_entry__cmp(he, iter); 766 + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); 767 + int64_t cmp = hist_entry__collapse(iter, he); 793 768 794 769 if (cmp < 0) 795 770 n = n->rb_left; ··· 812 777 */ 813 778 void hists__match(struct hists *leader, struct hists *other) 814 779 { 780 + struct rb_root *root; 815 781 struct rb_node *nd; 816 782 struct hist_entry *pos, *pair; 817 783 818 - for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) { 819 - pos = rb_entry(nd, struct hist_entry, rb_node); 784 + if (sort__need_collapse) 785 + root = &leader->entries_collapsed; 786 + else 787 + root = leader->entries_in; 788 + 789 + for (nd = rb_first(root); nd; nd = rb_next(nd)) { 790 + pos = rb_entry(nd, struct hist_entry, rb_node_in); 820 791 pair = hists__find_entry(other, pos); 821 792 822 793 if (pair) ··· 837 796 */ 838 797 int hists__link(struct hists *leader, struct hists *other) 839 798 { 799 + struct rb_root *root; 840 800 struct rb_node *nd; 841 801 struct hist_entry *pos, *pair; 842 802 843 - for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) { 844 - pos = rb_entry(nd, struct hist_entry, rb_node); 803 + if (sort__need_collapse) 804 + root = &other->entries_collapsed; 805 + else 806 + root = other->entries_in; 807 + 808 + for (nd = rb_first(root); nd; nd = rb_next(nd)) { 809 + pos = rb_entry(nd, struct hist_entry, rb_node_in); 845 810 846 811 if (!hist_entry__has_pairs(pos)) { 847 812 pair = hists__add_dummy_entry(leader, pos);
+3 -1
tools/perf/util/hist.h
··· 96 96 bool zap_kernel); 97 97 void hists__output_recalc_col_len(struct hists *hists, int max_rows); 98 98 99 + void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h); 99 100 void hists__inc_nr_events(struct hists *self, u32 type); 100 - size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); 101 + void events_stats__inc(struct events_stats *stats, u32 type); 102 + size_t events_stats__fprintf(struct events_stats *stats, FILE *fp); 101 103 102 104 size_t hists__fprintf(struct hists *self, bool show_header, int max_rows, 103 105 int max_cols, FILE *fp);
+1
tools/perf/util/include/linux/bitops.h
··· 14 14 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) 15 15 #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) 16 16 #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) 17 + #define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE) 17 18 18 19 #define for_each_set_bit(bit, addr, size) \ 19 20 for ((bit) = find_first_bit((addr), (size)); \
+33 -3
tools/perf/util/intlist.c
··· 59 59 60 60 struct int_node *intlist__find(struct intlist *ilist, int i) 61 61 { 62 - struct int_node *node = NULL; 63 - struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); 62 + struct int_node *node; 63 + struct rb_node *rb_node; 64 64 65 + if (ilist == NULL) 66 + return NULL; 67 + 68 + node = NULL; 69 + rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); 65 70 if (rb_node) 66 71 node = container_of(rb_node, struct int_node, rb_node); 67 72 68 73 return node; 69 74 } 70 75 71 - struct intlist *intlist__new(void) 76 + static int intlist__parse_list(struct intlist *ilist, const char *s) 77 + { 78 + char *sep; 79 + int err; 80 + 81 + do { 82 + long value = strtol(s, &sep, 10); 83 + err = -EINVAL; 84 + if (*sep != ',' && *sep != '\0') 85 + break; 86 + err = intlist__add(ilist, value); 87 + if (err) 88 + break; 89 + s = sep + 1; 90 + } while (*sep != '\0'); 91 + 92 + return err; 93 + } 94 + 95 + struct intlist *intlist__new(const char *slist) 72 96 { 73 97 struct intlist *ilist = malloc(sizeof(*ilist)); 74 98 ··· 101 77 ilist->rblist.node_cmp = intlist__node_cmp; 102 78 ilist->rblist.node_new = intlist__node_new; 103 79 ilist->rblist.node_delete = intlist__node_delete; 80 + 81 + if (slist && intlist__parse_list(ilist, slist)) 82 + goto out_delete; 104 83 } 105 84 106 85 return ilist; 86 + out_delete: 87 + intlist__delete(ilist); 88 + return NULL; 107 89 } 108 90 109 91 void intlist__delete(struct intlist *ilist)
+1 -1
tools/perf/util/intlist.h
··· 15 15 struct rblist rblist; 16 16 }; 17 17 18 - struct intlist *intlist__new(void); 18 + struct intlist *intlist__new(const char *slist); 19 19 void intlist__delete(struct intlist *ilist); 20 20 21 21 void intlist__remove(struct intlist *ilist, struct int_node *in);
+42 -22
tools/perf/util/machine.c
··· 91 91 free(machine); 92 92 } 93 93 94 - struct machine *machines__add(struct rb_root *machines, pid_t pid, 94 + void machines__init(struct machines *machines) 95 + { 96 + machine__init(&machines->host, "", HOST_KERNEL_ID); 97 + machines->guests = RB_ROOT; 98 + } 99 + 100 + void machines__exit(struct machines *machines) 101 + { 102 + machine__exit(&machines->host); 103 + /* XXX exit guest */ 104 + } 105 + 106 + struct machine *machines__add(struct machines *machines, pid_t pid, 95 107 const char *root_dir) 96 108 { 97 - struct rb_node **p = &machines->rb_node; 109 + struct rb_node **p = &machines->guests.rb_node; 98 110 struct rb_node *parent = NULL; 99 111 struct machine *pos, *machine = malloc(sizeof(*machine)); 100 112 ··· 128 116 } 129 117 130 118 rb_link_node(&machine->rb_node, parent, p); 131 - rb_insert_color(&machine->rb_node, machines); 119 + rb_insert_color(&machine->rb_node, &machines->guests); 132 120 133 121 return machine; 134 122 } 135 123 136 - struct machine *machines__find(struct rb_root *machines, pid_t pid) 124 + struct machine *machines__find(struct machines *machines, pid_t pid) 137 125 { 138 - struct rb_node **p = &machines->rb_node; 126 + struct rb_node **p = &machines->guests.rb_node; 139 127 struct rb_node *parent = NULL; 140 128 struct machine *machine; 141 129 struct machine *default_machine = NULL; 130 + 131 + if (pid == HOST_KERNEL_ID) 132 + return &machines->host; 142 133 143 134 while (*p != NULL) { 144 135 parent = *p; ··· 159 144 return default_machine; 160 145 } 161 146 162 - struct machine *machines__findnew(struct rb_root *machines, pid_t pid) 147 + struct machine *machines__findnew(struct machines *machines, pid_t pid) 163 148 { 164 149 char path[PATH_MAX]; 165 150 const char *root_dir = ""; ··· 193 178 return machine; 194 179 } 195 180 196 - void machines__process(struct rb_root *machines, 197 - machine__process_t process, void *data) 181 + void machines__process_guests(struct machines *machines, 182 + machine__process_t 
process, void *data) 198 183 { 199 184 struct rb_node *nd; 200 185 201 - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 186 + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 202 187 struct machine *pos = rb_entry(nd, struct machine, rb_node); 203 188 process(pos, data); 204 189 } ··· 218 203 return bf; 219 204 } 220 205 221 - void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) 206 + void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) 222 207 { 223 208 struct rb_node *node; 224 209 struct machine *machine; 225 210 226 - for (node = rb_first(machines); node; node = rb_next(node)) { 211 + machines->host.id_hdr_size = id_hdr_size; 212 + 213 + for (node = rb_first(&machines->guests); node; node = rb_next(node)) { 227 214 machine = rb_entry(node, struct machine, rb_node); 228 215 machine->id_hdr_size = id_hdr_size; 229 216 } ··· 330 313 return map; 331 314 } 332 315 333 - size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) 316 + size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) 334 317 { 335 318 struct rb_node *nd; 336 - size_t ret = 0; 319 + size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) + 320 + __dsos__fprintf(&machines->host.user_dsos, fp); 337 321 338 - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 322 + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 339 323 struct machine *pos = rb_entry(nd, struct machine, rb_node); 340 324 ret += __dsos__fprintf(&pos->kernel_dsos, fp); 341 325 ret += __dsos__fprintf(&pos->user_dsos, fp); ··· 352 334 __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm); 353 335 } 354 336 355 - size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, 337 + size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, 356 338 bool (skip)(struct dso *dso, int parm), int parm) 357 339 { 358 340 struct rb_node *nd; 359 - size_t ret = 0; 341 + size_t ret = 
machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); 360 342 361 - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 343 + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 362 344 struct machine *pos = rb_entry(nd, struct machine, rb_node); 363 345 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 364 346 } ··· 529 511 } 530 512 } 531 513 532 - int machines__create_guest_kernel_maps(struct rb_root *machines) 514 + int machines__create_guest_kernel_maps(struct machines *machines) 533 515 { 534 516 int ret = 0; 535 517 struct dirent **namelist = NULL; ··· 578 560 return ret; 579 561 } 580 562 581 - void machines__destroy_guest_kernel_maps(struct rb_root *machines) 563 + void machines__destroy_kernel_maps(struct machines *machines) 582 564 { 583 - struct rb_node *next = rb_first(machines); 565 + struct rb_node *next = rb_first(&machines->guests); 566 + 567 + machine__destroy_kernel_maps(&machines->host); 584 568 585 569 while (next) { 586 570 struct machine *pos = rb_entry(next, struct machine, rb_node); 587 571 588 572 next = rb_next(&pos->rb_node); 589 - rb_erase(&pos->rb_node, machines); 573 + rb_erase(&pos->rb_node, &machines->guests); 590 574 machine__delete(pos); 591 575 } 592 576 } 593 577 594 - int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) 578 + int machines__create_kernel_maps(struct machines *machines, pid_t pid) 595 579 { 596 580 struct machine *machine = machines__findnew(machines, pid); 597 581
+20 -12
tools/perf/util/machine.h
··· 47 47 48 48 typedef void (*machine__process_t)(struct machine *machine, void *data); 49 49 50 - void machines__process(struct rb_root *machines, 51 - machine__process_t process, void *data); 50 + struct machines { 51 + struct machine host; 52 + struct rb_root guests; 53 + }; 52 54 53 - struct machine *machines__add(struct rb_root *machines, pid_t pid, 55 + void machines__init(struct machines *machines); 56 + void machines__exit(struct machines *machines); 57 + 58 + void machines__process_guests(struct machines *machines, 59 + machine__process_t process, void *data); 60 + 61 + struct machine *machines__add(struct machines *machines, pid_t pid, 54 62 const char *root_dir); 55 - struct machine *machines__find_host(struct rb_root *machines); 56 - struct machine *machines__find(struct rb_root *machines, pid_t pid); 57 - struct machine *machines__findnew(struct rb_root *machines, pid_t pid); 63 + struct machine *machines__find_host(struct machines *machines); 64 + struct machine *machines__find(struct machines *machines, pid_t pid); 65 + struct machine *machines__findnew(struct machines *machines, pid_t pid); 58 66 59 - void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size); 67 + void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size); 60 68 char *machine__mmap_name(struct machine *machine, char *bf, size_t size); 61 69 62 70 int machine__init(struct machine *machine, const char *root_dir, pid_t pid); ··· 140 132 141 133 size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, 142 134 bool (skip)(struct dso *dso, int parm), int parm); 143 - size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); 144 - size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp, 135 + size_t machines__fprintf_dsos(struct machines *machines, FILE *fp); 136 + size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, 145 137 bool (skip)(struct dso *dso, int parm), int parm); 146 138 147 139 void 
machine__destroy_kernel_maps(struct machine *machine); 148 140 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); 149 141 int machine__create_kernel_maps(struct machine *machine); 150 142 151 - int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); 152 - int machines__create_guest_kernel_maps(struct rb_root *machines); 153 - void machines__destroy_guest_kernel_maps(struct rb_root *machines); 143 + int machines__create_kernel_maps(struct machines *machines, pid_t pid); 144 + int machines__create_guest_kernel_maps(struct machines *machines); 145 + void machines__destroy_kernel_maps(struct machines *machines); 154 146 155 147 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); 156 148
+2 -1
tools/perf/util/map.c
··· 19 19 20 20 static inline int is_anon_memory(const char *filename) 21 21 { 22 - return strcmp(filename, "//anon") == 0; 22 + return !strcmp(filename, "//anon") || 23 + !strcmp(filename, "/anon_hugepage (deleted)"); 23 24 } 24 25 25 26 static inline int is_no_dso_memory(const char *filename)
+64 -23
tools/perf/util/parse-events.c
··· 380 380 return 0; 381 381 } 382 382 383 - static int add_tracepoint_multi(struct list_head **list, int *idx, 384 - char *sys_name, char *evt_name) 383 + static int add_tracepoint_multi_event(struct list_head **list, int *idx, 384 + char *sys_name, char *evt_name) 385 385 { 386 386 char evt_path[MAXPATHLEN]; 387 387 struct dirent *evt_ent; ··· 408 408 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); 409 409 } 410 410 411 + closedir(evt_dir); 412 + return ret; 413 + } 414 + 415 + static int add_tracepoint_event(struct list_head **list, int *idx, 416 + char *sys_name, char *evt_name) 417 + { 418 + return strpbrk(evt_name, "*?") ? 419 + add_tracepoint_multi_event(list, idx, sys_name, evt_name) : 420 + add_tracepoint(list, idx, sys_name, evt_name); 421 + } 422 + 423 + static int add_tracepoint_multi_sys(struct list_head **list, int *idx, 424 + char *sys_name, char *evt_name) 425 + { 426 + struct dirent *events_ent; 427 + DIR *events_dir; 428 + int ret = 0; 429 + 430 + events_dir = opendir(tracing_events_path); 431 + if (!events_dir) { 432 + perror("Can't open event dir"); 433 + return -1; 434 + } 435 + 436 + while (!ret && (events_ent = readdir(events_dir))) { 437 + if (!strcmp(events_ent->d_name, ".") 438 + || !strcmp(events_ent->d_name, "..") 439 + || !strcmp(events_ent->d_name, "enable") 440 + || !strcmp(events_ent->d_name, "header_event") 441 + || !strcmp(events_ent->d_name, "header_page")) 442 + continue; 443 + 444 + if (!strglobmatch(events_ent->d_name, sys_name)) 445 + continue; 446 + 447 + ret = add_tracepoint_event(list, idx, events_ent->d_name, 448 + evt_name); 449 + } 450 + 451 + closedir(events_dir); 411 452 return ret; 412 453 } 413 454 ··· 461 420 if (ret) 462 421 return ret; 463 422 464 - return strpbrk(event, "*?") ? 
465 - add_tracepoint_multi(list, idx, sys, event) : 466 - add_tracepoint(list, idx, sys, event); 423 + if (strpbrk(sys, "*?")) 424 + return add_tracepoint_multi_sys(list, idx, sys, event); 425 + else 426 + return add_tracepoint_event(list, idx, sys, event); 467 427 } 468 428 469 429 static int ··· 534 492 } 535 493 536 494 static int config_term(struct perf_event_attr *attr, 537 - struct parse_events__term *term) 495 + struct parse_events_term *term) 538 496 { 539 497 #define CHECK_TYPE_VAL(type) \ 540 498 do { \ ··· 579 537 static int config_attr(struct perf_event_attr *attr, 580 538 struct list_head *head, int fail) 581 539 { 582 - struct parse_events__term *term; 540 + struct parse_events_term *term; 583 541 584 542 list_for_each_entry(term, head, list) 585 543 if (config_term(attr, term) && fail) ··· 605 563 return add_event(list, idx, &attr, NULL); 606 564 } 607 565 608 - static int parse_events__is_name_term(struct parse_events__term *term) 566 + static int parse_events__is_name_term(struct parse_events_term *term) 609 567 { 610 568 return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; 611 569 } 612 570 613 571 static char *pmu_event_name(struct list_head *head_terms) 614 572 { 615 - struct parse_events__term *term; 573 + struct parse_events_term *term; 616 574 617 575 list_for_each_entry(term, head_terms, list) 618 576 if (parse_events__is_name_term(term)) ··· 856 814 */ 857 815 int parse_events_terms(struct list_head *terms, const char *str) 858 816 { 859 - struct parse_events_data__terms data = { 817 + struct parse_events_terms data = { 860 818 .terms = NULL, 861 819 }; 862 820 int ret; ··· 872 830 return ret; 873 831 } 874 832 875 - int parse_events(struct perf_evlist *evlist, const char *str, 876 - int unset __maybe_unused) 833 + int parse_events(struct perf_evlist *evlist, const char *str) 877 834 { 878 - struct parse_events_data__events data = { 835 + struct parse_events_evlist data = { 879 836 .list = LIST_HEAD_INIT(data.list), 880 837 .idx = 
evlist->nr_entries, 881 838 }; ··· 899 858 int unset __maybe_unused) 900 859 { 901 860 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; 902 - int ret = parse_events(evlist, str, unset); 861 + int ret = parse_events(evlist, str); 903 862 904 863 if (ret) { 905 864 fprintf(stderr, "invalid or unsupported event: '%s'\n", str); ··· 1162 1121 print_tracepoint_events(NULL, NULL, name_only); 1163 1122 } 1164 1123 1165 - int parse_events__is_hardcoded_term(struct parse_events__term *term) 1124 + int parse_events__is_hardcoded_term(struct parse_events_term *term) 1166 1125 { 1167 1126 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 1168 1127 } 1169 1128 1170 - static int new_term(struct parse_events__term **_term, int type_val, 1129 + static int new_term(struct parse_events_term **_term, int type_val, 1171 1130 int type_term, char *config, 1172 1131 char *str, u64 num) 1173 1132 { 1174 - struct parse_events__term *term; 1133 + struct parse_events_term *term; 1175 1134 1176 1135 term = zalloc(sizeof(*term)); 1177 1136 if (!term) ··· 1197 1156 return 0; 1198 1157 } 1199 1158 1200 - int parse_events__term_num(struct parse_events__term **term, 1159 + int parse_events_term__num(struct parse_events_term **term, 1201 1160 int type_term, char *config, u64 num) 1202 1161 { 1203 1162 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, 1204 1163 config, NULL, num); 1205 1164 } 1206 1165 1207 - int parse_events__term_str(struct parse_events__term **term, 1166 + int parse_events_term__str(struct parse_events_term **term, 1208 1167 int type_term, char *config, char *str) 1209 1168 { 1210 1169 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, 1211 1170 config, str, 0); 1212 1171 } 1213 1172 1214 - int parse_events__term_sym_hw(struct parse_events__term **term, 1173 + int parse_events_term__sym_hw(struct parse_events_term **term, 1215 1174 char *config, unsigned idx) 1216 1175 { 1217 1176 struct event_symbol *sym; ··· 1229 1188 (char *) 
"event", (char *) sym->symbol, 0); 1230 1189 } 1231 1190 1232 - int parse_events__term_clone(struct parse_events__term **new, 1233 - struct parse_events__term *term) 1191 + int parse_events_term__clone(struct parse_events_term **new, 1192 + struct parse_events_term *term) 1234 1193 { 1235 1194 return new_term(new, term->type_val, term->type_term, term->config, 1236 1195 term->val.str, term->val.num); ··· 1238 1197 1239 1198 void parse_events__free_terms(struct list_head *terms) 1240 1199 { 1241 - struct parse_events__term *term, *h; 1200 + struct parse_events_term *term, *h; 1242 1201 1243 1202 list_for_each_entry_safe(term, h, terms, list) 1244 1203 free(term);
+10 -11
tools/perf/util/parse-events.h
··· 29 29 30 30 extern int parse_events_option(const struct option *opt, const char *str, 31 31 int unset); 32 - extern int parse_events(struct perf_evlist *evlist, const char *str, 33 - int unset); 32 + extern int parse_events(struct perf_evlist *evlist, const char *str); 34 33 extern int parse_events_terms(struct list_head *terms, const char *str); 35 34 extern int parse_filter(const struct option *opt, const char *str, int unset); 36 35 ··· 50 51 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE, 51 52 }; 52 53 53 - struct parse_events__term { 54 + struct parse_events_term { 54 55 char *config; 55 56 union { 56 57 char *str; ··· 61 62 struct list_head list; 62 63 }; 63 64 64 - struct parse_events_data__events { 65 + struct parse_events_evlist { 65 66 struct list_head list; 66 67 int idx; 67 68 }; 68 69 69 - struct parse_events_data__terms { 70 + struct parse_events_terms { 70 71 struct list_head *terms; 71 72 }; 72 73 73 - int parse_events__is_hardcoded_term(struct parse_events__term *term); 74 - int parse_events__term_num(struct parse_events__term **_term, 74 + int parse_events__is_hardcoded_term(struct parse_events_term *term); 75 + int parse_events_term__num(struct parse_events_term **_term, 75 76 int type_term, char *config, u64 num); 76 - int parse_events__term_str(struct parse_events__term **_term, 77 + int parse_events_term__str(struct parse_events_term **_term, 77 78 int type_term, char *config, char *str); 78 - int parse_events__term_sym_hw(struct parse_events__term **term, 79 + int parse_events_term__sym_hw(struct parse_events_term **term, 79 80 char *config, unsigned idx); 80 - int parse_events__term_clone(struct parse_events__term **new, 81 - struct parse_events__term *term); 81 + int parse_events_term__clone(struct parse_events_term **new, 82 + struct parse_events_term *term); 82 83 void parse_events__free_terms(struct list_head *terms); 83 84 int parse_events__modifier_event(struct list_head *list, char *str, bool add); 84 85 int 
parse_events__modifier_group(struct list_head *list, char *event_mod);
+32 -32
tools/perf/util/parse-events.y
··· 68 68 char *str; 69 69 u64 num; 70 70 struct list_head *head; 71 - struct parse_events__term *term; 71 + struct parse_events_term *term; 72 72 } 73 73 %% 74 74 ··· 79 79 80 80 start_events: groups 81 81 { 82 - struct parse_events_data__events *data = _data; 82 + struct parse_events_evlist *data = _data; 83 83 84 84 parse_events_update_lists($1, &data->list); 85 85 } ··· 186 186 event_pmu: 187 187 PE_NAME '/' event_config '/' 188 188 { 189 - struct parse_events_data__events *data = _data; 189 + struct parse_events_evlist *data = _data; 190 190 struct list_head *list = NULL; 191 191 192 192 ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3)); ··· 202 202 event_legacy_symbol: 203 203 value_sym '/' event_config '/' 204 204 { 205 - struct parse_events_data__events *data = _data; 205 + struct parse_events_evlist *data = _data; 206 206 struct list_head *list = NULL; 207 207 int type = $1 >> 16; 208 208 int config = $1 & 255; ··· 215 215 | 216 216 value_sym sep_slash_dc 217 217 { 218 - struct parse_events_data__events *data = _data; 218 + struct parse_events_evlist *data = _data; 219 219 struct list_head *list = NULL; 220 220 int type = $1 >> 16; 221 221 int config = $1 & 255; ··· 228 228 event_legacy_cache: 229 229 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT 230 230 { 231 - struct parse_events_data__events *data = _data; 231 + struct parse_events_evlist *data = _data; 232 232 struct list_head *list = NULL; 233 233 234 234 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5)); ··· 237 237 | 238 238 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT 239 239 { 240 - struct parse_events_data__events *data = _data; 240 + struct parse_events_evlist *data = _data; 241 241 struct list_head *list = NULL; 242 242 243 243 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL)); ··· 246 246 | 247 247 PE_NAME_CACHE_TYPE 248 248 { 249 - struct parse_events_data__events *data = _data; 249 + struct parse_events_evlist *data = 
_data; 250 250 struct list_head *list = NULL; 251 251 252 252 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL)); ··· 256 256 event_legacy_mem: 257 257 PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc 258 258 { 259 - struct parse_events_data__events *data = _data; 259 + struct parse_events_evlist *data = _data; 260 260 struct list_head *list = NULL; 261 261 262 262 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, ··· 266 266 | 267 267 PE_PREFIX_MEM PE_VALUE sep_dc 268 268 { 269 - struct parse_events_data__events *data = _data; 269 + struct parse_events_evlist *data = _data; 270 270 struct list_head *list = NULL; 271 271 272 272 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, ··· 277 277 event_legacy_tracepoint: 278 278 PE_NAME ':' PE_NAME 279 279 { 280 - struct parse_events_data__events *data = _data; 280 + struct parse_events_evlist *data = _data; 281 281 struct list_head *list = NULL; 282 282 283 283 ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3)); ··· 287 287 event_legacy_numeric: 288 288 PE_VALUE ':' PE_VALUE 289 289 { 290 - struct parse_events_data__events *data = _data; 290 + struct parse_events_evlist *data = _data; 291 291 struct list_head *list = NULL; 292 292 293 293 ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); ··· 297 297 event_legacy_raw: 298 298 PE_RAW 299 299 { 300 - struct parse_events_data__events *data = _data; 300 + struct parse_events_evlist *data = _data; 301 301 struct list_head *list = NULL; 302 302 303 303 ABORT_ON(parse_events_add_numeric(&list, &data->idx, ··· 307 307 308 308 start_terms: event_config 309 309 { 310 - struct parse_events_data__terms *data = _data; 310 + struct parse_events_terms *data = _data; 311 311 data->terms = $1; 312 312 } 313 313 ··· 315 315 event_config ',' event_term 316 316 { 317 317 struct list_head *head = $1; 318 - struct parse_events__term *term = $3; 318 + struct parse_events_term *term = $3; 319 319 320 320 ABORT_ON(!head); 321 321 
list_add_tail(&term->list, head); ··· 325 325 event_term 326 326 { 327 327 struct list_head *head = malloc(sizeof(*head)); 328 - struct parse_events__term *term = $1; 328 + struct parse_events_term *term = $1; 329 329 330 330 ABORT_ON(!head); 331 331 INIT_LIST_HEAD(head); ··· 336 336 event_term: 337 337 PE_NAME '=' PE_NAME 338 338 { 339 - struct parse_events__term *term; 339 + struct parse_events_term *term; 340 340 341 - ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER, 341 + ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, 342 342 $1, $3)); 343 343 $$ = term; 344 344 } 345 345 | 346 346 PE_NAME '=' PE_VALUE 347 347 { 348 - struct parse_events__term *term; 348 + struct parse_events_term *term; 349 349 350 - ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, 350 + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, 351 351 $1, $3)); 352 352 $$ = term; 353 353 } 354 354 | 355 355 PE_NAME '=' PE_VALUE_SYM_HW 356 356 { 357 - struct parse_events__term *term; 357 + struct parse_events_term *term; 358 358 int config = $3 & 255; 359 359 360 - ABORT_ON(parse_events__term_sym_hw(&term, $1, config)); 360 + ABORT_ON(parse_events_term__sym_hw(&term, $1, config)); 361 361 $$ = term; 362 362 } 363 363 | 364 364 PE_NAME 365 365 { 366 - struct parse_events__term *term; 366 + struct parse_events_term *term; 367 367 368 - ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, 368 + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, 369 369 $1, 1)); 370 370 $$ = term; 371 371 } 372 372 | 373 373 PE_VALUE_SYM_HW 374 374 { 375 - struct parse_events__term *term; 375 + struct parse_events_term *term; 376 376 int config = $1 & 255; 377 377 378 - ABORT_ON(parse_events__term_sym_hw(&term, NULL, config)); 378 + ABORT_ON(parse_events_term__sym_hw(&term, NULL, config)); 379 379 $$ = term; 380 380 } 381 381 | 382 382 PE_TERM '=' PE_NAME 383 383 { 384 - struct parse_events__term *term; 384 + 
struct parse_events_term *term; 385 385 386 - ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3)); 386 + ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3)); 387 387 $$ = term; 388 388 } 389 389 | 390 390 PE_TERM '=' PE_VALUE 391 391 { 392 - struct parse_events__term *term; 392 + struct parse_events_term *term; 393 393 394 - ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3)); 394 + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3)); 395 395 $$ = term; 396 396 } 397 397 | 398 398 PE_TERM 399 399 { 400 - struct parse_events__term *term; 400 + struct parse_events_term *term; 401 401 402 - ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1)); 402 + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1)); 403 403 $$ = term; 404 404 } 405 405
+29 -17
tools/perf/util/pmu.c
··· 1 - 2 1 #include <linux/list.h> 3 2 #include <sys/types.h> 4 3 #include <sys/stat.h> ··· 9 10 #include "pmu.h" 10 11 #include "parse-events.h" 11 12 #include "cpumap.h" 13 + 14 + struct perf_pmu_alias { 15 + char *name; 16 + struct list_head terms; 17 + struct list_head list; 18 + }; 19 + 20 + struct perf_pmu_format { 21 + char *name; 22 + int value; 23 + DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); 24 + struct list_head list; 25 + }; 12 26 13 27 #define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" 14 28 ··· 97 85 98 86 static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) 99 87 { 100 - struct perf_pmu__alias *alias; 88 + struct perf_pmu_alias *alias; 101 89 char buf[256]; 102 90 int ret; 103 91 ··· 184 172 return 0; 185 173 } 186 174 187 - static int pmu_alias_terms(struct perf_pmu__alias *alias, 175 + static int pmu_alias_terms(struct perf_pmu_alias *alias, 188 176 struct list_head *terms) 189 177 { 190 - struct parse_events__term *term, *clone; 178 + struct parse_events_term *term, *clone; 191 179 LIST_HEAD(list); 192 180 int ret; 193 181 194 182 list_for_each_entry(term, &alias->terms, list) { 195 - ret = parse_events__term_clone(&clone, term); 183 + ret = parse_events_term__clone(&clone, term); 196 184 if (ret) { 197 185 parse_events__free_terms(&list); 198 186 return ret; ··· 372 360 return pmu_lookup(name); 373 361 } 374 362 375 - static struct perf_pmu__format* 363 + static struct perf_pmu_format * 376 364 pmu_find_format(struct list_head *formats, char *name) 377 365 { 378 - struct perf_pmu__format *format; 366 + struct perf_pmu_format *format; 379 367 380 368 list_for_each_entry(format, formats, list) 381 369 if (!strcmp(format->name, name)) ··· 415 403 */ 416 404 static int pmu_config_term(struct list_head *formats, 417 405 struct perf_event_attr *attr, 418 - struct parse_events__term *term) 406 + struct parse_events_term *term) 419 407 { 420 - struct perf_pmu__format *format; 408 + struct perf_pmu_format *format; 421 
409 __u64 *vp; 422 410 423 411 /* ··· 462 450 struct perf_event_attr *attr, 463 451 struct list_head *head_terms) 464 452 { 465 - struct parse_events__term *term; 453 + struct parse_events_term *term; 466 454 467 455 list_for_each_entry(term, head_terms, list) 468 456 if (pmu_config_term(formats, attr, term)) ··· 483 471 return perf_pmu__config_terms(&pmu->format, attr, head_terms); 484 472 } 485 473 486 - static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, 487 - struct parse_events__term *term) 474 + static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu, 475 + struct parse_events_term *term) 488 476 { 489 - struct perf_pmu__alias *alias; 477 + struct perf_pmu_alias *alias; 490 478 char *name; 491 479 492 480 if (parse_events__is_hardcoded_term(term)) ··· 519 507 */ 520 508 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) 521 509 { 522 - struct parse_events__term *term, *h; 523 - struct perf_pmu__alias *alias; 510 + struct parse_events_term *term, *h; 511 + struct perf_pmu_alias *alias; 524 512 int ret; 525 513 526 514 list_for_each_entry_safe(term, h, head_terms, list) { ··· 539 527 int perf_pmu__new_format(struct list_head *list, char *name, 540 528 int config, unsigned long *bits) 541 529 { 542 - struct perf_pmu__format *format; 530 + struct perf_pmu_format *format; 543 531 544 532 format = zalloc(sizeof(*format)); 545 533 if (!format) ··· 560 548 if (!to) 561 549 to = from; 562 550 563 - memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS)); 551 + memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS)); 564 552 for (b = from; b <= to; b++) 565 553 set_bit(b, bits); 566 554 }
+1 -14
tools/perf/util/pmu.h
··· 12 12 13 13 #define PERF_PMU_FORMAT_BITS 64 14 14 15 - struct perf_pmu__format { 16 - char *name; 17 - int value; 18 - DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); 19 - struct list_head list; 20 - }; 21 - 22 - struct perf_pmu__alias { 23 - char *name; 24 - struct list_head terms; 25 - struct list_head list; 26 - }; 27 - 28 15 struct perf_pmu { 29 16 char *name; 30 17 __u32 type; ··· 29 42 struct list_head *head_terms); 30 43 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms); 31 44 struct list_head *perf_pmu__alias(struct perf_pmu *pmu, 32 - struct list_head *head_terms); 45 + struct list_head *head_terms); 33 46 int perf_pmu_wrap(void); 34 47 void perf_pmu_error(struct list_head *list, char *name, char const *msg); 35 48
+5 -5
tools/perf/util/probe-finder.c
··· 413 413 dwarf_diename(vr_die), dwarf_diename(&type)); 414 414 return -EINVAL; 415 415 } 416 + if (die_get_real_type(&type, &type) == NULL) { 417 + pr_warning("Failed to get a type" 418 + " information.\n"); 419 + return -ENOENT; 420 + } 416 421 if (ret == DW_TAG_pointer_type) { 417 - if (die_get_real_type(&type, &type) == NULL) { 418 - pr_warning("Failed to get a type" 419 - " information.\n"); 420 - return -ENOENT; 421 - } 422 422 while (*ref_ptr) 423 423 ref_ptr = &(*ref_ptr)->next; 424 424 /* Add new reference with offset +0 */
+9
tools/perf/util/python.c
··· 1045 1045 if (PyErr_Occurred()) 1046 1046 PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); 1047 1047 } 1048 + 1049 + /* 1050 + * Dummy, to avoid dragging all the test_attr infrastructure in the python 1051 + * binding. 1052 + */ 1053 + void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, 1054 + int fd, int group_fd, unsigned long flags) 1055 + { 1056 + }
+1
tools/perf/util/scripting-engines/trace-event-perl.c
··· 292 292 ns = nsecs - s * NSECS_PER_SEC; 293 293 294 294 scripting_context->event_data = data; 295 + scripting_context->pevent = evsel->tp_format->pevent; 295 296 296 297 ENTER; 297 298 SAVETMPS;
+1
tools/perf/util/scripting-engines/trace-event-python.c
··· 265 265 ns = nsecs - s * NSECS_PER_SEC; 266 266 267 267 scripting_context->event_data = data; 268 + scripting_context->pevent = evsel->tp_format->pevent; 268 269 269 270 context = PyCObject_FromVoidPtr(scripting_context, NULL); 270 271
+34 -41
tools/perf/util/session.c
··· 86 86 { 87 87 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 88 88 89 - session->host_machine.id_hdr_size = id_hdr_size; 90 89 machines__set_id_hdr_size(&session->machines, id_hdr_size); 91 90 } 92 91 93 92 int perf_session__create_kernel_maps(struct perf_session *self) 94 93 { 95 - int ret = machine__create_kernel_maps(&self->host_machine); 94 + int ret = machine__create_kernel_maps(&self->machines.host); 96 95 97 96 if (ret >= 0) 98 97 ret = machines__create_guest_kernel_maps(&self->machines); ··· 100 101 101 102 static void perf_session__destroy_kernel_maps(struct perf_session *self) 102 103 { 103 - machine__destroy_kernel_maps(&self->host_machine); 104 - machines__destroy_guest_kernel_maps(&self->machines); 104 + machines__destroy_kernel_maps(&self->machines); 105 105 } 106 106 107 107 struct perf_session *perf_session__new(const char *filename, int mode, ··· 125 127 goto out; 126 128 127 129 memcpy(self->filename, filename, len); 128 - self->machines = RB_ROOT; 129 130 self->repipe = repipe; 130 131 INIT_LIST_HEAD(&self->ordered_samples.samples); 131 132 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 132 133 INIT_LIST_HEAD(&self->ordered_samples.to_free); 133 - machine__init(&self->host_machine, "", HOST_KERNEL_ID); 134 - hists__init(&self->hists); 134 + machines__init(&self->machines); 135 135 136 136 if (mode == O_RDONLY) { 137 137 if (perf_session__open(self, force) < 0) ··· 159 163 160 164 static void perf_session__delete_dead_threads(struct perf_session *session) 161 165 { 162 - machine__delete_dead_threads(&session->host_machine); 166 + machine__delete_dead_threads(&session->machines.host); 163 167 } 164 168 165 169 static void perf_session__delete_threads(struct perf_session *session) 166 170 { 167 - machine__delete_threads(&session->host_machine); 171 + machine__delete_threads(&session->machines.host); 168 172 } 169 173 170 174 static void perf_session_env__delete(struct perf_session_env *env) ··· 189 193 
perf_session__delete_dead_threads(self); 190 194 perf_session__delete_threads(self); 191 195 perf_session_env__delete(&self->header.env); 192 - machine__exit(&self->host_machine); 196 + machines__exit(&self->machines); 193 197 close(self->fd); 194 198 free(self); 195 199 vdso__exit(); ··· 821 825 return perf_session__findnew_machine(session, pid); 822 826 } 823 827 824 - return perf_session__find_host_machine(session); 828 + return &session->machines.host; 825 829 } 826 830 827 831 static int perf_session_deliver_event(struct perf_session *session, ··· 859 863 case PERF_RECORD_SAMPLE: 860 864 dump_sample(evsel, event, sample); 861 865 if (evsel == NULL) { 862 - ++session->hists.stats.nr_unknown_id; 866 + ++session->stats.nr_unknown_id; 863 867 return 0; 864 868 } 865 869 if (machine == NULL) { 866 - ++session->hists.stats.nr_unprocessable_samples; 870 + ++session->stats.nr_unprocessable_samples; 867 871 return 0; 868 872 } 869 873 return tool->sample(tool, event, sample, evsel, machine); ··· 877 881 return tool->exit(tool, event, sample, machine); 878 882 case PERF_RECORD_LOST: 879 883 if (tool->lost == perf_event__process_lost) 880 - session->hists.stats.total_lost += event->lost.lost; 884 + session->stats.total_lost += event->lost.lost; 881 885 return tool->lost(tool, event, sample, machine); 882 886 case PERF_RECORD_READ: 883 887 return tool->read(tool, event, sample, evsel, machine); ··· 886 890 case PERF_RECORD_UNTHROTTLE: 887 891 return tool->unthrottle(tool, event, sample, machine); 888 892 default: 889 - ++session->hists.stats.nr_unknown_events; 893 + ++session->stats.nr_unknown_events; 890 894 return -1; 891 895 } 892 896 } ··· 900 904 901 905 if (!ip_callchain__valid(sample->callchain, event)) { 902 906 pr_debug("call-chain problem with event, skipping it.\n"); 903 - ++session->hists.stats.nr_invalid_chains; 904 - session->hists.stats.total_invalid_chains += sample->period; 907 + ++session->stats.nr_invalid_chains; 908 + 
session->stats.total_invalid_chains += sample->period; 905 909 return -EINVAL; 906 910 } 907 911 return 0; ··· 959 963 if (event->header.type >= PERF_RECORD_HEADER_MAX) 960 964 return -EINVAL; 961 965 962 - hists__inc_nr_events(&session->hists, event->header.type); 966 + events_stats__inc(&session->stats, event->header.type); 963 967 964 968 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 965 969 return perf_session__process_user_event(session, event, tool, file_offset); ··· 995 999 996 1000 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 997 1001 { 998 - return machine__findnew_thread(&session->host_machine, pid); 1002 + return machine__findnew_thread(&session->machines.host, pid); 999 1003 } 1000 1004 1001 1005 static struct thread *perf_session__register_idle_thread(struct perf_session *self) ··· 1014 1018 const struct perf_tool *tool) 1015 1019 { 1016 1020 if (tool->lost == perf_event__process_lost && 1017 - session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { 1021 + session->stats.nr_events[PERF_RECORD_LOST] != 0) { 1018 1022 ui__warning("Processed %d events and lost %d chunks!\n\n" 1019 1023 "Check IO/CPU overload!\n\n", 1020 - session->hists.stats.nr_events[0], 1021 - session->hists.stats.nr_events[PERF_RECORD_LOST]); 1024 + session->stats.nr_events[0], 1025 + session->stats.nr_events[PERF_RECORD_LOST]); 1022 1026 } 1023 1027 1024 - if (session->hists.stats.nr_unknown_events != 0) { 1028 + if (session->stats.nr_unknown_events != 0) { 1025 1029 ui__warning("Found %u unknown events!\n\n" 1026 1030 "Is this an older tool processing a perf.data " 1027 1031 "file generated by a more recent tool?\n\n" 1028 1032 "If that is not the case, consider " 1029 1033 "reporting to linux-kernel@vger.kernel.org.\n\n", 1030 - session->hists.stats.nr_unknown_events); 1034 + session->stats.nr_unknown_events); 1031 1035 } 1032 1036 1033 - if (session->hists.stats.nr_unknown_id != 0) { 1037 + if (session->stats.nr_unknown_id != 0) { 1034 
1038 ui__warning("%u samples with id not present in the header\n", 1035 - session->hists.stats.nr_unknown_id); 1039 + session->stats.nr_unknown_id); 1036 1040 } 1037 1041 1038 - if (session->hists.stats.nr_invalid_chains != 0) { 1042 + if (session->stats.nr_invalid_chains != 0) { 1039 1043 ui__warning("Found invalid callchains!\n\n" 1040 1044 "%u out of %u events were discarded for this reason.\n\n" 1041 1045 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1042 - session->hists.stats.nr_invalid_chains, 1043 - session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); 1046 + session->stats.nr_invalid_chains, 1047 + session->stats.nr_events[PERF_RECORD_SAMPLE]); 1044 1048 } 1045 1049 1046 - if (session->hists.stats.nr_unprocessable_samples != 0) { 1050 + if (session->stats.nr_unprocessable_samples != 0) { 1047 1051 ui__warning("%u unprocessable samples recorded.\n" 1048 1052 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1049 - session->hists.stats.nr_unprocessable_samples); 1053 + session->stats.nr_unprocessable_samples); 1050 1054 } 1051 1055 } 1052 1056 ··· 1332 1336 1333 1337 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) 1334 1338 { 1335 - return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) + 1336 - __dsos__fprintf(&self->host_machine.user_dsos, fp) + 1337 - machines__fprintf_dsos(&self->machines, fp); 1339 + return machines__fprintf_dsos(&self->machines, fp); 1338 1340 } 1339 1341 1340 1342 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1341 1343 bool (skip)(struct dso *dso, int parm), int parm) 1342 1344 { 1343 - size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm); 1344 - return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); 1345 + return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); 1345 1346 } 1346 1347 1347 1348 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) ··· 1346 1353 
struct perf_evsel *pos; 1347 1354 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1348 1355 1349 - ret += hists__fprintf_nr_events(&session->hists, fp); 1356 + ret += events_stats__fprintf(&session->stats, fp); 1350 1357 1351 1358 list_for_each_entry(pos, &session->evlist->entries, node) { 1352 1359 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); 1353 - ret += hists__fprintf_nr_events(&pos->hists, fp); 1360 + ret += events_stats__fprintf(&pos->hists.stats, fp); 1354 1361 } 1355 1362 1356 1363 return ret; ··· 1362 1369 * FIXME: Here we have to actually print all the machines in this 1363 1370 * session, not just the host... 1364 1371 */ 1365 - return machine__fprintf(&session->host_machine, fp); 1372 + return machine__fprintf(&session->machines.host, fp); 1366 1373 } 1367 1374 1368 1375 void perf_session__remove_thread(struct perf_session *session, ··· 1371 1378 /* 1372 1379 * FIXME: This one makes no sense, we need to remove the thread from 1373 1380 * the machine it belongs to, perf_session can have many machines, so 1374 - * doing it always on ->host_machine is wrong. Fix when auditing all 1381 + * doing it always on ->machines.host is wrong. Fix when auditing all 1375 1382 * the 'perf kvm' code. 1376 1383 */ 1377 - machine__remove_thread(&session->host_machine, th); 1384 + machine__remove_thread(&session->machines.host, th); 1378 1385 } 1379 1386 1380 1387 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
+3 -27
tools/perf/util/session.h
··· 30 30 struct perf_session { 31 31 struct perf_header header; 32 32 unsigned long size; 33 - struct machine host_machine; 34 - struct rb_root machines; 33 + struct machines machines; 35 34 struct perf_evlist *evlist; 36 35 struct pevent *pevent; 37 - /* 38 - * FIXME: Need to split this up further, we need global 39 - * stats + per event stats. 40 - */ 41 - struct hists hists; 36 + struct events_stats stats; 42 37 int fd; 43 38 bool fd_pipe; 44 39 bool repipe; ··· 48 53 struct perf_session *perf_session__new(const char *filename, int mode, 49 54 bool force, bool repipe, 50 55 struct perf_tool *tool); 51 - void perf_session__delete(struct perf_session *self); 56 + void perf_session__delete(struct perf_session *session); 52 57 53 58 void perf_event_header__bswap(struct perf_event_header *self); 54 59 ··· 75 80 void perf_session__remove_thread(struct perf_session *self, struct thread *th); 76 81 77 82 static inline 78 - struct machine *perf_session__find_host_machine(struct perf_session *self) 79 - { 80 - return &self->host_machine; 81 - } 82 - 83 - static inline 84 83 struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid) 85 84 { 86 - if (pid == HOST_KERNEL_ID) 87 - return &self->host_machine; 88 85 return machines__find(&self->machines, pid); 89 86 } 90 87 91 88 static inline 92 89 struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid) 93 90 { 94 - if (pid == HOST_KERNEL_ID) 95 - return &self->host_machine; 96 91 return machines__findnew(&self->machines, pid); 97 - } 98 - 99 - static inline 100 - void perf_session__process_machines(struct perf_session *self, 101 - struct perf_tool *tool, 102 - machine__process_t process) 103 - { 104 - process(&self->host_machine, tool); 105 - return machines__process(&self->machines, process, tool); 106 92 } 107 93 108 94 struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
+123 -109
tools/perf/util/sort.c
··· 60 60 static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, 61 61 size_t size, unsigned int width) 62 62 { 63 - return repsep_snprintf(bf, size, "%*s:%5d", width, 63 + return repsep_snprintf(bf, size, "%*s:%5d", width - 6, 64 64 self->thread->comm ?: "", self->thread->pid); 65 65 } 66 66 ··· 97 97 return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); 98 98 } 99 99 100 + struct sort_entry sort_comm = { 101 + .se_header = "Command", 102 + .se_cmp = sort__comm_cmp, 103 + .se_collapse = sort__comm_collapse, 104 + .se_snprintf = hist_entry__comm_snprintf, 105 + .se_width_idx = HISTC_COMM, 106 + }; 107 + 108 + /* --sort dso */ 109 + 100 110 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 101 111 { 102 112 struct dso *dso_l = map_l ? map_l->dso : NULL; ··· 127 117 return strcmp(dso_name_l, dso_name_r); 128 118 } 129 119 130 - struct sort_entry sort_comm = { 131 - .se_header = "Command", 132 - .se_cmp = sort__comm_cmp, 133 - .se_collapse = sort__comm_collapse, 134 - .se_snprintf = hist_entry__comm_snprintf, 135 - .se_width_idx = HISTC_COMM, 136 - }; 137 - 138 - /* --sort dso */ 139 - 140 120 static int64_t 141 121 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 142 122 { 143 123 return _sort__dso_cmp(left->ms.map, right->ms.map); 144 - } 145 - 146 - 147 - static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, 148 - u64 ip_l, u64 ip_r) 149 - { 150 - if (!sym_l || !sym_r) 151 - return cmp_null(sym_l, sym_r); 152 - 153 - if (sym_l == sym_r) 154 - return 0; 155 - 156 - if (sym_l) 157 - ip_l = sym_l->start; 158 - if (sym_r) 159 - ip_r = sym_r->start; 160 - 161 - return (int64_t)(ip_r - ip_l); 162 124 } 163 125 164 126 static int _hist_entry__dso_snprintf(struct map *map, char *bf, ··· 151 169 return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); 152 170 } 153 171 172 + struct sort_entry sort_dso = { 173 + .se_header = "Shared Object", 174 + .se_cmp = sort__dso_cmp, 175 + 
.se_snprintf = hist_entry__dso_snprintf, 176 + .se_width_idx = HISTC_DSO, 177 + }; 178 + 179 + /* --sort symbol */ 180 + 181 + static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, 182 + u64 ip_l, u64 ip_r) 183 + { 184 + if (!sym_l || !sym_r) 185 + return cmp_null(sym_l, sym_r); 186 + 187 + if (sym_l == sym_r) 188 + return 0; 189 + 190 + ip_l = sym_l->start; 191 + ip_r = sym_r->start; 192 + 193 + return (int64_t)(ip_r - ip_l); 194 + } 195 + 196 + static int64_t 197 + sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 198 + { 199 + u64 ip_l, ip_r; 200 + 201 + if (!left->ms.sym && !right->ms.sym) 202 + return right->level - left->level; 203 + 204 + if (!left->ms.sym || !right->ms.sym) 205 + return cmp_null(left->ms.sym, right->ms.sym); 206 + 207 + if (left->ms.sym == right->ms.sym) 208 + return 0; 209 + 210 + ip_l = left->ms.sym->start; 211 + ip_r = right->ms.sym->start; 212 + 213 + return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); 214 + } 215 + 154 216 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 155 217 u64 ip, char level, char *bf, size_t size, 156 - unsigned int width __maybe_unused) 218 + unsigned int width) 157 219 { 158 220 size_t ret = 0; 159 221 ··· 223 197 return ret; 224 198 } 225 199 226 - 227 - struct sort_entry sort_dso = { 228 - .se_header = "Shared Object", 229 - .se_cmp = sort__dso_cmp, 230 - .se_snprintf = hist_entry__dso_snprintf, 231 - .se_width_idx = HISTC_DSO, 232 - }; 233 - 234 200 static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, 235 - size_t size, 236 - unsigned int width __maybe_unused) 201 + size_t size, unsigned int width) 237 202 { 238 203 return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, 239 204 self->level, bf, size, width); 240 - } 241 - 242 - /* --sort symbol */ 243 - static int64_t 244 - sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 245 - { 246 - u64 ip_l, ip_r; 247 - 248 - if (!left->ms.sym && 
!right->ms.sym) 249 - return right->level - left->level; 250 - 251 - if (!left->ms.sym || !right->ms.sym) 252 - return cmp_null(left->ms.sym, right->ms.sym); 253 - 254 - if (left->ms.sym == right->ms.sym) 255 - return 0; 256 - 257 - ip_l = left->ms.sym->start; 258 - ip_r = right->ms.sym->start; 259 - 260 - return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); 261 205 } 262 206 263 207 struct sort_entry sort_sym = { ··· 331 335 static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, 332 336 size_t size, unsigned int width) 333 337 { 334 - return repsep_snprintf(bf, size, "%-*d", width, self->cpu); 338 + return repsep_snprintf(bf, size, "%*d", width, self->cpu); 335 339 } 336 340 337 341 struct sort_entry sort_cpu = { ··· 340 344 .se_snprintf = hist_entry__cpu_snprintf, 341 345 .se_width_idx = HISTC_CPU, 342 346 }; 347 + 348 + /* sort keys for branch stacks */ 343 349 344 350 static int64_t 345 351 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) ··· 356 358 return _hist_entry__dso_snprintf(self->branch_info->from.map, 357 359 bf, size, width); 358 360 } 359 - 360 - struct sort_entry sort_dso_from = { 361 - .se_header = "Source Shared Object", 362 - .se_cmp = sort__dso_from_cmp, 363 - .se_snprintf = hist_entry__dso_from_snprintf, 364 - .se_width_idx = HISTC_DSO_FROM, 365 - }; 366 361 367 362 static int64_t 368 363 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) ··· 397 406 } 398 407 399 408 static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, 400 - size_t size, 401 - unsigned int width __maybe_unused) 409 + size_t size, unsigned int width) 402 410 { 403 411 struct addr_map_symbol *from = &self->branch_info->from; 404 412 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, ··· 406 416 } 407 417 408 418 static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, 409 - size_t size, 410 - unsigned int width __maybe_unused) 419 + size_t size, unsigned int 
width) 411 420 { 412 421 struct addr_map_symbol *to = &self->branch_info->to; 413 422 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 414 423 self->level, bf, size, width); 415 424 416 425 } 426 + 427 + struct sort_entry sort_dso_from = { 428 + .se_header = "Source Shared Object", 429 + .se_cmp = sort__dso_from_cmp, 430 + .se_snprintf = hist_entry__dso_from_snprintf, 431 + .se_width_idx = HISTC_DSO_FROM, 432 + }; 417 433 418 434 struct sort_entry sort_dso_to = { 419 435 .se_header = "Target Shared Object", ··· 480 484 481 485 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 482 486 483 - static struct sort_dimension sort_dimensions[] = { 487 + static struct sort_dimension common_sort_dimensions[] = { 484 488 DIM(SORT_PID, "pid", sort_thread), 485 489 DIM(SORT_COMM, "comm", sort_comm), 486 490 DIM(SORT_DSO, "dso", sort_dso), 487 - DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 488 - DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 489 491 DIM(SORT_SYM, "symbol", sort_sym), 490 - DIM(SORT_SYM_TO, "symbol_from", sort_sym_from), 491 - DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to), 492 492 DIM(SORT_PARENT, "parent", sort_parent), 493 493 DIM(SORT_CPU, "cpu", sort_cpu), 494 - DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 495 494 DIM(SORT_SRCLINE, "srcline", sort_srcline), 496 495 }; 496 + 497 + #undef DIM 498 + 499 + #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 500 + 501 + static struct sort_dimension bstack_sort_dimensions[] = { 502 + DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 503 + DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 504 + DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 505 + DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 506 + DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 507 + }; 508 + 509 + #undef DIM 497 510 498 511 int sort_dimension__add(const char *tok) 499 512 { 500 513 unsigned int i; 501 514 502 - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { 503 - struct sort_dimension *sd = 
&sort_dimensions[i]; 515 + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 516 + struct sort_dimension *sd = &common_sort_dimensions[i]; 504 517 505 518 if (strncasecmp(tok, sd->name, strlen(tok))) 506 519 continue; 520 + 507 521 if (sd->entry == &sort_parent) { 508 522 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 509 523 if (ret) { ··· 524 518 return -EINVAL; 525 519 } 526 520 sort__has_parent = 1; 527 - } else if (sd->entry == &sort_sym || 528 - sd->entry == &sort_sym_from || 529 - sd->entry == &sort_sym_to) { 521 + } else if (sd->entry == &sort_sym) { 530 522 sort__has_sym = 1; 531 523 } 532 524 ··· 534 530 if (sd->entry->se_collapse) 535 531 sort__need_collapse = 1; 536 532 537 - if (list_empty(&hist_entry__sort_list)) { 538 - if (!strcmp(sd->name, "pid")) 539 - sort__first_dimension = SORT_PID; 540 - else if (!strcmp(sd->name, "comm")) 541 - sort__first_dimension = SORT_COMM; 542 - else if (!strcmp(sd->name, "dso")) 543 - sort__first_dimension = SORT_DSO; 544 - else if (!strcmp(sd->name, "symbol")) 545 - sort__first_dimension = SORT_SYM; 546 - else if (!strcmp(sd->name, "parent")) 547 - sort__first_dimension = SORT_PARENT; 548 - else if (!strcmp(sd->name, "cpu")) 549 - sort__first_dimension = SORT_CPU; 550 - else if (!strcmp(sd->name, "symbol_from")) 551 - sort__first_dimension = SORT_SYM_FROM; 552 - else if (!strcmp(sd->name, "symbol_to")) 553 - sort__first_dimension = SORT_SYM_TO; 554 - else if (!strcmp(sd->name, "dso_from")) 555 - sort__first_dimension = SORT_DSO_FROM; 556 - else if (!strcmp(sd->name, "dso_to")) 557 - sort__first_dimension = SORT_DSO_TO; 558 - else if (!strcmp(sd->name, "mispredict")) 559 - sort__first_dimension = SORT_MISPREDICT; 560 - } 533 + if (list_empty(&hist_entry__sort_list)) 534 + sort__first_dimension = i; 561 535 562 536 list_add_tail(&sd->entry->list, &hist_entry__sort_list); 563 537 sd->taken = 1; 564 538 565 539 return 0; 566 540 } 541 + 542 + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); 
i++) { 543 + struct sort_dimension *sd = &bstack_sort_dimensions[i]; 544 + 545 + if (strncasecmp(tok, sd->name, strlen(tok))) 546 + continue; 547 + 548 + if (sort__branch_mode != 1) 549 + return -EINVAL; 550 + 551 + if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 552 + sort__has_sym = 1; 553 + 554 + if (sd->taken) 555 + return 0; 556 + 557 + if (sd->entry->se_collapse) 558 + sort__need_collapse = 1; 559 + 560 + if (list_empty(&hist_entry__sort_list)) 561 + sort__first_dimension = i + __SORT_BRANCH_STACK; 562 + 563 + list_add_tail(&sd->entry->list, &hist_entry__sort_list); 564 + sd->taken = 1; 565 + 566 + return 0; 567 + } 568 + 567 569 return -ESRCH; 568 570 } 569 571 ··· 579 569 580 570 for (tok = strtok_r(str, ", ", &tmp); 581 571 tok; tok = strtok_r(NULL, ", ", &tmp)) { 582 - if (sort_dimension__add(tok) < 0) { 572 + int ret = sort_dimension__add(tok); 573 + if (ret == -EINVAL) { 574 + error("Invalid --sort key: `%s'", tok); 575 + usage_with_options(usagestr, opts); 576 + } else if (ret == -ESRCH) { 583 577 error("Unknown --sort key: `%s'", tok); 584 578 usage_with_options(usagestr, opts); 585 579 }
+6 -2
tools/perf/util/sort.h
··· 122 122 } 123 123 124 124 enum sort_type { 125 + /* common sort keys */ 125 126 SORT_PID, 126 127 SORT_COMM, 127 128 SORT_DSO, 128 129 SORT_SYM, 129 130 SORT_PARENT, 130 131 SORT_CPU, 131 - SORT_DSO_FROM, 132 + SORT_SRCLINE, 133 + 134 + /* branch stack specific sort keys */ 135 + __SORT_BRANCH_STACK, 136 + SORT_DSO_FROM = __SORT_BRANCH_STACK, 132 137 SORT_DSO_TO, 133 138 SORT_SYM_FROM, 134 139 SORT_SYM_TO, 135 140 SORT_MISPREDICT, 136 - SORT_SRCLINE, 137 141 }; 138 142 139 143 /*
+18
tools/perf/util/string.c
··· 332 332 } 333 333 334 334 /** 335 + * ltrim - Removes leading whitespace from @s. 336 + * @s: The string to be stripped. 337 + * 338 + * Return pointer to the first non-whitespace character in @s. 339 + */ 340 + char *ltrim(char *s) 341 + { 342 + int len = strlen(s); 343 + 344 + while (len && isspace(*s)) { 345 + len--; 346 + s++; 347 + } 348 + 349 + return s; 350 + } 351 + 352 + /** 335 353 * rtrim - Removes trailing whitespace from @s. 336 354 * @s: The string to be stripped. 337 355 *
-3
tools/perf/util/symbol-elf.c
··· 1 - #include <libelf.h> 2 - #include <gelf.h> 3 - #include <elf.h> 4 1 #include <fcntl.h> 5 2 #include <stdio.h> 6 3 #include <errno.h>
-1
tools/perf/util/symbol-minimal.c
··· 1 1 #include "symbol.h" 2 2 3 - #include <elf.h> 4 3 #include <stdio.h> 5 4 #include <fcntl.h> 6 5 #include <string.h>
+9 -5
tools/perf/util/symbol.c
··· 768 768 else 769 769 machine = NULL; 770 770 771 - name = malloc(PATH_MAX); 772 - if (!name) 773 - return -1; 774 - 775 771 dso->adjust_symbols = 0; 776 772 777 773 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { ··· 790 794 791 795 if (machine) 792 796 root_dir = machine->root_dir; 797 + 798 + name = malloc(PATH_MAX); 799 + if (!name) 800 + return -1; 793 801 794 802 /* Iterate over candidate debug images. 795 803 * Keep track of "interesting" ones (those which have a symtab, dynsym, ··· 923 923 filename = dso__build_id_filename(dso, NULL, 0); 924 924 if (filename != NULL) { 925 925 err = dso__load_vmlinux(dso, map, filename, filter); 926 - if (err > 0) 926 + if (err > 0) { 927 + dso->lname_alloc = 1; 927 928 goto out; 929 + } 928 930 free(filename); 929 931 } 930 932 ··· 934 932 err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); 935 933 if (err > 0) { 936 934 dso__set_long_name(dso, strdup(vmlinux_path[i])); 935 + dso->lname_alloc = 1; 937 936 break; 938 937 } 939 938 } ··· 974 971 if (err > 0) { 975 972 dso__set_long_name(dso, 976 973 strdup(symbol_conf.vmlinux_name)); 974 + dso->lname_alloc = 1; 977 975 goto out_fixup; 978 976 } 979 977 return err;
+1 -1
tools/perf/util/symbol.h
··· 16 16 #ifdef LIBELF_SUPPORT 17 17 #include <libelf.h> 18 18 #include <gelf.h> 19 - #include <elf.h> 20 19 #endif 20 + #include <elf.h> 21 21 22 22 #include "dso.h" 23 23
+1 -1
tools/perf/util/sysfs.c
··· 8 8 }; 9 9 10 10 static int sysfs_found; 11 - char sysfs_mountpoint[PATH_MAX]; 11 + char sysfs_mountpoint[PATH_MAX + 1]; 12 12 13 13 static int sysfs_valid_mountpoint(const char *sysfs) 14 14 {
-2
tools/perf/util/top.h
··· 29 29 bool sort_has_symbols; 30 30 bool kptr_restrict_warned; 31 31 bool vmlinux_warned; 32 - bool sample_id_all_missing; 33 - bool exclude_guest_missing; 34 32 bool dump_symtab; 35 33 struct hist_entry *sym_filter_entry; 36 34 struct perf_evsel *sym_evsel;
+24
tools/perf/util/util.c
··· 12 12 */ 13 13 unsigned int page_size; 14 14 15 + bool test_attr__enabled; 16 + 15 17 bool perf_host = true; 16 18 bool perf_guest = false; 17 19 ··· 220 218 #else 221 219 void dump_stack(void) {} 222 220 #endif 221 + 222 + void get_term_dimensions(struct winsize *ws) 223 + { 224 + char *s = getenv("LINES"); 225 + 226 + if (s != NULL) { 227 + ws->ws_row = atoi(s); 228 + s = getenv("COLUMNS"); 229 + if (s != NULL) { 230 + ws->ws_col = atoi(s); 231 + if (ws->ws_row && ws->ws_col) 232 + return; 233 + } 234 + } 235 + #ifdef TIOCGWINSZ 236 + if (ioctl(1, TIOCGWINSZ, ws) == 0 && 237 + ws->ws_row && ws->ws_col) 238 + return; 239 + #endif 240 + ws->ws_row = 25; 241 + ws->ws_col = 80; 242 + }
+4
tools/perf/util/util.h
··· 265 265 size_t hex_width(u64 v); 266 266 int hex2u64(const char *ptr, u64 *val); 267 267 268 + char *ltrim(char *s); 268 269 char *rtrim(char *s); 269 270 270 271 void dump_stack(void); 271 272 272 273 extern unsigned int page_size; 274 + 275 + struct winsize; 276 + void get_term_dimensions(struct winsize *ws); 273 277 274 278 #endif