Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (52 commits)
perf record: Use per-task-per-cpu events for inherited events
perf record: Properly synchronize child creation
perf events: Allow per-task-per-cpu counters
perf diff: Percent calcs should use double values
perf diff: Change the default sort order to "dso,symbol"
perf diff: Use perf_session__fprintf_hists just like 'perf record'
perf report: Fix cut'n'paste error recently introduced
perf session: Move perf report specific hits out of perf_session__fprintf_hists
perf tools: Move hist entries printing routines from perf report
perf report: Generalize perf_session__fprintf_hists()
perf symbols: Move symbol filtering to event__preprocess_sample()
perf symbols: Adopt the strlists for dso, comm
perf symbols: Make symbol_conf global
perf probe: Fix to show which probe point is not found
perf probe: Check symbols in symtab/kallsyms
perf probe: Check build-id of vmlinux
perf probe: Reject second attempt of adding same-name event
perf probe: Support event name for --add option
perf probe: Add glob matching support on --del
perf probe: Use strlist__for_each macros in probe-event.c
...

+2299 -1419
+3 -9
include/linux/perf_event.h
··· 211 __u32 wakeup_watermark; /* bytes before wakeup */ 212 }; 213 214 - struct { /* Hardware breakpoint info */ 215 - __u64 bp_addr; 216 - __u32 bp_type; 217 - __u32 bp_len; 218 - __u64 __bp_reserved_1; 219 - __u64 __bp_reserved_2; 220 - }; 221 - 222 __u32 __reserved_2; 223 224 - __u64 __reserved_3; 225 }; 226 227 /*
··· 211 __u32 wakeup_watermark; /* bytes before wakeup */ 212 }; 213 214 __u32 __reserved_2; 215 216 + __u64 bp_addr; 217 + __u32 bp_type; 218 + __u32 bp_len; 219 }; 220 221 /*
+9 -6
kernel/perf_event.c
··· 782 783 add_event_to_ctx(event, ctx); 784 785 /* 786 * Don't put the event on if it is disabled or if 787 * it is in a group and the group isn't on. ··· 927 if (event->state >= PERF_EVENT_STATE_INACTIVE) 928 goto unlock; 929 __perf_event_mark_enabled(event, ctx); 930 931 /* 932 * If the event is in a group and isn't the group leader, ··· 1601 unsigned long flags; 1602 int err; 1603 1604 - /* 1605 - * If cpu is not a wildcard then this is a percpu event: 1606 - */ 1607 - if (cpu != -1) { 1608 /* Must be root to operate on a CPU event: */ 1609 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1610 return ERR_PTR(-EACCES); 1611 1612 - if (cpu < 0 || cpu > num_possible_cpus()) 1613 return ERR_PTR(-EINVAL); 1614 1615 /* ··· 4567 if (attr->type >= PERF_TYPE_MAX) 4568 return -EINVAL; 4569 4570 - if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) 4571 return -EINVAL; 4572 4573 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
··· 782 783 add_event_to_ctx(event, ctx); 784 785 + if (event->cpu != -1 && event->cpu != smp_processor_id()) 786 + goto unlock; 787 + 788 /* 789 * Don't put the event on if it is disabled or if 790 * it is in a group and the group isn't on. ··· 924 if (event->state >= PERF_EVENT_STATE_INACTIVE) 925 goto unlock; 926 __perf_event_mark_enabled(event, ctx); 927 + 928 + if (event->cpu != -1 && event->cpu != smp_processor_id()) 929 + goto unlock; 930 931 /* 932 * If the event is in a group and isn't the group leader, ··· 1595 unsigned long flags; 1596 int err; 1597 1598 + if (pid == -1 && cpu != -1) { 1599 /* Must be root to operate on a CPU event: */ 1600 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1601 return ERR_PTR(-EACCES); 1602 1603 + if (cpu < 0 || cpu >= nr_cpumask_bits) 1604 return ERR_PTR(-EINVAL); 1605 1606 /* ··· 4564 if (attr->type >= PERF_TYPE_MAX) 4565 return -EINVAL; 4566 4567 + if (attr->__reserved_1 || attr->__reserved_2) 4568 return -EINVAL; 4569 4570 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
+55
tools/perf/Documentation/perf-diff.txt
···
··· 1 + perf-diff(1) 2 + ============== 3 + 4 + NAME 5 + ---- 6 + perf-diff - Read two perf.data files and display the differential profile 7 + 8 + SYNOPSIS 9 + -------- 10 + [verse] 11 + 'perf diff' [oldfile] [newfile] 12 + 13 + DESCRIPTION 14 + ----------- 15 + This command displays the performance difference between two perf.data files 16 + captured via perf record. 17 + 18 + If no parameters are passed it will assume perf.data.old and perf.data. 19 + 20 + OPTIONS 21 + ------- 22 + -d:: 23 + --dsos=:: 24 + Only consider symbols in these dsos. CSV that understands 25 + file://filename entries. 26 + 27 + -C:: 28 + --comms=:: 29 + Only consider symbols in these comms. CSV that understands 30 + file://filename entries. 31 + 32 + -S:: 33 + --symbols=:: 34 + Only consider these symbols. CSV that understands 35 + file://filename entries. 36 + 37 + -s:: 38 + --sort=:: 39 + Sort by key(s): pid, comm, dso, symbol. 40 + 41 + -t:: 42 + --field-separator=:: 43 + 44 + Use a special separator character and don't pad with spaces, replacing 45 + all occurrences of this separator in symbol names (and other output) 46 + with a '.' character, so that it's the only non-valid separator. 47 + 48 + -v:: 49 + --verbose:: 50 + Be verbose, for instance, show the raw counts in addition to the 51 + diff. 52 + 53 + SEE ALSO 54 + -------- 55 + linkperf:perf-record[1]
+2 -1
tools/perf/Documentation/perf-probe.txt
··· 49 ------------ 50 Probe points are defined by following syntax. 51 52 - "FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]" 53 54 'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function. 55 It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. 56 'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
··· 49 ------------ 50 Probe points are defined by following syntax. 51 52 + "[EVENT=]FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]" 53 54 + 'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, event group name is set as 'probe'. 55 'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function. 56 It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. 57 'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
+4
tools/perf/Documentation/perf-report.txt
··· 39 Only consider these symbols. CSV that understands 40 file://filename entries. 41 42 -w:: 43 --field-width=:: 44 Force each column width to the provided list, for large terminal
··· 39 Only consider these symbols. CSV that understands 40 file://filename entries. 41 42 + -s:: 43 + --sort=:: 44 + Sort by key(s): pid, comm, dso, symbol, parent. 45 + 46 -w:: 47 --field-width=:: 48 Force each column width to the provided list, for large terminal
+26 -1
tools/perf/Documentation/perf-trace.txt
··· 8 SYNOPSIS 9 -------- 10 [verse] 11 - 'perf trace' [-i <file> | --input=file] symbol_name 12 13 DESCRIPTION 14 ----------- 15 This command reads the input file and displays the trace recorded. 16 17 OPTIONS 18 ------- 19 -D:: 20 --dump-raw-trace=:: 21 Display verbose dump of the trace data. 22 23 -s:: 24 --script=::
··· 8 SYNOPSIS 9 -------- 10 [verse] 11 + 'perf trace' {record <script> | report <script> [args] } 12 13 DESCRIPTION 14 ----------- 15 This command reads the input file and displays the trace recorded. 16 + 17 + There are several variants of perf trace: 18 + 19 + 'perf trace' to see a detailed trace of the workload that was 20 + recorded. 21 + 22 + 'perf trace record <script>' to record the events required for 'perf 23 + trace report'. <script> is the name displayed in the output of 24 + 'perf trace --list' i.e. the actual script name minus any language 25 + extension. 26 + 27 + 'perf trace report <script>' to run and display the results of 28 + <script>. <script> is the name displayed in the output of 'perf 29 + trace --list' i.e. the actual script name minus any language 30 + extension. The perf.data output from a previous run of 'perf trace 31 + record <script>' is used and should be present for this command to 32 + succeed. 33 34 OPTIONS 35 ------- 36 -D:: 37 --dump-raw-trace=:: 38 Display verbose dump of the trace data. 39 + 40 + -L:: 41 + --Latency=:: 42 + Show latency attributes (irqs/preemption disabled, etc). 43 + 44 + -l:: 45 + --list=:: 46 + Display a list of available trace scripts. 47 48 -s:: 49 --script=::
+1 -3
tools/perf/Makefile
··· 370 LIB_H += util/sort.h 371 LIB_H += util/hist.h 372 LIB_H += util/thread.h 373 - LIB_H += util/data_map.h 374 LIB_H += util/probe-finder.h 375 LIB_H += util/probe-event.h 376 ··· 427 BUILTIN_OBJS += bench/sched-pipe.o 428 BUILTIN_OBJS += bench/mem-memcpy.o 429 430 BUILTIN_OBJS += builtin-help.o 431 BUILTIN_OBJS += builtin-sched.o 432 BUILTIN_OBJS += builtin-buildid-list.o ··· 996 $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 997 $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' 998 $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' 999 - $(INSTALL) scripts/perl/Perf-Trace-Util/Makefile.PL -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' 1000 - $(INSTALL) scripts/perl/Perf-Trace-Util/README -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' 1001 ifdef BUILT_INS 1002 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 1003 $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
··· 370 LIB_H += util/sort.h 371 LIB_H += util/hist.h 372 LIB_H += util/thread.h 373 LIB_H += util/probe-finder.h 374 LIB_H += util/probe-event.h 375 ··· 428 BUILTIN_OBJS += bench/sched-pipe.o 429 BUILTIN_OBJS += bench/mem-memcpy.o 430 431 + BUILTIN_OBJS += builtin-diff.o 432 BUILTIN_OBJS += builtin-help.o 433 BUILTIN_OBJS += builtin-sched.o 434 BUILTIN_OBJS += builtin-buildid-list.o ··· 996 $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 997 $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' 998 $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' 999 ifdef BUILT_INS 1000 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 1001 $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+24 -44
tools/perf/builtin-annotate.c
··· 26 #include "util/sort.h" 27 #include "util/hist.h" 28 #include "util/session.h" 29 - #include "util/data_map.h" 30 31 static char const *input_name = "perf.data"; 32 ··· 49 struct sym_priv { 50 struct sym_hist *hist; 51 struct sym_ext *ext; 52 - }; 53 - 54 - static struct symbol_conf symbol_conf = { 55 - .priv_size = sizeof(struct sym_priv), 56 - .try_vmlinux_path = true, 57 }; 58 59 static const char *sym_hist_filter; ··· 116 h->ip[offset]); 117 } 118 119 - static int hist_entry__add(struct addr_location *al, u64 count) 120 { 121 bool hit; 122 - struct hist_entry *he = __hist_entry__add(al, NULL, count, &hit); 123 if (he == NULL) 124 return -ENOMEM; 125 hist_hit(he, al->addr); 126 return 0; 127 } 128 129 - static int process_sample_event(event_t *event) 130 { 131 struct addr_location al; 132 133 dump_printf("(IP, %d): %d: %p\n", event->header.misc, 134 event->ip.pid, (void *)(long)event->ip.ip); 135 136 - if (event__preprocess_sample(event, &al, symbol_filter) < 0) { 137 fprintf(stderr, "problem processing %d event, skipping it.\n", 138 event->header.type); 139 return -1; 140 } 141 142 - if (hist_entry__add(&al, 1)) { 143 fprintf(stderr, "problem incrementing symbol count, " 144 "skipping event\n"); 145 return -1; ··· 425 free_source_line(he, len); 426 } 427 428 - static void find_annotations(void) 429 { 430 struct rb_node *nd; 431 432 - for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { 433 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); 434 struct sym_priv *priv; 435 ··· 450 } 451 } 452 453 - static struct perf_file_handler file_handler = { 454 .process_sample_event = process_sample_event, 455 .process_mmap_event = event__process_mmap, 456 .process_comm_event = event__process_comm, ··· 459 460 static int __cmd_annotate(void) 461 { 462 - struct perf_session *session = perf_session__new(input_name, O_RDONLY, force); 463 - struct thread *idle; 464 int ret; 465 466 if (session == NULL) 467 return -ENOMEM; 468 469 - idle = 
register_idle_thread(); 470 - register_perf_file_handler(&file_handler); 471 - 472 - ret = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 473 if (ret) 474 goto out_delete; 475 ··· 476 } 477 478 if (verbose > 3) 479 - threads__fprintf(stdout); 480 481 if (verbose > 2) 482 dsos__fprintf(stdout); 483 484 - collapse__resort(); 485 - output__resort(event__total[0]); 486 - 487 - find_annotations(); 488 out_delete: 489 perf_session__delete(session); 490 ··· 516 OPT_END() 517 }; 518 519 - static void setup_sorting(void) 520 - { 521 - char *tmp, *tok, *str = strdup(sort_order); 522 - 523 - for (tok = strtok_r(str, ", ", &tmp); 524 - tok; tok = strtok_r(NULL, ", ", &tmp)) { 525 - if (sort_dimension__add(tok) < 0) { 526 - error("Unknown --sort key: `%s'", tok); 527 - usage_with_options(annotate_usage, options); 528 - } 529 - } 530 - 531 - free(str); 532 - } 533 - 534 int cmd_annotate(int argc, const char **argv, const char *prefix __used) 535 { 536 - if (symbol__init(&symbol_conf) < 0) 537 - return -1; 538 - 539 argc = parse_options(argc, argv, options, annotate_usage, 0); 540 541 - setup_sorting(); 542 543 if (argc) { 544 /*
··· 26 #include "util/sort.h" 27 #include "util/hist.h" 28 #include "util/session.h" 29 30 static char const *input_name = "perf.data"; 31 ··· 50 struct sym_priv { 51 struct sym_hist *hist; 52 struct sym_ext *ext; 53 }; 54 55 static const char *sym_hist_filter; ··· 122 h->ip[offset]); 123 } 124 125 + static int perf_session__add_hist_entry(struct perf_session *self, 126 + struct addr_location *al, u64 count) 127 { 128 bool hit; 129 + struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, 130 + count, &hit); 131 if (he == NULL) 132 return -ENOMEM; 133 hist_hit(he, al->addr); 134 return 0; 135 } 136 137 + static int process_sample_event(event_t *event, struct perf_session *session) 138 { 139 struct addr_location al; 140 141 dump_printf("(IP, %d): %d: %p\n", event->header.misc, 142 event->ip.pid, (void *)(long)event->ip.ip); 143 144 + if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) { 145 fprintf(stderr, "problem processing %d event, skipping it.\n", 146 event->header.type); 147 return -1; 148 } 149 150 + if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) { 151 fprintf(stderr, "problem incrementing symbol count, " 152 "skipping event\n"); 153 return -1; ··· 429 free_source_line(he, len); 430 } 431 432 + static void perf_session__find_annotations(struct perf_session *self) 433 { 434 struct rb_node *nd; 435 436 + for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { 437 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); 438 struct sym_priv *priv; 439 ··· 454 } 455 } 456 457 + static struct perf_event_ops event_ops = { 458 .process_sample_event = process_sample_event, 459 .process_mmap_event = event__process_mmap, 460 .process_comm_event = event__process_comm, ··· 463 464 static int __cmd_annotate(void) 465 { 466 int ret; 467 + struct perf_session *session; 468 469 + session = perf_session__new(input_name, O_RDONLY, force); 470 if (session == NULL) 471 return -ENOMEM; 472 473 + ret = 
perf_session__process_events(session, &event_ops); 474 if (ret) 475 goto out_delete; 476 ··· 483 } 484 485 if (verbose > 3) 486 + perf_session__fprintf(session, stdout); 487 488 if (verbose > 2) 489 dsos__fprintf(stdout); 490 491 + perf_session__collapse_resort(session); 492 + perf_session__output_resort(session, session->event_total[0]); 493 + perf_session__find_annotations(session); 494 out_delete: 495 perf_session__delete(session); 496 ··· 524 OPT_END() 525 }; 526 527 int cmd_annotate(int argc, const char **argv, const char *prefix __used) 528 { 529 argc = parse_options(argc, argv, options, annotate_usage, 0); 530 531 + symbol_conf.priv_size = sizeof(struct sym_priv); 532 + symbol_conf.try_vmlinux_path = true; 533 + 534 + if (symbol__init() < 0) 535 + return -1; 536 + 537 + setup_sorting(annotate_usage, options); 538 539 if (argc) { 540 /*
+2 -2
tools/perf/builtin-buildid-list.c
··· 9 #include "builtin.h" 10 #include "perf.h" 11 #include "util/cache.h" 12 - #include "util/data_map.h" 13 #include "util/debug.h" 14 #include "util/parse-options.h" 15 #include "util/session.h" ··· 54 static int __cmd_buildid_list(void) 55 { 56 int err = -1; 57 - struct perf_session *session = perf_session__new(input_name, O_RDONLY, force); 58 59 if (session == NULL) 60 return -1; 61
··· 9 #include "builtin.h" 10 #include "perf.h" 11 #include "util/cache.h" 12 #include "util/debug.h" 13 #include "util/parse-options.h" 14 #include "util/session.h" ··· 55 static int __cmd_buildid_list(void) 56 { 57 int err = -1; 58 + struct perf_session *session; 59 60 + session = perf_session__new(input_name, O_RDONLY, force); 61 if (session == NULL) 62 return -1; 63
+248
tools/perf/builtin-diff.c
···
··· 1 + /* 2 + * builtin-diff.c 3 + * 4 + * Builtin diff command: Analyze two perf.data input files, look up and read 5 + * DSOs and symbol information, sort them and produce a diff. 6 + */ 7 + #include "builtin.h" 8 + 9 + #include "util/debug.h" 10 + #include "util/event.h" 11 + #include "util/hist.h" 12 + #include "util/session.h" 13 + #include "util/sort.h" 14 + #include "util/symbol.h" 15 + #include "util/util.h" 16 + 17 + #include <stdlib.h> 18 + 19 + static char const *input_old = "perf.data.old", 20 + *input_new = "perf.data"; 21 + static char diff__default_sort_order[] = "dso,symbol"; 22 + static int force; 23 + static bool show_displacement; 24 + 25 + static int perf_session__add_hist_entry(struct perf_session *self, 26 + struct addr_location *al, u64 count) 27 + { 28 + bool hit; 29 + struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, 30 + count, &hit); 31 + if (he == NULL) 32 + return -ENOMEM; 33 + 34 + if (hit) 35 + he->count += count; 36 + 37 + return 0; 38 + } 39 + 40 + static int diff__process_sample_event(event_t *event, struct perf_session *session) 41 + { 42 + struct addr_location al; 43 + struct sample_data data = { .period = 1, }; 44 + 45 + dump_printf("(IP, %d): %d: %p\n", event->header.misc, 46 + event->ip.pid, (void *)(long)event->ip.ip); 47 + 48 + if (event__preprocess_sample(event, session, &al, NULL) < 0) { 49 + pr_warning("problem processing %d event, skipping it.\n", 50 + event->header.type); 51 + return -1; 52 + } 53 + 54 + if (al.filtered) 55 + return 0; 56 + 57 + event__parse_sample(event, session->sample_type, &data); 58 + 59 + if (al.sym && perf_session__add_hist_entry(session, &al, data.period)) { 60 + pr_warning("problem incrementing symbol count, skipping event\n"); 61 + return -1; 62 + } 63 + 64 + session->events_stats.total += data.period; 65 + return 0; 66 + } 67 + 68 + static struct perf_event_ops event_ops = { 69 + .process_sample_event = diff__process_sample_event, 70 + .process_mmap_event = 
event__process_mmap, 71 + .process_comm_event = event__process_comm, 72 + .process_exit_event = event__process_task, 73 + .process_fork_event = event__process_task, 74 + .process_lost_event = event__process_lost, 75 + }; 76 + 77 + static void perf_session__insert_hist_entry_by_name(struct rb_root *root, 78 + struct hist_entry *he) 79 + { 80 + struct rb_node **p = &root->rb_node; 81 + struct rb_node *parent = NULL; 82 + struct hist_entry *iter; 83 + 84 + while (*p != NULL) { 85 + int cmp; 86 + parent = *p; 87 + iter = rb_entry(parent, struct hist_entry, rb_node); 88 + 89 + cmp = strcmp(he->map->dso->name, iter->map->dso->name); 90 + if (cmp > 0) 91 + p = &(*p)->rb_left; 92 + else if (cmp < 0) 93 + p = &(*p)->rb_right; 94 + else { 95 + cmp = strcmp(he->sym->name, iter->sym->name); 96 + if (cmp > 0) 97 + p = &(*p)->rb_left; 98 + else 99 + p = &(*p)->rb_right; 100 + } 101 + } 102 + 103 + rb_link_node(&he->rb_node, parent, p); 104 + rb_insert_color(&he->rb_node, root); 105 + } 106 + 107 + static void perf_session__resort_by_name(struct perf_session *self) 108 + { 109 + unsigned long position = 1; 110 + struct rb_root tmp = RB_ROOT; 111 + struct rb_node *next = rb_first(&self->hists); 112 + 113 + while (next != NULL) { 114 + struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node); 115 + 116 + next = rb_next(&n->rb_node); 117 + rb_erase(&n->rb_node, &self->hists); 118 + n->position = position++; 119 + perf_session__insert_hist_entry_by_name(&tmp, n); 120 + } 121 + 122 + self->hists = tmp; 123 + } 124 + 125 + static struct hist_entry * 126 + perf_session__find_hist_entry_by_name(struct perf_session *self, 127 + struct hist_entry *he) 128 + { 129 + struct rb_node *n = self->hists.rb_node; 130 + 131 + while (n) { 132 + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 133 + int cmp = strcmp(he->map->dso->name, iter->map->dso->name); 134 + 135 + if (cmp > 0) 136 + n = n->rb_left; 137 + else if (cmp < 0) 138 + n = n->rb_right; 139 + else { 140 + cmp 
= strcmp(he->sym->name, iter->sym->name); 141 + if (cmp > 0) 142 + n = n->rb_left; 143 + else if (cmp < 0) 144 + n = n->rb_right; 145 + else 146 + return iter; 147 + } 148 + } 149 + 150 + return NULL; 151 + } 152 + 153 + static void perf_session__match_hists(struct perf_session *old_session, 154 + struct perf_session *new_session) 155 + { 156 + struct rb_node *nd; 157 + 158 + perf_session__resort_by_name(old_session); 159 + 160 + for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) { 161 + struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node); 162 + pos->pair = perf_session__find_hist_entry_by_name(old_session, pos); 163 + } 164 + } 165 + 166 + static int __cmd_diff(void) 167 + { 168 + int ret, i; 169 + struct perf_session *session[2]; 170 + 171 + session[0] = perf_session__new(input_old, O_RDONLY, force); 172 + session[1] = perf_session__new(input_new, O_RDONLY, force); 173 + if (session[0] == NULL || session[1] == NULL) 174 + return -ENOMEM; 175 + 176 + for (i = 0; i < 2; ++i) { 177 + ret = perf_session__process_events(session[i], &event_ops); 178 + if (ret) 179 + goto out_delete; 180 + perf_session__output_resort(session[i], session[i]->events_stats.total); 181 + } 182 + 183 + perf_session__match_hists(session[0], session[1]); 184 + perf_session__fprintf_hists(session[1], session[0], 185 + show_displacement, stdout); 186 + out_delete: 187 + for (i = 0; i < 2; ++i) 188 + perf_session__delete(session[i]); 189 + return ret; 190 + } 191 + 192 + static const char *const diff_usage[] = { 193 + "perf diff [<options>] [old_file] [new_file]", 194 + }; 195 + 196 + static const struct option options[] = { 197 + OPT_BOOLEAN('v', "verbose", &verbose, 198 + "be more verbose (show symbol address, etc)"), 199 + OPT_BOOLEAN('m', "displacement", &show_displacement, 200 + "Show position displacement relative to baseline"), 201 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 202 + "dump raw trace in ASCII"), 203 + OPT_BOOLEAN('f', "force", &force, "don't 
complain, do it"), 204 + OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 205 + "load module symbols - WARNING: use only with -k and LIVE kernel"), 206 + OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, 207 + "Don't shorten the pathnames taking into account the cwd"), 208 + OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 209 + "only consider symbols in these dsos"), 210 + OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", 211 + "only consider symbols in these comms"), 212 + OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 213 + "only consider these symbols"), 214 + OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 215 + "sort by key(s): pid, comm, dso, symbol, parent"), 216 + OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", 217 + "separator for columns, no spaces will be added between " 218 + "columns '.' is reserved."), 219 + OPT_END() 220 + }; 221 + 222 + int cmd_diff(int argc, const char **argv, const char *prefix __used) 223 + { 224 + sort_order = diff__default_sort_order; 225 + argc = parse_options(argc, argv, options, diff_usage, 0); 226 + if (argc) { 227 + if (argc > 2) 228 + usage_with_options(diff_usage, options); 229 + if (argc == 2) { 230 + input_old = argv[0]; 231 + input_new = argv[1]; 232 + } else 233 + input_new = argv[0]; 234 + } 235 + 236 + symbol_conf.exclude_other = false; 237 + if (symbol__init() < 0) 238 + return -1; 239 + 240 + setup_sorting(diff_usage, options); 241 + setup_pager(); 242 + 243 + sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL); 244 + sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL); 245 + sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL); 246 + 247 + return __cmd_diff(); 248 + }
+27 -39
tools/perf/builtin-kmem.c
··· 12 #include "util/trace-event.h" 13 14 #include "util/debug.h" 15 - #include "util/data_map.h" 16 17 #include <linux/rbtree.h> 18 ··· 19 typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); 20 21 static char const *input_name = "perf.data"; 22 - 23 - static u64 sample_type; 24 25 static int alloc_flag; 26 static int caller_flag; ··· 309 } 310 } 311 312 - static int process_sample_event(event_t *event) 313 { 314 struct sample_data data; 315 struct thread *thread; ··· 319 data.cpu = -1; 320 data.period = 1; 321 322 - event__parse_sample(event, sample_type, &data); 323 324 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 325 event->header.misc, ··· 327 (void *)(long)data.ip, 328 (long long)data.period); 329 330 - thread = threads__findnew(event->ip.pid); 331 if (thread == NULL) { 332 pr_debug("problem processing %d event, skipping it.\n", 333 event->header.type); ··· 342 return 0; 343 } 344 345 - static int sample_type_check(u64 type) 346 { 347 - sample_type = type; 348 - 349 - if (!(sample_type & PERF_SAMPLE_RAW)) { 350 fprintf(stderr, 351 "No trace sample to read. 
Did you call perf record " 352 "without -R?"); ··· 354 return 0; 355 } 356 357 - static struct perf_file_handler file_handler = { 358 .process_sample_event = process_sample_event, 359 .process_comm_event = event__process_comm, 360 .sample_type_check = sample_type_check, 361 }; 362 - 363 - static int read_events(void) 364 - { 365 - int err; 366 - struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 367 - 368 - if (session == NULL) 369 - return -ENOMEM; 370 - 371 - register_idle_thread(); 372 - register_perf_file_handler(&file_handler); 373 - 374 - err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 375 - perf_session__delete(session); 376 - return err; 377 - } 378 379 static double fragmentation(unsigned long n_req, unsigned long n_alloc) 380 { ··· 368 return 100.0 - (100.0 * n_req / n_alloc); 369 } 370 371 - static void __print_result(struct rb_root *root, int n_lines, int is_caller) 372 { 373 struct rb_node *next; 374 ··· 390 if (is_caller) { 391 addr = data->call_site; 392 if (!raw_ip) 393 - sym = map_groups__find_function(kmaps, addr, NULL); 394 } else 395 addr = data->ptr; 396 ··· 431 printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs); 432 } 433 434 - static void print_result(void) 435 { 436 if (caller_flag) 437 - __print_result(&root_caller_sorted, caller_lines, 1); 438 if (alloc_flag) 439 - __print_result(&root_alloc_sorted, alloc_lines, 0); 440 print_summary(); 441 } 442 ··· 504 505 static int __cmd_kmem(void) 506 { 507 - setup_pager(); 508 - read_events(); 509 - sort_result(); 510 - print_result(); 511 512 - return 0; 513 } 514 515 static const char * const kmem_usage[] = { ··· 766 767 int cmd_kmem(int argc, const char **argv, const char *prefix __used) 768 { 769 - symbol__init(0); 770 - 771 argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); 772 773 if (!argc) 774 usage_with_options(kmem_usage, kmem_options); 775 776 if (!strncmp(argv[0], "rec", 3)) { 777 return 
__cmd_record(argc, argv);
··· 12 #include "util/trace-event.h" 13 14 #include "util/debug.h" 15 16 #include <linux/rbtree.h> 17 ··· 20 typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); 21 22 static char const *input_name = "perf.data"; 23 24 static int alloc_flag; 25 static int caller_flag; ··· 312 } 313 } 314 315 + static int process_sample_event(event_t *event, struct perf_session *session) 316 { 317 struct sample_data data; 318 struct thread *thread; ··· 322 data.cpu = -1; 323 data.period = 1; 324 325 + event__parse_sample(event, session->sample_type, &data); 326 327 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 328 event->header.misc, ··· 330 (void *)(long)data.ip, 331 (long long)data.period); 332 333 + thread = perf_session__findnew(session, event->ip.pid); 334 if (thread == NULL) { 335 pr_debug("problem processing %d event, skipping it.\n", 336 event->header.type); ··· 345 return 0; 346 } 347 348 + static int sample_type_check(struct perf_session *session) 349 { 350 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 351 fprintf(stderr, 352 "No trace sample to read. 
Did you call perf record " 353 "without -R?"); ··· 359 return 0; 360 } 361 362 + static struct perf_event_ops event_ops = { 363 .process_sample_event = process_sample_event, 364 .process_comm_event = event__process_comm, 365 .sample_type_check = sample_type_check, 366 }; 367 368 static double fragmentation(unsigned long n_req, unsigned long n_alloc) 369 { ··· 389 return 100.0 - (100.0 * n_req / n_alloc); 390 } 391 392 + static void __print_result(struct rb_root *root, struct perf_session *session, 393 + int n_lines, int is_caller) 394 { 395 struct rb_node *next; 396 ··· 410 if (is_caller) { 411 addr = data->call_site; 412 if (!raw_ip) 413 + sym = map_groups__find_function(&session->kmaps, session, addr, NULL); 414 } else 415 addr = data->ptr; 416 ··· 451 printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs); 452 } 453 454 + static void print_result(struct perf_session *session) 455 { 456 if (caller_flag) 457 + __print_result(&root_caller_sorted, session, caller_lines, 1); 458 if (alloc_flag) 459 + __print_result(&root_alloc_sorted, session, alloc_lines, 0); 460 print_summary(); 461 } 462 ··· 524 525 static int __cmd_kmem(void) 526 { 527 + int err; 528 + struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 529 + if (session == NULL) 530 + return -ENOMEM; 531 532 + setup_pager(); 533 + err = perf_session__process_events(session, &event_ops); 534 + if (err != 0) 535 + goto out_delete; 536 + sort_result(); 537 + print_result(session); 538 + out_delete: 539 + perf_session__delete(session); 540 + return err; 541 } 542 543 static const char * const kmem_usage[] = { ··· 778 779 int cmd_kmem(int argc, const char **argv, const char *prefix __used) 780 { 781 argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); 782 783 if (!argc) 784 usage_with_options(kmem_usage, kmem_options); 785 + 786 + symbol__init(); 787 788 if (!strncmp(argv[0], "rec", 3)) { 789 return __cmd_record(argc, argv);
+78 -63
tools/perf/builtin-probe.c
··· 38 #include "util/strlist.h" 39 #include "util/event.h" 40 #include "util/debug.h" 41 #include "util/parse-options.h" 42 #include "util/parse-events.h" /* For debugfs_path */ 43 #include "util/probe-finder.h" 44 #include "util/probe-event.h" 45 - 46 - /* Default vmlinux search paths */ 47 - #define NR_SEARCH_PATH 4 48 - const char *default_search_path[NR_SEARCH_PATH] = { 49 - "/lib/modules/%s/build/vmlinux", /* Custom build kernel */ 50 - "/usr/lib/debug/lib/modules/%s/vmlinux", /* Red Hat debuginfo */ 51 - "/boot/vmlinux-debug-%s", /* Ubuntu */ 52 - "./vmlinux", /* CWD */ 53 - }; 54 55 #define MAX_PATH_LEN 256 56 #define MAX_PROBES 128 57 58 /* Session management structure */ 59 static struct { 60 - char *vmlinux; 61 - char *release; 62 - int need_dwarf; 63 int nr_probe; 64 struct probe_point probes[MAX_PROBES]; 65 struct strlist *dellist; 66 } session; 67 68 - static bool listing; 69 70 /* Parse an event definition. Note that any error must die. */ 71 static void parse_probe_event(const char *str) ··· 72 die("Too many probes (> %d) are specified.", MAX_PROBES); 73 74 /* Parse perf-probe event into probe_point */ 75 - session.need_dwarf = parse_perf_probe_event(str, pp); 76 77 pr_debug("%d arguments\n", pp->nr_args); 78 } ··· 115 return 0; 116 } 117 118 - #ifndef NO_LIBDWARF 119 - static int open_default_vmlinux(void) 120 { 121 - struct utsname uts; 122 - char fname[MAX_PATH_LEN]; 123 - int fd, ret, i; 124 125 - ret = uname(&uts); 126 - if (ret) { 127 - pr_debug("uname() failed.\n"); 128 - return -errno; 129 } 130 - session.release = uts.release; 131 - for (i = 0; i < NR_SEARCH_PATH; i++) { 132 - ret = snprintf(fname, MAX_PATH_LEN, 133 - default_search_path[i], session.release); 134 - if (ret >= MAX_PATH_LEN || ret < 0) { 135 - pr_debug("Filename(%d,%s) is too long.\n", i, 136 - uts.release); 137 - errno = E2BIG; 138 - return -E2BIG; 139 - } 140 - pr_debug("try to open %s\n", fname); 141 - fd = open(fname, O_RDONLY); 142 - if (fd >= 0) 143 - break; 144 - } 145 
- return fd; 146 } 147 #endif 148 ··· 150 OPT_BOOLEAN('v', "verbose", &verbose, 151 "be more verbose (show parsed arguments, etc)"), 152 #ifndef NO_LIBDWARF 153 - OPT_STRING('k', "vmlinux", &session.vmlinux, "file", 154 - "vmlinux/module pathname"), 155 #endif 156 - OPT_BOOLEAN('l', "list", &listing, "list up current probe events"), 157 OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", 158 opt_del_probe_event), 159 OPT_CALLBACK('a', "add", NULL, 160 #ifdef NO_LIBDWARF 161 - "FUNC[+OFFS|%return] [ARG ...]", 162 #else 163 - "FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]", 164 #endif 165 "probe point definition, where\n" 166 - "\t\tGRP:\tGroup name (optional)\n" 167 - "\t\tNAME:\tEvent name\n" 168 "\t\tFUNC:\tFunction name\n" 169 "\t\tOFFS:\tOffset from function entry (in byte)\n" 170 "\t\t%return:\tPut the probe at function return\n" ··· 179 #endif 180 "\t\t\tkprobe-tracer argument format.)\n", 181 opt_add_probe_event), 182 OPT_END() 183 }; 184 ··· 194 195 argc = parse_options(argc, argv, options, probe_usage, 196 PARSE_OPT_STOP_AT_NON_OPTION); 197 - if (argc > 0) 198 parse_probe_event_argv(argc, argv); 199 200 - if ((session.nr_probe == 0 && !session.dellist && !listing)) 201 usage_with_options(probe_usage, options); 202 203 - if (listing) { 204 if (session.nr_probe != 0 || session.dellist) { 205 pr_warning(" Error: Don't use --list with" 206 " --add/--del.\n"); ··· 222 return 0; 223 } 224 225 if (session.need_dwarf) 226 #ifdef NO_LIBDWARF 227 die("Debuginfo-analysis is not supported"); 228 #else /* !NO_LIBDWARF */ 229 pr_debug("Some probes require debuginfo.\n"); 230 231 - if (session.vmlinux) { 232 - pr_debug("Try to open %s.", session.vmlinux); 233 - fd = open(session.vmlinux, O_RDONLY); 234 - } else 235 - fd = open_default_vmlinux(); 236 if (fd < 0) { 237 if (session.need_dwarf) 238 die("Could not open debuginfo file."); ··· 261 262 lseek(fd, SEEK_SET, 0); 263 ret = find_probepoint(fd, pp); 264 - if (ret < 0) { 265 - if 
(session.need_dwarf) 266 - die("Could not analyze debuginfo."); 267 - 268 - pr_warning("An error occurred in debuginfo analysis. Try to use symbols.\n"); 269 - break; 270 } 271 - if (ret == 0) /* No error but failed to find probe point. */ 272 - die("No probe point found."); 273 } 274 close(fd); 275 ··· 289 if (pp->found) /* This probe is already found. */ 290 continue; 291 292 ret = synthesize_trace_kprobe_event(pp); 293 if (ret == -E2BIG) 294 die("probe point definition becomes too long."); ··· 298 } 299 300 /* Settng up probe points */ 301 - add_trace_kprobe_events(session.probes, session.nr_probe); 302 return 0; 303 } 304
··· 38 #include "util/strlist.h" 39 #include "util/event.h" 40 #include "util/debug.h" 41 + #include "util/symbol.h" 42 + #include "util/thread.h" 43 + #include "util/session.h" 44 #include "util/parse-options.h" 45 #include "util/parse-events.h" /* For debugfs_path */ 46 #include "util/probe-finder.h" 47 #include "util/probe-event.h" 48 49 #define MAX_PATH_LEN 256 50 #define MAX_PROBES 128 51 52 /* Session management structure */ 53 static struct { 54 + bool need_dwarf; 55 + bool list_events; 56 + bool force_add; 57 int nr_probe; 58 struct probe_point probes[MAX_PROBES]; 59 struct strlist *dellist; 60 + struct perf_session *psession; 61 + struct map *kmap; 62 } session; 63 64 65 /* Parse an event definition. Note that any error must die. */ 66 static void parse_probe_event(const char *str) ··· 77 die("Too many probes (> %d) are specified.", MAX_PROBES); 78 79 /* Parse perf-probe event into probe_point */ 80 + parse_perf_probe_event(str, pp, &session.need_dwarf); 81 82 pr_debug("%d arguments\n", pp->nr_args); 83 } ··· 120 return 0; 121 } 122 123 + /* Currently just checking function name from symbol map */ 124 + static void evaluate_probe_point(struct probe_point *pp) 125 { 126 + struct symbol *sym; 127 + sym = map__find_symbol_by_name(session.kmap, pp->function, 128 + session.psession, NULL); 129 + if (!sym) 130 + die("Kernel symbol \'%s\' not found - probe not added.", 131 + pp->function); 132 + } 133 134 + #ifndef NO_LIBDWARF 135 + static int open_vmlinux(void) 136 + { 137 + if (map__load(session.kmap, session.psession, NULL) < 0) { 138 + pr_debug("Failed to load kernel map.\n"); 139 + return -EINVAL; 140 } 141 + pr_debug("Try to open %s\n", session.kmap->dso->long_name); 142 + return open(session.kmap->dso->long_name, O_RDONLY); 143 } 144 #endif 145 ··· 163 OPT_BOOLEAN('v', "verbose", &verbose, 164 "be more verbose (show parsed arguments, etc)"), 165 #ifndef NO_LIBDWARF 166 + OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 167 + "file", "vmlinux 
pathname"), 168 #endif 169 + OPT_BOOLEAN('l', "list", &session.list_events, 170 + "list up current probe events"), 171 OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", 172 opt_del_probe_event), 173 OPT_CALLBACK('a', "add", NULL, 174 #ifdef NO_LIBDWARF 175 + "[EVENT=]FUNC[+OFFS|%return] [ARG ...]", 176 #else 177 + "[EVENT=]FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]", 178 #endif 179 "probe point definition, where\n" 180 + "\t\tGROUP:\tGroup name (optional)\n" 181 + "\t\tEVENT:\tEvent name\n" 182 "\t\tFUNC:\tFunction name\n" 183 "\t\tOFFS:\tOffset from function entry (in byte)\n" 184 "\t\t%return:\tPut the probe at function return\n" ··· 191 #endif 192 "\t\t\tkprobe-tracer argument format.)\n", 193 opt_add_probe_event), 194 + OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events" 195 + " with existing name"), 196 OPT_END() 197 }; 198 ··· 204 205 argc = parse_options(argc, argv, options, probe_usage, 206 PARSE_OPT_STOP_AT_NON_OPTION); 207 + if (argc > 0) { 208 + if (strcmp(argv[0], "-") == 0) { 209 + pr_warning(" Error: '-' is not supported.\n"); 210 + usage_with_options(probe_usage, options); 211 + } 212 parse_probe_event_argv(argc, argv); 213 + } 214 215 + if ((!session.nr_probe && !session.dellist && !session.list_events)) 216 usage_with_options(probe_usage, options); 217 218 + if (session.list_events) { 219 if (session.nr_probe != 0 || session.dellist) { 220 pr_warning(" Error: Don't use --list with" 221 " --add/--del.\n"); ··· 227 return 0; 228 } 229 230 + /* Initialize symbol maps for vmlinux */ 231 + symbol_conf.sort_by_name = true; 232 + if (symbol_conf.vmlinux_name == NULL) 233 + symbol_conf.try_vmlinux_path = true; 234 + if (symbol__init() < 0) 235 + die("Failed to init symbol map."); 236 + session.psession = perf_session__new(NULL, O_WRONLY, false); 237 + if (session.psession == NULL) 238 + die("Failed to init perf_session."); 239 + session.kmap = map_groups__find_by_name(&session.psession->kmaps, 240 + 
MAP__FUNCTION, 241 + "[kernel.kallsyms]"); 242 + if (!session.kmap) 243 + die("Could not find kernel map.\n"); 244 + 245 if (session.need_dwarf) 246 #ifdef NO_LIBDWARF 247 die("Debuginfo-analysis is not supported"); 248 #else /* !NO_LIBDWARF */ 249 pr_debug("Some probes require debuginfo.\n"); 250 251 + fd = open_vmlinux(); 252 if (fd < 0) { 253 if (session.need_dwarf) 254 die("Could not open debuginfo file."); ··· 255 256 lseek(fd, SEEK_SET, 0); 257 ret = find_probepoint(fd, pp); 258 + if (ret > 0) 259 + continue; 260 + if (ret == 0) { /* No error but failed to find probe point. */ 261 + synthesize_perf_probe_point(pp); 262 + die("Probe point '%s' not found. - probe not added.", 263 + pp->probes[0]); 264 } 265 + /* Error path */ 266 + if (session.need_dwarf) { 267 + if (ret == -ENOENT) 268 + pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO=y.\n"); 269 + die("Could not analyze debuginfo."); 270 + } 271 + pr_debug("An error occurred in debuginfo analysis." 272 + " Try to use symbols.\n"); 273 + break; 274 } 275 close(fd); 276 ··· 276 if (pp->found) /* This probe is already found. */ 277 continue; 278 279 + evaluate_probe_point(pp); 280 ret = synthesize_trace_kprobe_event(pp); 281 if (ret == -E2BIG) 282 die("probe point definition becomes too long."); ··· 284 } 285 286 /* Settng up probe points */ 287 + add_trace_kprobe_events(session.probes, session.nr_probe, 288 + session.force_add); 289 return 0; 290 } 291
+94 -49
tools/perf/builtin-record.c
··· 123 write_output(buf, size); 124 } 125 126 - static int process_synthesized_event(event_t *event) 127 { 128 write_event(event, event->header.size); 129 return 0; ··· 278 279 attr->mmap = track; 280 attr->comm = track; 281 - attr->inherit = (cpu < 0) && inherit; 282 attr->disabled = 1; 283 284 try_again: ··· 402 perf_header__write(&session->header, output, true); 403 } 404 405 - static int __cmd_record(int argc, const char **argv) 406 { 407 int i, counter; 408 struct stat st; ··· 410 int flags; 411 int err; 412 unsigned long waking = 0; 413 414 page_size = sysconf(_SC_PAGE_SIZE); 415 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); ··· 422 signal(SIGCHLD, sig_handler); 423 signal(SIGINT, sig_handler); 424 425 if (!stat(output_name, &st) && st.st_size) { 426 - if (!force && !append_file) { 427 - fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", 428 - output_name); 429 - exit(-1); 430 } 431 } else { 432 append_file = 0; ··· 483 484 atexit(atexit_header); 485 486 - if (!system_wide) { 487 - pid = target_pid; 488 - if (pid == -1) 489 - pid = getpid(); 490 - 491 - open_counters(profile_cpu, pid); 492 - } else { 493 - if (profile_cpu != -1) { 494 - open_counters(profile_cpu, target_pid); 495 - } else { 496 - for (i = 0; i < nr_cpus; i++) 497 - open_counters(i, target_pid); 498 } 499 } 500 501 if (file_new) { ··· 551 } 552 553 if (!system_wide) 554 - event__synthesize_thread(pid, process_synthesized_event); 555 else 556 - event__synthesize_threads(process_synthesized_event); 557 - 558 - if (target_pid == -1 && argc) { 559 - pid = fork(); 560 - if (pid < 0) 561 - die("failed to fork"); 562 - 563 - if (!pid) { 564 - if (execvp(argv[0], (char **)argv)) { 565 - perror(argv[0]); 566 - exit(-1); 567 - } 568 - } else { 569 - /* 570 - * Wait a bit for the execv'ed child to appear 571 - * and be updated in /proc 572 - * FIXME: Do you know a less heuristical solution? 
573 - */ 574 - usleep(1000); 575 - event__synthesize_thread(pid, 576 - process_synthesized_event); 577 - } 578 - 579 - child_pid = pid; 580 - } 581 582 if (realtime_prio) { 583 struct sched_param param; ··· 565 exit(-1); 566 } 567 } 568 569 for (;;) { 570 int hits = samples; ··· 665 { 666 int counter; 667 668 - symbol__init(0); 669 - 670 argc = parse_options(argc, argv, options, record_usage, 671 - PARSE_OPT_STOP_AT_NON_OPTION); 672 - if (!argc && target_pid == -1 && !system_wide) 673 usage_with_options(record_usage, options); 674 675 if (!nr_counters) { 676 nr_counters = 1;
··· 123 write_output(buf, size); 124 } 125 126 + static int process_synthesized_event(event_t *event, 127 + struct perf_session *self __used) 128 { 129 write_event(event, event->header.size); 130 return 0; ··· 277 278 attr->mmap = track; 279 attr->comm = track; 280 + attr->inherit = inherit; 281 attr->disabled = 1; 282 283 try_again: ··· 401 perf_header__write(&session->header, output, true); 402 } 403 404 + static int __cmd_record(int argc __used, const char **argv) 405 { 406 int i, counter; 407 struct stat st; ··· 409 int flags; 410 int err; 411 unsigned long waking = 0; 412 + int child_ready_pipe[2], go_pipe[2]; 413 + char buf; 414 415 page_size = sysconf(_SC_PAGE_SIZE); 416 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); ··· 419 signal(SIGCHLD, sig_handler); 420 signal(SIGINT, sig_handler); 421 422 + if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) { 423 + perror("failed to create pipes"); 424 + exit(-1); 425 + } 426 + 427 if (!stat(output_name, &st) && st.st_size) { 428 + if (!force) { 429 + if (!append_file) { 430 + pr_err("Error, output file %s exists, use -A " 431 + "to append or -f to overwrite.\n", 432 + output_name); 433 + exit(-1); 434 + } 435 + } else { 436 + char oldname[PATH_MAX]; 437 + snprintf(oldname, sizeof(oldname), "%s.old", 438 + output_name); 439 + unlink(oldname); 440 + rename(output_name, oldname); 441 } 442 } else { 443 append_file = 0; ··· 466 467 atexit(atexit_header); 468 469 + if (target_pid == -1) { 470 + pid = fork(); 471 + if (pid < 0) { 472 + perror("failed to fork"); 473 + exit(-1); 474 } 475 + 476 + if (!pid) { 477 + close(child_ready_pipe[0]); 478 + close(go_pipe[1]); 479 + fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); 480 + 481 + /* 482 + * Do a dummy execvp to get the PLT entry resolved, 483 + * so we avoid the resolver overhead on the real 484 + * execvp call. 
485 + */ 486 + execvp("", (char **)argv); 487 + 488 + /* 489 + * Tell the parent we're ready to go 490 + */ 491 + close(child_ready_pipe[1]); 492 + 493 + /* 494 + * Wait until the parent tells us to go. 495 + */ 496 + if (read(go_pipe[0], &buf, 1) == -1) 497 + perror("unable to read pipe"); 498 + 499 + execvp(argv[0], (char **)argv); 500 + 501 + perror(argv[0]); 502 + exit(-1); 503 + } 504 + 505 + child_pid = pid; 506 + 507 + if (!system_wide) 508 + target_pid = pid; 509 + 510 + close(child_ready_pipe[1]); 511 + close(go_pipe[0]); 512 + /* 513 + * wait for child to settle 514 + */ 515 + if (read(child_ready_pipe[0], &buf, 1) == -1) { 516 + perror("unable to read pipe"); 517 + exit(-1); 518 + } 519 + close(child_ready_pipe[0]); 520 + } 521 + 522 + 523 + if ((!system_wide && !inherit) || profile_cpu != -1) { 524 + open_counters(profile_cpu, target_pid); 525 + } else { 526 + for (i = 0; i < nr_cpus; i++) 527 + open_counters(i, target_pid); 528 } 529 530 if (file_new) { ··· 488 } 489 490 if (!system_wide) 491 + event__synthesize_thread(pid, process_synthesized_event, 492 + session); 493 else 494 + event__synthesize_threads(process_synthesized_event, session); 495 496 if (realtime_prio) { 497 struct sched_param param; ··· 525 exit(-1); 526 } 527 } 528 + 529 + /* 530 + * Let the child rip 531 + */ 532 + close(go_pipe[1]); 533 534 for (;;) { 535 int hits = samples; ··· 620 { 621 int counter; 622 623 argc = parse_options(argc, argv, options, record_usage, 624 + PARSE_OPT_STOP_AT_NON_OPTION); 625 + if (!argc && target_pid == -1 && (!system_wide || profile_cpu == -1)) 626 usage_with_options(record_usage, options); 627 + 628 + symbol__init(); 629 630 if (!nr_counters) { 631 nr_counters = 1;
+60 -665
tools/perf/builtin-report.c
··· 27 #include "util/parse-options.h" 28 #include "util/parse-events.h" 29 30 - #include "util/data_map.h" 31 #include "util/thread.h" 32 #include "util/sort.h" 33 #include "util/hist.h" 34 35 static char const *input_name = "perf.data"; 36 37 - static char *dso_list_str, *comm_list_str, *sym_list_str, 38 - *col_width_list_str; 39 - static struct strlist *dso_list, *comm_list, *sym_list; 40 - 41 static int force; 42 - 43 - static int full_paths; 44 - static int show_nr_samples; 45 46 static int show_threads; 47 static struct perf_read_values show_threads_values; ··· 41 static char default_pretty_printing_style[] = "normal"; 42 static char *pretty_printing_style = default_pretty_printing_style; 43 44 - static int exclude_other = 1; 45 - 46 static char callchain_default_opt[] = "fractal,0.5"; 47 48 - static struct perf_session *session; 49 - 50 - static u64 sample_type; 51 - 52 - struct symbol_conf symbol_conf; 53 - 54 - 55 - static size_t 56 - callchain__fprintf_left_margin(FILE *fp, int left_margin) 57 - { 58 - int i; 59 - int ret; 60 - 61 - ret = fprintf(fp, " "); 62 - 63 - for (i = 0; i < left_margin; i++) 64 - ret += fprintf(fp, " "); 65 - 66 - return ret; 67 - } 68 - 69 - static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, 70 - int left_margin) 71 - { 72 - int i; 73 - size_t ret = 0; 74 - 75 - ret += callchain__fprintf_left_margin(fp, left_margin); 76 - 77 - for (i = 0; i < depth; i++) 78 - if (depth_mask & (1 << i)) 79 - ret += fprintf(fp, "| "); 80 - else 81 - ret += fprintf(fp, " "); 82 - 83 - ret += fprintf(fp, "\n"); 84 - 85 - return ret; 86 - } 87 - static size_t 88 - ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, 89 - int depth_mask, int count, u64 total_samples, 90 - int hits, int left_margin) 91 - { 92 - int i; 93 - size_t ret = 0; 94 - 95 - ret += callchain__fprintf_left_margin(fp, left_margin); 96 - for (i = 0; i < depth; i++) { 97 - if (depth_mask & (1 << i)) 98 - ret += fprintf(fp, "|"); 99 - 
else 100 - ret += fprintf(fp, " "); 101 - if (!count && i == depth - 1) { 102 - double percent; 103 - 104 - percent = hits * 100.0 / total_samples; 105 - ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); 106 - } else 107 - ret += fprintf(fp, "%s", " "); 108 - } 109 - if (chain->sym) 110 - ret += fprintf(fp, "%s\n", chain->sym->name); 111 - else 112 - ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); 113 - 114 - return ret; 115 - } 116 - 117 - static struct symbol *rem_sq_bracket; 118 - static struct callchain_list rem_hits; 119 - 120 - static void init_rem_hits(void) 121 - { 122 - rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); 123 - if (!rem_sq_bracket) { 124 - fprintf(stderr, "Not enough memory to display remaining hits\n"); 125 - return; 126 - } 127 - 128 - strcpy(rem_sq_bracket->name, "[...]"); 129 - rem_hits.sym = rem_sq_bracket; 130 - } 131 - 132 - static size_t 133 - __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 134 - u64 total_samples, int depth, int depth_mask, 135 - int left_margin) 136 - { 137 - struct rb_node *node, *next; 138 - struct callchain_node *child; 139 - struct callchain_list *chain; 140 - int new_depth_mask = depth_mask; 141 - u64 new_total; 142 - u64 remaining; 143 - size_t ret = 0; 144 - int i; 145 - 146 - if (callchain_param.mode == CHAIN_GRAPH_REL) 147 - new_total = self->children_hit; 148 - else 149 - new_total = total_samples; 150 - 151 - remaining = new_total; 152 - 153 - node = rb_first(&self->rb_root); 154 - while (node) { 155 - u64 cumul; 156 - 157 - child = rb_entry(node, struct callchain_node, rb_node); 158 - cumul = cumul_hits(child); 159 - remaining -= cumul; 160 - 161 - /* 162 - * The depth mask manages the output of pipes that show 163 - * the depth. We don't want to keep the pipes of the current 164 - * level for the last child of this depth. 165 - * Except if we have remaining filtered hits. 
They will 166 - * supersede the last child 167 - */ 168 - next = rb_next(node); 169 - if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) 170 - new_depth_mask &= ~(1 << (depth - 1)); 171 - 172 - /* 173 - * But we keep the older depth mask for the line seperator 174 - * to keep the level link until we reach the last child 175 - */ 176 - ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, 177 - left_margin); 178 - i = 0; 179 - list_for_each_entry(chain, &child->val, list) { 180 - if (chain->ip >= PERF_CONTEXT_MAX) 181 - continue; 182 - ret += ipchain__fprintf_graph(fp, chain, depth, 183 - new_depth_mask, i++, 184 - new_total, 185 - cumul, 186 - left_margin); 187 - } 188 - ret += __callchain__fprintf_graph(fp, child, new_total, 189 - depth + 1, 190 - new_depth_mask | (1 << depth), 191 - left_margin); 192 - node = next; 193 - } 194 - 195 - if (callchain_param.mode == CHAIN_GRAPH_REL && 196 - remaining && remaining != new_total) { 197 - 198 - if (!rem_sq_bracket) 199 - return ret; 200 - 201 - new_depth_mask &= ~(1 << (depth - 1)); 202 - 203 - ret += ipchain__fprintf_graph(fp, &rem_hits, depth, 204 - new_depth_mask, 0, new_total, 205 - remaining, left_margin); 206 - } 207 - 208 - return ret; 209 - } 210 - 211 - 212 - static size_t 213 - callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 214 - u64 total_samples, int left_margin) 215 - { 216 - struct callchain_list *chain; 217 - bool printed = false; 218 - int i = 0; 219 - int ret = 0; 220 - 221 - list_for_each_entry(chain, &self->val, list) { 222 - if (chain->ip >= PERF_CONTEXT_MAX) 223 - continue; 224 - 225 - if (!i++ && sort__first_dimension == SORT_SYM) 226 - continue; 227 - 228 - if (!printed) { 229 - ret += callchain__fprintf_left_margin(fp, left_margin); 230 - ret += fprintf(fp, "|\n"); 231 - ret += callchain__fprintf_left_margin(fp, left_margin); 232 - ret += fprintf(fp, "---"); 233 - 234 - left_margin += 3; 235 - printed = true; 236 - } else 237 - ret += 
callchain__fprintf_left_margin(fp, left_margin); 238 - 239 - if (chain->sym) 240 - ret += fprintf(fp, " %s\n", chain->sym->name); 241 - else 242 - ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); 243 - } 244 - 245 - ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); 246 - 247 - return ret; 248 - } 249 - 250 - static size_t 251 - callchain__fprintf_flat(FILE *fp, struct callchain_node *self, 252 - u64 total_samples) 253 - { 254 - struct callchain_list *chain; 255 - size_t ret = 0; 256 - 257 - if (!self) 258 - return 0; 259 - 260 - ret += callchain__fprintf_flat(fp, self->parent, total_samples); 261 - 262 - 263 - list_for_each_entry(chain, &self->val, list) { 264 - if (chain->ip >= PERF_CONTEXT_MAX) 265 - continue; 266 - if (chain->sym) 267 - ret += fprintf(fp, " %s\n", chain->sym->name); 268 - else 269 - ret += fprintf(fp, " %p\n", 270 - (void *)(long)chain->ip); 271 - } 272 - 273 - return ret; 274 - } 275 - 276 - static size_t 277 - hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, 278 - u64 total_samples, int left_margin) 279 - { 280 - struct rb_node *rb_node; 281 - struct callchain_node *chain; 282 - size_t ret = 0; 283 - 284 - rb_node = rb_first(&self->sorted_chain); 285 - while (rb_node) { 286 - double percent; 287 - 288 - chain = rb_entry(rb_node, struct callchain_node, rb_node); 289 - percent = chain->hit * 100.0 / total_samples; 290 - switch (callchain_param.mode) { 291 - case CHAIN_FLAT: 292 - ret += percent_color_fprintf(fp, " %6.2f%%\n", 293 - percent); 294 - ret += callchain__fprintf_flat(fp, chain, total_samples); 295 - break; 296 - case CHAIN_GRAPH_ABS: /* Falldown */ 297 - case CHAIN_GRAPH_REL: 298 - ret += callchain__fprintf_graph(fp, chain, total_samples, 299 - left_margin); 300 - case CHAIN_NONE: 301 - default: 302 - break; 303 - } 304 - ret += fprintf(fp, "\n"); 305 - rb_node = rb_next(rb_node); 306 - } 307 - 308 - return ret; 309 - } 310 - 311 - static size_t 312 - hist_entry__fprintf(FILE *fp, 
struct hist_entry *self, u64 total_samples) 313 - { 314 - struct sort_entry *se; 315 - size_t ret; 316 - 317 - if (exclude_other && !self->parent) 318 - return 0; 319 - 320 - if (total_samples) 321 - ret = percent_color_fprintf(fp, 322 - field_sep ? "%.2f" : " %6.2f%%", 323 - (self->count * 100.0) / total_samples); 324 - else 325 - ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count); 326 - 327 - if (show_nr_samples) { 328 - if (field_sep) 329 - fprintf(fp, "%c%lld", *field_sep, self->count); 330 - else 331 - fprintf(fp, "%11lld", self->count); 332 - } 333 - 334 - list_for_each_entry(se, &hist_entry__sort_list, list) { 335 - if (se->elide) 336 - continue; 337 - 338 - fprintf(fp, "%s", field_sep ?: " "); 339 - ret += se->print(fp, self, se->width ? *se->width : 0); 340 - } 341 - 342 - ret += fprintf(fp, "\n"); 343 - 344 - if (callchain) { 345 - int left_margin = 0; 346 - 347 - if (sort__first_dimension == SORT_COMM) { 348 - se = list_first_entry(&hist_entry__sort_list, typeof(*se), 349 - list); 350 - left_margin = se->width ? 
*se->width : 0; 351 - left_margin -= thread__comm_len(self->thread); 352 - } 353 - 354 - hist_entry_callchain__fprintf(fp, self, total_samples, 355 - left_margin); 356 - } 357 - 358 - return ret; 359 - } 360 - 361 - /* 362 - * 363 - */ 364 - 365 - static void dso__calc_col_width(struct dso *self) 366 - { 367 - if (!col_width_list_str && !field_sep && 368 - (!dso_list || strlist__has_entry(dso_list, self->name))) { 369 - unsigned int slen = strlen(self->name); 370 - if (slen > dsos__col_width) 371 - dsos__col_width = slen; 372 - } 373 - 374 - self->slen_calculated = 1; 375 - } 376 - 377 - static void thread__comm_adjust(struct thread *self) 378 - { 379 - char *comm = self->comm; 380 - 381 - if (!col_width_list_str && !field_sep && 382 - (!comm_list || strlist__has_entry(comm_list, comm))) { 383 - unsigned int slen = strlen(comm); 384 - 385 - if (slen > comms__col_width) { 386 - comms__col_width = slen; 387 - threads__col_width = slen + 6; 388 - } 389 - } 390 - } 391 - 392 - static int thread__set_comm_adjust(struct thread *self, const char *comm) 393 - { 394 - int ret = thread__set_comm(self, comm); 395 - 396 - if (ret) 397 - return ret; 398 - 399 - thread__comm_adjust(self); 400 - 401 - return 0; 402 - } 403 - 404 - static int call__match(struct symbol *sym) 405 - { 406 - if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 407 - return 1; 408 - 409 - return 0; 410 - } 411 - 412 - static struct symbol **resolve_callchain(struct thread *thread, 413 - struct ip_callchain *chain, 414 - struct symbol **parent) 415 - { 416 - u8 cpumode = PERF_RECORD_MISC_USER; 417 - struct symbol **syms = NULL; 418 - unsigned int i; 419 - 420 - if (callchain) { 421 - syms = calloc(chain->nr, sizeof(*syms)); 422 - if (!syms) { 423 - fprintf(stderr, "Can't allocate memory for symbols\n"); 424 - exit(-1); 425 - } 426 - } 427 - 428 - for (i = 0; i < chain->nr; i++) { 429 - u64 ip = chain->ips[i]; 430 - struct addr_location al; 431 - 432 - if (ip >= PERF_CONTEXT_MAX) { 433 - 
switch (ip) { 434 - case PERF_CONTEXT_HV: 435 - cpumode = PERF_RECORD_MISC_HYPERVISOR; break; 436 - case PERF_CONTEXT_KERNEL: 437 - cpumode = PERF_RECORD_MISC_KERNEL; break; 438 - case PERF_CONTEXT_USER: 439 - cpumode = PERF_RECORD_MISC_USER; break; 440 - default: 441 - break; 442 - } 443 - continue; 444 - } 445 - 446 - thread__find_addr_location(thread, cpumode, MAP__FUNCTION, 447 - ip, &al, NULL); 448 - if (al.sym != NULL) { 449 - if (sort__has_parent && !*parent && 450 - call__match(al.sym)) 451 - *parent = al.sym; 452 - if (!callchain) 453 - break; 454 - syms[i] = al.sym; 455 - } 456 - } 457 - 458 - return syms; 459 - } 460 - 461 - /* 462 - * collect histogram counts 463 - */ 464 - 465 - static int hist_entry__add(struct addr_location *al, 466 - struct ip_callchain *chain, u64 count) 467 { 468 struct symbol **syms = NULL, *parent = NULL; 469 bool hit; 470 struct hist_entry *he; 471 472 - if ((sort__has_parent || callchain) && chain) 473 - syms = resolve_callchain(al->thread, chain, &parent); 474 - 475 - he = __hist_entry__add(al, parent, count, &hit); 476 if (he == NULL) 477 return -ENOMEM; 478 479 if (hit) 480 he->count += count; 481 482 - if (callchain) { 483 if (!hit) 484 callchain_init(&he->callchain); 485 append_chain(&he->callchain, chain, syms); ··· 69 } 70 71 return 0; 72 - } 73 - 74 - static size_t output__fprintf(FILE *fp, u64 total_samples) 75 - { 76 - struct hist_entry *pos; 77 - struct sort_entry *se; 78 - struct rb_node *nd; 79 - size_t ret = 0; 80 - unsigned int width; 81 - char *col_width = col_width_list_str; 82 - int raw_printing_style; 83 - 84 - raw_printing_style = !strcmp(pretty_printing_style, "raw"); 85 - 86 - init_rem_hits(); 87 - 88 - fprintf(fp, "# Samples: %Ld\n", (u64)total_samples); 89 - fprintf(fp, "#\n"); 90 - 91 - fprintf(fp, "# Overhead"); 92 - if (show_nr_samples) { 93 - if (field_sep) 94 - fprintf(fp, "%cSamples", *field_sep); 95 - else 96 - fputs(" Samples ", fp); 97 - } 98 - list_for_each_entry(se, &hist_entry__sort_list, 
list) { 99 - if (se->elide) 100 - continue; 101 - if (field_sep) { 102 - fprintf(fp, "%c%s", *field_sep, se->header); 103 - continue; 104 - } 105 - width = strlen(se->header); 106 - if (se->width) { 107 - if (col_width_list_str) { 108 - if (col_width) { 109 - *se->width = atoi(col_width); 110 - col_width = strchr(col_width, ','); 111 - if (col_width) 112 - ++col_width; 113 - } 114 - } 115 - width = *se->width = max(*se->width, width); 116 - } 117 - fprintf(fp, " %*s", width, se->header); 118 - } 119 - fprintf(fp, "\n"); 120 - 121 - if (field_sep) 122 - goto print_entries; 123 - 124 - fprintf(fp, "# ........"); 125 - if (show_nr_samples) 126 - fprintf(fp, " .........."); 127 - list_for_each_entry(se, &hist_entry__sort_list, list) { 128 - unsigned int i; 129 - 130 - if (se->elide) 131 - continue; 132 - 133 - fprintf(fp, " "); 134 - if (se->width) 135 - width = *se->width; 136 - else 137 - width = strlen(se->header); 138 - for (i = 0; i < width; i++) 139 - fprintf(fp, "."); 140 - } 141 - fprintf(fp, "\n"); 142 - 143 - fprintf(fp, "#\n"); 144 - 145 - print_entries: 146 - for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { 147 - pos = rb_entry(nd, struct hist_entry, rb_node); 148 - ret += hist_entry__fprintf(fp, pos, total_samples); 149 - } 150 - 151 - if (sort_order == default_sort_order && 152 - parent_pattern == default_parent_pattern) { 153 - fprintf(fp, "#\n"); 154 - fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n"); 155 - fprintf(fp, "#\n"); 156 - } 157 - fprintf(fp, "\n"); 158 - 159 - free(rem_sq_bracket); 160 - 161 - if (show_threads) 162 - perf_read_values_display(fp, &show_threads_values, 163 - raw_printing_style); 164 - 165 - return ret; 166 } 167 168 static int validate_chain(struct ip_callchain *chain, event_t *event) ··· 84 return 0; 85 } 86 87 - static int process_sample_event(event_t *event) 88 { 89 - struct sample_data data; 90 - int cpumode; 91 struct addr_location al; 92 - struct thread *thread; 93 94 - 
memset(&data, 0, sizeof(data)); 95 - data.period = 1; 96 - 97 - event__parse_sample(event, sample_type, &data); 98 99 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 100 event->header.misc, ··· 97 (void *)(long)data.ip, 98 (long long)data.period); 99 100 - if (sample_type & PERF_SAMPLE_CALLCHAIN) { 101 unsigned int i; 102 103 dump_printf("... chain: nr:%Lu\n", data.callchain->nr); ··· 115 } 116 } 117 118 - thread = threads__findnew(data.pid); 119 - if (thread == NULL) { 120 - pr_debug("problem processing %d event, skipping it.\n", 121 event->header.type); 122 return -1; 123 } 124 125 - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); 126 - 127 - if (comm_list && !strlist__has_entry(comm_list, thread->comm)) 128 return 0; 129 130 - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 131 - 132 - thread__find_addr_location(thread, cpumode, 133 - MAP__FUNCTION, data.ip, &al, NULL); 134 - /* 135 - * We have to do this here as we may have a dso with no symbol hit that 136 - * has a name longer than the ones with symbols sampled. 
137 - */ 138 - if (al.map && !sort_dso.elide && !al.map->dso->slen_calculated) 139 - dso__calc_col_width(al.map->dso); 140 - 141 - if (dso_list && 142 - (!al.map || !al.map->dso || 143 - !(strlist__has_entry(dso_list, al.map->dso->short_name) || 144 - (al.map->dso->short_name != al.map->dso->long_name && 145 - strlist__has_entry(dso_list, al.map->dso->long_name))))) 146 - return 0; 147 - 148 - if (sym_list && al.sym && !strlist__has_entry(sym_list, al.sym->name)) 149 - return 0; 150 - 151 - if (hist_entry__add(&al, data.callchain, data.period)) { 152 pr_debug("problem incrementing symbol count, skipping event\n"); 153 return -1; 154 } 155 156 - event__stats.total += data.period; 157 - 158 return 0; 159 } 160 161 - static int process_comm_event(event_t *event) 162 - { 163 - struct thread *thread = threads__findnew(event->comm.pid); 164 - 165 - dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid); 166 - 167 - if (thread == NULL || 168 - thread__set_comm_adjust(thread, event->comm.comm)) { 169 - dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 170 - return -1; 171 - } 172 - 173 - return 0; 174 - } 175 - 176 - static int process_read_event(event_t *event) 177 { 178 struct perf_event_attr *attr; 179 ··· 156 return 0; 157 } 158 159 - static int sample_type_check(u64 type) 160 { 161 - sample_type = type; 162 - 163 - if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { 164 if (sort__has_parent) { 165 fprintf(stderr, "selected --sort parent, but no" 166 " callchain data. Did you call" 167 " perf record without -g?\n"); 168 return -1; 169 } 170 - if (callchain) { 171 fprintf(stderr, "selected -g but no callchain data." 
172 " Did you call perf record without" 173 " -g?\n"); 174 return -1; 175 } 176 - } else if (callchain_param.mode != CHAIN_NONE && !callchain) { 177 - callchain = 1; 178 if (register_callchain_param(&callchain_param) < 0) { 179 fprintf(stderr, "Can't register callchain" 180 " params\n"); ··· 183 return 0; 184 } 185 186 - static struct perf_file_handler file_handler = { 187 .process_sample_event = process_sample_event, 188 .process_mmap_event = event__process_mmap, 189 - .process_comm_event = process_comm_event, 190 .process_exit_event = event__process_task, 191 .process_fork_event = event__process_task, 192 .process_lost_event = event__process_lost, ··· 197 198 static int __cmd_report(void) 199 { 200 - struct thread *idle; 201 int ret; 202 203 session = perf_session__new(input_name, O_RDONLY, force); 204 if (session == NULL) 205 return -ENOMEM; 206 207 - idle = register_idle_thread(); 208 - thread__comm_adjust(idle); 209 - 210 if (show_threads) 211 perf_read_values_init(&show_threads_values); 212 213 - register_perf_file_handler(&file_handler); 214 - 215 - ret = perf_session__process_events(session, full_paths, 216 - &event__cwdlen, &event__cwd); 217 if (ret) 218 goto out_delete; 219 ··· 217 } 218 219 if (verbose > 3) 220 - threads__fprintf(stdout); 221 222 if (verbose > 2) 223 dsos__fprintf(stdout); 224 225 - collapse__resort(); 226 - output__resort(event__stats.total); 227 - output__fprintf(stdout, event__stats.total); 228 229 - if (show_threads) 230 perf_read_values_destroy(&show_threads_values); 231 out_delete: 232 perf_session__delete(session); 233 return ret; ··· 248 char *tok; 249 char *endptr; 250 251 - callchain = 1; 252 253 if (!arg) 254 return 0; ··· 269 270 else if (!strncmp(tok, "none", strlen(arg))) { 271 callchain_param.mode = CHAIN_NONE; 272 - callchain = 0; 273 274 return 0; 275 } ··· 312 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 313 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 314 "load module symbols - WARNING: use only 
with -k and LIVE kernel"), 315 - OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples, 316 "Show a column with the number of samples"), 317 OPT_BOOLEAN('T', "threads", &show_threads, 318 "Show per-thread event counters"), ··· 320 "pretty printing style key: normal raw"), 321 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 322 "sort by key(s): pid, comm, dso, symbol, parent"), 323 - OPT_BOOLEAN('P', "full-paths", &full_paths, 324 "Don't shorten the pathnames taking into account the cwd"), 325 OPT_STRING('p', "parent", &parent_pattern, "regex", 326 "regex filter to identify parent, see: '--sort parent'"), 327 - OPT_BOOLEAN('x', "exclude-other", &exclude_other, 328 "Only display entries with parent-match"), 329 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent", 330 "Display callchains using output_type and min percent threshold. " 331 "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt), 332 - OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]", 333 "only consider symbols in these dsos"), 334 - OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]", 335 "only consider symbols in these comms"), 336 - OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]", 337 "only consider these symbols"), 338 - OPT_STRING('w', "column-widths", &col_width_list_str, 339 "width[,width...]", 340 "don't try to adjust column width, use these fixed values"), 341 - OPT_STRING('t', "field-separator", &field_sep, "separator", 342 "separator for columns, no spaces will be added between " 343 "columns '.' 
is reserved."), 344 OPT_END() 345 }; 346 347 - static void setup_sorting(void) 348 - { 349 - char *tmp, *tok, *str = strdup(sort_order); 350 - 351 - for (tok = strtok_r(str, ", ", &tmp); 352 - tok; tok = strtok_r(NULL, ", ", &tmp)) { 353 - if (sort_dimension__add(tok) < 0) { 354 - error("Unknown --sort key: `%s'", tok); 355 - usage_with_options(report_usage, options); 356 - } 357 - } 358 - 359 - free(str); 360 - } 361 - 362 - static void setup_list(struct strlist **list, const char *list_str, 363 - struct sort_entry *se, const char *list_name, 364 - FILE *fp) 365 - { 366 - if (list_str) { 367 - *list = strlist__new(true, list_str); 368 - if (!*list) { 369 - fprintf(stderr, "problems parsing %s list\n", 370 - list_name); 371 - exit(129); 372 - } 373 - if (strlist__nr_entries(*list) == 1) { 374 - fprintf(fp, "# %s: %s\n", list_name, 375 - strlist__entry(*list, 0)->s); 376 - se->elide = true; 377 - } 378 - } 379 - } 380 - 381 int cmd_report(int argc, const char **argv, const char *prefix __used) 382 { 383 - if (symbol__init(&symbol_conf) < 0) 384 - return -1; 385 - 386 argc = parse_options(argc, argv, options, report_usage, 0); 387 388 - setup_sorting(); 389 390 if (parent_pattern != default_parent_pattern) { 391 sort_dimension__add("parent"); 392 sort_parent.elide = 1; 393 } else 394 - exclude_other = 0; 395 396 /* 397 * Any (unrecognized) arguments left? ··· 367 if (argc) 368 usage_with_options(report_usage, options); 369 370 - setup_pager(); 371 - 372 - setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout); 373 - setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout); 374 - setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout); 375 - 376 - if (field_sep && *field_sep == '.') { 377 - fputs("'.' is the only non valid --field-separator argument\n", 378 - stderr); 379 - exit(129); 380 - } 381 382 return __cmd_report(); 383 }
··· 27 #include "util/parse-options.h" 28 #include "util/parse-events.h" 29 30 #include "util/thread.h" 31 #include "util/sort.h" 32 #include "util/hist.h" 33 34 static char const *input_name = "perf.data"; 35 36 static int force; 37 38 static int show_threads; 39 static struct perf_read_values show_threads_values; ··· 49 static char default_pretty_printing_style[] = "normal"; 50 static char *pretty_printing_style = default_pretty_printing_style; 51 52 static char callchain_default_opt[] = "fractal,0.5"; 53 54 + static int perf_session__add_hist_entry(struct perf_session *self, 55 + struct addr_location *al, 56 + struct ip_callchain *chain, u64 count) 57 { 58 struct symbol **syms = NULL, *parent = NULL; 59 bool hit; 60 struct hist_entry *he; 61 62 + if ((sort__has_parent || symbol_conf.use_callchain) && chain) 63 + syms = perf_session__resolve_callchain(self, al->thread, 64 + chain, &parent); 65 + he = __perf_session__add_hist_entry(self, al, parent, count, &hit); 66 if (he == NULL) 67 return -ENOMEM; 68 69 if (hit) 70 he->count += count; 71 72 + if (symbol_conf.use_callchain) { 73 if (!hit) 74 callchain_init(&he->callchain); 75 append_chain(&he->callchain, chain, syms); ··· 495 } 496 497 return 0; 498 } 499 500 static int validate_chain(struct ip_callchain *chain, event_t *event) ··· 604 return 0; 605 } 606 607 + static int process_sample_event(event_t *event, struct perf_session *session) 608 { 609 + struct sample_data data = { .period = 1, }; 610 struct addr_location al; 611 612 + event__parse_sample(event, session->sample_type, &data); 613 614 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 615 event->header.misc, ··· 622 (void *)(long)data.ip, 623 (long long)data.period); 624 625 + if (session->sample_type & PERF_SAMPLE_CALLCHAIN) { 626 unsigned int i; 627 628 dump_printf("... 
chain: nr:%Lu\n", data.callchain->nr); ··· 640 } 641 } 642 643 + if (event__preprocess_sample(event, session, &al, NULL) < 0) { 644 + fprintf(stderr, "problem processing %d event, skipping it.\n", 645 event->header.type); 646 return -1; 647 } 648 649 + if (al.filtered) 650 return 0; 651 652 + if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { 653 pr_debug("problem incrementing symbol count, skipping event\n"); 654 return -1; 655 } 656 657 + session->events_stats.total += data.period; 658 return 0; 659 } 660 661 + static int process_read_event(event_t *event, struct perf_session *session __used) 662 { 663 struct perf_event_attr *attr; 664 ··· 721 return 0; 722 } 723 724 + static int sample_type_check(struct perf_session *session) 725 { 726 + if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) { 727 if (sort__has_parent) { 728 fprintf(stderr, "selected --sort parent, but no" 729 " callchain data. Did you call" 730 " perf record without -g?\n"); 731 return -1; 732 } 733 + if (symbol_conf.use_callchain) { 734 fprintf(stderr, "selected -g but no callchain data." 
735 " Did you call perf record without" 736 " -g?\n"); 737 return -1; 738 } 739 + } else if (callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { 740 + symbol_conf.use_callchain = true; 741 if (register_callchain_param(&callchain_param) < 0) { 742 fprintf(stderr, "Can't register callchain" 743 " params\n"); ··· 750 return 0; 751 } 752 753 + static struct perf_event_ops event_ops = { 754 .process_sample_event = process_sample_event, 755 .process_mmap_event = event__process_mmap, 756 + .process_comm_event = event__process_comm, 757 .process_exit_event = event__process_task, 758 .process_fork_event = event__process_task, 759 .process_lost_event = event__process_lost, ··· 764 765 static int __cmd_report(void) 766 { 767 int ret; 768 + struct perf_session *session; 769 770 session = perf_session__new(input_name, O_RDONLY, force); 771 if (session == NULL) 772 return -ENOMEM; 773 774 if (show_threads) 775 perf_read_values_init(&show_threads_values); 776 777 + ret = perf_session__process_events(session, &event_ops); 778 if (ret) 779 goto out_delete; 780 ··· 790 } 791 792 if (verbose > 3) 793 + perf_session__fprintf(session, stdout); 794 795 if (verbose > 2) 796 dsos__fprintf(stdout); 797 798 + perf_session__collapse_resort(session); 799 + perf_session__output_resort(session, session->events_stats.total); 800 + fprintf(stdout, "# Samples: %ld\n#\n", session->events_stats.total); 801 + perf_session__fprintf_hists(session, NULL, false, stdout); 802 + if (sort_order == default_sort_order && 803 + parent_pattern == default_parent_pattern) 804 + fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); 805 806 + if (show_threads) { 807 + bool raw_printing_style = !strcmp(pretty_printing_style, "raw"); 808 + perf_read_values_display(stdout, &show_threads_values, 809 + raw_printing_style); 810 perf_read_values_destroy(&show_threads_values); 811 + } 812 out_delete: 813 perf_session__delete(session); 814 return ret; ··· 813 char 
*tok; 814 char *endptr; 815 816 + symbol_conf.use_callchain = true; 817 818 if (!arg) 819 return 0; ··· 834 835 else if (!strncmp(tok, "none", strlen(arg))) { 836 callchain_param.mode = CHAIN_NONE; 837 + symbol_conf.use_callchain = true; 838 839 return 0; 840 } ··· 877 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 878 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 879 "load module symbols - WARNING: use only with -k and LIVE kernel"), 880 + OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 881 "Show a column with the number of samples"), 882 OPT_BOOLEAN('T', "threads", &show_threads, 883 "Show per-thread event counters"), ··· 885 "pretty printing style key: normal raw"), 886 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 887 "sort by key(s): pid, comm, dso, symbol, parent"), 888 + OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, 889 "Don't shorten the pathnames taking into account the cwd"), 890 OPT_STRING('p', "parent", &parent_pattern, "regex", 891 "regex filter to identify parent, see: '--sort parent'"), 892 + OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, 893 "Only display entries with parent-match"), 894 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent", 895 "Display callchains using output_type and min percent threshold. 
" 896 "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt), 897 + OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 898 "only consider symbols in these dsos"), 899 + OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", 900 "only consider symbols in these comms"), 901 + OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 902 "only consider these symbols"), 903 + OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, 904 "width[,width...]", 905 "don't try to adjust column width, use these fixed values"), 906 + OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", 907 "separator for columns, no spaces will be added between " 908 "columns '.' is reserved."), 909 OPT_END() 910 }; 911 912 int cmd_report(int argc, const char **argv, const char *prefix __used) 913 { 914 argc = parse_options(argc, argv, options, report_usage, 0); 915 916 + setup_pager(); 917 + 918 + if (symbol__init() < 0) 919 + return -1; 920 + 921 + setup_sorting(report_usage, options); 922 923 if (parent_pattern != default_parent_pattern) { 924 sort_dimension__add("parent"); 925 sort_parent.elide = 1; 926 } else 927 + symbol_conf.exclude_other = false; 928 929 /* 930 * Any (unrecognized) arguments left? ··· 964 if (argc) 965 usage_with_options(report_usage, options); 966 967 + sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); 968 + sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); 969 + sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); 970 971 return __cmd_report(); 972 }
+50 -43
tools/perf/builtin-sched.c
··· 12 #include "util/trace-event.h" 13 14 #include "util/debug.h" 15 - #include "util/data_map.h" 16 17 #include <sys/prctl.h> 18 ··· 20 #include <math.h> 21 22 static char const *input_name = "perf.data"; 23 - 24 - static u64 sample_type; 25 26 static char default_sort_order[] = "avg, max, switch, runtime"; 27 static char *sort_order = default_sort_order; ··· 728 729 struct trace_sched_handler { 730 void (*switch_event)(struct trace_switch_event *, 731 struct event *, 732 int cpu, 733 u64 timestamp, 734 struct thread *thread); 735 736 void (*runtime_event)(struct trace_runtime_event *, 737 struct event *, 738 int cpu, 739 u64 timestamp, 740 struct thread *thread); 741 742 void (*wakeup_event)(struct trace_wakeup_event *, 743 struct event *, 744 int cpu, 745 u64 timestamp, ··· 755 struct thread *thread); 756 757 void (*migrate_task_event)(struct trace_migrate_task_event *, 758 struct event *, 759 int cpu, 760 u64 timestamp, ··· 765 766 static void 767 replay_wakeup_event(struct trace_wakeup_event *wakeup_event, 768 struct event *event, 769 int cpu __used, 770 u64 timestamp __used, ··· 792 793 static void 794 replay_switch_event(struct trace_switch_event *switch_event, 795 struct event *event, 796 int cpu, 797 u64 timestamp, ··· 1026 1027 static void 1028 latency_switch_event(struct trace_switch_event *switch_event, 1029 struct event *event __used, 1030 int cpu, 1031 u64 timestamp, ··· 1050 die("hm, delta: %Ld < 0 ?\n", delta); 1051 1052 1053 - sched_out = threads__findnew(switch_event->prev_pid); 1054 - sched_in = threads__findnew(switch_event->next_pid); 1055 1056 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); 1057 if (!out_events) { ··· 1079 1080 static void 1081 latency_runtime_event(struct trace_runtime_event *runtime_event, 1082 struct event *event __used, 1083 int cpu, 1084 u64 timestamp, 1085 struct thread *this_thread __used) 1086 { 1087 - struct thread *thread = threads__findnew(runtime_event->pid); 1088 struct work_atoms *atoms = 
thread_atoms_search(&atom_root, thread, &cmp_pid); 1089 1090 BUG_ON(cpu >= MAX_CPUS || cpu < 0); ··· 1102 1103 static void 1104 latency_wakeup_event(struct trace_wakeup_event *wakeup_event, 1105 struct event *__event __used, 1106 int cpu __used, 1107 u64 timestamp, ··· 1116 if (!wakeup_event->success) 1117 return; 1118 1119 - wakee = threads__findnew(wakeup_event->pid); 1120 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); 1121 if (!atoms) { 1122 thread_atoms_insert(wakee); ··· 1150 1151 static void 1152 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, 1153 struct event *__event __used, 1154 int cpu __used, 1155 u64 timestamp, ··· 1166 if (profile_cpu == -1) 1167 return; 1168 1169 - migrant = threads__findnew(migrate_task_event->pid); 1170 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); 1171 if (!atoms) { 1172 thread_atoms_insert(migrant); ··· 1361 static struct trace_sched_handler *trace_handler; 1362 1363 static void 1364 - process_sched_wakeup_event(void *data, 1365 struct event *event, 1366 int cpu __used, 1367 u64 timestamp __used, ··· 1378 FILL_FIELD(wakeup_event, cpu, event, data); 1379 1380 if (trace_handler->wakeup_event) 1381 - trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread); 1382 } 1383 1384 /* ··· 1397 1398 static void 1399 map_switch_event(struct trace_switch_event *switch_event, 1400 struct event *event __used, 1401 int this_cpu, 1402 u64 timestamp, ··· 1425 die("hm, delta: %Ld < 0 ?\n", delta); 1426 1427 1428 - sched_out = threads__findnew(switch_event->prev_pid); 1429 - sched_in = threads__findnew(switch_event->next_pid); 1430 1431 curr_thread[this_cpu] = sched_in; 1432 ··· 1476 1477 1478 static void 1479 - process_sched_switch_event(void *data, 1480 struct event *event, 1481 int this_cpu, 1482 u64 timestamp __used, ··· 1503 nr_context_switch_bugs++; 1504 } 1505 if (trace_handler->switch_event) 1506 - trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, 
thread); 1507 1508 curr_pid[this_cpu] = switch_event.next_pid; 1509 } 1510 1511 static void 1512 - process_sched_runtime_event(void *data, 1513 struct event *event, 1514 int cpu __used, 1515 u64 timestamp __used, ··· 1524 FILL_FIELD(runtime_event, vruntime, event, data); 1525 1526 if (trace_handler->runtime_event) 1527 - trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread); 1528 } 1529 1530 static void ··· 1544 FILL_FIELD(fork_event, child_pid, event, data); 1545 1546 if (trace_handler->fork_event) 1547 - trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread); 1548 } 1549 1550 static void ··· 1559 } 1560 1561 static void 1562 - process_sched_migrate_task_event(void *data, 1563 struct event *event, 1564 int cpu __used, 1565 u64 timestamp __used, ··· 1575 FILL_FIELD(migrate_task_event, cpu, event, data); 1576 1577 if (trace_handler->migrate_task_event) 1578 - trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread); 1579 } 1580 1581 static void 1582 - process_raw_event(event_t *raw_event __used, void *data, 1583 - int cpu, u64 timestamp, struct thread *thread) 1584 { 1585 struct event *event; 1586 int type; ··· 1591 event = trace_find_event(type); 1592 1593 if (!strcmp(event->name, "sched_switch")) 1594 - process_sched_switch_event(data, event, cpu, timestamp, thread); 1595 if (!strcmp(event->name, "sched_stat_runtime")) 1596 - process_sched_runtime_event(data, event, cpu, timestamp, thread); 1597 if (!strcmp(event->name, "sched_wakeup")) 1598 - process_sched_wakeup_event(data, event, cpu, timestamp, thread); 1599 if (!strcmp(event->name, "sched_wakeup_new")) 1600 - process_sched_wakeup_event(data, event, cpu, timestamp, thread); 1601 if (!strcmp(event->name, "sched_process_fork")) 1602 process_sched_fork_event(data, event, cpu, timestamp, thread); 1603 if (!strcmp(event->name, "sched_process_exit")) 1604 process_sched_exit_event(event, cpu, timestamp, thread); 1605 if (!strcmp(event->name, 
"sched_migrate_task")) 1606 - process_sched_migrate_task_event(data, event, cpu, timestamp, thread); 1607 } 1608 1609 - static int process_sample_event(event_t *event) 1610 { 1611 struct sample_data data; 1612 struct thread *thread; 1613 1614 - if (!(sample_type & PERF_SAMPLE_RAW)) 1615 return 0; 1616 1617 memset(&data, 0, sizeof(data)); ··· 1619 data.cpu = -1; 1620 data.period = -1; 1621 1622 - event__parse_sample(event, sample_type, &data); 1623 1624 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 1625 event->header.misc, ··· 1627 (void *)(long)data.ip, 1628 (long long)data.period); 1629 1630 - thread = threads__findnew(data.pid); 1631 if (thread == NULL) { 1632 pr_debug("problem processing %d event, skipping it.\n", 1633 event->header.type); ··· 1639 if (profile_cpu != -1 && profile_cpu != (int)data.cpu) 1640 return 0; 1641 1642 - process_raw_event(event, data.raw_data, data.cpu, data.time, thread); 1643 1644 return 0; 1645 } 1646 1647 - static int process_lost_event(event_t *event __used) 1648 { 1649 nr_lost_chunks++; 1650 nr_lost_events += event->lost.lost; ··· 1653 return 0; 1654 } 1655 1656 - static int sample_type_check(u64 type) 1657 { 1658 - sample_type = type; 1659 - 1660 - if (!(sample_type & PERF_SAMPLE_RAW)) { 1661 fprintf(stderr, 1662 "No trace sample to read. 
Did you call perf record " 1663 "without -R?"); ··· 1665 return 0; 1666 } 1667 1668 - static struct perf_file_handler file_handler = { 1669 .process_sample_event = process_sample_event, 1670 .process_comm_event = event__process_comm, 1671 .process_lost_event = process_lost_event, ··· 1676 { 1677 int err; 1678 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 1679 - 1680 if (session == NULL) 1681 return -ENOMEM; 1682 1683 - register_idle_thread(); 1684 - register_perf_file_handler(&file_handler); 1685 - 1686 - err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 1687 perf_session__delete(session); 1688 return err; 1689 } ··· 1911 if (!strcmp(argv[0], "trace")) 1912 return cmd_trace(argc, argv, prefix); 1913 1914 - symbol__init(0); 1915 if (!strncmp(argv[0], "rec", 3)) { 1916 return __cmd_record(argc, argv); 1917 } else if (!strncmp(argv[0], "lat", 3)) {
··· 12 #include "util/trace-event.h" 13 14 #include "util/debug.h" 15 16 #include <sys/prctl.h> 17 ··· 21 #include <math.h> 22 23 static char const *input_name = "perf.data"; 24 25 static char default_sort_order[] = "avg, max, switch, runtime"; 26 static char *sort_order = default_sort_order; ··· 731 732 struct trace_sched_handler { 733 void (*switch_event)(struct trace_switch_event *, 734 + struct perf_session *, 735 struct event *, 736 int cpu, 737 u64 timestamp, 738 struct thread *thread); 739 740 void (*runtime_event)(struct trace_runtime_event *, 741 + struct perf_session *, 742 struct event *, 743 int cpu, 744 u64 timestamp, 745 struct thread *thread); 746 747 void (*wakeup_event)(struct trace_wakeup_event *, 748 + struct perf_session *, 749 struct event *, 750 int cpu, 751 u64 timestamp, ··· 755 struct thread *thread); 756 757 void (*migrate_task_event)(struct trace_migrate_task_event *, 758 + struct perf_session *session, 759 struct event *, 760 int cpu, 761 u64 timestamp, ··· 764 765 static void 766 replay_wakeup_event(struct trace_wakeup_event *wakeup_event, 767 + struct perf_session *session __used, 768 struct event *event, 769 int cpu __used, 770 u64 timestamp __used, ··· 790 791 static void 792 replay_switch_event(struct trace_switch_event *switch_event, 793 + struct perf_session *session __used, 794 struct event *event, 795 int cpu, 796 u64 timestamp, ··· 1023 1024 static void 1025 latency_switch_event(struct trace_switch_event *switch_event, 1026 + struct perf_session *session, 1027 struct event *event __used, 1028 int cpu, 1029 u64 timestamp, ··· 1046 die("hm, delta: %Ld < 0 ?\n", delta); 1047 1048 1049 + sched_out = perf_session__findnew(session, switch_event->prev_pid); 1050 + sched_in = perf_session__findnew(session, switch_event->next_pid); 1051 1052 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); 1053 if (!out_events) { ··· 1075 1076 static void 1077 latency_runtime_event(struct trace_runtime_event *runtime_event, 1078 + 
struct perf_session *session, 1079 struct event *event __used, 1080 int cpu, 1081 u64 timestamp, 1082 struct thread *this_thread __used) 1083 { 1084 + struct thread *thread = perf_session__findnew(session, runtime_event->pid); 1085 struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); 1086 1087 BUG_ON(cpu >= MAX_CPUS || cpu < 0); ··· 1097 1098 static void 1099 latency_wakeup_event(struct trace_wakeup_event *wakeup_event, 1100 + struct perf_session *session, 1101 struct event *__event __used, 1102 int cpu __used, 1103 u64 timestamp, ··· 1110 if (!wakeup_event->success) 1111 return; 1112 1113 + wakee = perf_session__findnew(session, wakeup_event->pid); 1114 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); 1115 if (!atoms) { 1116 thread_atoms_insert(wakee); ··· 1144 1145 static void 1146 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, 1147 + struct perf_session *session, 1148 struct event *__event __used, 1149 int cpu __used, 1150 u64 timestamp, ··· 1159 if (profile_cpu == -1) 1160 return; 1161 1162 + migrant = perf_session__findnew(session, migrate_task_event->pid); 1163 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); 1164 if (!atoms) { 1165 thread_atoms_insert(migrant); ··· 1354 static struct trace_sched_handler *trace_handler; 1355 1356 static void 1357 + process_sched_wakeup_event(void *data, struct perf_session *session, 1358 struct event *event, 1359 int cpu __used, 1360 u64 timestamp __used, ··· 1371 FILL_FIELD(wakeup_event, cpu, event, data); 1372 1373 if (trace_handler->wakeup_event) 1374 + trace_handler->wakeup_event(&wakeup_event, session, event, 1375 + cpu, timestamp, thread); 1376 } 1377 1378 /* ··· 1389 1390 static void 1391 map_switch_event(struct trace_switch_event *switch_event, 1392 + struct perf_session *session, 1393 struct event *event __used, 1394 int this_cpu, 1395 u64 timestamp, ··· 1416 die("hm, delta: %Ld < 0 ?\n", delta); 1417 1418 1419 + sched_out = 
perf_session__findnew(session, switch_event->prev_pid); 1420 + sched_in = perf_session__findnew(session, switch_event->next_pid); 1421 1422 curr_thread[this_cpu] = sched_in; 1423 ··· 1467 1468 1469 static void 1470 + process_sched_switch_event(void *data, struct perf_session *session, 1471 struct event *event, 1472 int this_cpu, 1473 u64 timestamp __used, ··· 1494 nr_context_switch_bugs++; 1495 } 1496 if (trace_handler->switch_event) 1497 + trace_handler->switch_event(&switch_event, session, event, 1498 + this_cpu, timestamp, thread); 1499 1500 curr_pid[this_cpu] = switch_event.next_pid; 1501 } 1502 1503 static void 1504 + process_sched_runtime_event(void *data, struct perf_session *session, 1505 struct event *event, 1506 int cpu __used, 1507 u64 timestamp __used, ··· 1514 FILL_FIELD(runtime_event, vruntime, event, data); 1515 1516 if (trace_handler->runtime_event) 1517 + trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread); 1518 } 1519 1520 static void ··· 1534 FILL_FIELD(fork_event, child_pid, event, data); 1535 1536 if (trace_handler->fork_event) 1537 + trace_handler->fork_event(&fork_event, event, 1538 + cpu, timestamp, thread); 1539 } 1540 1541 static void ··· 1548 } 1549 1550 static void 1551 + process_sched_migrate_task_event(void *data, struct perf_session *session, 1552 struct event *event, 1553 int cpu __used, 1554 u64 timestamp __used, ··· 1564 FILL_FIELD(migrate_task_event, cpu, event, data); 1565 1566 if (trace_handler->migrate_task_event) 1567 + trace_handler->migrate_task_event(&migrate_task_event, session, 1568 + event, cpu, timestamp, thread); 1569 } 1570 1571 static void 1572 + process_raw_event(event_t *raw_event __used, struct perf_session *session, 1573 + void *data, int cpu, u64 timestamp, struct thread *thread) 1574 { 1575 struct event *event; 1576 int type; ··· 1579 event = trace_find_event(type); 1580 1581 if (!strcmp(event->name, "sched_switch")) 1582 + process_sched_switch_event(data, session, event, cpu, 
timestamp, thread); 1583 if (!strcmp(event->name, "sched_stat_runtime")) 1584 + process_sched_runtime_event(data, session, event, cpu, timestamp, thread); 1585 if (!strcmp(event->name, "sched_wakeup")) 1586 + process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); 1587 if (!strcmp(event->name, "sched_wakeup_new")) 1588 + process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); 1589 if (!strcmp(event->name, "sched_process_fork")) 1590 process_sched_fork_event(data, event, cpu, timestamp, thread); 1591 if (!strcmp(event->name, "sched_process_exit")) 1592 process_sched_exit_event(event, cpu, timestamp, thread); 1593 if (!strcmp(event->name, "sched_migrate_task")) 1594 + process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread); 1595 } 1596 1597 + static int process_sample_event(event_t *event, struct perf_session *session) 1598 { 1599 struct sample_data data; 1600 struct thread *thread; 1601 1602 + if (!(session->sample_type & PERF_SAMPLE_RAW)) 1603 return 0; 1604 1605 memset(&data, 0, sizeof(data)); ··· 1607 data.cpu = -1; 1608 data.period = -1; 1609 1610 + event__parse_sample(event, session->sample_type, &data); 1611 1612 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 1613 event->header.misc, ··· 1615 (void *)(long)data.ip, 1616 (long long)data.period); 1617 1618 + thread = perf_session__findnew(session, data.pid); 1619 if (thread == NULL) { 1620 pr_debug("problem processing %d event, skipping it.\n", 1621 event->header.type); ··· 1627 if (profile_cpu != -1 && profile_cpu != (int)data.cpu) 1628 return 0; 1629 1630 + process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread); 1631 1632 return 0; 1633 } 1634 1635 + static int process_lost_event(event_t *event __used, 1636 + struct perf_session *session __used) 1637 { 1638 nr_lost_chunks++; 1639 nr_lost_events += event->lost.lost; ··· 1640 return 0; 1641 } 1642 1643 + static int sample_type_check(struct perf_session *session __used) 1644 { 
1645 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 1646 fprintf(stderr, 1647 "No trace sample to read. Did you call perf record " 1648 "without -R?"); ··· 1654 return 0; 1655 } 1656 1657 + static struct perf_event_ops event_ops = { 1658 .process_sample_event = process_sample_event, 1659 .process_comm_event = event__process_comm, 1660 .process_lost_event = process_lost_event, ··· 1665 { 1666 int err; 1667 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 1668 if (session == NULL) 1669 return -ENOMEM; 1670 1671 + err = perf_session__process_events(session, &event_ops); 1672 perf_session__delete(session); 1673 return err; 1674 } ··· 1904 if (!strcmp(argv[0], "trace")) 1905 return cmd_trace(argc, argv, prefix); 1906 1907 + symbol__init(); 1908 if (!strncmp(argv[0], "rec", 3)) { 1909 return __cmd_record(argc, argv); 1910 } else if (!strncmp(argv[0], "lat", 3)) {
+24 -35
tools/perf/builtin-timechart.c
··· 30 #include "util/parse-options.h" 31 #include "util/parse-events.h" 32 #include "util/event.h" 33 - #include "util/data_map.h" 34 #include "util/svghelper.h" 35 36 static char const *input_name = "perf.data"; 37 static char const *output_name = "output.svg"; 38 - 39 - 40 - static u64 sample_type; 41 42 static unsigned int numcpus; 43 static u64 min_freq; /* Lowest CPU frequency seen */ ··· 278 static u64 cpus_pstate_start_times[MAX_CPUS]; 279 static u64 cpus_pstate_state[MAX_CPUS]; 280 281 - static int 282 - process_comm_event(event_t *event) 283 { 284 pid_set_comm(event->comm.pid, event->comm.comm); 285 return 0; 286 } 287 - static int 288 - process_fork_event(event_t *event) 289 { 290 pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); 291 return 0; 292 } 293 294 - static int 295 - process_exit_event(event_t *event) 296 { 297 pid_exit(event->fork.pid, event->fork.time); 298 return 0; ··· 475 } 476 477 478 - static int 479 - process_sample_event(event_t *event) 480 { 481 struct sample_data data; 482 struct trace_entry *te; 483 484 memset(&data, 0, sizeof(data)); 485 486 - event__parse_sample(event, sample_type, &data); 487 488 - if (sample_type & PERF_SAMPLE_TIME) { 489 if (!first_time || first_time > data.time) 490 first_time = data.time; 491 if (last_time < data.time) ··· 492 } 493 494 te = (void *)data.raw_data; 495 - if (sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) { 496 char *event_str; 497 struct power_entry *pe; 498 ··· 569 } 570 } 571 572 - static u64 sample_time(event_t *event) 573 { 574 int cursor; 575 576 cursor = 0; 577 - if (sample_type & PERF_SAMPLE_IP) 578 cursor++; 579 - if (sample_type & PERF_SAMPLE_TID) 580 cursor++; 581 - if (sample_type & PERF_SAMPLE_TIME) 582 return event->sample.array[cursor]; 583 return 0; 584 } ··· 588 * We first queue all events, sorted backwards by insertion. 589 * The order will get flipped later. 
590 */ 591 - static int 592 - queue_sample_event(event_t *event) 593 { 594 struct sample_wrapper *copy, *prev; 595 int size; ··· 602 memset(copy, 0, size); 603 604 copy->next = NULL; 605 - copy->timestamp = sample_time(event); 606 607 memcpy(&copy->data, event, event->sample.header.size); 608 ··· 1014 svg_close(); 1015 } 1016 1017 - static void process_samples(void) 1018 { 1019 struct sample_wrapper *cursor; 1020 event_t *event; ··· 1025 while (cursor) { 1026 event = (void *)&cursor->data; 1027 cursor = cursor->next; 1028 - process_sample_event(event); 1029 } 1030 } 1031 1032 - static int sample_type_check(u64 type) 1033 { 1034 - sample_type = type; 1035 - 1036 - if (!(sample_type & PERF_SAMPLE_RAW)) { 1037 fprintf(stderr, "No trace samples found in the file.\n" 1038 "Have you used 'perf timechart record' to record it?\n"); 1039 return -1; ··· 1040 return 0; 1041 } 1042 1043 - static struct perf_file_handler file_handler = { 1044 .process_comm_event = process_comm_event, 1045 .process_fork_event = process_fork_event, 1046 .process_exit_event = process_exit_event, ··· 1056 if (session == NULL) 1057 return -ENOMEM; 1058 1059 - register_perf_file_handler(&file_handler); 1060 - 1061 - ret = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 1062 if (ret) 1063 goto out_delete; 1064 1065 - process_samples(); 1066 1067 end_sample_processing(); 1068 ··· 1137 1138 int cmd_timechart(int argc, const char **argv, const char *prefix __used) 1139 { 1140 - symbol__init(0); 1141 - 1142 argc = parse_options(argc, argv, options, timechart_usage, 1143 PARSE_OPT_STOP_AT_NON_OPTION); 1144 1145 if (argc && !strncmp(argv[0], "rec", 3)) 1146 return __cmd_record(argc, argv);
··· 30 #include "util/parse-options.h" 31 #include "util/parse-events.h" 32 #include "util/event.h" 33 + #include "util/session.h" 34 #include "util/svghelper.h" 35 36 static char const *input_name = "perf.data"; 37 static char const *output_name = "output.svg"; 38 39 static unsigned int numcpus; 40 static u64 min_freq; /* Lowest CPU frequency seen */ ··· 281 static u64 cpus_pstate_start_times[MAX_CPUS]; 282 static u64 cpus_pstate_state[MAX_CPUS]; 283 284 + static int process_comm_event(event_t *event, struct perf_session *session __used) 285 { 286 pid_set_comm(event->comm.pid, event->comm.comm); 287 return 0; 288 } 289 + 290 + static int process_fork_event(event_t *event, struct perf_session *session __used) 291 { 292 pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); 293 return 0; 294 } 295 296 + static int process_exit_event(event_t *event, struct perf_session *session __used) 297 { 298 pid_exit(event->fork.pid, event->fork.time); 299 return 0; ··· 480 } 481 482 483 + static int process_sample_event(event_t *event, struct perf_session *session) 484 { 485 struct sample_data data; 486 struct trace_entry *te; 487 488 memset(&data, 0, sizeof(data)); 489 490 + event__parse_sample(event, session->sample_type, &data); 491 492 + if (session->sample_type & PERF_SAMPLE_TIME) { 493 if (!first_time || first_time > data.time) 494 first_time = data.time; 495 if (last_time < data.time) ··· 498 } 499 500 te = (void *)data.raw_data; 501 + if (session->sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) { 502 char *event_str; 503 struct power_entry *pe; 504 ··· 575 } 576 } 577 578 + static u64 sample_time(event_t *event, const struct perf_session *session) 579 { 580 int cursor; 581 582 cursor = 0; 583 + if (session->sample_type & PERF_SAMPLE_IP) 584 cursor++; 585 + if (session->sample_type & PERF_SAMPLE_TID) 586 cursor++; 587 + if (session->sample_type & PERF_SAMPLE_TIME) 588 return event->sample.array[cursor]; 589 return 0; 590 } ··· 594 * We first queue all events, 
sorted backwards by insertion. 595 * The order will get flipped later. 596 */ 597 + static int queue_sample_event(event_t *event, struct perf_session *session) 598 { 599 struct sample_wrapper *copy, *prev; 600 int size; ··· 609 memset(copy, 0, size); 610 611 copy->next = NULL; 612 + copy->timestamp = sample_time(event, session); 613 614 memcpy(&copy->data, event, event->sample.header.size); 615 ··· 1021 svg_close(); 1022 } 1023 1024 + static void process_samples(struct perf_session *session) 1025 { 1026 struct sample_wrapper *cursor; 1027 event_t *event; ··· 1032 while (cursor) { 1033 event = (void *)&cursor->data; 1034 cursor = cursor->next; 1035 + process_sample_event(event, session); 1036 } 1037 } 1038 1039 + static int sample_type_check(struct perf_session *session) 1040 { 1041 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 1042 fprintf(stderr, "No trace samples found in the file.\n" 1043 "Have you used 'perf timechart record' to record it?\n"); 1044 return -1; ··· 1049 return 0; 1050 } 1051 1052 + static struct perf_event_ops event_ops = { 1053 .process_comm_event = process_comm_event, 1054 .process_fork_event = process_fork_event, 1055 .process_exit_event = process_exit_event, ··· 1065 if (session == NULL) 1066 return -ENOMEM; 1067 1068 + ret = perf_session__process_events(session, &event_ops); 1069 if (ret) 1070 goto out_delete; 1071 1072 + process_samples(session); 1073 1074 end_sample_processing(); 1075 ··· 1148 1149 int cmd_timechart(int argc, const char **argv, const char *prefix __used) 1150 { 1151 argc = parse_options(argc, argv, options, timechart_usage, 1152 PARSE_OPT_STOP_AT_NON_OPTION); 1153 + 1154 + symbol__init(); 1155 1156 if (argc && !strncmp(argv[0], "rec", 3)) 1157 return __cmd_record(argc, argv);
+27 -18
tools/perf/builtin-top.c
··· 20 21 #include "perf.h" 22 23 - #include "util/symbol.h" 24 #include "util/color.h" 25 #include "util/thread.h" 26 #include "util/util.h" 27 #include <linux/rbtree.h> ··· 80 static bool hide_kernel_symbols = false; 81 static bool hide_user_symbols = false; 82 static struct winsize winsize; 83 - struct symbol_conf symbol_conf; 84 85 /* 86 * Source ··· 926 return 0; 927 } 928 929 - static void event__process_sample(const event_t *self, int counter) 930 { 931 u64 ip = self->ip.ip; 932 struct sym_entry *syme; ··· 947 return; 948 } 949 950 - if (event__preprocess_sample(self, &al, symbol_filter) < 0 || 951 - al.sym == NULL) 952 return; 953 954 syme = symbol__priv(al.sym); ··· 966 } 967 } 968 969 - static int event__process(event_t *event) 970 { 971 switch (event->header.type) { 972 case PERF_RECORD_COMM: 973 - event__process_comm(event); 974 break; 975 case PERF_RECORD_MMAP: 976 - event__process_mmap(event); 977 break; 978 default: 979 break; ··· 1000 return head; 1001 } 1002 1003 - static void mmap_read_counter(struct mmap_data *md) 1004 { 1005 unsigned int head = mmap_read_head(md); 1006 unsigned int old = md->prev; ··· 1054 } 1055 1056 if (event->header.type == PERF_RECORD_SAMPLE) 1057 - event__process_sample(event, md->counter); 1058 else 1059 - event__process(event); 1060 old += size; 1061 } 1062 ··· 1066 static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; 1067 static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; 1068 1069 - static void mmap_read(void) 1070 { 1071 int i, counter; 1072 1073 for (i = 0; i < nr_cpus; i++) { 1074 for (counter = 0; counter < nr_counters; counter++) 1075 - mmap_read_counter(&mmap_array[i][counter]); 1076 } 1077 } 1078 ··· 1157 pthread_t thread; 1158 int i, counter; 1159 int ret; 1160 1161 if (target_pid != -1) 1162 - event__synthesize_thread(target_pid, event__process); 1163 else 1164 - event__synthesize_threads(event__process); 1165 1166 for (i = 0; i < nr_cpus; i++) { 1167 group_fd = -1; ··· 1179 /* Wait for a 
minimal set of events before starting the snapshot */ 1180 poll(event_array, nr_poll, 100); 1181 1182 - mmap_read(); 1183 1184 if (pthread_create(&thread, NULL, display_thread, NULL)) { 1185 printf("Could not create display thread.\n"); ··· 1199 while (1) { 1200 int hits = samples; 1201 1202 - mmap_read(); 1203 1204 if (hits == samples) 1205 ret = poll(event_array, nr_poll, 100); ··· 1282 (nr_counters + 1) * sizeof(unsigned long)); 1283 if (symbol_conf.vmlinux_name == NULL) 1284 symbol_conf.try_vmlinux_path = true; 1285 - if (symbol__init(&symbol_conf) < 0) 1286 return -1; 1287 1288 if (delay_secs < 1)
··· 20 21 #include "perf.h" 22 23 #include "util/color.h" 24 + #include "util/session.h" 25 + #include "util/symbol.h" 26 #include "util/thread.h" 27 #include "util/util.h" 28 #include <linux/rbtree.h> ··· 79 static bool hide_kernel_symbols = false; 80 static bool hide_user_symbols = false; 81 static struct winsize winsize; 82 83 /* 84 * Source ··· 926 return 0; 927 } 928 929 + static void event__process_sample(const event_t *self, 930 + struct perf_session *session, int counter) 931 { 932 u64 ip = self->ip.ip; 933 struct sym_entry *syme; ··· 946 return; 947 } 948 949 + if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 || 950 + al.sym == NULL || al.filtered) 951 return; 952 953 syme = symbol__priv(al.sym); ··· 965 } 966 } 967 968 + static int event__process(event_t *event, struct perf_session *session) 969 { 970 switch (event->header.type) { 971 case PERF_RECORD_COMM: 972 + event__process_comm(event, session); 973 break; 974 case PERF_RECORD_MMAP: 975 + event__process_mmap(event, session); 976 break; 977 default: 978 break; ··· 999 return head; 1000 } 1001 1002 + static void perf_session__mmap_read_counter(struct perf_session *self, 1003 + struct mmap_data *md) 1004 { 1005 unsigned int head = mmap_read_head(md); 1006 unsigned int old = md->prev; ··· 1052 } 1053 1054 if (event->header.type == PERF_RECORD_SAMPLE) 1055 + event__process_sample(event, self, md->counter); 1056 else 1057 + event__process(event, self); 1058 old += size; 1059 } 1060 ··· 1064 static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; 1065 static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; 1066 1067 + static void perf_session__mmap_read(struct perf_session *self) 1068 { 1069 int i, counter; 1070 1071 for (i = 0; i < nr_cpus; i++) { 1072 for (counter = 0; counter < nr_counters; counter++) 1073 + perf_session__mmap_read_counter(self, &mmap_array[i][counter]); 1074 } 1075 } 1076 ··· 1155 pthread_t thread; 1156 int i, counter; 1157 int ret; 1158 + /* 1159 + * 
FIXME: perf_session__new should allow passing a O_MMAP, so that all this 1160 + * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. 1161 + */ 1162 + struct perf_session *session = perf_session__new(NULL, O_WRONLY, false); 1163 + if (session == NULL) 1164 + return -ENOMEM; 1165 1166 if (target_pid != -1) 1167 + event__synthesize_thread(target_pid, event__process, session); 1168 else 1169 + event__synthesize_threads(event__process, session); 1170 1171 for (i = 0; i < nr_cpus; i++) { 1172 group_fd = -1; ··· 1170 /* Wait for a minimal set of events before starting the snapshot */ 1171 poll(event_array, nr_poll, 100); 1172 1173 + perf_session__mmap_read(session); 1174 1175 if (pthread_create(&thread, NULL, display_thread, NULL)) { 1176 printf("Could not create display thread.\n"); ··· 1190 while (1) { 1191 int hits = samples; 1192 1193 + perf_session__mmap_read(session); 1194 1195 if (hits == samples) 1196 ret = poll(event_array, nr_poll, 100); ··· 1273 (nr_counters + 1) * sizeof(unsigned long)); 1274 if (symbol_conf.vmlinux_name == NULL) 1275 symbol_conf.try_vmlinux_path = true; 1276 + if (symbol__init() < 0) 1277 return -1; 1278 1279 if (delay_secs < 1)
+306 -43
tools/perf/builtin-trace.c
··· 12 static char const *script_name; 13 static char const *generate_script_lang; 14 15 - static int default_start_script(const char *script __attribute((unused))) 16 { 17 return 0; 18 } ··· 24 return 0; 25 } 26 27 - static int default_generate_script(const char *outfile __attribute ((unused))) 28 { 29 return 0; 30 } ··· 59 #include "util/debug.h" 60 61 #include "util/trace-event.h" 62 - #include "util/data_map.h" 63 #include "util/exec_cmd.h" 64 65 static char const *input_name = "perf.data"; 66 67 - static struct perf_session *session; 68 - static u64 sample_type; 69 - 70 - static int process_sample_event(event_t *event) 71 { 72 struct sample_data data; 73 struct thread *thread; ··· 73 data.cpu = -1; 74 data.period = 1; 75 76 - event__parse_sample(event, sample_type, &data); 77 78 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 79 event->header.misc, ··· 81 (void *)(long)data.ip, 82 (long long)data.period); 83 84 - thread = threads__findnew(event->ip.pid); 85 if (thread == NULL) { 86 pr_debug("problem processing %d event, skipping it.\n", 87 event->header.type); 88 return -1; 89 } 90 91 - if (sample_type & PERF_SAMPLE_RAW) { 92 /* 93 * FIXME: better resolve from pid from the struct trace_entry 94 * field, although it should be the same than this perf ··· 98 data.raw_size, 99 data.time, thread->comm); 100 } 101 - event__stats.total += data.period; 102 103 return 0; 104 } 105 106 - static int sample_type_check(u64 type) 107 { 108 - sample_type = type; 109 - 110 - if (!(sample_type & PERF_SAMPLE_RAW)) { 111 fprintf(stderr, 112 "No trace sample to read. 
Did you call perf record " 113 "without -R?"); ··· 115 return 0; 116 } 117 118 - static struct perf_file_handler file_handler = { 119 .process_sample_event = process_sample_event, 120 .process_comm_event = event__process_comm, 121 .sample_type_check = sample_type_check, 122 }; 123 124 - static int __cmd_trace(void) 125 { 126 - int err; 127 - 128 - session = perf_session__new(input_name, O_RDONLY, 0); 129 - if (session == NULL) 130 - return -ENOMEM; 131 - 132 - register_idle_thread(); 133 - register_perf_file_handler(&file_handler); 134 - 135 - err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 136 - perf_session__delete(session); 137 - return err; 138 } 139 140 struct script_spec { ··· 274 return 0; 275 } 276 277 static const char * const annotate_usage[] = { 278 "perf trace [<options>] <command>", 279 NULL ··· 522 "dump raw trace in ASCII"), 523 OPT_BOOLEAN('v', "verbose", &verbose, 524 "be more verbose (show symbol address, etc)"), 525 - OPT_BOOLEAN('l', "latency", &latency_format, 526 "show latency attributes (irqs/preemption disabled, etc)"), 527 OPT_CALLBACK('s', "script", NULL, "name", 528 "script file name (lang:script name, script name, or *)", 529 parse_scriptname), ··· 537 538 int cmd_trace(int argc, const char **argv, const char *prefix __used) 539 { 540 - int err; 541 542 - symbol__init(0); 543 544 setup_scripting(); 545 546 - argc = parse_options(argc, argv, options, annotate_usage, 0); 547 - if (argc) { 548 - /* 549 - * Special case: if there's an argument left then assume tha 550 - * it's a symbol filter: 551 - */ 552 - if (argc > 1) 553 - usage_with_options(annotate_usage, options); 554 - } 555 556 setup_pager(); 557 558 if (generate_script_lang) { 559 struct stat perf_stat; ··· 624 } 625 626 if (script_name) { 627 - err = scripting_ops->start_script(script_name); 628 if (err) 629 goto out; 630 } 631 632 - err = __cmd_trace(); 633 634 cleanup_scripting(); 635 out: 636 return err;
··· 12 static char const *script_name; 13 static char const *generate_script_lang; 14 15 + static int default_start_script(const char *script __unused, 16 + int argc __unused, 17 + const char **argv __unused) 18 { 19 return 0; 20 } ··· 22 return 0; 23 } 24 25 + static int default_generate_script(const char *outfile __unused) 26 { 27 return 0; 28 } ··· 57 #include "util/debug.h" 58 59 #include "util/trace-event.h" 60 #include "util/exec_cmd.h" 61 62 static char const *input_name = "perf.data"; 63 64 + static int process_sample_event(event_t *event, struct perf_session *session) 65 { 66 struct sample_data data; 67 struct thread *thread; ··· 75 data.cpu = -1; 76 data.period = 1; 77 78 + event__parse_sample(event, session->sample_type, &data); 79 80 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 81 event->header.misc, ··· 83 (void *)(long)data.ip, 84 (long long)data.period); 85 86 + thread = perf_session__findnew(session, event->ip.pid); 87 if (thread == NULL) { 88 pr_debug("problem processing %d event, skipping it.\n", 89 event->header.type); 90 return -1; 91 } 92 93 + if (session->sample_type & PERF_SAMPLE_RAW) { 94 /* 95 * FIXME: better resolve from pid from the struct trace_entry 96 * field, although it should be the same than this perf ··· 100 data.raw_size, 101 data.time, thread->comm); 102 } 103 104 + session->events_stats.total += data.period; 105 return 0; 106 } 107 108 + static int sample_type_check(struct perf_session *session) 109 { 110 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 111 fprintf(stderr, 112 "No trace sample to read. 
Did you call perf record " 113 "without -R?"); ··· 119 return 0; 120 } 121 122 + static struct perf_event_ops event_ops = { 123 .process_sample_event = process_sample_event, 124 .process_comm_event = event__process_comm, 125 .sample_type_check = sample_type_check, 126 }; 127 128 + static int __cmd_trace(struct perf_session *session) 129 { 130 + return perf_session__process_events(session, &event_ops); 131 } 132 133 struct script_spec { ··· 289 return 0; 290 } 291 292 + #define for_each_lang(scripts_dir, lang_dirent, lang_next) \ 293 + while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \ 294 + lang_next) \ 295 + if (lang_dirent.d_type == DT_DIR && \ 296 + (strcmp(lang_dirent.d_name, ".")) && \ 297 + (strcmp(lang_dirent.d_name, ".."))) 298 + 299 + #define for_each_script(lang_dir, script_dirent, script_next) \ 300 + while (!readdir_r(lang_dir, &script_dirent, &script_next) && \ 301 + script_next) \ 302 + if (script_dirent.d_type != DT_DIR) 303 + 304 + 305 + #define RECORD_SUFFIX "-record" 306 + #define REPORT_SUFFIX "-report" 307 + 308 + struct script_desc { 309 + struct list_head node; 310 + char *name; 311 + char *half_liner; 312 + char *args; 313 + }; 314 + 315 + LIST_HEAD(script_descs); 316 + 317 + static struct script_desc *script_desc__new(const char *name) 318 + { 319 + struct script_desc *s = zalloc(sizeof(*s)); 320 + 321 + if (s != NULL) 322 + s->name = strdup(name); 323 + 324 + return s; 325 + } 326 + 327 + static void script_desc__delete(struct script_desc *s) 328 + { 329 + free(s->name); 330 + free(s); 331 + } 332 + 333 + static void script_desc__add(struct script_desc *s) 334 + { 335 + list_add_tail(&s->node, &script_descs); 336 + } 337 + 338 + static struct script_desc *script_desc__find(const char *name) 339 + { 340 + struct script_desc *s; 341 + 342 + list_for_each_entry(s, &script_descs, node) 343 + if (strcasecmp(s->name, name) == 0) 344 + return s; 345 + return NULL; 346 + } 347 + 348 + static struct script_desc *script_desc__findnew(const 
char *name) 349 + { 350 + struct script_desc *s = script_desc__find(name); 351 + 352 + if (s) 353 + return s; 354 + 355 + s = script_desc__new(name); 356 + if (!s) 357 + goto out_delete_desc; 358 + 359 + script_desc__add(s); 360 + 361 + return s; 362 + 363 + out_delete_desc: 364 + script_desc__delete(s); 365 + 366 + return NULL; 367 + } 368 + 369 + static char *ends_with(char *str, const char *suffix) 370 + { 371 + size_t suffix_len = strlen(suffix); 372 + char *p = str; 373 + 374 + if (strlen(str) > suffix_len) { 375 + p = str + strlen(str) - suffix_len; 376 + if (!strncmp(p, suffix, suffix_len)) 377 + return p; 378 + } 379 + 380 + return NULL; 381 + } 382 + 383 + static char *ltrim(char *str) 384 + { 385 + int len = strlen(str); 386 + 387 + while (len && isspace(*str)) { 388 + len--; 389 + str++; 390 + } 391 + 392 + return str; 393 + } 394 + 395 + static int read_script_info(struct script_desc *desc, const char *filename) 396 + { 397 + char line[BUFSIZ], *p; 398 + FILE *fp; 399 + 400 + fp = fopen(filename, "r"); 401 + if (!fp) 402 + return -1; 403 + 404 + while (fgets(line, sizeof(line), fp)) { 405 + p = ltrim(line); 406 + if (strlen(p) == 0) 407 + continue; 408 + if (*p != '#') 409 + continue; 410 + p++; 411 + if (strlen(p) && *p == '!') 412 + continue; 413 + 414 + p = ltrim(p); 415 + if (strlen(p) && p[strlen(p) - 1] == '\n') 416 + p[strlen(p) - 1] = '\0'; 417 + 418 + if (!strncmp(p, "description:", strlen("description:"))) { 419 + p += strlen("description:"); 420 + desc->half_liner = strdup(ltrim(p)); 421 + continue; 422 + } 423 + 424 + if (!strncmp(p, "args:", strlen("args:"))) { 425 + p += strlen("args:"); 426 + desc->args = strdup(ltrim(p)); 427 + continue; 428 + } 429 + } 430 + 431 + fclose(fp); 432 + 433 + return 0; 434 + } 435 + 436 + static int list_available_scripts(const struct option *opt __used, 437 + const char *s __used, int unset __used) 438 + { 439 + struct dirent *script_next, *lang_next, script_dirent, lang_dirent; 440 + char 
scripts_path[MAXPATHLEN]; 441 + DIR *scripts_dir, *lang_dir; 442 + char script_path[MAXPATHLEN]; 443 + char lang_path[MAXPATHLEN]; 444 + struct script_desc *desc; 445 + char first_half[BUFSIZ]; 446 + char *script_root; 447 + char *str; 448 + 449 + snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); 450 + 451 + scripts_dir = opendir(scripts_path); 452 + if (!scripts_dir) 453 + return -1; 454 + 455 + for_each_lang(scripts_dir, lang_dirent, lang_next) { 456 + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, 457 + lang_dirent.d_name); 458 + lang_dir = opendir(lang_path); 459 + if (!lang_dir) 460 + continue; 461 + 462 + for_each_script(lang_dir, script_dirent, script_next) { 463 + script_root = strdup(script_dirent.d_name); 464 + str = ends_with(script_root, REPORT_SUFFIX); 465 + if (str) { 466 + *str = '\0'; 467 + desc = script_desc__findnew(script_root); 468 + snprintf(script_path, MAXPATHLEN, "%s/%s", 469 + lang_path, script_dirent.d_name); 470 + read_script_info(desc, script_path); 471 + } 472 + free(script_root); 473 + } 474 + } 475 + 476 + fprintf(stdout, "List of available trace scripts:\n"); 477 + list_for_each_entry(desc, &script_descs, node) { 478 + sprintf(first_half, "%s %s", desc->name, 479 + desc->args ? desc->args : ""); 480 + fprintf(stdout, " %-36s %s\n", first_half, 481 + desc->half_liner ? 
desc->half_liner : ""); 482 + } 483 + 484 + exit(0); 485 + } 486 + 487 + static char *get_script_path(const char *script_root, const char *suffix) 488 + { 489 + struct dirent *script_next, *lang_next, script_dirent, lang_dirent; 490 + char scripts_path[MAXPATHLEN]; 491 + char script_path[MAXPATHLEN]; 492 + DIR *scripts_dir, *lang_dir; 493 + char lang_path[MAXPATHLEN]; 494 + char *str, *__script_root; 495 + char *path = NULL; 496 + 497 + snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); 498 + 499 + scripts_dir = opendir(scripts_path); 500 + if (!scripts_dir) 501 + return NULL; 502 + 503 + for_each_lang(scripts_dir, lang_dirent, lang_next) { 504 + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, 505 + lang_dirent.d_name); 506 + lang_dir = opendir(lang_path); 507 + if (!lang_dir) 508 + continue; 509 + 510 + for_each_script(lang_dir, script_dirent, script_next) { 511 + __script_root = strdup(script_dirent.d_name); 512 + str = ends_with(__script_root, suffix); 513 + if (str) { 514 + *str = '\0'; 515 + if (strcmp(__script_root, script_root)) 516 + continue; 517 + snprintf(script_path, MAXPATHLEN, "%s/%s", 518 + lang_path, script_dirent.d_name); 519 + path = strdup(script_path); 520 + free(__script_root); 521 + break; 522 + } 523 + free(__script_root); 524 + } 525 + } 526 + 527 + return path; 528 + } 529 + 530 static const char * const annotate_usage[] = { 531 "perf trace [<options>] <command>", 532 NULL ··· 299 "dump raw trace in ASCII"), 300 OPT_BOOLEAN('v', "verbose", &verbose, 301 "be more verbose (show symbol address, etc)"), 302 + OPT_BOOLEAN('L', "Latency", &latency_format, 303 "show latency attributes (irqs/preemption disabled, etc)"), 304 + OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts", 305 + list_available_scripts), 306 OPT_CALLBACK('s', "script", NULL, "name", 307 "script file name (lang:script name, script name, or *)", 308 parse_scriptname), ··· 312 313 int cmd_trace(int argc, const char **argv, const char 
*prefix __used) 314 { 315 + struct perf_session *session; 316 + const char *suffix = NULL; 317 + const char **__argv; 318 + char *script_path; 319 + int i, err; 320 321 + if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) { 322 + if (argc < 3) { 323 + fprintf(stderr, 324 + "Please specify a record script\n"); 325 + return -1; 326 + } 327 + suffix = RECORD_SUFFIX; 328 + } 329 + 330 + if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) { 331 + if (argc < 3) { 332 + fprintf(stderr, 333 + "Please specify a report script\n"); 334 + return -1; 335 + } 336 + suffix = REPORT_SUFFIX; 337 + } 338 + 339 + if (suffix) { 340 + script_path = get_script_path(argv[2], suffix); 341 + if (!script_path) { 342 + fprintf(stderr, "script not found\n"); 343 + return -1; 344 + } 345 + 346 + __argv = malloc((argc + 1) * sizeof(const char *)); 347 + __argv[0] = "/bin/sh"; 348 + __argv[1] = script_path; 349 + for (i = 3; i < argc; i++) 350 + __argv[i - 1] = argv[i]; 351 + __argv[argc - 1] = NULL; 352 + 353 + execvp("/bin/sh", (char **)__argv); 354 + exit(-1); 355 + } 356 357 setup_scripting(); 358 359 + argc = parse_options(argc, argv, options, annotate_usage, 360 + PARSE_OPT_STOP_AT_NON_OPTION); 361 362 + if (symbol__init() < 0) 363 + return -1; 364 setup_pager(); 365 + 366 + session = perf_session__new(input_name, O_RDONLY, 0); 367 + if (session == NULL) 368 + return -ENOMEM; 369 370 if (generate_script_lang) { 371 struct stat perf_stat; ··· 362 } 363 364 if (script_name) { 365 + err = scripting_ops->start_script(script_name, argc, argv); 366 if (err) 367 goto out; 368 } 369 370 + err = __cmd_trace(session); 371 372 + perf_session__delete(session); 373 cleanup_scripting(); 374 out: 375 return err;
+1
tools/perf/builtin.h
··· 17 extern int cmd_annotate(int argc, const char **argv, const char *prefix); 18 extern int cmd_bench(int argc, const char **argv, const char *prefix); 19 extern int cmd_buildid_list(int argc, const char **argv, const char *prefix); 20 extern int cmd_help(int argc, const char **argv, const char *prefix); 21 extern int cmd_sched(int argc, const char **argv, const char *prefix); 22 extern int cmd_list(int argc, const char **argv, const char *prefix);
··· 17 extern int cmd_annotate(int argc, const char **argv, const char *prefix); 18 extern int cmd_bench(int argc, const char **argv, const char *prefix); 19 extern int cmd_buildid_list(int argc, const char **argv, const char *prefix); 20 + extern int cmd_diff(int argc, const char **argv, const char *prefix); 21 extern int cmd_help(int argc, const char **argv, const char *prefix); 22 extern int cmd_sched(int argc, const char **argv, const char *prefix); 23 extern int cmd_list(int argc, const char **argv, const char *prefix);
+1
tools/perf/command-list.txt
··· 5 perf-annotate mainporcelain common 6 perf-bench mainporcelain common 7 perf-buildid-list mainporcelain common 8 perf-list mainporcelain common 9 perf-sched mainporcelain common 10 perf-record mainporcelain common
··· 5 perf-annotate mainporcelain common 6 perf-bench mainporcelain common 7 perf-buildid-list mainporcelain common 8 + perf-diff mainporcelain common 9 perf-list mainporcelain common 10 perf-sched mainporcelain common 11 perf-record mainporcelain common
+1
tools/perf/perf.c
··· 286 const char *cmd = argv[0]; 287 static struct cmd_struct commands[] = { 288 { "buildid-list", cmd_buildid_list, 0 }, 289 { "help", cmd_help, 0 }, 290 { "list", cmd_list, 0 }, 291 { "record", cmd_record, 0 },
··· 286 const char *cmd = argv[0]; 287 static struct cmd_struct commands[] = { 288 { "buildid-list", cmd_buildid_list, 0 }, 289 + { "diff", cmd_diff, 0 }, 290 { "help", cmd_help, 0 }, 291 { "list", cmd_list, 0 }, 292 { "record", cmd_record, 0 },
+1
tools/perf/scripts/perl/bin/check-perf-trace-report
··· 1 #!/bin/bash 2 perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl 3 4
··· 1 #!/bin/bash 2 + # description: useless but exhaustive test script 3 perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl 4 5
+3 -1
tools/perf/scripts/perl/bin/rw-by-file-report
··· 1 #!/bin/bash 2 - perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl 3 4 5
··· 1 #!/bin/bash 2 + # description: r/w activity for a program, by file 3 + # args: <comm> 4 + perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $1 5 6 7
+1
tools/perf/scripts/perl/bin/rw-by-pid-report
··· 1 #!/bin/bash 2 perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl 3 4
··· 1 #!/bin/bash 2 + # description: system-wide r/w activity 3 perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl 4 5
+1
tools/perf/scripts/perl/bin/wakeup-latency-report
··· 1 #!/bin/bash 2 perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl 3 4
··· 1 #!/bin/bash 2 + # description: system-wide min/max/avg wakeup latency 3 perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl 4 5
+1
tools/perf/scripts/perl/bin/workqueue-stats-report
··· 1 #!/bin/bash 2 perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl 3 4
··· 1 #!/bin/bash 2 + # description: workqueue stats (ins/exe/create/destroy) 3 perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl 4 5
+3 -2
tools/perf/scripts/perl/rw-by-file.pl
··· 18 use Perf::Trace::Core; 19 use Perf::Trace::Util; 20 21 - # change this to the comm of the program you're interested in 22 - my $for_comm = "perf"; 23 24 my %reads; 25 my %writes;
··· 18 use Perf::Trace::Core; 19 use Perf::Trace::Util; 20 21 + my $usage = "perf trace -s rw-by-file.pl <comm>\n"; 22 + 23 + my $for_comm = shift or die $usage; 24 25 my %reads; 26 my %writes;
+53 -43
tools/perf/util/data_map.c
··· 1 - #include "data_map.h" 2 #include "symbol.h" 3 #include "util.h" 4 #include "debug.h" 5 6 - 7 - static struct perf_file_handler *curr_handler; 8 - static unsigned long mmap_window = 32; 9 - static char __cwd[PATH_MAX]; 10 - 11 - static int process_event_stub(event_t *event __used) 12 { 13 dump_printf(": unhandled!\n"); 14 return 0; 15 } 16 17 - void register_perf_file_handler(struct perf_file_handler *handler) 18 { 19 if (!handler->process_sample_event) 20 handler->process_sample_event = process_event_stub; ··· 31 handler->process_throttle_event = process_event_stub; 32 if (!handler->process_unthrottle_event) 33 handler->process_unthrottle_event = process_event_stub; 34 - 35 - curr_handler = handler; 36 } 37 38 static const char *event__name[] = { ··· 56 event__name[i], event__total[i]); 57 } 58 59 - static int 60 - process_event(event_t *event, unsigned long offset, unsigned long head) 61 { 62 trace_event(event); 63 ··· 73 74 switch (event->header.type) { 75 case PERF_RECORD_SAMPLE: 76 - return curr_handler->process_sample_event(event); 77 case PERF_RECORD_MMAP: 78 - return curr_handler->process_mmap_event(event); 79 case PERF_RECORD_COMM: 80 - return curr_handler->process_comm_event(event); 81 case PERF_RECORD_FORK: 82 - return curr_handler->process_fork_event(event); 83 case PERF_RECORD_EXIT: 84 - return curr_handler->process_exit_event(event); 85 case PERF_RECORD_LOST: 86 - return curr_handler->process_lost_event(event); 87 case PERF_RECORD_READ: 88 - return curr_handler->process_read_event(event); 89 case PERF_RECORD_THROTTLE: 90 - return curr_handler->process_throttle_event(event); 91 case PERF_RECORD_UNTHROTTLE: 92 - return curr_handler->process_unthrottle_event(event); 93 default: 94 - curr_handler->total_unknown++; 95 return -1; 96 } 97 } ··· 125 return err; 126 } 127 128 int perf_session__process_events(struct perf_session *self, 129 - int full_paths, int *cwdlen, char **cwd) 130 { 131 int err; 132 unsigned long head, shift; 133 unsigned long 
offset = 0; 134 size_t page_size; 135 - u64 sample_type; 136 event_t *event; 137 uint32_t size; 138 char *buf; 139 140 - if (curr_handler == NULL) { 141 - pr_debug("Forgot to register perf file handler\n"); 142 - return -EINVAL; 143 - } 144 145 page_size = getpagesize(); 146 147 head = self->header.data_offset; 148 - sample_type = perf_header__sample_type(&self->header); 149 150 err = -EINVAL; 151 - if (curr_handler->sample_type_check && 152 - curr_handler->sample_type_check(sample_type) < 0) 153 goto out_err; 154 155 - if (!full_paths) { 156 - if (getcwd(__cwd, sizeof(__cwd)) == NULL) { 157 - pr_err("failed to get the current directory\n"); 158 err = -errno; 159 goto out_err; 160 } 161 - *cwd = __cwd; 162 - *cwdlen = strlen(*cwd); 163 - } else { 164 - *cwd = NULL; 165 - *cwdlen = 0; 166 } 167 168 shift = page_size * (head / page_size); ··· 184 head -= shift; 185 186 remap: 187 - buf = mmap(NULL, page_size * mmap_window, PROT_READ, 188 MAP_SHARED, self->fd, offset); 189 if (buf == MAP_FAILED) { 190 pr_err("failed to mmap file\n"); ··· 199 if (!size) 200 size = 8; 201 202 - if (head + event->header.size >= page_size * mmap_window) { 203 int munmap_ret; 204 205 shift = page_size * (head / page_size); 206 207 - munmap_ret = munmap(buf, page_size * mmap_window); 208 assert(munmap_ret == 0); 209 210 offset += shift; ··· 219 (void *)(long)event->header.size, 220 event->header.type); 221 222 - if (!size || process_event(event, offset, head) < 0) { 223 224 dump_printf("%p [%p]: skipping unknown header type: %d\n", 225 (void *)(offset + head),
··· 1 #include "symbol.h" 2 #include "util.h" 3 #include "debug.h" 4 + #include "thread.h" 5 + #include "session.h" 6 7 + static int process_event_stub(event_t *event __used, 8 + struct perf_session *session __used) 9 { 10 dump_printf(": unhandled!\n"); 11 return 0; 12 } 13 14 + static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) 15 { 16 if (!handler->process_sample_event) 17 handler->process_sample_event = process_event_stub; ··· 34 handler->process_throttle_event = process_event_stub; 35 if (!handler->process_unthrottle_event) 36 handler->process_unthrottle_event = process_event_stub; 37 } 38 39 static const char *event__name[] = { ··· 61 event__name[i], event__total[i]); 62 } 63 64 + static int process_event(event_t *event, struct perf_session *session, 65 + struct perf_event_ops *ops, 66 + unsigned long offset, unsigned long head) 67 { 68 trace_event(event); 69 ··· 77 78 switch (event->header.type) { 79 case PERF_RECORD_SAMPLE: 80 + return ops->process_sample_event(event, session); 81 case PERF_RECORD_MMAP: 82 + return ops->process_mmap_event(event, session); 83 case PERF_RECORD_COMM: 84 + return ops->process_comm_event(event, session); 85 case PERF_RECORD_FORK: 86 + return ops->process_fork_event(event, session); 87 case PERF_RECORD_EXIT: 88 + return ops->process_exit_event(event, session); 89 case PERF_RECORD_LOST: 90 + return ops->process_lost_event(event, session); 91 case PERF_RECORD_READ: 92 + return ops->process_read_event(event, session); 93 case PERF_RECORD_THROTTLE: 94 + return ops->process_throttle_event(event, session); 95 case PERF_RECORD_UNTHROTTLE: 96 + return ops->process_unthrottle_event(event, session); 97 default: 98 + ops->total_unknown++; 99 return -1; 100 } 101 } ··· 129 return err; 130 } 131 132 + static struct thread *perf_session__register_idle_thread(struct perf_session *self) 133 + { 134 + struct thread *thread = perf_session__findnew(self, 0); 135 + 136 + if (!thread || thread__set_comm(thread, "swapper")) { 137 
+ pr_err("problem inserting idle task.\n"); 138 + thread = NULL; 139 + } 140 + 141 + return thread; 142 + } 143 + 144 int perf_session__process_events(struct perf_session *self, 145 + struct perf_event_ops *ops) 146 { 147 int err; 148 unsigned long head, shift; 149 unsigned long offset = 0; 150 size_t page_size; 151 event_t *event; 152 uint32_t size; 153 char *buf; 154 155 + if (perf_session__register_idle_thread(self) == NULL) 156 + return -ENOMEM; 157 + 158 + perf_event_ops__fill_defaults(ops); 159 160 page_size = getpagesize(); 161 162 head = self->header.data_offset; 163 + self->sample_type = perf_header__sample_type(&self->header); 164 165 err = -EINVAL; 166 + if (ops->sample_type_check && ops->sample_type_check(self) < 0) 167 goto out_err; 168 169 + if (!ops->full_paths) { 170 + char bf[PATH_MAX]; 171 + 172 + if (getcwd(bf, sizeof(bf)) == NULL) { 173 err = -errno; 174 + out_getcwd_err: 175 + pr_err("failed to get the current directory\n"); 176 goto out_err; 177 } 178 + self->cwd = strdup(bf); 179 + if (self->cwd == NULL) { 180 + err = -ENOMEM; 181 + goto out_getcwd_err; 182 + } 183 + self->cwdlen = strlen(self->cwd); 184 } 185 186 shift = page_size * (head / page_size); ··· 174 head -= shift; 175 176 remap: 177 + buf = mmap(NULL, page_size * self->mmap_window, PROT_READ, 178 MAP_SHARED, self->fd, offset); 179 if (buf == MAP_FAILED) { 180 pr_err("failed to mmap file\n"); ··· 189 if (!size) 190 size = 8; 191 192 + if (head + event->header.size >= page_size * self->mmap_window) { 193 int munmap_ret; 194 195 shift = page_size * (head / page_size); 196 197 + munmap_ret = munmap(buf, page_size * self->mmap_window); 198 assert(munmap_ret == 0); 199 200 offset += shift; ··· 209 (void *)(long)event->header.size, 210 event->header.type); 211 212 + if (!size || process_event(event, self, ops, offset, head) < 0) { 213 214 dump_printf("%p [%p]: skipping unknown header type: %d\n", 215 (void *)(offset + head),
-29
tools/perf/util/data_map.h
··· 1 - #ifndef __PERF_DATAMAP_H 2 - #define __PERF_DATAMAP_H 3 - 4 - #include "event.h" 5 - #include "header.h" 6 - #include "session.h" 7 - 8 - typedef int (*event_type_handler_t)(event_t *); 9 - 10 - struct perf_file_handler { 11 - event_type_handler_t process_sample_event; 12 - event_type_handler_t process_mmap_event; 13 - event_type_handler_t process_comm_event; 14 - event_type_handler_t process_fork_event; 15 - event_type_handler_t process_exit_event; 16 - event_type_handler_t process_lost_event; 17 - event_type_handler_t process_read_event; 18 - event_type_handler_t process_throttle_event; 19 - event_type_handler_t process_unthrottle_event; 20 - int (*sample_type_check)(u64 sample_type); 21 - unsigned long total_unknown; 22 - }; 23 - 24 - void register_perf_file_handler(struct perf_file_handler *handler); 25 - int perf_session__process_events(struct perf_session *self, 26 - int full_paths, int *cwdlen, char **cwd); 27 - int perf_header__read_build_ids(int input, u64 offset, u64 file_size); 28 - 29 - #endif
···
+111 -35
tools/perf/util/event.c
··· 1 #include <linux/types.h> 2 #include "event.h" 3 #include "debug.h" 4 #include "string.h" 5 #include "thread.h" 6 7 static pid_t event__synthesize_comm(pid_t pid, int full, 8 - int (*process)(event_t *event)) 9 { 10 event_t ev; 11 char filename[PATH_MAX]; ··· 59 if (!full) { 60 ev.comm.tid = pid; 61 62 - process(&ev); 63 goto out_fclose; 64 } 65 ··· 77 78 ev.comm.tid = pid; 79 80 - process(&ev); 81 } 82 closedir(tasks); 83 ··· 91 } 92 93 static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, 94 - int (*process)(event_t *event)) 95 { 96 char filename[PATH_MAX]; 97 FILE *fp; ··· 148 ev.mmap.pid = tgid; 149 ev.mmap.tid = pid; 150 151 - process(&ev); 152 } 153 } 154 ··· 156 return 0; 157 } 158 159 - int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)) 160 { 161 - pid_t tgid = event__synthesize_comm(pid, 1, process); 162 if (tgid == -1) 163 return -1; 164 - return event__synthesize_mmap_events(pid, tgid, process); 165 } 166 167 - void event__synthesize_threads(int (*process)(event_t *event)) 168 { 169 DIR *proc; 170 struct dirent dirent, *next; ··· 183 if (*end) /* only interested in proper numerical dirents */ 184 continue; 185 186 - event__synthesize_thread(pid, process); 187 } 188 189 closedir(proc); 190 } 191 192 - char *event__cwd; 193 - int event__cwdlen; 194 - 195 - struct events_stats event__stats; 196 - 197 - int event__process_comm(event_t *self) 198 { 199 - struct thread *thread = threads__findnew(self->comm.pid); 200 201 dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid); 202 203 - if (thread == NULL || thread__set_comm(thread, self->comm.comm)) { 204 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 205 return -1; 206 } ··· 231 return 0; 232 } 233 234 - int event__process_lost(event_t *self) 235 { 236 dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); 237 - event__stats.lost += self->lost.lost; 238 return 0; 239 } 240 241 - int event__process_mmap(event_t *self) 242 { 243 - 
struct thread *thread = threads__findnew(self->mmap.pid); 244 struct map *map = map__new(&self->mmap, MAP__FUNCTION, 245 - event__cwd, event__cwdlen); 246 247 dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", 248 self->mmap.pid, self->mmap.tid, ··· 259 return 0; 260 } 261 262 - int event__process_task(event_t *self) 263 { 264 - struct thread *thread = threads__findnew(self->fork.pid); 265 - struct thread *parent = threads__findnew(self->fork.ppid); 266 267 dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, 268 self->fork.ppid, self->fork.ptid); ··· 284 return 0; 285 } 286 287 - void thread__find_addr_location(struct thread *self, u8 cpumode, 288 enum map_type type, u64 addr, 289 struct addr_location *al, 290 symbol_filter_t filter) ··· 297 298 if (cpumode & PERF_RECORD_MISC_KERNEL) { 299 al->level = 'k'; 300 - mg = kmaps; 301 } else if (cpumode & PERF_RECORD_MISC_USER) 302 al->level = '.'; 303 else { ··· 318 * "[vdso]" dso, but for now lets use the old trick of looking 319 * in the whole kernel symbol list. 320 */ 321 - if ((long long)al->addr < 0 && mg != kmaps) { 322 - mg = kmaps; 323 goto try_again; 324 } 325 al->sym = NULL; 326 } else { 327 al->addr = al->map->map_ip(al->map, al->addr); 328 - al->sym = map__find_symbol(al->map, al->addr, filter); 329 } 330 } 331 332 - int event__preprocess_sample(const event_t *self, struct addr_location *al, 333 - symbol_filter_t filter) 334 { 335 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 336 - struct thread *thread = threads__findnew(self->ip.pid); 337 338 if (thread == NULL) 339 return -1; 340 341 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); 342 343 - thread__find_addr_location(thread, cpumode, MAP__FUNCTION, 344 self->ip.ip, al, filter); 345 dump_printf(" ...... dso: %s\n", 346 al->map ? al->map->dso->long_name : 347 al->level == 'H' ? "[hypervisor]" : "<not found>"); 348 return 0; 349 } 350
··· 1 #include <linux/types.h> 2 #include "event.h" 3 #include "debug.h" 4 + #include "session.h" 5 + #include "sort.h" 6 #include "string.h" 7 + #include "strlist.h" 8 #include "thread.h" 9 10 static pid_t event__synthesize_comm(pid_t pid, int full, 11 + int (*process)(event_t *event, 12 + struct perf_session *session), 13 + struct perf_session *session) 14 { 15 event_t ev; 16 char filename[PATH_MAX]; ··· 54 if (!full) { 55 ev.comm.tid = pid; 56 57 + process(&ev, session); 58 goto out_fclose; 59 } 60 ··· 72 73 ev.comm.tid = pid; 74 75 + process(&ev, session); 76 } 77 closedir(tasks); 78 ··· 86 } 87 88 static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, 89 + int (*process)(event_t *event, 90 + struct perf_session *session), 91 + struct perf_session *session) 92 { 93 char filename[PATH_MAX]; 94 FILE *fp; ··· 141 ev.mmap.pid = tgid; 142 ev.mmap.tid = pid; 143 144 + process(&ev, session); 145 } 146 } 147 ··· 149 return 0; 150 } 151 152 + int event__synthesize_thread(pid_t pid, 153 + int (*process)(event_t *event, 154 + struct perf_session *session), 155 + struct perf_session *session) 156 { 157 + pid_t tgid = event__synthesize_comm(pid, 1, process, session); 158 if (tgid == -1) 159 return -1; 160 + return event__synthesize_mmap_events(pid, tgid, process, session); 161 } 162 163 + void event__synthesize_threads(int (*process)(event_t *event, 164 + struct perf_session *session), 165 + struct perf_session *session) 166 { 167 DIR *proc; 168 struct dirent dirent, *next; ··· 171 if (*end) /* only interested in proper numerical dirents */ 172 continue; 173 174 + event__synthesize_thread(pid, process, session); 175 } 176 177 closedir(proc); 178 } 179 180 + static void thread__comm_adjust(struct thread *self) 181 { 182 + char *comm = self->comm; 183 + 184 + if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && 185 + (!symbol_conf.comm_list || 186 + strlist__has_entry(symbol_conf.comm_list, comm))) { 187 + unsigned int slen = strlen(comm); 188 + 189 + if 
(slen > comms__col_width) { 190 + comms__col_width = slen; 191 + threads__col_width = slen + 6; 192 + } 193 + } 194 + } 195 + 196 + static int thread__set_comm_adjust(struct thread *self, const char *comm) 197 + { 198 + int ret = thread__set_comm(self, comm); 199 + 200 + if (ret) 201 + return ret; 202 + 203 + thread__comm_adjust(self); 204 + 205 + return 0; 206 + } 207 + 208 + int event__process_comm(event_t *self, struct perf_session *session) 209 + { 210 + struct thread *thread = perf_session__findnew(session, self->comm.pid); 211 212 dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid); 213 214 + if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) { 215 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 216 return -1; 217 } ··· 196 return 0; 197 } 198 199 + int event__process_lost(event_t *self, struct perf_session *session) 200 { 201 dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); 202 + session->events_stats.lost += self->lost.lost; 203 return 0; 204 } 205 206 + int event__process_mmap(event_t *self, struct perf_session *session) 207 { 208 + struct thread *thread = perf_session__findnew(session, self->mmap.pid); 209 struct map *map = map__new(&self->mmap, MAP__FUNCTION, 210 + session->cwd, session->cwdlen); 211 212 dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", 213 self->mmap.pid, self->mmap.tid, ··· 224 return 0; 225 } 226 227 + int event__process_task(event_t *self, struct perf_session *session) 228 { 229 + struct thread *thread = perf_session__findnew(session, self->fork.pid); 230 + struct thread *parent = perf_session__findnew(session, self->fork.ppid); 231 232 dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, 233 self->fork.ppid, self->fork.ptid); ··· 249 return 0; 250 } 251 252 + void thread__find_addr_location(struct thread *self, 253 + struct perf_session *session, u8 cpumode, 254 enum map_type type, u64 addr, 255 struct addr_location *al, 256 symbol_filter_t filter) ··· 261 
262 if (cpumode & PERF_RECORD_MISC_KERNEL) { 263 al->level = 'k'; 264 + mg = &session->kmaps; 265 } else if (cpumode & PERF_RECORD_MISC_USER) 266 al->level = '.'; 267 else { ··· 282 * "[vdso]" dso, but for now lets use the old trick of looking 283 * in the whole kernel symbol list. 284 */ 285 + if ((long long)al->addr < 0 && mg != &session->kmaps) { 286 + mg = &session->kmaps; 287 goto try_again; 288 } 289 al->sym = NULL; 290 } else { 291 al->addr = al->map->map_ip(al->map, al->addr); 292 + al->sym = map__find_symbol(al->map, session, al->addr, filter); 293 } 294 } 295 296 + static void dso__calc_col_width(struct dso *self) 297 + { 298 + if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && 299 + (!symbol_conf.dso_list || 300 + strlist__has_entry(symbol_conf.dso_list, self->name))) { 301 + unsigned int slen = strlen(self->name); 302 + if (slen > dsos__col_width) 303 + dsos__col_width = slen; 304 + } 305 + 306 + self->slen_calculated = 1; 307 + } 308 + 309 + int event__preprocess_sample(const event_t *self, struct perf_session *session, 310 + struct addr_location *al, symbol_filter_t filter) 311 { 312 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 313 + struct thread *thread = perf_session__findnew(session, self->ip.pid); 314 315 if (thread == NULL) 316 return -1; 317 318 + if (symbol_conf.comm_list && 319 + !strlist__has_entry(symbol_conf.comm_list, thread->comm)) 320 + goto out_filtered; 321 + 322 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); 323 324 + thread__find_addr_location(thread, session, cpumode, MAP__FUNCTION, 325 self->ip.ip, al, filter); 326 dump_printf(" ...... dso: %s\n", 327 al->map ? al->map->dso->long_name : 328 al->level == 'H' ? "[hypervisor]" : "<not found>"); 329 + /* 330 + * We have to do this here as we may have a dso with no symbol hit that 331 + * has a name longer than the ones with symbols sampled. 
332 + */ 333 + if (al->map && !sort_dso.elide && !al->map->dso->slen_calculated) 334 + dso__calc_col_width(al->map->dso); 335 + 336 + if (symbol_conf.dso_list && 337 + (!al->map || !al->map->dso || 338 + !(strlist__has_entry(symbol_conf.dso_list, al->map->dso->short_name) || 339 + (al->map->dso->short_name != al->map->dso->long_name && 340 + strlist__has_entry(symbol_conf.dso_list, al->map->dso->long_name))))) 341 + goto out_filtered; 342 + 343 + if (symbol_conf.sym_list && al->sym && 344 + !strlist__has_entry(symbol_conf.sym_list, al->sym->name)) 345 + goto out_filtered; 346 + 347 + al->filtered = false; 348 + return 0; 349 + 350 + out_filtered: 351 + al->filtered = true; 352 return 0; 353 } 354
+21 -15
tools/perf/util/event.h
··· 149 struct map *map__clone(struct map *self); 150 int map__overlap(struct map *l, struct map *r); 151 size_t map__fprintf(struct map *self, FILE *fp); 152 - struct symbol *map__find_symbol(struct map *self, u64 addr, 153 - symbol_filter_t filter); 154 struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 155 symbol_filter_t filter); 156 void map__fixup_start(struct map *self); 157 void map__fixup_end(struct map *self); 158 159 - int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); 160 - void event__synthesize_threads(int (*process)(event_t *event)); 161 162 - extern char *event__cwd; 163 - extern int event__cwdlen; 164 - extern struct events_stats event__stats; 165 - extern unsigned long event__total[PERF_RECORD_MAX]; 166 - 167 - int event__process_comm(event_t *self); 168 - int event__process_lost(event_t *self); 169 - int event__process_mmap(event_t *self); 170 - int event__process_task(event_t *self); 171 172 struct addr_location; 173 - int event__preprocess_sample(const event_t *self, struct addr_location *al, 174 - symbol_filter_t filter); 175 int event__parse_sample(event_t *event, u64 type, struct sample_data *data); 176 177 #endif /* __PERF_RECORD_H */
··· 149 struct map *map__clone(struct map *self); 150 int map__overlap(struct map *l, struct map *r); 151 size_t map__fprintf(struct map *self, FILE *fp); 152 + 153 + struct perf_session; 154 + 155 + int map__load(struct map *self, struct perf_session *session, 156 + symbol_filter_t filter); 157 + struct symbol *map__find_symbol(struct map *self, struct perf_session *session, 158 + u64 addr, symbol_filter_t filter); 159 struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 160 + struct perf_session *session, 161 symbol_filter_t filter); 162 void map__fixup_start(struct map *self); 163 void map__fixup_end(struct map *self); 164 165 + int event__synthesize_thread(pid_t pid, 166 + int (*process)(event_t *event, 167 + struct perf_session *session), 168 + struct perf_session *session); 169 + void event__synthesize_threads(int (*process)(event_t *event, 170 + struct perf_session *session), 171 + struct perf_session *session); 172 173 + int event__process_comm(event_t *self, struct perf_session *session); 174 + int event__process_lost(event_t *self, struct perf_session *session); 175 + int event__process_mmap(event_t *self, struct perf_session *session); 176 + int event__process_task(event_t *self, struct perf_session *session); 177 178 struct addr_location; 179 + int event__preprocess_sample(const event_t *self, struct perf_session *session, 180 + struct addr_location *al, symbol_filter_t filter); 181 int event__parse_sample(event_t *event, u64 type, struct sample_data *data); 182 183 #endif /* __PERF_RECORD_H */
+1 -1
tools/perf/util/header.c
··· 8 #include "header.h" 9 #include "../perf.h" 10 #include "trace-event.h" 11 #include "symbol.h" 12 - #include "data_map.h" 13 #include "debug.h" 14 15 /*
··· 8 #include "header.h" 9 #include "../perf.h" 10 #include "trace-event.h" 11 + #include "session.h" 12 #include "symbol.h" 13 #include "debug.h" 14 15 /*
+489 -29
tools/perf/util/hist.c
··· 1 #include "hist.h" 2 - 3 - struct rb_root hist; 4 - struct rb_root collapse_hists; 5 - struct rb_root output_hists; 6 - int callchain; 7 8 struct callchain_param callchain_param = { 9 .mode = CHAIN_GRAPH_REL, ··· 12 * histogram, sorted on item, collects counts 13 */ 14 15 - struct hist_entry *__hist_entry__add(struct addr_location *al, 16 - struct symbol *sym_parent, 17 - u64 count, bool *hit) 18 { 19 - struct rb_node **p = &hist.rb_node; 20 struct rb_node *parent = NULL; 21 struct hist_entry *he; 22 struct hist_entry entry = { ··· 53 return NULL; 54 *he = entry; 55 rb_link_node(&he->rb_node, parent, p); 56 - rb_insert_color(&he->rb_node, &hist); 57 *hit = false; 58 return he; 59 } ··· 101 * collapse the histogram 102 */ 103 104 - void collapse__insert_entry(struct hist_entry *he) 105 { 106 - struct rb_node **p = &collapse_hists.rb_node; 107 struct rb_node *parent = NULL; 108 struct hist_entry *iter; 109 int64_t cmp; ··· 127 } 128 129 rb_link_node(&he->rb_node, parent, p); 130 - rb_insert_color(&he->rb_node, &collapse_hists); 131 } 132 133 - void collapse__resort(void) 134 { 135 struct rb_node *next; 136 struct hist_entry *n; 137 138 if (!sort__need_collapse) 139 return; 140 141 - next = rb_first(&hist); 142 while (next) { 143 n = rb_entry(next, struct hist_entry, rb_node); 144 next = rb_next(&n->rb_node); 145 146 - rb_erase(&n->rb_node, &hist); 147 - collapse__insert_entry(n); 148 } 149 } 150 151 /* 152 * reverse the map, sort on count. 
153 */ 154 155 - void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) 156 { 157 - struct rb_node **p = &output_hists.rb_node; 158 struct rb_node *parent = NULL; 159 struct hist_entry *iter; 160 161 - if (callchain) 162 callchain_param.sort(&he->sorted_chain, &he->callchain, 163 min_callchain_hits, &callchain_param); 164 ··· 180 } 181 182 rb_link_node(&he->rb_node, parent, p); 183 - rb_insert_color(&he->rb_node, &output_hists); 184 } 185 186 - void output__resort(u64 total_samples) 187 { 188 struct rb_node *next; 189 struct hist_entry *n; 190 - struct rb_root *tree = &hist; 191 u64 min_callchain_hits; 192 193 min_callchain_hits = 194 total_samples * (callchain_param.min_percent / 100); 195 196 - if (sort__need_collapse) 197 - tree = &collapse_hists; 198 - 199 - next = rb_first(tree); 200 201 while (next) { 202 n = rb_entry(next, struct hist_entry, rb_node); 203 next = rb_next(&n->rb_node); 204 205 - rb_erase(&n->rb_node, tree); 206 - output__insert_entry(n, min_callchain_hits); 207 } 208 }
··· 1 #include "hist.h" 2 + #include "session.h" 3 + #include "sort.h" 4 + #include <math.h> 5 6 struct callchain_param callchain_param = { 7 .mode = CHAIN_GRAPH_REL, ··· 14 * histogram, sorted on item, collects counts 15 */ 16 17 + struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, 18 + struct addr_location *al, 19 + struct symbol *sym_parent, 20 + u64 count, bool *hit) 21 { 22 + struct rb_node **p = &self->hists.rb_node; 23 struct rb_node *parent = NULL; 24 struct hist_entry *he; 25 struct hist_entry entry = { ··· 54 return NULL; 55 *he = entry; 56 rb_link_node(&he->rb_node, parent, p); 57 + rb_insert_color(&he->rb_node, &self->hists); 58 *hit = false; 59 return he; 60 } ··· 102 * collapse the histogram 103 */ 104 105 + static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he) 106 { 107 + struct rb_node **p = &root->rb_node; 108 struct rb_node *parent = NULL; 109 struct hist_entry *iter; 110 int64_t cmp; ··· 128 } 129 130 rb_link_node(&he->rb_node, parent, p); 131 + rb_insert_color(&he->rb_node, root); 132 } 133 134 + void perf_session__collapse_resort(struct perf_session *self) 135 { 136 + struct rb_root tmp; 137 struct rb_node *next; 138 struct hist_entry *n; 139 140 if (!sort__need_collapse) 141 return; 142 143 + tmp = RB_ROOT; 144 + next = rb_first(&self->hists); 145 + 146 while (next) { 147 n = rb_entry(next, struct hist_entry, rb_node); 148 next = rb_next(&n->rb_node); 149 150 + rb_erase(&n->rb_node, &self->hists); 151 + collapse__insert_entry(&tmp, n); 152 } 153 + 154 + self->hists = tmp; 155 } 156 157 /* 158 * reverse the map, sort on count. 
159 */ 160 161 + static void perf_session__insert_output_hist_entry(struct rb_root *root, 162 + struct hist_entry *he, 163 + u64 min_callchain_hits) 164 { 165 + struct rb_node **p = &root->rb_node; 166 struct rb_node *parent = NULL; 167 struct hist_entry *iter; 168 169 + if (symbol_conf.use_callchain) 170 callchain_param.sort(&he->sorted_chain, &he->callchain, 171 min_callchain_hits, &callchain_param); 172 ··· 174 } 175 176 rb_link_node(&he->rb_node, parent, p); 177 + rb_insert_color(&he->rb_node, root); 178 } 179 180 + void perf_session__output_resort(struct perf_session *self, u64 total_samples) 181 { 182 + struct rb_root tmp; 183 struct rb_node *next; 184 struct hist_entry *n; 185 u64 min_callchain_hits; 186 187 min_callchain_hits = 188 total_samples * (callchain_param.min_percent / 100); 189 190 + tmp = RB_ROOT; 191 + next = rb_first(&self->hists); 192 193 while (next) { 194 n = rb_entry(next, struct hist_entry, rb_node); 195 next = rb_next(&n->rb_node); 196 197 + rb_erase(&n->rb_node, &self->hists); 198 + perf_session__insert_output_hist_entry(&tmp, n, 199 + min_callchain_hits); 200 } 201 + 202 + self->hists = tmp; 203 + } 204 + 205 + static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) 206 + { 207 + int i; 208 + int ret = fprintf(fp, " "); 209 + 210 + for (i = 0; i < left_margin; i++) 211 + ret += fprintf(fp, " "); 212 + 213 + return ret; 214 + } 215 + 216 + static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, 217 + int left_margin) 218 + { 219 + int i; 220 + size_t ret = callchain__fprintf_left_margin(fp, left_margin); 221 + 222 + for (i = 0; i < depth; i++) 223 + if (depth_mask & (1 << i)) 224 + ret += fprintf(fp, "| "); 225 + else 226 + ret += fprintf(fp, " "); 227 + 228 + ret += fprintf(fp, "\n"); 229 + 230 + return ret; 231 + } 232 + 233 + static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, 234 + int depth, int depth_mask, int count, 235 + u64 total_samples, int hits, 236 + int 
left_margin) 237 + { 238 + int i; 239 + size_t ret = 0; 240 + 241 + ret += callchain__fprintf_left_margin(fp, left_margin); 242 + for (i = 0; i < depth; i++) { 243 + if (depth_mask & (1 << i)) 244 + ret += fprintf(fp, "|"); 245 + else 246 + ret += fprintf(fp, " "); 247 + if (!count && i == depth - 1) { 248 + double percent; 249 + 250 + percent = hits * 100.0 / total_samples; 251 + ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); 252 + } else 253 + ret += fprintf(fp, "%s", " "); 254 + } 255 + if (chain->sym) 256 + ret += fprintf(fp, "%s\n", chain->sym->name); 257 + else 258 + ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); 259 + 260 + return ret; 261 + } 262 + 263 + static struct symbol *rem_sq_bracket; 264 + static struct callchain_list rem_hits; 265 + 266 + static void init_rem_hits(void) 267 + { 268 + rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); 269 + if (!rem_sq_bracket) { 270 + fprintf(stderr, "Not enough memory to display remaining hits\n"); 271 + return; 272 + } 273 + 274 + strcpy(rem_sq_bracket->name, "[...]"); 275 + rem_hits.sym = rem_sq_bracket; 276 + } 277 + 278 + static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 279 + u64 total_samples, int depth, 280 + int depth_mask, int left_margin) 281 + { 282 + struct rb_node *node, *next; 283 + struct callchain_node *child; 284 + struct callchain_list *chain; 285 + int new_depth_mask = depth_mask; 286 + u64 new_total; 287 + u64 remaining; 288 + size_t ret = 0; 289 + int i; 290 + 291 + if (callchain_param.mode == CHAIN_GRAPH_REL) 292 + new_total = self->children_hit; 293 + else 294 + new_total = total_samples; 295 + 296 + remaining = new_total; 297 + 298 + node = rb_first(&self->rb_root); 299 + while (node) { 300 + u64 cumul; 301 + 302 + child = rb_entry(node, struct callchain_node, rb_node); 303 + cumul = cumul_hits(child); 304 + remaining -= cumul; 305 + 306 + /* 307 + * The depth mask manages the output of pipes that show 308 + * the depth. 
We don't want to keep the pipes of the current 309 + * level for the last child of this depth. 310 + * Except if we have remaining filtered hits. They will 311 + * supersede the last child 312 + */ 313 + next = rb_next(node); 314 + if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) 315 + new_depth_mask &= ~(1 << (depth - 1)); 316 + 317 + /* 318 + * But we keep the older depth mask for the line seperator 319 + * to keep the level link until we reach the last child 320 + */ 321 + ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, 322 + left_margin); 323 + i = 0; 324 + list_for_each_entry(chain, &child->val, list) { 325 + if (chain->ip >= PERF_CONTEXT_MAX) 326 + continue; 327 + ret += ipchain__fprintf_graph(fp, chain, depth, 328 + new_depth_mask, i++, 329 + new_total, 330 + cumul, 331 + left_margin); 332 + } 333 + ret += __callchain__fprintf_graph(fp, child, new_total, 334 + depth + 1, 335 + new_depth_mask | (1 << depth), 336 + left_margin); 337 + node = next; 338 + } 339 + 340 + if (callchain_param.mode == CHAIN_GRAPH_REL && 341 + remaining && remaining != new_total) { 342 + 343 + if (!rem_sq_bracket) 344 + return ret; 345 + 346 + new_depth_mask &= ~(1 << (depth - 1)); 347 + 348 + ret += ipchain__fprintf_graph(fp, &rem_hits, depth, 349 + new_depth_mask, 0, new_total, 350 + remaining, left_margin); 351 + } 352 + 353 + return ret; 354 + } 355 + 356 + static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 357 + u64 total_samples, int left_margin) 358 + { 359 + struct callchain_list *chain; 360 + bool printed = false; 361 + int i = 0; 362 + int ret = 0; 363 + 364 + list_for_each_entry(chain, &self->val, list) { 365 + if (chain->ip >= PERF_CONTEXT_MAX) 366 + continue; 367 + 368 + if (!i++ && sort__first_dimension == SORT_SYM) 369 + continue; 370 + 371 + if (!printed) { 372 + ret += callchain__fprintf_left_margin(fp, left_margin); 373 + ret += fprintf(fp, "|\n"); 374 + ret += callchain__fprintf_left_margin(fp, left_margin); 
375 + ret += fprintf(fp, "---"); 376 + 377 + left_margin += 3; 378 + printed = true; 379 + } else 380 + ret += callchain__fprintf_left_margin(fp, left_margin); 381 + 382 + if (chain->sym) 383 + ret += fprintf(fp, " %s\n", chain->sym->name); 384 + else 385 + ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); 386 + } 387 + 388 + ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); 389 + 390 + return ret; 391 + } 392 + 393 + static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self, 394 + u64 total_samples) 395 + { 396 + struct callchain_list *chain; 397 + size_t ret = 0; 398 + 399 + if (!self) 400 + return 0; 401 + 402 + ret += callchain__fprintf_flat(fp, self->parent, total_samples); 403 + 404 + 405 + list_for_each_entry(chain, &self->val, list) { 406 + if (chain->ip >= PERF_CONTEXT_MAX) 407 + continue; 408 + if (chain->sym) 409 + ret += fprintf(fp, " %s\n", chain->sym->name); 410 + else 411 + ret += fprintf(fp, " %p\n", 412 + (void *)(long)chain->ip); 413 + } 414 + 415 + return ret; 416 + } 417 + 418 + static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, 419 + u64 total_samples, int left_margin) 420 + { 421 + struct rb_node *rb_node; 422 + struct callchain_node *chain; 423 + size_t ret = 0; 424 + 425 + rb_node = rb_first(&self->sorted_chain); 426 + while (rb_node) { 427 + double percent; 428 + 429 + chain = rb_entry(rb_node, struct callchain_node, rb_node); 430 + percent = chain->hit * 100.0 / total_samples; 431 + switch (callchain_param.mode) { 432 + case CHAIN_FLAT: 433 + ret += percent_color_fprintf(fp, " %6.2f%%\n", 434 + percent); 435 + ret += callchain__fprintf_flat(fp, chain, total_samples); 436 + break; 437 + case CHAIN_GRAPH_ABS: /* Falldown */ 438 + case CHAIN_GRAPH_REL: 439 + ret += callchain__fprintf_graph(fp, chain, total_samples, 440 + left_margin); 441 + case CHAIN_NONE: 442 + default: 443 + break; 444 + } 445 + ret += fprintf(fp, "\n"); 446 + rb_node = rb_next(rb_node); 447 + } 
448 + 449 + return ret; 450 + } 451 + 452 + static size_t hist_entry__fprintf(struct hist_entry *self, 453 + struct perf_session *session, 454 + struct perf_session *pair_session, 455 + bool show_displacement, 456 + long displacement, FILE *fp) 457 + { 458 + struct sort_entry *se; 459 + u64 count, total; 460 + const char *sep = symbol_conf.field_sep; 461 + size_t ret; 462 + 463 + if (symbol_conf.exclude_other && !self->parent) 464 + return 0; 465 + 466 + if (pair_session) { 467 + count = self->pair ? self->pair->count : 0; 468 + total = pair_session->events_stats.total; 469 + } else { 470 + count = self->count; 471 + total = session->events_stats.total; 472 + } 473 + 474 + if (total) 475 + ret = percent_color_fprintf(fp, sep ? "%.2f" : " %6.2f%%", 476 + (count * 100.0) / total); 477 + else 478 + ret = fprintf(fp, sep ? "%lld" : "%12lld ", count); 479 + 480 + if (symbol_conf.show_nr_samples) { 481 + if (sep) 482 + fprintf(fp, "%c%lld", *sep, count); 483 + else 484 + fprintf(fp, "%11lld", count); 485 + } 486 + 487 + if (pair_session) { 488 + char bf[32]; 489 + double old_percent = 0, new_percent = 0, diff; 490 + 491 + if (total > 0) 492 + old_percent = (count * 100.0) / total; 493 + if (session->events_stats.total > 0) 494 + new_percent = (self->count * 100.0) / session->events_stats.total; 495 + 496 + diff = new_percent - old_percent; 497 + 498 + if (fabs(diff) >= 0.01) 499 + snprintf(bf, sizeof(bf), "%+4.2F%%", diff); 500 + else 501 + snprintf(bf, sizeof(bf), " "); 502 + 503 + if (sep) 504 + ret += fprintf(fp, "%c%s", *sep, bf); 505 + else 506 + ret += fprintf(fp, "%11.11s", bf); 507 + 508 + if (show_displacement) { 509 + if (displacement) 510 + snprintf(bf, sizeof(bf), "%+4ld", displacement); 511 + else 512 + snprintf(bf, sizeof(bf), " "); 513 + 514 + if (sep) 515 + fprintf(fp, "%c%s", *sep, bf); 516 + else 517 + fprintf(fp, "%6.6s", bf); 518 + } 519 + } 520 + 521 + list_for_each_entry(se, &hist_entry__sort_list, list) { 522 + if (se->elide) 523 + continue; 524 + 
525 + fprintf(fp, "%s", sep ?: " "); 526 + ret += se->print(fp, self, se->width ? *se->width : 0); 527 + } 528 + 529 + ret += fprintf(fp, "\n"); 530 + 531 + if (symbol_conf.use_callchain) { 532 + int left_margin = 0; 533 + 534 + if (sort__first_dimension == SORT_COMM) { 535 + se = list_first_entry(&hist_entry__sort_list, typeof(*se), 536 + list); 537 + left_margin = se->width ? *se->width : 0; 538 + left_margin -= thread__comm_len(self->thread); 539 + } 540 + 541 + hist_entry_callchain__fprintf(fp, self, session->events_stats.total, 542 + left_margin); 543 + } 544 + 545 + return ret; 546 + } 547 + 548 + size_t perf_session__fprintf_hists(struct perf_session *self, 549 + struct perf_session *pair, 550 + bool show_displacement, FILE *fp) 551 + { 552 + struct sort_entry *se; 553 + struct rb_node *nd; 554 + size_t ret = 0; 555 + unsigned long position = 1; 556 + long displacement = 0; 557 + unsigned int width; 558 + const char *sep = symbol_conf.field_sep; 559 + char *col_width = symbol_conf.col_width_list_str; 560 + 561 + init_rem_hits(); 562 + 563 + fprintf(fp, "# %s", pair ? 
"Baseline" : "Overhead"); 564 + 565 + if (symbol_conf.show_nr_samples) { 566 + if (sep) 567 + fprintf(fp, "%cSamples", *sep); 568 + else 569 + fputs(" Samples ", fp); 570 + } 571 + 572 + if (pair) { 573 + if (sep) 574 + ret += fprintf(fp, "%cDelta", *sep); 575 + else 576 + ret += fprintf(fp, " Delta "); 577 + 578 + if (show_displacement) { 579 + if (sep) 580 + ret += fprintf(fp, "%cDisplacement", *sep); 581 + else 582 + ret += fprintf(fp, " Displ"); 583 + } 584 + } 585 + 586 + list_for_each_entry(se, &hist_entry__sort_list, list) { 587 + if (se->elide) 588 + continue; 589 + if (sep) { 590 + fprintf(fp, "%c%s", *sep, se->header); 591 + continue; 592 + } 593 + width = strlen(se->header); 594 + if (se->width) { 595 + if (symbol_conf.col_width_list_str) { 596 + if (col_width) { 597 + *se->width = atoi(col_width); 598 + col_width = strchr(col_width, ','); 599 + if (col_width) 600 + ++col_width; 601 + } 602 + } 603 + width = *se->width = max(*se->width, width); 604 + } 605 + fprintf(fp, " %*s", width, se->header); 606 + } 607 + fprintf(fp, "\n"); 608 + 609 + if (sep) 610 + goto print_entries; 611 + 612 + fprintf(fp, "# ........"); 613 + if (symbol_conf.show_nr_samples) 614 + fprintf(fp, " .........."); 615 + if (pair) { 616 + fprintf(fp, " .........."); 617 + if (show_displacement) 618 + fprintf(fp, " ....."); 619 + } 620 + list_for_each_entry(se, &hist_entry__sort_list, list) { 621 + unsigned int i; 622 + 623 + if (se->elide) 624 + continue; 625 + 626 + fprintf(fp, " "); 627 + if (se->width) 628 + width = *se->width; 629 + else 630 + width = strlen(se->header); 631 + for (i = 0; i < width; i++) 632 + fprintf(fp, "."); 633 + } 634 + 635 + fprintf(fp, "\n#\n"); 636 + 637 + print_entries: 638 + for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { 639 + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 640 + 641 + if (show_displacement) { 642 + if (h->pair != NULL) 643 + displacement = ((long)h->pair->position - 644 + (long)position); 645 + else 646 + 
displacement = 0; 647 + ++position; 648 + } 649 + ret += hist_entry__fprintf(h, self, pair, show_displacement, 650 + displacement, fp); 651 + } 652 + 653 + free(rem_sq_bracket); 654 + 655 + return ret; 656 }
+16 -39
tools/perf/util/hist.h
··· 1 #ifndef __PERF_HIST_H 2 #define __PERF_HIST_H 3 - #include "../builtin.h" 4 5 - #include "util.h" 6 - 7 - #include "color.h" 8 - #include <linux/list.h> 9 - #include "cache.h" 10 - #include <linux/rbtree.h> 11 - #include "symbol.h" 12 - #include "string.h" 13 #include "callchain.h" 14 - #include "strlist.h" 15 - #include "values.h" 16 17 - #include "../perf.h" 18 - #include "debug.h" 19 - #include "header.h" 20 - 21 - #include "parse-options.h" 22 - #include "parse-events.h" 23 - 24 - #include "thread.h" 25 - #include "sort.h" 26 - 27 - extern struct rb_root hist; 28 - extern struct rb_root collapse_hists; 29 - extern struct rb_root output_hists; 30 - extern int callchain; 31 extern struct callchain_param callchain_param; 32 - extern unsigned long total; 33 - extern unsigned long total_mmap; 34 - extern unsigned long total_comm; 35 - extern unsigned long total_fork; 36 - extern unsigned long total_unknown; 37 - extern unsigned long total_lost; 38 39 - struct hist_entry *__hist_entry__add(struct addr_location *al, 40 - struct symbol *parent, 41 - u64 count, bool *hit); 42 extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); 43 extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); 44 - extern void hist_entry__free(struct hist_entry *); 45 - extern void collapse__insert_entry(struct hist_entry *); 46 - extern void collapse__resort(void); 47 - extern void output__insert_entry(struct hist_entry *, u64); 48 - extern void output__resort(u64); 49 50 #endif /* __PERF_HIST_H */
··· 1 #ifndef __PERF_HIST_H 2 #define __PERF_HIST_H 3 4 + #include <linux/types.h> 5 #include "callchain.h" 6 7 extern struct callchain_param callchain_param; 8 9 + struct perf_session; 10 + struct hist_entry; 11 + struct addr_location; 12 + struct symbol; 13 + 14 + struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, 15 + struct addr_location *al, 16 + struct symbol *parent, 17 + u64 count, bool *hit); 18 extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); 19 extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); 20 + void hist_entry__free(struct hist_entry *); 21 22 + void perf_session__output_resort(struct perf_session *self, u64 total_samples); 23 + void perf_session__collapse_resort(struct perf_session *self); 24 + size_t perf_session__fprintf_hists(struct perf_session *self, 25 + struct perf_session *pair, 26 + bool show_displacement, FILE *fp); 27 #endif /* __PERF_HIST_H */
+12 -6
tools/perf/util/map.c
··· 104 105 #define DSO__DELETED "(deleted)" 106 107 - static int map__load(struct map *self, symbol_filter_t filter) 108 { 109 const char *name = self->dso->long_name; 110 - int nr = dso__load(self->dso, self, filter); 111 112 if (nr < 0) { 113 if (self->dso->has_build_id) { 114 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; ··· 148 return 0; 149 } 150 151 - struct symbol *map__find_symbol(struct map *self, u64 addr, 152 - symbol_filter_t filter) 153 { 154 - if (!dso__loaded(self->dso, self->type) && map__load(self, filter) < 0) 155 return NULL; 156 157 return dso__find_symbol(self->dso, self->type, addr); 158 } 159 160 struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 161 symbol_filter_t filter) 162 { 163 - if (!dso__loaded(self->dso, self->type) && map__load(self, filter) < 0) 164 return NULL; 165 166 if (!dso__sorted_by_name(self->dso, self->type))
··· 104 105 #define DSO__DELETED "(deleted)" 106 107 + int map__load(struct map *self, struct perf_session *session, 108 + symbol_filter_t filter) 109 { 110 const char *name = self->dso->long_name; 111 + int nr; 112 113 + if (dso__loaded(self->dso, self->type)) 114 + return 0; 115 + 116 + nr = dso__load(self->dso, self, session, filter); 117 if (nr < 0) { 118 if (self->dso->has_build_id) { 119 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; ··· 143 return 0; 144 } 145 146 + struct symbol *map__find_symbol(struct map *self, struct perf_session *session, 147 + u64 addr, symbol_filter_t filter) 148 { 149 + if (map__load(self, session, filter) < 0) 150 return NULL; 151 152 return dso__find_symbol(self->dso, self->type, addr); 153 } 154 155 struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 156 + struct perf_session *session, 157 symbol_filter_t filter) 158 { 159 + if (map__load(self, session, filter) < 0) 160 return NULL; 161 162 if (!dso__sorted_by_name(self->dso, self->type))
+141 -66
tools/perf/util/probe-event.c
··· 69 char c, nc = 0; 70 /* 71 * <Syntax> 72 - * perf probe SRC:LN 73 - * perf probe FUNC[+OFFS|%return][@SRC] 74 */ 75 76 ptr = strpbrk(arg, ":+@%"); 77 if (ptr) { ··· 163 } 164 165 /* Parse perf-probe event definition */ 166 - int parse_perf_probe_event(const char *str, struct probe_point *pp) 167 { 168 char **argv; 169 - int argc, i, need_dwarf = 0; 170 171 argv = argv_split(str, &argc); 172 if (!argv) ··· 180 /* Parse probe point */ 181 parse_perf_probe_probepoint(argv[0], pp); 182 if (pp->file || pp->line) 183 - need_dwarf = 1; 184 185 /* Copy arguments and ensure return probe has no C argument */ 186 pp->nr_args = argc - 1; ··· 193 if (pp->retprobe) 194 semantic_error("You can't specify local" 195 " variable for kretprobe"); 196 - need_dwarf = 1; 197 } 198 } 199 200 argv_free(argv); 201 - return need_dwarf; 202 } 203 204 /* Parse kprobe_events event into struct probe_point */ 205 - void parse_trace_kprobe_event(const char *str, char **group, char **event, 206 - struct probe_point *pp) 207 { 208 char pr; 209 char *p; ··· 217 218 /* Scan event and group name. */ 219 ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", 220 - &pr, (float *)(void *)group, (float *)(void *)event); 221 if (ret != 3) 222 semantic_error("Failed to parse event name: %s", argv[0]); 223 - pr_debug("Group:%s Event:%s probe:%c\n", *group, *event, pr); 224 - 225 - if (!pp) 226 - goto end; 227 228 pp->retprobe = (pr == 'r'); 229 230 /* Scan function name and offset */ 231 - ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, &pp->offset); 232 if (ret == 1) 233 pp->offset = 0; 234 ··· 246 die("Failed to copy argument."); 247 } 248 249 - end: 250 argv_free(argv); 251 } 252 253 - int synthesize_perf_probe_event(struct probe_point *pp) 254 { 255 char *buf; 256 char offs[64] = "", line[64] = ""; 257 - int i, len, ret; 258 259 pp->probes[0] = buf = zalloc(MAX_CMDLEN); 260 if (!buf) ··· 275 offs, pp->retprobe ? 
"%return" : "", line); 276 else 277 ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line); 278 - if (ret <= 0) 279 - goto error; 280 - len = ret; 281 282 for (i = 0; i < pp->nr_args; i++) { 283 ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", 284 pp->args[i]); ··· 305 return pp->found; 306 error: 307 free(pp->probes[0]); 308 309 return ret; 310 } ··· 335 return pp->found; 336 error: 337 free(pp->probes[0]); 338 339 return ret; 340 } ··· 395 { 396 int i; 397 398 if (pp->function) 399 free(pp->function); 400 if (pp->file) ··· 413 } 414 415 /* Show an event */ 416 - static void show_perf_probe_event(const char *group, const char *event, 417 - const char *place, struct probe_point *pp) 418 { 419 - int i; 420 char buf[128]; 421 422 - e_snprintf(buf, 128, "%s:%s", group, event); 423 printf(" %-40s (on %s", buf, place); 424 425 if (pp->nr_args > 0) { ··· 435 /* List up current perf-probe events */ 436 void show_perf_probe_events(void) 437 { 438 - unsigned int i; 439 - int fd, nr; 440 - char *group, *event; 441 struct probe_point pp; 442 struct strlist *rawlist; 443 struct str_node *ent; ··· 444 rawlist = get_trace_kprobe_event_rawlist(fd); 445 close(fd); 446 447 - for (i = 0; i < strlist__nr_entries(rawlist); i++) { 448 - ent = strlist__entry(rawlist, i); 449 - parse_trace_kprobe_event(ent->s, &group, &event, &pp); 450 /* Synthesize only event probe point */ 451 - nr = pp.nr_args; 452 - pp.nr_args = 0; 453 - synthesize_perf_probe_event(&pp); 454 - pp.nr_args = nr; 455 /* Show an event */ 456 - show_perf_probe_event(group, event, pp.probes[0], &pp); 457 - free(group); 458 - free(event); 459 clear_probe_point(&pp); 460 } 461 ··· 459 /* Get current perf-probe event names */ 460 static struct strlist *get_perf_event_names(int fd, bool include_group) 461 { 462 - unsigned int i; 463 - char *group, *event; 464 char buf[128]; 465 struct strlist *sl, *rawlist; 466 struct str_node *ent; 467 468 rawlist = get_trace_kprobe_event_rawlist(fd); 469 470 sl = strlist__new(true, 
NULL); 471 - for (i = 0; i < strlist__nr_entries(rawlist); i++) { 472 - ent = strlist__entry(rawlist, i); 473 - parse_trace_kprobe_event(ent->s, &group, &event, NULL); 474 if (include_group) { 475 - if (e_snprintf(buf, 128, "%s:%s", group, event) < 0) 476 die("Failed to copy group:event name."); 477 strlist__add(sl, buf); 478 } else 479 - strlist__add(sl, event); 480 - free(group); 481 - free(event); 482 } 483 484 strlist__delete(rawlist); ··· 496 } 497 498 static void get_new_event_name(char *buf, size_t len, const char *base, 499 - struct strlist *namelist) 500 { 501 int i, ret; 502 ··· 506 die("snprintf() failed: %s", strerror(-ret)); 507 if (!strlist__has_entry(namelist, buf)) 508 return; 509 510 /* Try to add suffix */ 511 for (i = 1; i < MAX_EVENT_INDEX; i++) { ··· 525 die("Too many events are on the same function."); 526 } 527 528 - void add_trace_kprobe_events(struct probe_point *probes, int nr_probes) 529 { 530 int i, j, fd; 531 struct probe_point *pp; 532 char buf[MAX_CMDLEN]; 533 char event[64]; 534 struct strlist *namelist; 535 536 fd = open_kprobe_events(O_RDWR, O_APPEND); 537 /* Get current event names */ ··· 541 542 for (j = 0; j < nr_probes; j++) { 543 pp = probes + j; 544 for (i = 0; i < pp->found; i++) { 545 /* Get an unused new event name */ 546 - get_new_event_name(event, 64, pp->function, namelist); 547 snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n", 548 pp->retprobe ? 'r' : 'p', 549 - PERFPROBE_GROUP, event, 550 pp->probes[i]); 551 write_trace_kprobe_event(fd, buf); 552 printf("Added new event:\n"); 553 /* Get the first parameter (probe-point) */ 554 sscanf(pp->probes[i], "%s", buf); 555 - show_perf_probe_event(PERFPROBE_GROUP, event, 556 - buf, pp); 557 /* Add added event name to namelist */ 558 strlist__add(namelist, event); 559 } 560 } 561 /* Show how to use the event. 
*/ ··· 580 close(fd); 581 } 582 583 static void del_trace_kprobe_event(int fd, const char *group, 584 const char *event, struct strlist *namelist) 585 { 586 char buf[128]; 587 588 if (e_snprintf(buf, 128, "%s:%s", group, event) < 0) 589 die("Failed to copy event."); 590 - if (!strlist__has_entry(namelist, buf)) { 591 - pr_warning("Warning: event \"%s\" is not found.\n", buf); 592 - return; 593 - } 594 - /* Convert from perf-probe event to trace-kprobe event */ 595 - if (e_snprintf(buf, 128, "-:%s/%s", group, event) < 0) 596 - die("Failed to copy event."); 597 598 - write_trace_kprobe_event(fd, buf); 599 - printf("Remove event: %s:%s\n", group, event); 600 } 601 602 void del_trace_kprobe_events(struct strlist *dellist) 603 { 604 int fd; 605 - unsigned int i; 606 const char *group, *event; 607 char *p, *str; 608 struct str_node *ent; ··· 638 /* Get current event names */ 639 namelist = get_perf_event_names(fd, true); 640 641 - for (i = 0; i < strlist__nr_entries(dellist); i++) { 642 - ent = strlist__entry(dellist, i); 643 str = strdup(ent->s); 644 if (!str) 645 die("Failed to copy event."); 646 p = strchr(str, ':'); 647 if (p) { 648 group = str; 649 *p = '\0'; 650 event = p + 1; 651 } else { 652 - group = PERFPROBE_GROUP; 653 event = str; 654 } 655 del_trace_kprobe_event(fd, group, event, namelist); 656 free(str); 657 }
··· 69 char c, nc = 0; 70 /* 71 * <Syntax> 72 + * perf probe [EVENT=]SRC:LN 73 + * perf probe [EVENT=]FUNC[+OFFS|%return][@SRC] 74 + * 75 + * TODO:Group name support 76 */ 77 + 78 + ptr = strchr(arg, '='); 79 + if (ptr) { /* Event name */ 80 + *ptr = '\0'; 81 + tmp = ptr + 1; 82 + ptr = strchr(arg, ':'); 83 + if (ptr) /* Group name is not supported yet. */ 84 + semantic_error("Group name is not supported yet."); 85 + pp->event = strdup(arg); 86 + arg = tmp; 87 + } 88 89 ptr = strpbrk(arg, ":+@%"); 90 if (ptr) { ··· 150 } 151 152 /* Parse perf-probe event definition */ 153 + void parse_perf_probe_event(const char *str, struct probe_point *pp, 154 + bool *need_dwarf) 155 { 156 char **argv; 157 + int argc, i; 158 + 159 + *need_dwarf = false; 160 161 argv = argv_split(str, &argc); 162 if (!argv) ··· 164 /* Parse probe point */ 165 parse_perf_probe_probepoint(argv[0], pp); 166 if (pp->file || pp->line) 167 + *need_dwarf = true; 168 169 /* Copy arguments and ensure return probe has no C argument */ 170 pp->nr_args = argc - 1; ··· 177 if (pp->retprobe) 178 semantic_error("You can't specify local" 179 " variable for kretprobe"); 180 + *need_dwarf = true; 181 } 182 } 183 184 argv_free(argv); 185 } 186 187 /* Parse kprobe_events event into struct probe_point */ 188 + void parse_trace_kprobe_event(const char *str, struct probe_point *pp) 189 { 190 char pr; 191 char *p; ··· 203 204 /* Scan event and group name. 
*/ 205 ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", 206 + &pr, (float *)(void *)&pp->group, 207 + (float *)(void *)&pp->event); 208 if (ret != 3) 209 semantic_error("Failed to parse event name: %s", argv[0]); 210 + pr_debug("Group:%s Event:%s probe:%c\n", pp->group, pp->event, pr); 211 212 pp->retprobe = (pr == 'r'); 213 214 /* Scan function name and offset */ 215 + ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, 216 + &pp->offset); 217 if (ret == 1) 218 pp->offset = 0; 219 ··· 233 die("Failed to copy argument."); 234 } 235 236 argv_free(argv); 237 } 238 239 + /* Synthesize only probe point (not argument) */ 240 + int synthesize_perf_probe_point(struct probe_point *pp) 241 { 242 char *buf; 243 char offs[64] = "", line[64] = ""; 244 + int ret; 245 246 pp->probes[0] = buf = zalloc(MAX_CMDLEN); 247 if (!buf) ··· 262 offs, pp->retprobe ? "%return" : "", line); 263 else 264 ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line); 265 + if (ret <= 0) { 266 + error: 267 + free(pp->probes[0]); 268 + pp->probes[0] = NULL; 269 + } 270 + return ret; 271 + } 272 273 + int synthesize_perf_probe_event(struct probe_point *pp) 274 + { 275 + char *buf; 276 + int i, len, ret; 277 + 278 + len = synthesize_perf_probe_point(pp); 279 + if (len < 0) 280 + return 0; 281 + 282 + buf = pp->probes[0]; 283 for (i = 0; i < pp->nr_args; i++) { 284 ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", 285 pp->args[i]); ··· 278 return pp->found; 279 error: 280 free(pp->probes[0]); 281 + pp->probes[0] = NULL; 282 283 return ret; 284 } ··· 307 return pp->found; 308 error: 309 free(pp->probes[0]); 310 + pp->probes[0] = NULL; 311 312 return ret; 313 } ··· 366 { 367 int i; 368 369 + if (pp->event) 370 + free(pp->event); 371 + if (pp->group) 372 + free(pp->group); 373 if (pp->function) 374 free(pp->function); 375 if (pp->file) ··· 380 } 381 382 /* Show an event */ 383 + static void show_perf_probe_event(const char *event, const char *place, 384 + struct probe_point *pp) 385 { 386 
+ int i, ret; 387 char buf[128]; 388 389 + ret = e_snprintf(buf, 128, "%s:%s", pp->group, event); 390 + if (ret < 0) 391 + die("Failed to copy event: %s", strerror(-ret)); 392 printf(" %-40s (on %s", buf, place); 393 394 if (pp->nr_args > 0) { ··· 400 /* List up current perf-probe events */ 401 void show_perf_probe_events(void) 402 { 403 + int fd; 404 struct probe_point pp; 405 struct strlist *rawlist; 406 struct str_node *ent; ··· 411 rawlist = get_trace_kprobe_event_rawlist(fd); 412 close(fd); 413 414 + strlist__for_each(ent, rawlist) { 415 + parse_trace_kprobe_event(ent->s, &pp); 416 /* Synthesize only event probe point */ 417 + synthesize_perf_probe_point(&pp); 418 /* Show an event */ 419 + show_perf_probe_event(pp.event, pp.probes[0], &pp); 420 clear_probe_point(&pp); 421 } 422 ··· 432 /* Get current perf-probe event names */ 433 static struct strlist *get_perf_event_names(int fd, bool include_group) 434 { 435 char buf[128]; 436 struct strlist *sl, *rawlist; 437 struct str_node *ent; 438 + struct probe_point pp; 439 440 + memset(&pp, 0, sizeof(pp)); 441 rawlist = get_trace_kprobe_event_rawlist(fd); 442 443 sl = strlist__new(true, NULL); 444 + strlist__for_each(ent, rawlist) { 445 + parse_trace_kprobe_event(ent->s, &pp); 446 if (include_group) { 447 + if (e_snprintf(buf, 128, "%s:%s", pp.group, 448 + pp.event) < 0) 449 die("Failed to copy group:event name."); 450 strlist__add(sl, buf); 451 } else 452 + strlist__add(sl, pp.event); 453 + clear_probe_point(&pp); 454 } 455 456 strlist__delete(rawlist); ··· 470 } 471 472 static void get_new_event_name(char *buf, size_t len, const char *base, 473 + struct strlist *namelist, bool allow_suffix) 474 { 475 int i, ret; 476 ··· 480 die("snprintf() failed: %s", strerror(-ret)); 481 if (!strlist__has_entry(namelist, buf)) 482 return; 483 + 484 + if (!allow_suffix) { 485 + pr_warning("Error: event \"%s\" already exists. 
" 486 + "(Use -f to force duplicates.)\n", base); 487 + die("Can't add new event."); 488 + } 489 490 /* Try to add suffix */ 491 for (i = 1; i < MAX_EVENT_INDEX; i++) { ··· 493 die("Too many events are on the same function."); 494 } 495 496 + void add_trace_kprobe_events(struct probe_point *probes, int nr_probes, 497 + bool force_add) 498 { 499 int i, j, fd; 500 struct probe_point *pp; 501 char buf[MAX_CMDLEN]; 502 char event[64]; 503 struct strlist *namelist; 504 + bool allow_suffix; 505 506 fd = open_kprobe_events(O_RDWR, O_APPEND); 507 /* Get current event names */ ··· 507 508 for (j = 0; j < nr_probes; j++) { 509 pp = probes + j; 510 + if (!pp->event) 511 + pp->event = strdup(pp->function); 512 + if (!pp->group) 513 + pp->group = strdup(PERFPROBE_GROUP); 514 + DIE_IF(!pp->event || !pp->group); 515 + /* If force_add is true, suffix search is allowed */ 516 + allow_suffix = force_add; 517 for (i = 0; i < pp->found; i++) { 518 /* Get an unused new event name */ 519 + get_new_event_name(event, 64, pp->event, namelist, 520 + allow_suffix); 521 snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n", 522 pp->retprobe ? 'r' : 'p', 523 + pp->group, event, 524 pp->probes[i]); 525 write_trace_kprobe_event(fd, buf); 526 printf("Added new event:\n"); 527 /* Get the first parameter (probe-point) */ 528 sscanf(pp->probes[i], "%s", buf); 529 + show_perf_probe_event(event, buf, pp); 530 /* Add added event name to namelist */ 531 strlist__add(namelist, event); 532 + /* 533 + * Probes after the first probe which comes from same 534 + * user input are always allowed to add suffix, because 535 + * there might be several addresses corresponding to 536 + * one code line. 537 + */ 538 + allow_suffix = true; 539 } 540 } 541 /* Show how to use the event. 
*/ ··· 532 close(fd); 533 } 534 535 + static void __del_trace_kprobe_event(int fd, struct str_node *ent) 536 + { 537 + char *p; 538 + char buf[128]; 539 + 540 + /* Convert from perf-probe event to trace-kprobe event */ 541 + if (e_snprintf(buf, 128, "-:%s", ent->s) < 0) 542 + die("Failed to copy event."); 543 + p = strchr(buf + 2, ':'); 544 + if (!p) 545 + die("Internal error: %s should have ':' but not.", ent->s); 546 + *p = '/'; 547 + 548 + write_trace_kprobe_event(fd, buf); 549 + printf("Remove event: %s\n", ent->s); 550 + } 551 + 552 static void del_trace_kprobe_event(int fd, const char *group, 553 const char *event, struct strlist *namelist) 554 { 555 char buf[128]; 556 + struct str_node *ent, *n; 557 + int found = 0; 558 559 if (e_snprintf(buf, 128, "%s:%s", group, event) < 0) 560 die("Failed to copy event."); 561 562 + if (strpbrk(buf, "*?")) { /* Glob-exp */ 563 + strlist__for_each_safe(ent, n, namelist) 564 + if (strglobmatch(ent->s, buf)) { 565 + found++; 566 + __del_trace_kprobe_event(fd, ent); 567 + strlist__remove(namelist, ent); 568 + } 569 + } else { 570 + ent = strlist__find(namelist, buf); 571 + if (ent) { 572 + found++; 573 + __del_trace_kprobe_event(fd, ent); 574 + strlist__remove(namelist, ent); 575 + } 576 + } 577 + if (found == 0) 578 + pr_info("Info: event \"%s\" does not exist, could not remove it.\n", buf); 579 } 580 581 void del_trace_kprobe_events(struct strlist *dellist) 582 { 583 int fd; 584 const char *group, *event; 585 char *p, *str; 586 struct str_node *ent; ··· 564 /* Get current event names */ 565 namelist = get_perf_event_names(fd, true); 566 567 + strlist__for_each(ent, dellist) { 568 str = strdup(ent->s); 569 if (!str) 570 die("Failed to copy event."); 571 + pr_debug("Parsing: %s\n", str); 572 p = strchr(str, ':'); 573 if (p) { 574 group = str; 575 *p = '\0'; 576 event = p + 1; 577 } else { 578 + group = "*"; 579 event = str; 580 } 581 + pr_debug("Group: %s, Event: %s\n", group, event); 582 del_trace_kprobe_event(fd, group, 
event, namelist); 583 free(str); 584 }
+7 -4
tools/perf/util/probe-event.h
··· 1 #ifndef _PROBE_EVENT_H 2 #define _PROBE_EVENT_H 3 4 #include "probe-finder.h" 5 #include "strlist.h" 6 7 - extern int parse_perf_probe_event(const char *str, struct probe_point *pp); 8 extern int synthesize_perf_probe_event(struct probe_point *pp); 9 - extern void parse_trace_kprobe_event(const char *str, char **group, 10 - char **event, struct probe_point *pp); 11 extern int synthesize_trace_kprobe_event(struct probe_point *pp); 12 - extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes); 13 extern void del_trace_kprobe_events(struct strlist *dellist); 14 extern void show_perf_probe_events(void); 15
··· 1 #ifndef _PROBE_EVENT_H 2 #define _PROBE_EVENT_H 3 4 + #include <stdbool.h> 5 #include "probe-finder.h" 6 #include "strlist.h" 7 8 + extern void parse_perf_probe_event(const char *str, struct probe_point *pp, 9 + bool *need_dwarf); 10 + extern int synthesize_perf_probe_point(struct probe_point *pp); 11 extern int synthesize_perf_probe_event(struct probe_point *pp); 12 + extern void parse_trace_kprobe_event(const char *str, struct probe_point *pp); 13 extern int synthesize_trace_kprobe_event(struct probe_point *pp); 14 + extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes, 15 + bool force_add); 16 extern void del_trace_kprobe_events(struct strlist *dellist); 17 extern void show_perf_probe_events(void); 18
+1 -3
tools/perf/util/probe-finder.c
··· 687 struct probe_finder pf = {.pp = pp}; 688 689 ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); 690 - if (ret != DW_DLV_OK) { 691 - pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO.\n"); 692 return -ENOENT; 693 - } 694 695 pp->found = 0; 696 while (++cu_number) {
··· 687 struct probe_finder pf = {.pp = pp}; 688 689 ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); 690 + if (ret != DW_DLV_OK) 691 return -ENOENT; 692 693 pp->found = 0; 694 while (++cu_number) {
+3
tools/perf/util/probe-finder.h
··· 12 } 13 14 struct probe_point { 15 /* Inputs */ 16 char *file; /* File name */ 17 int line; /* Line number */
··· 12 } 13 14 struct probe_point { 15 + char *event; /* Event name */ 16 + char *group; /* Event group */ 17 + 18 /* Inputs */ 19 char *file; /* File name */ 20 int line; /* Line number */
+77 -7
tools/perf/util/session.c
··· 4 #include <sys/types.h> 5 6 #include "session.h" 7 #include "util.h" 8 9 static int perf_session__open(struct perf_session *self, bool force) ··· 51 52 struct perf_session *perf_session__new(const char *filename, int mode, bool force) 53 { 54 - size_t len = strlen(filename) + 1; 55 struct perf_session *self = zalloc(sizeof(*self) + len); 56 57 if (self == NULL) 58 goto out; 59 60 if (perf_header__init(&self->header) < 0) 61 - goto out_delete; 62 63 memcpy(self->filename, filename, len); 64 65 - if (mode == O_RDONLY && perf_session__open(self, force) < 0) { 66 - perf_session__delete(self); 67 - self = NULL; 68 - } 69 out: 70 return self; 71 - out_delete: 72 free(self); 73 return NULL; 74 } 75 ··· 87 { 88 perf_header__exit(&self->header); 89 close(self->fd); 90 free(self); 91 }
··· 4 #include <sys/types.h> 5 6 #include "session.h" 7 + #include "sort.h" 8 #include "util.h" 9 10 static int perf_session__open(struct perf_session *self, bool force) ··· 50 51 struct perf_session *perf_session__new(const char *filename, int mode, bool force) 52 { 53 + size_t len = filename ? strlen(filename) + 1 : 0; 54 struct perf_session *self = zalloc(sizeof(*self) + len); 55 56 if (self == NULL) 57 goto out; 58 59 if (perf_header__init(&self->header) < 0) 60 + goto out_free; 61 62 memcpy(self->filename, filename, len); 63 + self->threads = RB_ROOT; 64 + self->last_match = NULL; 65 + self->mmap_window = 32; 66 + self->cwd = NULL; 67 + self->cwdlen = 0; 68 + map_groups__init(&self->kmaps); 69 70 + if (perf_session__create_kernel_maps(self) < 0) 71 + goto out_delete; 72 + 73 + if (mode == O_RDONLY && perf_session__open(self, force) < 0) 74 + goto out_delete; 75 out: 76 return self; 77 + out_free: 78 free(self); 79 + return NULL; 80 + out_delete: 81 + perf_session__delete(self); 82 return NULL; 83 } 84 ··· 76 { 77 perf_header__exit(&self->header); 78 close(self->fd); 79 + free(self->cwd); 80 free(self); 81 + } 82 + 83 + static bool symbol__match_parent_regex(struct symbol *sym) 84 + { 85 + if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 86 + return 1; 87 + 88 + return 0; 89 + } 90 + 91 + struct symbol **perf_session__resolve_callchain(struct perf_session *self, 92 + struct thread *thread, 93 + struct ip_callchain *chain, 94 + struct symbol **parent) 95 + { 96 + u8 cpumode = PERF_RECORD_MISC_USER; 97 + struct symbol **syms = NULL; 98 + unsigned int i; 99 + 100 + if (symbol_conf.use_callchain) { 101 + syms = calloc(chain->nr, sizeof(*syms)); 102 + if (!syms) { 103 + fprintf(stderr, "Can't allocate memory for symbols\n"); 104 + exit(-1); 105 + } 106 + } 107 + 108 + for (i = 0; i < chain->nr; i++) { 109 + u64 ip = chain->ips[i]; 110 + struct addr_location al; 111 + 112 + if (ip >= PERF_CONTEXT_MAX) { 113 + switch (ip) { 114 + case PERF_CONTEXT_HV: 
115 + cpumode = PERF_RECORD_MISC_HYPERVISOR; break; 116 + case PERF_CONTEXT_KERNEL: 117 + cpumode = PERF_RECORD_MISC_KERNEL; break; 118 + case PERF_CONTEXT_USER: 119 + cpumode = PERF_RECORD_MISC_USER; break; 120 + default: 121 + break; 122 + } 123 + continue; 124 + } 125 + 126 + thread__find_addr_location(thread, self, cpumode, 127 + MAP__FUNCTION, ip, &al, NULL); 128 + if (al.sym != NULL) { 129 + if (sort__has_parent && !*parent && 130 + symbol__match_parent_regex(al.sym)) 131 + *parent = al.sym; 132 + if (!symbol_conf.use_callchain) 133 + break; 134 + syms[i] = al.sym; 135 + } 136 + } 137 + 138 + return syms; 139 }
+45
tools/perf/util/session.h
··· 1 #ifndef __PERF_SESSION_H 2 #define __PERF_SESSION_H 3 4 #include "header.h" 5 6 struct perf_session { 7 struct perf_header header; 8 unsigned long size; 9 int fd; 10 char filename[0]; 11 }; 12 13 struct perf_session *perf_session__new(const char *filename, int mode, bool force); 14 void perf_session__delete(struct perf_session *self); 15 16 #endif /* __PERF_SESSION_H */
··· 1 #ifndef __PERF_SESSION_H 2 #define __PERF_SESSION_H 3 4 + #include "event.h" 5 #include "header.h" 6 + #include "thread.h" 7 + #include <linux/rbtree.h> 8 + #include "../../../include/linux/perf_event.h" 9 + 10 + struct ip_callchain; 11 + struct thread; 12 + struct symbol; 13 14 struct perf_session { 15 struct perf_header header; 16 unsigned long size; 17 + unsigned long mmap_window; 18 + struct map_groups kmaps; 19 + struct rb_root threads; 20 + struct thread *last_match; 21 + struct events_stats events_stats; 22 + unsigned long event_total[PERF_RECORD_MAX]; 23 + struct rb_root hists; 24 + u64 sample_type; 25 int fd; 26 + int cwdlen; 27 + char *cwd; 28 char filename[0]; 29 + }; 30 + 31 + typedef int (*event_op)(event_t *self, struct perf_session *session); 32 + 33 + struct perf_event_ops { 34 + event_op process_sample_event; 35 + event_op process_mmap_event; 36 + event_op process_comm_event; 37 + event_op process_fork_event; 38 + event_op process_exit_event; 39 + event_op process_lost_event; 40 + event_op process_read_event; 41 + event_op process_throttle_event; 42 + event_op process_unthrottle_event; 43 + int (*sample_type_check)(struct perf_session *session); 44 + unsigned long total_unknown; 45 + bool full_paths; 46 }; 47 48 struct perf_session *perf_session__new(const char *filename, int mode, bool force); 49 void perf_session__delete(struct perf_session *self); 50 + 51 + int perf_session__process_events(struct perf_session *self, 52 + struct perf_event_ops *event_ops); 53 + 54 + struct symbol **perf_session__resolve_callchain(struct perf_session *self, 55 + struct thread *thread, 56 + struct ip_callchain *chain, 57 + struct symbol **parent); 58 + 59 + int perf_header__read_build_ids(int input, u64 offset, u64 file_size); 60 61 #endif /* __PERF_SESSION_H */
+26
tools/perf/util/sort.c
··· 288 289 return -ESRCH; 290 }
··· 288 289 return -ESRCH; 290 } 291 + 292 + void setup_sorting(const char * const usagestr[], const struct option *opts) 293 + { 294 + char *tmp, *tok, *str = strdup(sort_order); 295 + 296 + for (tok = strtok_r(str, ", ", &tmp); 297 + tok; tok = strtok_r(NULL, ", ", &tmp)) { 298 + if (sort_dimension__add(tok) < 0) { 299 + error("Unknown --sort key: `%s'", tok); 300 + usage_with_options(usagestr, opts); 301 + } 302 + } 303 + 304 + free(str); 305 + } 306 + 307 + void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 308 + const char *list_name, FILE *fp) 309 + { 310 + if (list && strlist__nr_entries(list) == 1) { 311 + if (fp != NULL) 312 + fprintf(fp, "# %s: %s\n", list_name, 313 + strlist__entry(list, 0)->s); 314 + self->elide = true; 315 + } 316 + }
+10 -2
tools/perf/util/sort.h
··· 49 struct symbol *sym; 50 u64 ip; 51 char level; 52 - struct symbol *parent; 53 struct callchain_node callchain; 54 - struct rb_root sorted_chain; 55 }; 56 57 enum sort_type { ··· 85 extern struct sort_entry sort_thread; 86 extern struct list_head hist_entry__sort_list; 87 88 extern int repsep_fprintf(FILE *fp, const char *fmt, ...); 89 extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); 90 extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); ··· 101 extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); 102 extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); 103 extern int sort_dimension__add(const char *); 104 105 #endif /* __PERF_SORT_H */
··· 49 struct symbol *sym; 50 u64 ip; 51 char level; 52 + struct symbol *parent; 53 struct callchain_node callchain; 54 + union { 55 + unsigned long position; 56 + struct hist_entry *pair; 57 + struct rb_root sorted_chain; 58 + }; 59 }; 60 61 enum sort_type { ··· 81 extern struct sort_entry sort_thread; 82 extern struct list_head hist_entry__sort_list; 83 84 + void setup_sorting(const char * const usagestr[], const struct option *opts); 85 + 86 extern int repsep_fprintf(FILE *fp, const char *fmt, ...); 87 extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); 88 extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); ··· 95 extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); 96 extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); 97 extern int sort_dimension__add(const char *); 98 + void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 99 + const char *list_name, FILE *fp); 100 101 #endif /* __PERF_SORT_H */
+25
tools/perf/util/string.c
··· 226 argv_free(argv); 227 return NULL; 228 }
··· 226 argv_free(argv); 227 return NULL; 228 } 229 + 230 + /* Glob expression pattern matching */ 231 + bool strglobmatch(const char *str, const char *pat) 232 + { 233 + while (*str && *pat && *pat != '*') { 234 + if (*pat == '?') { 235 + str++; 236 + pat++; 237 + } else 238 + if (*str++ != *pat++) 239 + return false; 240 + } 241 + /* Check wild card */ 242 + if (*pat == '*') { 243 + while (*pat == '*') 244 + pat++; 245 + if (!*pat) /* Tail wild card matches all */ 246 + return true; 247 + while (*str) 248 + if (strglobmatch(str++, pat)) 249 + return true; 250 + } 251 + return !*str && !*pat; 252 + } 253 +
+2
tools/perf/util/string.h
··· 1 #ifndef __PERF_STRING_H_ 2 #define __PERF_STRING_H_ 3 4 #include "types.h" 5 6 int hex2u64(const char *ptr, u64 *val); ··· 9 s64 perf_atoll(const char *str); 10 char **argv_split(const char *str, int *argcp); 11 void argv_free(char **argv); 12 13 #define _STR(x) #x 14 #define STR(x) _STR(x)
··· 1 #ifndef __PERF_STRING_H_ 2 #define __PERF_STRING_H_ 3 4 + #include <stdbool.h> 5 #include "types.h" 6 7 int hex2u64(const char *ptr, u64 *val); ··· 8 s64 perf_atoll(const char *str); 9 char **argv_split(const char *str, int *argcp); 10 void argv_free(char **argv); 11 + bool strglobmatch(const char *str, const char *pat); 12 13 #define _STR(x) #x 14 #define STR(x) _STR(x)
+3 -3
tools/perf/util/strlist.c
··· 102 str_node__delete(sn, self->dupstr); 103 } 104 105 - bool strlist__has_entry(struct strlist *self, const char *entry) 106 { 107 struct rb_node **p = &self->entries.rb_node; 108 struct rb_node *parent = NULL; ··· 120 else if (rc < 0) 121 p = &(*p)->rb_right; 122 else 123 - return true; 124 } 125 126 - return false; 127 } 128 129 static int strlist__parse_list_entry(struct strlist *self, const char *s)
··· 102 str_node__delete(sn, self->dupstr); 103 } 104 105 + struct str_node *strlist__find(struct strlist *self, const char *entry) 106 { 107 struct rb_node **p = &self->entries.rb_node; 108 struct rb_node *parent = NULL; ··· 120 else if (rc < 0) 121 p = &(*p)->rb_right; 122 else 123 + return sn; 124 } 125 126 + return NULL; 127 } 128 129 static int strlist__parse_list_entry(struct strlist *self, const char *s)
+40 -1
tools/perf/util/strlist.h
··· 23 int strlist__add(struct strlist *self, const char *str); 24 25 struct str_node *strlist__entry(const struct strlist *self, unsigned int idx); 26 - bool strlist__has_entry(struct strlist *self, const char *entry); 27 28 static inline bool strlist__empty(const struct strlist *self) 29 { ··· 39 { 40 return self->nr_entries; 41 } 42 43 int strlist__parse_list(struct strlist *self, const char *s); 44 #endif /* __PERF_STRLIST_H */
··· 23 int strlist__add(struct strlist *self, const char *str); 24 25 struct str_node *strlist__entry(const struct strlist *self, unsigned int idx); 26 + struct str_node *strlist__find(struct strlist *self, const char *entry); 27 + 28 + static inline bool strlist__has_entry(struct strlist *self, const char *entry) 29 + { 30 + return strlist__find(self, entry) != NULL; 31 + } 32 33 static inline bool strlist__empty(const struct strlist *self) 34 { ··· 34 { 35 return self->nr_entries; 36 } 37 + 38 + /* For strlist iteration */ 39 + static inline struct str_node *strlist__first(struct strlist *self) 40 + { 41 + struct rb_node *rn = rb_first(&self->entries); 42 + return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 43 + } 44 + static inline struct str_node *strlist__next(struct str_node *sn) 45 + { 46 + struct rb_node *rn; 47 + if (!sn) 48 + return NULL; 49 + rn = rb_next(&sn->rb_node); 50 + return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 51 + } 52 + 53 + /** 54 + * strlist_for_each - iterate over a strlist 55 + * @pos: the &struct str_node to use as a loop cursor. 56 + * @self: the &struct strlist for loop. 57 + */ 58 + #define strlist__for_each(pos, self) \ 59 + for (pos = strlist__first(self); pos; pos = strlist__next(pos)) 60 + 61 + /** 62 + * strlist_for_each_safe - iterate over a strlist safe against removal of 63 + * str_node 64 + * @pos: the &struct str_node to use as a loop cursor. 65 + * @n: another &struct str_node to use as temporary storage. 66 + * @self: the &struct strlist for loop. 67 + */ 68 + #define strlist__for_each_safe(pos, n, self) \ 69 + for (pos = strlist__first(self), n = strlist__next(pos); pos;\ 70 + pos = n, n = strlist__next(n)) 71 72 int strlist__parse_list(struct strlist *self, const char *s); 73 #endif /* __PERF_STRLIST_H */
+87 -57
tools/perf/util/symbol.c
··· 1 #include "util.h" 2 #include "../perf.h" 3 #include "string.h" 4 #include "symbol.h" 5 #include "thread.h" ··· 33 static void dsos__add(struct list_head *head, struct dso *dso); 34 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 35 static int dso__load_kernel_sym(struct dso *self, struct map *map, 36 - struct map_groups *mg, symbol_filter_t filter); 37 - unsigned int symbol__priv_size; 38 static int vmlinux_path__nr_entries; 39 static char **vmlinux_path; 40 41 - static struct symbol_conf symbol_conf__defaults = { 42 .use_modules = true, 43 .try_vmlinux_path = true, 44 }; 45 - 46 - static struct map_groups kmaps_mem; 47 - struct map_groups *kmaps = &kmaps_mem; 48 49 bool dso__loaded(const struct dso *self, enum map_type type) 50 { ··· 131 static struct symbol *symbol__new(u64 start, u64 len, const char *name) 132 { 133 size_t namelen = strlen(name) + 1; 134 - struct symbol *self = zalloc(symbol__priv_size + 135 sizeof(*self) + namelen); 136 if (self == NULL) 137 return NULL; 138 139 - if (symbol__priv_size) 140 - self = ((void *)self) + symbol__priv_size; 141 142 self->start = start; 143 self->end = len ? start + len - 1 : start; ··· 151 152 static void symbol__delete(struct symbol *self) 153 { 154 - free(((void *)self) - symbol__priv_size); 155 } 156 157 static size_t symbol__fprintf(struct symbol *self, FILE *fp) ··· 455 * the original ELF section names vmlinux have. 
456 */ 457 static int dso__split_kallsyms(struct dso *self, struct map *map, 458 - struct map_groups *mg, symbol_filter_t filter) 459 { 460 struct map *curr_map = map; 461 struct symbol *pos; ··· 472 473 module = strchr(pos->name, '\t'); 474 if (module) { 475 - if (!mg->use_modules) 476 goto discard_symbol; 477 478 *module++ = '\0'; 479 480 if (strcmp(self->name, module)) { 481 - curr_map = map_groups__find_by_name(mg, map->type, module); 482 if (curr_map == NULL) { 483 pr_debug("/proc/{kallsyms,modules} " 484 "inconsistency!\n"); ··· 509 } 510 511 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 512 - map_groups__insert(mg, curr_map); 513 ++kernel_range; 514 } 515 ··· 530 531 532 static int dso__load_kallsyms(struct dso *self, struct map *map, 533 - struct map_groups *mg, symbol_filter_t filter) 534 { 535 if (dso__load_all_kallsyms(self, map) < 0) 536 return -1; ··· 538 symbols__fixup_end(&self->symbols[map->type]); 539 self->origin = DSO__ORIG_KERNEL; 540 541 - return dso__split_kallsyms(self, map, mg, filter); 542 - } 543 - 544 - size_t kernel_maps__fprintf(FILE *fp) 545 - { 546 - size_t printed = fprintf(fp, "Kernel maps:\n"); 547 - printed += map_groups__fprintf_maps(kmaps, fp); 548 - return printed + fprintf(fp, "END kernel maps\n"); 549 } 550 551 static int dso__load_perf_map(struct dso *self, struct map *map, ··· 865 } 866 867 static int dso__load_sym(struct dso *self, struct map *map, 868 - struct map_groups *mg, const char *name, int fd, 869 symbol_filter_t filter, int kernel, int kmodule) 870 { 871 struct map *curr_map = map; ··· 969 snprintf(dso_name, sizeof(dso_name), 970 "%s%s", self->short_name, section_name); 971 972 - curr_map = map_groups__find_by_name(mg, map->type, dso_name); 973 if (curr_map == NULL) { 974 u64 start = sym.st_value; 975 ··· 988 curr_map->map_ip = identity__map_ip; 989 curr_map->unmap_ip = identity__map_ip; 990 curr_dso->origin = DSO__ORIG_KERNEL; 991 - map_groups__insert(kmaps, curr_map); 992 dsos__add(&dsos__kernel, 
curr_dso); 993 } else 994 curr_dso = curr_map->dso; ··· 1203 return origin[self->origin]; 1204 } 1205 1206 - int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) 1207 { 1208 int size = PATH_MAX; 1209 char *name; ··· 1215 dso__set_loaded(self, map->type); 1216 1217 if (self->kernel) 1218 - return dso__load_kernel_sym(self, map, kmaps, filter); 1219 1220 name = malloc(size); 1221 if (!name) ··· 1316 return NULL; 1317 } 1318 1319 - static int dsos__set_modules_path_dir(char *dirname) 1320 { 1321 struct dirent *dent; 1322 DIR *dir = opendir(dirname); ··· 1336 1337 snprintf(path, sizeof(path), "%s/%s", 1338 dirname, dent->d_name); 1339 - if (dsos__set_modules_path_dir(path) < 0) 1340 goto failure; 1341 } else { 1342 char *dot = strrchr(dent->d_name, '.'), ··· 1350 (int)(dot - dent->d_name), dent->d_name); 1351 1352 strxfrchar(dso_name, '-', '_'); 1353 - map = map_groups__find_by_name(kmaps, MAP__FUNCTION, dso_name); 1354 if (map == NULL) 1355 continue; 1356 ··· 1370 return -1; 1371 } 1372 1373 - static int dsos__set_modules_path(void) 1374 { 1375 struct utsname uts; 1376 char modules_path[PATH_MAX]; ··· 1381 snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", 1382 uts.release); 1383 1384 - return dsos__set_modules_path_dir(modules_path); 1385 } 1386 1387 /* ··· 1403 return self; 1404 } 1405 1406 - static int map_groups__create_module_maps(struct map_groups *self) 1407 { 1408 char *line = NULL; 1409 size_t n; ··· 1460 dso->has_build_id = true; 1461 1462 dso->origin = DSO__ORIG_KMODULE; 1463 - map_groups__insert(self, map); 1464 dsos__add(&dsos__kernel, dso); 1465 } 1466 1467 free(line); 1468 fclose(file); 1469 1470 - return dsos__set_modules_path(); 1471 1472 out_delete_line: 1473 free(line); ··· 1476 } 1477 1478 static int dso__load_vmlinux(struct dso *self, struct map *map, 1479 - struct map_groups *mg, 1480 const char *vmlinux, symbol_filter_t filter) 1481 { 1482 int err = -1, fd; ··· 1510 return -1; 1511 1512 
dso__set_loaded(self, map->type); 1513 - err = dso__load_sym(self, map, mg, self->long_name, fd, filter, 1, 0); 1514 close(fd); 1515 1516 return err; 1517 } 1518 1519 static int dso__load_kernel_sym(struct dso *self, struct map *map, 1520 - struct map_groups *mg, symbol_filter_t filter) 1521 { 1522 int err; 1523 bool is_kallsyms; ··· 1527 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1528 vmlinux_path__nr_entries); 1529 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1530 - err = dso__load_vmlinux(self, map, mg, 1531 vmlinux_path[i], filter); 1532 if (err > 0) { 1533 pr_debug("Using %s for symbols\n", ··· 1543 if (is_kallsyms) 1544 goto do_kallsyms; 1545 1546 - err = dso__load_vmlinux(self, map, mg, self->long_name, filter); 1547 if (err <= 0) { 1548 pr_info("The file %s cannot be used, " 1549 "trying to use /proc/kallsyms...", self->long_name); 1550 do_kallsyms: 1551 - err = dso__load_kallsyms(self, map, mg, filter); 1552 if (err > 0 && !is_kallsyms) 1553 dso__set_long_name(self, strdup("[kernel.kallsyms]")); 1554 } ··· 1741 return -1; 1742 } 1743 1744 - int symbol__init(struct symbol_conf *conf) 1745 { 1746 - const struct symbol_conf *pconf = conf ?: &symbol_conf__defaults; 1747 1748 elf_version(EV_CURRENT); 1749 - symbol__priv_size = pconf->priv_size; 1750 - if (pconf->sort_by_name) 1751 - symbol__priv_size += (sizeof(struct symbol_name_rb_node) - 1752 - sizeof(struct symbol)); 1753 - map_groups__init(kmaps); 1754 1755 - if (pconf->try_vmlinux_path && vmlinux_path__init() < 0) 1756 return -1; 1757 1758 - if (map_groups__create_kernel_maps(kmaps, pconf->vmlinux_name) < 0) { 1759 - vmlinux_path__exit(); 1760 return -1; 1761 } 1762 1763 - kmaps->use_modules = pconf->use_modules; 1764 - if (pconf->use_modules && map_groups__create_module_maps(kmaps) < 0) 1765 - pr_debug("Failed to load list of modules in use, " 1766 - "continuing...\n"); 1767 /* 1768 * Now that we have all the maps created, just set the ->end of them: 1769 */ 1770 - 
map_groups__fixup_end(kmaps); 1771 return 0; 1772 }
··· 1 #include "util.h" 2 #include "../perf.h" 3 + #include "session.h" 4 + #include "sort.h" 5 #include "string.h" 6 #include "symbol.h" 7 #include "thread.h" ··· 31 static void dsos__add(struct list_head *head, struct dso *dso); 32 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 33 static int dso__load_kernel_sym(struct dso *self, struct map *map, 34 + struct perf_session *session, symbol_filter_t filter); 35 static int vmlinux_path__nr_entries; 36 static char **vmlinux_path; 37 38 + struct symbol_conf symbol_conf = { 39 + .exclude_other = true, 40 .use_modules = true, 41 .try_vmlinux_path = true, 42 }; 43 44 bool dso__loaded(const struct dso *self, enum map_type type) 45 { ··· 132 static struct symbol *symbol__new(u64 start, u64 len, const char *name) 133 { 134 size_t namelen = strlen(name) + 1; 135 + struct symbol *self = zalloc(symbol_conf.priv_size + 136 sizeof(*self) + namelen); 137 if (self == NULL) 138 return NULL; 139 140 + if (symbol_conf.priv_size) 141 + self = ((void *)self) + symbol_conf.priv_size; 142 143 self->start = start; 144 self->end = len ? start + len - 1 : start; ··· 152 153 static void symbol__delete(struct symbol *self) 154 { 155 + free(((void *)self) - symbol_conf.priv_size); 156 } 157 158 static size_t symbol__fprintf(struct symbol *self, FILE *fp) ··· 456 * the original ELF section names vmlinux have. 
457 */ 458 static int dso__split_kallsyms(struct dso *self, struct map *map, 459 + struct perf_session *session, symbol_filter_t filter) 460 { 461 struct map *curr_map = map; 462 struct symbol *pos; ··· 473 474 module = strchr(pos->name, '\t'); 475 if (module) { 476 + if (!symbol_conf.use_modules) 477 goto discard_symbol; 478 479 *module++ = '\0'; 480 481 if (strcmp(self->name, module)) { 482 + curr_map = map_groups__find_by_name(&session->kmaps, map->type, module); 483 if (curr_map == NULL) { 484 pr_debug("/proc/{kallsyms,modules} " 485 "inconsistency!\n"); ··· 510 } 511 512 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 513 + map_groups__insert(&session->kmaps, curr_map); 514 ++kernel_range; 515 } 516 ··· 531 532 533 static int dso__load_kallsyms(struct dso *self, struct map *map, 534 + struct perf_session *session, symbol_filter_t filter) 535 { 536 if (dso__load_all_kallsyms(self, map) < 0) 537 return -1; ··· 539 symbols__fixup_end(&self->symbols[map->type]); 540 self->origin = DSO__ORIG_KERNEL; 541 542 + return dso__split_kallsyms(self, map, session, filter); 543 } 544 545 static int dso__load_perf_map(struct dso *self, struct map *map, ··· 873 } 874 875 static int dso__load_sym(struct dso *self, struct map *map, 876 + struct perf_session *session, const char *name, int fd, 877 symbol_filter_t filter, int kernel, int kmodule) 878 { 879 struct map *curr_map = map; ··· 977 snprintf(dso_name, sizeof(dso_name), 978 "%s%s", self->short_name, section_name); 979 980 + curr_map = map_groups__find_by_name(&session->kmaps, map->type, dso_name); 981 if (curr_map == NULL) { 982 u64 start = sym.st_value; 983 ··· 996 curr_map->map_ip = identity__map_ip; 997 curr_map->unmap_ip = identity__map_ip; 998 curr_dso->origin = DSO__ORIG_KERNEL; 999 + map_groups__insert(&session->kmaps, curr_map); 1000 dsos__add(&dsos__kernel, curr_dso); 1001 } else 1002 curr_dso = curr_map->dso; ··· 1211 return origin[self->origin]; 1212 } 1213 1214 + int dso__load(struct dso *self, struct 
map *map, struct perf_session *session, 1215 + symbol_filter_t filter) 1216 { 1217 int size = PATH_MAX; 1218 char *name; ··· 1222 dso__set_loaded(self, map->type); 1223 1224 if (self->kernel) 1225 + return dso__load_kernel_sym(self, map, session, filter); 1226 1227 name = malloc(size); 1228 if (!name) ··· 1323 return NULL; 1324 } 1325 1326 + static int perf_session__set_modules_path_dir(struct perf_session *self, char *dirname) 1327 { 1328 struct dirent *dent; 1329 DIR *dir = opendir(dirname); ··· 1343 1344 snprintf(path, sizeof(path), "%s/%s", 1345 dirname, dent->d_name); 1346 + if (perf_session__set_modules_path_dir(self, path) < 0) 1347 goto failure; 1348 } else { 1349 char *dot = strrchr(dent->d_name, '.'), ··· 1357 (int)(dot - dent->d_name), dent->d_name); 1358 1359 strxfrchar(dso_name, '-', '_'); 1360 + map = map_groups__find_by_name(&self->kmaps, MAP__FUNCTION, dso_name); 1361 if (map == NULL) 1362 continue; 1363 ··· 1377 return -1; 1378 } 1379 1380 + static int perf_session__set_modules_path(struct perf_session *self) 1381 { 1382 struct utsname uts; 1383 char modules_path[PATH_MAX]; ··· 1388 snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", 1389 uts.release); 1390 1391 + return perf_session__set_modules_path_dir(self, modules_path); 1392 } 1393 1394 /* ··· 1410 return self; 1411 } 1412 1413 + static int perf_session__create_module_maps(struct perf_session *self) 1414 { 1415 char *line = NULL; 1416 size_t n; ··· 1467 dso->has_build_id = true; 1468 1469 dso->origin = DSO__ORIG_KMODULE; 1470 + map_groups__insert(&self->kmaps, map); 1471 dsos__add(&dsos__kernel, dso); 1472 } 1473 1474 free(line); 1475 fclose(file); 1476 1477 + return perf_session__set_modules_path(self); 1478 1479 out_delete_line: 1480 free(line); ··· 1483 } 1484 1485 static int dso__load_vmlinux(struct dso *self, struct map *map, 1486 + struct perf_session *session, 1487 const char *vmlinux, symbol_filter_t filter) 1488 { 1489 int err = -1, fd; ··· 1517 return -1; 1518 
1519 dso__set_loaded(self, map->type); 1520 + err = dso__load_sym(self, map, session, self->long_name, fd, filter, 1, 0); 1521 close(fd); 1522 1523 return err; 1524 } 1525 1526 static int dso__load_kernel_sym(struct dso *self, struct map *map, 1527 + struct perf_session *session, symbol_filter_t filter) 1528 { 1529 int err; 1530 bool is_kallsyms; ··· 1534 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1535 vmlinux_path__nr_entries); 1536 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1537 + err = dso__load_vmlinux(self, map, session, 1538 vmlinux_path[i], filter); 1539 if (err > 0) { 1540 pr_debug("Using %s for symbols\n", ··· 1550 if (is_kallsyms) 1551 goto do_kallsyms; 1552 1553 + err = dso__load_vmlinux(self, map, session, self->long_name, filter); 1554 if (err <= 0) { 1555 pr_info("The file %s cannot be used, " 1556 "trying to use /proc/kallsyms...", self->long_name); 1557 do_kallsyms: 1558 + err = dso__load_kallsyms(self, map, session, filter); 1559 if (err > 0 && !is_kallsyms) 1560 dso__set_long_name(self, strdup("[kernel.kallsyms]")); 1561 } ··· 1748 return -1; 1749 } 1750 1751 + static int setup_list(struct strlist **list, const char *list_str, 1752 + const char *list_name) 1753 { 1754 + if (list_str == NULL) 1755 + return 0; 1756 1757 + *list = strlist__new(true, list_str); 1758 + if (!*list) { 1759 + pr_err("problems parsing %s list\n", list_name); 1760 + return -1; 1761 + } 1762 + return 0; 1763 + } 1764 + 1765 + int symbol__init(void) 1766 + { 1767 elf_version(EV_CURRENT); 1768 + if (symbol_conf.sort_by_name) 1769 + symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 1770 + sizeof(struct symbol)); 1771 1772 + if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0) 1773 return -1; 1774 1775 + if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { 1776 + pr_err("'.' 
is the only non valid --field-separator argument\n"); 1777 return -1; 1778 } 1779 1780 + if (setup_list(&symbol_conf.dso_list, 1781 + symbol_conf.dso_list_str, "dso") < 0) 1782 + return -1; 1783 + 1784 + if (setup_list(&symbol_conf.comm_list, 1785 + symbol_conf.comm_list_str, "comm") < 0) 1786 + goto out_free_dso_list; 1787 + 1788 + if (setup_list(&symbol_conf.sym_list, 1789 + symbol_conf.sym_list_str, "symbol") < 0) 1790 + goto out_free_comm_list; 1791 + 1792 + return 0; 1793 + 1794 + out_free_dso_list: 1795 + strlist__delete(symbol_conf.dso_list); 1796 + out_free_comm_list: 1797 + strlist__delete(symbol_conf.comm_list); 1798 + return -1; 1799 + } 1800 + 1801 + int perf_session__create_kernel_maps(struct perf_session *self) 1802 + { 1803 + if (map_groups__create_kernel_maps(&self->kmaps, 1804 + symbol_conf.vmlinux_name) < 0) 1805 + return -1; 1806 + 1807 + if (symbol_conf.use_modules && 1808 + perf_session__create_module_maps(self) < 0) 1809 + pr_debug("Failed to load list of modules for session %s, " 1810 + "continuing...\n", self->filename); 1811 /* 1812 * Now that we have all the maps created, just set the ->end of them: 1813 */ 1814 + map_groups__fixup_end(&self->kmaps); 1815 return 0; 1816 }
+24 -10
tools/perf/util/symbol.h
··· 49 char name[0]; 50 }; 51 52 struct symbol_conf { 53 unsigned short priv_size; 54 bool try_vmlinux_path, 55 use_modules, 56 - sort_by_name; 57 - const char *vmlinux_name; 58 }; 59 60 - extern unsigned int symbol__priv_size; 61 62 static inline void *symbol__priv(struct symbol *self) 63 { 64 - return ((void *)self) - symbol__priv_size; 65 } 66 67 struct addr_location { ··· 83 struct symbol *sym; 84 u64 addr; 85 char level; 86 }; 87 88 struct dso { ··· 112 113 void dso__sort_by_name(struct dso *self, enum map_type type); 114 115 struct dso *dsos__findnew(const char *name); 116 - int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); 117 void dsos__fprintf(FILE *fp); 118 size_t dsos__fprintf_buildid(FILE *fp); 119 ··· 133 bool dsos__read_build_ids(void); 134 int build_id__sprintf(u8 *self, int len, char *bf); 135 136 - size_t kernel_maps__fprintf(FILE *fp); 137 138 - int symbol__init(struct symbol_conf *conf); 139 - 140 - struct map_groups; 141 - struct map_groups *kmaps; 142 extern struct list_head dsos__user, dsos__kernel; 143 extern struct dso *vdso; 144 #endif /* __PERF_SYMBOL */
··· 49 char name[0]; 50 }; 51 52 + struct strlist; 53 + 54 struct symbol_conf { 55 unsigned short priv_size; 56 bool try_vmlinux_path, 57 use_modules, 58 + sort_by_name, 59 + show_nr_samples, 60 + use_callchain, 61 + exclude_other; 62 + const char *vmlinux_name, 63 + *field_sep; 64 + char *dso_list_str, 65 + *comm_list_str, 66 + *sym_list_str, 67 + *col_width_list_str; 68 + struct strlist *dso_list, 69 + *comm_list, 70 + *sym_list; 71 }; 72 73 + extern struct symbol_conf symbol_conf; 74 75 static inline void *symbol__priv(struct symbol *self) 76 { 77 + return ((void *)self) - symbol_conf.priv_size; 78 } 79 80 struct addr_location { ··· 70 struct symbol *sym; 71 u64 addr; 72 char level; 73 + bool filtered; 74 }; 75 76 struct dso { ··· 98 99 void dso__sort_by_name(struct dso *self, enum map_type type); 100 101 + struct perf_session; 102 + 103 struct dso *dsos__findnew(const char *name); 104 + int dso__load(struct dso *self, struct map *map, struct perf_session *session, 105 + symbol_filter_t filter); 106 void dsos__fprintf(FILE *fp); 107 size_t dsos__fprintf_buildid(FILE *fp); 108 ··· 116 bool dsos__read_build_ids(void); 117 int build_id__sprintf(u8 *self, int len, char *bf); 118 119 + int symbol__init(void); 120 + int perf_session__create_kernel_maps(struct perf_session *self); 121 122 extern struct list_head dsos__user, dsos__kernel; 123 extern struct dso *vdso; 124 #endif /* __PERF_SYMBOL */
+12 -25
tools/perf/util/thread.c
··· 2 #include <stdlib.h> 3 #include <stdio.h> 4 #include <string.h> 5 #include "thread.h" 6 #include "util.h" 7 #include "debug.h" 8 - 9 - static struct rb_root threads; 10 - static struct thread *last_match; 11 12 void map_groups__init(struct map_groups *self) 13 { ··· 120 map_groups__fprintf(&self->mg, fp); 121 } 122 123 - struct thread *threads__findnew(pid_t pid) 124 { 125 - struct rb_node **p = &threads.rb_node; 126 struct rb_node *parent = NULL; 127 struct thread *th; 128 ··· 131 * so most of the time we dont have to look up 132 * the full rbtree: 133 */ 134 - if (last_match && last_match->pid == pid) 135 - return last_match; 136 137 while (*p != NULL) { 138 parent = *p; 139 th = rb_entry(parent, struct thread, rb_node); 140 141 if (th->pid == pid) { 142 - last_match = th; 143 return th; 144 } 145 ··· 152 th = thread__new(pid); 153 if (th != NULL) { 154 rb_link_node(&th->rb_node, parent, p); 155 - rb_insert_color(&th->rb_node, &threads); 156 - last_match = th; 157 } 158 159 return th; 160 - } 161 - 162 - struct thread *register_idle_thread(void) 163 - { 164 - struct thread *thread = threads__findnew(0); 165 - 166 - if (!thread || thread__set_comm(thread, "swapper")) { 167 - fprintf(stderr, "problem inserting idle task.\n"); 168 - exit(-1); 169 - } 170 - 171 - return thread; 172 } 173 174 static void map_groups__remove_overlappings(struct map_groups *self, ··· 267 return 0; 268 } 269 270 - size_t threads__fprintf(FILE *fp) 271 { 272 size_t ret = 0; 273 struct rb_node *nd; 274 275 - for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { 276 struct thread *pos = rb_entry(nd, struct thread, rb_node); 277 278 ret += thread__fprintf(pos, fp); ··· 282 } 283 284 struct symbol *map_groups__find_symbol(struct map_groups *self, 285 enum map_type type, u64 addr, 286 symbol_filter_t filter) 287 { 288 struct map *map = map_groups__find(self, type, addr); 289 290 if (map != NULL) 291 - return map__find_symbol(map, map->map_ip(map, addr), filter); 292 293 return NULL; 294 }
··· 2 #include <stdlib.h> 3 #include <stdio.h> 4 #include <string.h> 5 + #include "session.h" 6 #include "thread.h" 7 #include "util.h" 8 #include "debug.h" 9 10 void map_groups__init(struct map_groups *self) 11 { ··· 122 map_groups__fprintf(&self->mg, fp); 123 } 124 125 + struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) 126 { 127 + struct rb_node **p = &self->threads.rb_node; 128 struct rb_node *parent = NULL; 129 struct thread *th; 130 ··· 133 * so most of the time we dont have to look up 134 * the full rbtree: 135 */ 136 + if (self->last_match && self->last_match->pid == pid) 137 + return self->last_match; 138 139 while (*p != NULL) { 140 parent = *p; 141 th = rb_entry(parent, struct thread, rb_node); 142 143 if (th->pid == pid) { 144 + self->last_match = th; 145 return th; 146 } 147 ··· 154 th = thread__new(pid); 155 if (th != NULL) { 156 rb_link_node(&th->rb_node, parent, p); 157 + rb_insert_color(&th->rb_node, &self->threads); 158 + self->last_match = th; 159 } 160 161 return th; 162 } 163 164 static void map_groups__remove_overlappings(struct map_groups *self, ··· 281 return 0; 282 } 283 284 + size_t perf_session__fprintf(struct perf_session *self, FILE *fp) 285 { 286 size_t ret = 0; 287 struct rb_node *nd; 288 289 + for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) { 290 struct thread *pos = rb_entry(nd, struct thread, rb_node); 291 292 ret += thread__fprintf(pos, fp); ··· 296 } 297 298 struct symbol *map_groups__find_symbol(struct map_groups *self, 299 + struct perf_session *session, 300 enum map_type type, u64 addr, 301 symbol_filter_t filter) 302 { 303 struct map *map = map_groups__find(self, type, addr); 304 305 if (map != NULL) 306 + return map__find_symbol(map, session, map->map_ip(map, addr), filter); 307 308 return NULL; 309 }
+8 -8
tools/perf/util/thread.h
··· 8 struct map_groups { 9 struct rb_root maps[MAP__NR_TYPES]; 10 struct list_head removed_maps[MAP__NR_TYPES]; 11 - bool use_modules; 12 }; 13 14 struct thread { ··· 22 void map_groups__init(struct map_groups *self); 23 int thread__set_comm(struct thread *self, const char *comm); 24 int thread__comm_len(struct thread *self); 25 - struct thread *threads__findnew(pid_t pid); 26 - struct thread *register_idle_thread(void); 27 void thread__insert_map(struct thread *self, struct map *map); 28 int thread__fork(struct thread *self, struct thread *parent); 29 size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp); 30 - size_t threads__fprintf(FILE *fp); 31 32 void maps__insert(struct rb_root *maps, struct map *map); 33 struct map *maps__find(struct rb_root *maps, u64 addr); ··· 48 return self ? map_groups__find(&self->mg, type, addr) : NULL; 49 } 50 51 - void thread__find_addr_location(struct thread *self, u8 cpumode, 52 enum map_type type, u64 addr, 53 struct addr_location *al, 54 symbol_filter_t filter); 55 struct symbol *map_groups__find_symbol(struct map_groups *self, 56 enum map_type type, u64 addr, 57 symbol_filter_t filter); 58 59 static inline struct symbol * 60 - map_groups__find_function(struct map_groups *self, u64 addr, 61 - symbol_filter_t filter) 62 { 63 - return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter); 64 } 65 66 struct map *map_groups__find_by_name(struct map_groups *self,
··· 8 struct map_groups { 9 struct rb_root maps[MAP__NR_TYPES]; 10 struct list_head removed_maps[MAP__NR_TYPES]; 11 }; 12 13 struct thread { ··· 23 void map_groups__init(struct map_groups *self); 24 int thread__set_comm(struct thread *self, const char *comm); 25 int thread__comm_len(struct thread *self); 26 + struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); 27 void thread__insert_map(struct thread *self, struct map *map); 28 int thread__fork(struct thread *self, struct thread *parent); 29 size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp); 30 + size_t perf_session__fprintf(struct perf_session *self, FILE *fp); 31 32 void maps__insert(struct rb_root *maps, struct map *map); 33 struct map *maps__find(struct rb_root *maps, u64 addr); ··· 50 return self ? map_groups__find(&self->mg, type, addr) : NULL; 51 } 52 53 + void thread__find_addr_location(struct thread *self, 54 + struct perf_session *session, u8 cpumode, 55 enum map_type type, u64 addr, 56 struct addr_location *al, 57 symbol_filter_t filter); 58 struct symbol *map_groups__find_symbol(struct map_groups *self, 59 + struct perf_session *session, 60 enum map_type type, u64 addr, 61 symbol_filter_t filter); 62 63 static inline struct symbol * 64 + map_groups__find_function(struct map_groups *self, struct perf_session *session, 65 + u64 addr, symbol_filter_t filter) 66 { 67 + return map_groups__find_symbol(self, session, MAP__FUNCTION, addr, filter); 68 } 69 70 struct map *map_groups__find_by_name(struct map_groups *self,
+31 -11
tools/perf/util/trace-event-perl.c
··· 267 } 268 269 static void perl_process_event(int cpu, void *data, 270 - int size __attribute((unused)), 271 unsigned long long nsecs, char *comm) 272 { 273 struct format_field *field; ··· 359 /* 360 * Start trace script 361 */ 362 - static int perl_start_script(const char *script) 363 { 364 - const char *command_line[2] = { "", NULL }; 365 366 command_line[1] = script; 367 368 my_perl = perl_alloc(); 369 perl_construct(my_perl); 370 371 - if (perl_parse(my_perl, xs_init, 2, (char **)command_line, 372 - (char **)NULL)) 373 - return -1; 374 375 - perl_run(my_perl); 376 - if (SvTRUE(ERRSV)) 377 - return -1; 378 379 run_start_sub(); 380 381 fprintf(stderr, "perf trace started with Perl script %s\n\n", script); 382 - 383 return 0; 384 } 385 386 /* ··· 597 "\n etc.\n"); 598 } 599 600 - static int perl_start_script_unsupported(const char *script __unused) 601 { 602 print_unsupported_msg(); 603
··· 267 } 268 269 static void perl_process_event(int cpu, void *data, 270 + int size __unused, 271 unsigned long long nsecs, char *comm) 272 { 273 struct format_field *field; ··· 359 /* 360 * Start trace script 361 */ 362 + static int perl_start_script(const char *script, int argc, const char **argv) 363 { 364 + const char **command_line; 365 + int i, err = 0; 366 367 + command_line = malloc((argc + 2) * sizeof(const char *)); 368 + command_line[0] = ""; 369 command_line[1] = script; 370 + for (i = 2; i < argc + 2; i++) 371 + command_line[i] = argv[i - 2]; 372 373 my_perl = perl_alloc(); 374 perl_construct(my_perl); 375 376 + if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, 377 + (char **)NULL)) { 378 + err = -1; 379 + goto error; 380 + } 381 382 + if (perl_run(my_perl)) { 383 + err = -1; 384 + goto error; 385 + } 386 + 387 + if (SvTRUE(ERRSV)) { 388 + err = -1; 389 + goto error; 390 + } 391 392 run_start_sub(); 393 394 + free(command_line); 395 fprintf(stderr, "perf trace started with Perl script %s\n\n", script); 396 return 0; 397 + error: 398 + perl_free(my_perl); 399 + free(command_line); 400 + 401 + return err; 402 } 403 404 /* ··· 579 "\n etc.\n"); 580 } 581 582 + static int perl_start_script_unsupported(const char *script __unused, 583 + int argc __unused, 584 + const char **argv __unused) 585 { 586 print_unsupported_msg(); 587
+1 -1
tools/perf/util/trace-event.h
··· 270 271 struct scripting_ops { 272 const char *name; 273 - int (*start_script) (const char *); 274 int (*stop_script) (void); 275 void (*process_event) (int cpu, void *data, int size, 276 unsigned long long nsecs, char *comm);
··· 270 271 struct scripting_ops { 272 const char *name; 273 + int (*start_script) (const char *script, int argc, const char **argv); 274 int (*stop_script) (void); 275 void (*process_event) (int cpu, void *data, int size, 276 unsigned long long nsecs, char *comm);