Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (52 commits)
perf record: Use per-task-per-cpu events for inherited events
perf record: Properly synchronize child creation
perf events: Allow per-task-per-cpu counters
perf diff: Percent calcs should use double values
perf diff: Change the default sort order to "dso,symbol"
perf diff: Use perf_session__fprintf_hists just like 'perf record'
perf report: Fix cut'n'paste error recently introduced
perf session: Move perf report specific hits out of perf_session__fprintf_hists
perf tools: Move hist entries printing routines from perf report
perf report: Generalize perf_session__fprintf_hists()
perf symbols: Move symbol filtering to event__preprocess_sample()
perf symbols: Adopt the strlists for dso, comm
perf symbols: Make symbol_conf global
perf probe: Fix to show which probe point is not found
perf probe: Check symbols in symtab/kallsyms
perf probe: Check build-id of vmlinux
perf probe: Reject second attempt of adding same-name event
perf probe: Support event name for --add option
perf probe: Add glob matching support on --del
perf probe: Use strlist__for_each macros in probe-event.c
...

+2299 -1419
+3 -9
include/linux/perf_event.h
··· 211 211 __u32 wakeup_watermark; /* bytes before wakeup */ 212 212 }; 213 213 214 - struct { /* Hardware breakpoint info */ 215 - __u64 bp_addr; 216 - __u32 bp_type; 217 - __u32 bp_len; 218 - __u64 __bp_reserved_1; 219 - __u64 __bp_reserved_2; 220 - }; 221 - 222 214 __u32 __reserved_2; 223 215 224 - __u64 __reserved_3; 216 + __u64 bp_addr; 217 + __u32 bp_type; 218 + __u32 bp_len; 225 219 }; 226 220 227 221 /*
+9 -6
kernel/perf_event.c
··· 782 782 783 783 add_event_to_ctx(event, ctx); 784 784 785 + if (event->cpu != -1 && event->cpu != smp_processor_id()) 786 + goto unlock; 787 + 785 788 /* 786 789 * Don't put the event on if it is disabled or if 787 790 * it is in a group and the group isn't on. ··· 927 924 if (event->state >= PERF_EVENT_STATE_INACTIVE) 928 925 goto unlock; 929 926 __perf_event_mark_enabled(event, ctx); 927 + 928 + if (event->cpu != -1 && event->cpu != smp_processor_id()) 929 + goto unlock; 930 930 931 931 /* 932 932 * If the event is in a group and isn't the group leader, ··· 1601 1595 unsigned long flags; 1602 1596 int err; 1603 1597 1604 - /* 1605 - * If cpu is not a wildcard then this is a percpu event: 1606 - */ 1607 - if (cpu != -1) { 1598 + if (pid == -1 && cpu != -1) { 1608 1599 /* Must be root to operate on a CPU event: */ 1609 1600 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1610 1601 return ERR_PTR(-EACCES); 1611 1602 1612 - if (cpu < 0 || cpu > num_possible_cpus()) 1603 + if (cpu < 0 || cpu >= nr_cpumask_bits) 1613 1604 return ERR_PTR(-EINVAL); 1614 1605 1615 1606 /* ··· 4567 4564 if (attr->type >= PERF_TYPE_MAX) 4568 4565 return -EINVAL; 4569 4566 4570 - if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) 4567 + if (attr->__reserved_1 || attr->__reserved_2) 4571 4568 return -EINVAL; 4572 4569 4573 4570 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
+55
tools/perf/Documentation/perf-diff.txt
··· 1 + perf-diff(1) 2 + ============== 3 + 4 + NAME 5 + ---- 6 + perf-diff - Read two perf.data files and display the differential profile 7 + 8 + SYNOPSIS 9 + -------- 10 + [verse] 11 + 'perf diff' [oldfile] [newfile] 12 + 13 + DESCRIPTION 14 + ----------- 15 + This command displays the performance difference between two perf.data files 16 + captured via perf record. 17 + 18 + If no parameters are passed it will assume perf.data.old and perf.data. 19 + 20 + OPTIONS 21 + ------- 22 + -d:: 23 + --dsos=:: 24 + Only consider symbols in these dsos. CSV that understands 25 + file://filename entries. 26 + 27 + -C:: 28 + --comms=:: 29 + Only consider symbols in these comms. CSV that understands 30 + file://filename entries. 31 + 32 + -S:: 33 + --symbols=:: 34 + Only consider these symbols. CSV that understands 35 + file://filename entries. 36 + 37 + -s:: 38 + --sort=:: 39 + Sort by key(s): pid, comm, dso, symbol. 40 + 41 + -t:: 42 + --field-separator=:: 43 + 44 + Use a special separator character and don't pad with spaces, replacing 45 + all occurrences of this separator in symbol names (and other output) 46 + with a '.' character, so that it is the only non-valid separator. 47 + 48 + -v:: 49 + --verbose:: 50 + Be verbose, for instance, show the raw counts in addition to the 51 + diff. 52 + 53 + SEE ALSO 54 + -------- 55 + linkperf:perf-record[1]
+2 -1
tools/perf/Documentation/perf-probe.txt
··· 49 49 ------------ 50 50 Probe points are defined by following syntax. 51 51 52 - "FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]" 52 + "[EVENT=]FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]" 53 53 54 + 'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, the event group name is set as 'probe'. 54 55 'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function. 55 56 It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. 56 57 'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
+4
tools/perf/Documentation/perf-report.txt
··· 39 39 Only consider these symbols. CSV that understands 40 40 file://filename entries. 41 41 42 + -s:: 43 + --sort=:: 44 + Sort by key(s): pid, comm, dso, symbol, parent. 45 + 42 46 -w:: 43 47 --field-width=:: 44 48 Force each column width to the provided list, for large terminal
+26 -1
tools/perf/Documentation/perf-trace.txt
··· 8 8 SYNOPSIS 9 9 -------- 10 10 [verse] 11 - 'perf trace' [-i <file> | --input=file] symbol_name 11 + 'perf trace' {record <script> | report <script> [args] } 12 12 13 13 DESCRIPTION 14 14 ----------- 15 15 This command reads the input file and displays the trace recorded. 16 + 17 + There are several variants of perf trace: 18 + 19 + 'perf trace' to see a detailed trace of the workload that was 20 + recorded. 21 + 22 + 'perf trace record <script>' to record the events required for 'perf 23 + trace report'. <script> is the name displayed in the output of 24 + 'perf trace --list' i.e. the actual script name minus any language 25 + extension. 26 + 27 + 'perf trace report <script>' to run and display the results of 28 + <script>. <script> is the name displayed in the output of 'perf 29 + trace --list' i.e. the actual script name minus any language 30 + extension. The perf.data output from a previous run of 'perf trace 31 + record <script>' is used and should be present for this command to 32 + succeed. 16 33 17 34 OPTIONS 18 35 ------- 19 36 -D:: 20 37 --dump-raw-trace=:: 21 38 Display verbose dump of the trace data. 39 + 40 + -L:: 41 + --Latency=:: 42 + Show latency attributes (irqs/preemption disabled, etc). 43 + 44 + -l:: 45 + --list=:: 46 + Display a list of available trace scripts. 22 47 23 48 -s:: 24 49 --script=::
+1 -3
tools/perf/Makefile
··· 370 370 LIB_H += util/sort.h 371 371 LIB_H += util/hist.h 372 372 LIB_H += util/thread.h 373 - LIB_H += util/data_map.h 374 373 LIB_H += util/probe-finder.h 375 374 LIB_H += util/probe-event.h 376 375 ··· 427 428 BUILTIN_OBJS += bench/sched-pipe.o 428 429 BUILTIN_OBJS += bench/mem-memcpy.o 429 430 431 + BUILTIN_OBJS += builtin-diff.o 430 432 BUILTIN_OBJS += builtin-help.o 431 433 BUILTIN_OBJS += builtin-sched.o 432 434 BUILTIN_OBJS += builtin-buildid-list.o ··· 996 996 $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 997 997 $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' 998 998 $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' 999 - $(INSTALL) scripts/perl/Perf-Trace-Util/Makefile.PL -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' 1000 - $(INSTALL) scripts/perl/Perf-Trace-Util/README -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' 1001 999 ifdef BUILT_INS 1002 1000 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 1003 1001 $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+24 -44
tools/perf/builtin-annotate.c
··· 26 26 #include "util/sort.h" 27 27 #include "util/hist.h" 28 28 #include "util/session.h" 29 - #include "util/data_map.h" 30 29 31 30 static char const *input_name = "perf.data"; 32 31 ··· 49 50 struct sym_priv { 50 51 struct sym_hist *hist; 51 52 struct sym_ext *ext; 52 - }; 53 - 54 - static struct symbol_conf symbol_conf = { 55 - .priv_size = sizeof(struct sym_priv), 56 - .try_vmlinux_path = true, 57 53 }; 58 54 59 55 static const char *sym_hist_filter; ··· 116 122 h->ip[offset]); 117 123 } 118 124 119 - static int hist_entry__add(struct addr_location *al, u64 count) 125 + static int perf_session__add_hist_entry(struct perf_session *self, 126 + struct addr_location *al, u64 count) 120 127 { 121 128 bool hit; 122 - struct hist_entry *he = __hist_entry__add(al, NULL, count, &hit); 129 + struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, 130 + count, &hit); 123 131 if (he == NULL) 124 132 return -ENOMEM; 125 133 hist_hit(he, al->addr); 126 134 return 0; 127 135 } 128 136 129 - static int process_sample_event(event_t *event) 137 + static int process_sample_event(event_t *event, struct perf_session *session) 130 138 { 131 139 struct addr_location al; 132 140 133 141 dump_printf("(IP, %d): %d: %p\n", event->header.misc, 134 142 event->ip.pid, (void *)(long)event->ip.ip); 135 143 136 - if (event__preprocess_sample(event, &al, symbol_filter) < 0) { 144 + if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) { 137 145 fprintf(stderr, "problem processing %d event, skipping it.\n", 138 146 event->header.type); 139 147 return -1; 140 148 } 141 149 142 - if (hist_entry__add(&al, 1)) { 150 + if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) { 143 151 fprintf(stderr, "problem incrementing symbol count, " 144 152 "skipping event\n"); 145 153 return -1; ··· 425 429 free_source_line(he, len); 426 430 } 427 431 428 - static void find_annotations(void) 432 + static void perf_session__find_annotations(struct perf_session *self) 
429 433 { 430 434 struct rb_node *nd; 431 435 432 - for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { 436 + for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { 433 437 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); 434 438 struct sym_priv *priv; 435 439 ··· 450 454 } 451 455 } 452 456 453 - static struct perf_file_handler file_handler = { 457 + static struct perf_event_ops event_ops = { 454 458 .process_sample_event = process_sample_event, 455 459 .process_mmap_event = event__process_mmap, 456 460 .process_comm_event = event__process_comm, ··· 459 463 460 464 static int __cmd_annotate(void) 461 465 { 462 - struct perf_session *session = perf_session__new(input_name, O_RDONLY, force); 463 - struct thread *idle; 464 466 int ret; 467 + struct perf_session *session; 465 468 469 + session = perf_session__new(input_name, O_RDONLY, force); 466 470 if (session == NULL) 467 471 return -ENOMEM; 468 472 469 - idle = register_idle_thread(); 470 - register_perf_file_handler(&file_handler); 471 - 472 - ret = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 473 + ret = perf_session__process_events(session, &event_ops); 473 474 if (ret) 474 475 goto out_delete; 475 476 ··· 476 483 } 477 484 478 485 if (verbose > 3) 479 - threads__fprintf(stdout); 486 + perf_session__fprintf(session, stdout); 480 487 481 488 if (verbose > 2) 482 489 dsos__fprintf(stdout); 483 490 484 - collapse__resort(); 485 - output__resort(event__total[0]); 486 - 487 - find_annotations(); 491 + perf_session__collapse_resort(session); 492 + perf_session__output_resort(session, session->event_total[0]); 493 + perf_session__find_annotations(session); 488 494 out_delete: 489 495 perf_session__delete(session); 490 496 ··· 516 524 OPT_END() 517 525 }; 518 526 519 - static void setup_sorting(void) 520 - { 521 - char *tmp, *tok, *str = strdup(sort_order); 522 - 523 - for (tok = strtok_r(str, ", ", &tmp); 524 - tok; tok = strtok_r(NULL, ", ", &tmp)) { 525 - if 
(sort_dimension__add(tok) < 0) { 526 - error("Unknown --sort key: `%s'", tok); 527 - usage_with_options(annotate_usage, options); 528 - } 529 - } 530 - 531 - free(str); 532 - } 533 - 534 527 int cmd_annotate(int argc, const char **argv, const char *prefix __used) 535 528 { 536 - if (symbol__init(&symbol_conf) < 0) 537 - return -1; 538 - 539 529 argc = parse_options(argc, argv, options, annotate_usage, 0); 540 530 541 - setup_sorting(); 531 + symbol_conf.priv_size = sizeof(struct sym_priv); 532 + symbol_conf.try_vmlinux_path = true; 533 + 534 + if (symbol__init() < 0) 535 + return -1; 536 + 537 + setup_sorting(annotate_usage, options); 542 538 543 539 if (argc) { 544 540 /*
+2 -2
tools/perf/builtin-buildid-list.c
··· 9 9 #include "builtin.h" 10 10 #include "perf.h" 11 11 #include "util/cache.h" 12 - #include "util/data_map.h" 13 12 #include "util/debug.h" 14 13 #include "util/parse-options.h" 15 14 #include "util/session.h" ··· 54 55 static int __cmd_buildid_list(void) 55 56 { 56 57 int err = -1; 57 - struct perf_session *session = perf_session__new(input_name, O_RDONLY, force); 58 + struct perf_session *session; 58 59 60 + session = perf_session__new(input_name, O_RDONLY, force); 59 61 if (session == NULL) 60 62 return -1; 61 63
+248
tools/perf/builtin-diff.c
··· 1 + /* 2 + * builtin-diff.c 3 + * 4 + * Builtin diff command: Analyze two perf.data input files, look up and read 5 + * DSOs and symbol information, sort them and produce a diff. 6 + */ 7 + #include "builtin.h" 8 + 9 + #include "util/debug.h" 10 + #include "util/event.h" 11 + #include "util/hist.h" 12 + #include "util/session.h" 13 + #include "util/sort.h" 14 + #include "util/symbol.h" 15 + #include "util/util.h" 16 + 17 + #include <stdlib.h> 18 + 19 + static char const *input_old = "perf.data.old", 20 + *input_new = "perf.data"; 21 + static char diff__default_sort_order[] = "dso,symbol"; 22 + static int force; 23 + static bool show_displacement; 24 + 25 + static int perf_session__add_hist_entry(struct perf_session *self, 26 + struct addr_location *al, u64 count) 27 + { 28 + bool hit; 29 + struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, 30 + count, &hit); 31 + if (he == NULL) 32 + return -ENOMEM; 33 + 34 + if (hit) 35 + he->count += count; 36 + 37 + return 0; 38 + } 39 + 40 + static int diff__process_sample_event(event_t *event, struct perf_session *session) 41 + { 42 + struct addr_location al; 43 + struct sample_data data = { .period = 1, }; 44 + 45 + dump_printf("(IP, %d): %d: %p\n", event->header.misc, 46 + event->ip.pid, (void *)(long)event->ip.ip); 47 + 48 + if (event__preprocess_sample(event, session, &al, NULL) < 0) { 49 + pr_warning("problem processing %d event, skipping it.\n", 50 + event->header.type); 51 + return -1; 52 + } 53 + 54 + if (al.filtered) 55 + return 0; 56 + 57 + event__parse_sample(event, session->sample_type, &data); 58 + 59 + if (al.sym && perf_session__add_hist_entry(session, &al, data.period)) { 60 + pr_warning("problem incrementing symbol count, skipping event\n"); 61 + return -1; 62 + } 63 + 64 + session->events_stats.total += data.period; 65 + return 0; 66 + } 67 + 68 + static struct perf_event_ops event_ops = { 69 + .process_sample_event = diff__process_sample_event, 70 + .process_mmap_event = 
event__process_mmap, 71 + .process_comm_event = event__process_comm, 72 + .process_exit_event = event__process_task, 73 + .process_fork_event = event__process_task, 74 + .process_lost_event = event__process_lost, 75 + }; 76 + 77 + static void perf_session__insert_hist_entry_by_name(struct rb_root *root, 78 + struct hist_entry *he) 79 + { 80 + struct rb_node **p = &root->rb_node; 81 + struct rb_node *parent = NULL; 82 + struct hist_entry *iter; 83 + 84 + while (*p != NULL) { 85 + int cmp; 86 + parent = *p; 87 + iter = rb_entry(parent, struct hist_entry, rb_node); 88 + 89 + cmp = strcmp(he->map->dso->name, iter->map->dso->name); 90 + if (cmp > 0) 91 + p = &(*p)->rb_left; 92 + else if (cmp < 0) 93 + p = &(*p)->rb_right; 94 + else { 95 + cmp = strcmp(he->sym->name, iter->sym->name); 96 + if (cmp > 0) 97 + p = &(*p)->rb_left; 98 + else 99 + p = &(*p)->rb_right; 100 + } 101 + } 102 + 103 + rb_link_node(&he->rb_node, parent, p); 104 + rb_insert_color(&he->rb_node, root); 105 + } 106 + 107 + static void perf_session__resort_by_name(struct perf_session *self) 108 + { 109 + unsigned long position = 1; 110 + struct rb_root tmp = RB_ROOT; 111 + struct rb_node *next = rb_first(&self->hists); 112 + 113 + while (next != NULL) { 114 + struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node); 115 + 116 + next = rb_next(&n->rb_node); 117 + rb_erase(&n->rb_node, &self->hists); 118 + n->position = position++; 119 + perf_session__insert_hist_entry_by_name(&tmp, n); 120 + } 121 + 122 + self->hists = tmp; 123 + } 124 + 125 + static struct hist_entry * 126 + perf_session__find_hist_entry_by_name(struct perf_session *self, 127 + struct hist_entry *he) 128 + { 129 + struct rb_node *n = self->hists.rb_node; 130 + 131 + while (n) { 132 + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 133 + int cmp = strcmp(he->map->dso->name, iter->map->dso->name); 134 + 135 + if (cmp > 0) 136 + n = n->rb_left; 137 + else if (cmp < 0) 138 + n = n->rb_right; 139 + else { 140 + cmp 
= strcmp(he->sym->name, iter->sym->name); 141 + if (cmp > 0) 142 + n = n->rb_left; 143 + else if (cmp < 0) 144 + n = n->rb_right; 145 + else 146 + return iter; 147 + } 148 + } 149 + 150 + return NULL; 151 + } 152 + 153 + static void perf_session__match_hists(struct perf_session *old_session, 154 + struct perf_session *new_session) 155 + { 156 + struct rb_node *nd; 157 + 158 + perf_session__resort_by_name(old_session); 159 + 160 + for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) { 161 + struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node); 162 + pos->pair = perf_session__find_hist_entry_by_name(old_session, pos); 163 + } 164 + } 165 + 166 + static int __cmd_diff(void) 167 + { 168 + int ret, i; 169 + struct perf_session *session[2]; 170 + 171 + session[0] = perf_session__new(input_old, O_RDONLY, force); 172 + session[1] = perf_session__new(input_new, O_RDONLY, force); 173 + if (session[0] == NULL || session[1] == NULL) 174 + return -ENOMEM; 175 + 176 + for (i = 0; i < 2; ++i) { 177 + ret = perf_session__process_events(session[i], &event_ops); 178 + if (ret) 179 + goto out_delete; 180 + perf_session__output_resort(session[i], session[i]->events_stats.total); 181 + } 182 + 183 + perf_session__match_hists(session[0], session[1]); 184 + perf_session__fprintf_hists(session[1], session[0], 185 + show_displacement, stdout); 186 + out_delete: 187 + for (i = 0; i < 2; ++i) 188 + perf_session__delete(session[i]); 189 + return ret; 190 + } 191 + 192 + static const char *const diff_usage[] = { 193 + "perf diff [<options>] [old_file] [new_file]", 194 + }; 195 + 196 + static const struct option options[] = { 197 + OPT_BOOLEAN('v', "verbose", &verbose, 198 + "be more verbose (show symbol address, etc)"), 199 + OPT_BOOLEAN('m', "displacement", &show_displacement, 200 + "Show position displacement relative to baseline"), 201 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 202 + "dump raw trace in ASCII"), 203 + OPT_BOOLEAN('f', "force", &force, "don't 
complain, do it"), 204 + OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 205 + "load module symbols - WARNING: use only with -k and LIVE kernel"), 206 + OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, 207 + "Don't shorten the pathnames taking into account the cwd"), 208 + OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 209 + "only consider symbols in these dsos"), 210 + OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", 211 + "only consider symbols in these comms"), 212 + OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 213 + "only consider these symbols"), 214 + OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 215 + "sort by key(s): pid, comm, dso, symbol, parent"), 216 + OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", 217 + "separator for columns, no spaces will be added between " 218 + "columns '.' is reserved."), 219 + OPT_END() 220 + }; 221 + 222 + int cmd_diff(int argc, const char **argv, const char *prefix __used) 223 + { 224 + sort_order = diff__default_sort_order; 225 + argc = parse_options(argc, argv, options, diff_usage, 0); 226 + if (argc) { 227 + if (argc > 2) 228 + usage_with_options(diff_usage, options); 229 + if (argc == 2) { 230 + input_old = argv[0]; 231 + input_new = argv[1]; 232 + } else 233 + input_new = argv[0]; 234 + } 235 + 236 + symbol_conf.exclude_other = false; 237 + if (symbol__init() < 0) 238 + return -1; 239 + 240 + setup_sorting(diff_usage, options); 241 + setup_pager(); 242 + 243 + sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL); 244 + sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL); 245 + sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL); 246 + 247 + return __cmd_diff(); 248 + }
+27 -39
tools/perf/builtin-kmem.c
··· 12 12 #include "util/trace-event.h" 13 13 14 14 #include "util/debug.h" 15 - #include "util/data_map.h" 16 15 17 16 #include <linux/rbtree.h> 18 17 ··· 19 20 typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); 20 21 21 22 static char const *input_name = "perf.data"; 22 - 23 - static u64 sample_type; 24 23 25 24 static int alloc_flag; 26 25 static int caller_flag; ··· 309 312 } 310 313 } 311 314 312 - static int process_sample_event(event_t *event) 315 + static int process_sample_event(event_t *event, struct perf_session *session) 313 316 { 314 317 struct sample_data data; 315 318 struct thread *thread; ··· 319 322 data.cpu = -1; 320 323 data.period = 1; 321 324 322 - event__parse_sample(event, sample_type, &data); 325 + event__parse_sample(event, session->sample_type, &data); 323 326 324 327 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 325 328 event->header.misc, ··· 327 330 (void *)(long)data.ip, 328 331 (long long)data.period); 329 332 330 - thread = threads__findnew(event->ip.pid); 333 + thread = perf_session__findnew(session, event->ip.pid); 331 334 if (thread == NULL) { 332 335 pr_debug("problem processing %d event, skipping it.\n", 333 336 event->header.type); ··· 342 345 return 0; 343 346 } 344 347 345 - static int sample_type_check(u64 type) 348 + static int sample_type_check(struct perf_session *session) 346 349 { 347 - sample_type = type; 348 - 349 - if (!(sample_type & PERF_SAMPLE_RAW)) { 350 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 350 351 fprintf(stderr, 351 352 "No trace sample to read. 
Did you call perf record " 352 353 "without -R?"); ··· 354 359 return 0; 355 360 } 356 361 357 - static struct perf_file_handler file_handler = { 362 + static struct perf_event_ops event_ops = { 358 363 .process_sample_event = process_sample_event, 359 364 .process_comm_event = event__process_comm, 360 365 .sample_type_check = sample_type_check, 361 366 }; 362 - 363 - static int read_events(void) 364 - { 365 - int err; 366 - struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 367 - 368 - if (session == NULL) 369 - return -ENOMEM; 370 - 371 - register_idle_thread(); 372 - register_perf_file_handler(&file_handler); 373 - 374 - err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 375 - perf_session__delete(session); 376 - return err; 377 - } 378 367 379 368 static double fragmentation(unsigned long n_req, unsigned long n_alloc) 380 369 { ··· 368 389 return 100.0 - (100.0 * n_req / n_alloc); 369 390 } 370 391 371 - static void __print_result(struct rb_root *root, int n_lines, int is_caller) 392 + static void __print_result(struct rb_root *root, struct perf_session *session, 393 + int n_lines, int is_caller) 372 394 { 373 395 struct rb_node *next; 374 396 ··· 390 410 if (is_caller) { 391 411 addr = data->call_site; 392 412 if (!raw_ip) 393 - sym = map_groups__find_function(kmaps, addr, NULL); 413 + sym = map_groups__find_function(&session->kmaps, session, addr, NULL); 394 414 } else 395 415 addr = data->ptr; 396 416 ··· 431 451 printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs); 432 452 } 433 453 434 - static void print_result(void) 454 + static void print_result(struct perf_session *session) 435 455 { 436 456 if (caller_flag) 437 - __print_result(&root_caller_sorted, caller_lines, 1); 457 + __print_result(&root_caller_sorted, session, caller_lines, 1); 438 458 if (alloc_flag) 439 - __print_result(&root_alloc_sorted, alloc_lines, 0); 459 + __print_result(&root_alloc_sorted, session, alloc_lines, 0); 440 
460 print_summary(); 441 461 } 442 462 ··· 504 524 505 525 static int __cmd_kmem(void) 506 526 { 507 - setup_pager(); 508 - read_events(); 509 - sort_result(); 510 - print_result(); 527 + int err; 528 + struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 529 + if (session == NULL) 530 + return -ENOMEM; 511 531 512 - return 0; 532 + setup_pager(); 533 + err = perf_session__process_events(session, &event_ops); 534 + if (err != 0) 535 + goto out_delete; 536 + sort_result(); 537 + print_result(session); 538 + out_delete: 539 + perf_session__delete(session); 540 + return err; 513 541 } 514 542 515 543 static const char * const kmem_usage[] = { ··· 766 778 767 779 int cmd_kmem(int argc, const char **argv, const char *prefix __used) 768 780 { 769 - symbol__init(0); 770 - 771 781 argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); 772 782 773 783 if (!argc) 774 784 usage_with_options(kmem_usage, kmem_options); 785 + 786 + symbol__init(); 775 787 776 788 if (!strncmp(argv[0], "rec", 3)) { 777 789 return __cmd_record(argc, argv);
+78 -63
tools/perf/builtin-probe.c
··· 38 38 #include "util/strlist.h" 39 39 #include "util/event.h" 40 40 #include "util/debug.h" 41 + #include "util/symbol.h" 42 + #include "util/thread.h" 43 + #include "util/session.h" 41 44 #include "util/parse-options.h" 42 45 #include "util/parse-events.h" /* For debugfs_path */ 43 46 #include "util/probe-finder.h" 44 47 #include "util/probe-event.h" 45 - 46 - /* Default vmlinux search paths */ 47 - #define NR_SEARCH_PATH 4 48 - const char *default_search_path[NR_SEARCH_PATH] = { 49 - "/lib/modules/%s/build/vmlinux", /* Custom build kernel */ 50 - "/usr/lib/debug/lib/modules/%s/vmlinux", /* Red Hat debuginfo */ 51 - "/boot/vmlinux-debug-%s", /* Ubuntu */ 52 - "./vmlinux", /* CWD */ 53 - }; 54 48 55 49 #define MAX_PATH_LEN 256 56 50 #define MAX_PROBES 128 57 51 58 52 /* Session management structure */ 59 53 static struct { 60 - char *vmlinux; 61 - char *release; 62 - int need_dwarf; 54 + bool need_dwarf; 55 + bool list_events; 56 + bool force_add; 63 57 int nr_probe; 64 58 struct probe_point probes[MAX_PROBES]; 65 59 struct strlist *dellist; 60 + struct perf_session *psession; 61 + struct map *kmap; 66 62 } session; 67 63 68 - static bool listing; 69 64 70 65 /* Parse an event definition. Note that any error must die. 
*/ 71 66 static void parse_probe_event(const char *str) ··· 72 77 die("Too many probes (> %d) are specified.", MAX_PROBES); 73 78 74 79 /* Parse perf-probe event into probe_point */ 75 - session.need_dwarf = parse_perf_probe_event(str, pp); 80 + parse_perf_probe_event(str, pp, &session.need_dwarf); 76 81 77 82 pr_debug("%d arguments\n", pp->nr_args); 78 83 } ··· 115 120 return 0; 116 121 } 117 122 118 - #ifndef NO_LIBDWARF 119 - static int open_default_vmlinux(void) 123 + /* Currently just checking function name from symbol map */ 124 + static void evaluate_probe_point(struct probe_point *pp) 120 125 { 121 - struct utsname uts; 122 - char fname[MAX_PATH_LEN]; 123 - int fd, ret, i; 126 + struct symbol *sym; 127 + sym = map__find_symbol_by_name(session.kmap, pp->function, 128 + session.psession, NULL); 129 + if (!sym) 130 + die("Kernel symbol \'%s\' not found - probe not added.", 131 + pp->function); 132 + } 124 133 125 - ret = uname(&uts); 126 - if (ret) { 127 - pr_debug("uname() failed.\n"); 128 - return -errno; 134 + #ifndef NO_LIBDWARF 135 + static int open_vmlinux(void) 136 + { 137 + if (map__load(session.kmap, session.psession, NULL) < 0) { 138 + pr_debug("Failed to load kernel map.\n"); 139 + return -EINVAL; 129 140 } 130 - session.release = uts.release; 131 - for (i = 0; i < NR_SEARCH_PATH; i++) { 132 - ret = snprintf(fname, MAX_PATH_LEN, 133 - default_search_path[i], session.release); 134 - if (ret >= MAX_PATH_LEN || ret < 0) { 135 - pr_debug("Filename(%d,%s) is too long.\n", i, 136 - uts.release); 137 - errno = E2BIG; 138 - return -E2BIG; 139 - } 140 - pr_debug("try to open %s\n", fname); 141 - fd = open(fname, O_RDONLY); 142 - if (fd >= 0) 143 - break; 144 - } 145 - return fd; 141 + pr_debug("Try to open %s\n", session.kmap->dso->long_name); 142 + return open(session.kmap->dso->long_name, O_RDONLY); 146 143 } 147 144 #endif 148 145 ··· 150 163 OPT_BOOLEAN('v', "verbose", &verbose, 151 164 "be more verbose (show parsed arguments, etc)"), 152 165 #ifndef 
NO_LIBDWARF 153 - OPT_STRING('k', "vmlinux", &session.vmlinux, "file", 154 - "vmlinux/module pathname"), 166 + OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 167 + "file", "vmlinux pathname"), 155 168 #endif 156 - OPT_BOOLEAN('l', "list", &listing, "list up current probe events"), 169 + OPT_BOOLEAN('l', "list", &session.list_events, 170 + "list up current probe events"), 157 171 OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", 158 172 opt_del_probe_event), 159 173 OPT_CALLBACK('a', "add", NULL, 160 174 #ifdef NO_LIBDWARF 161 - "FUNC[+OFFS|%return] [ARG ...]", 175 + "[EVENT=]FUNC[+OFFS|%return] [ARG ...]", 162 176 #else 163 - "FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]", 177 + "[EVENT=]FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]", 164 178 #endif 165 179 "probe point definition, where\n" 166 - "\t\tGRP:\tGroup name (optional)\n" 167 - "\t\tNAME:\tEvent name\n" 180 + "\t\tGROUP:\tGroup name (optional)\n" 181 + "\t\tEVENT:\tEvent name\n" 168 182 "\t\tFUNC:\tFunction name\n" 169 183 "\t\tOFFS:\tOffset from function entry (in byte)\n" 170 184 "\t\t%return:\tPut the probe at function return\n" ··· 179 191 #endif 180 192 "\t\t\tkprobe-tracer argument format.)\n", 181 193 opt_add_probe_event), 194 + OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events" 195 + " with existing name"), 182 196 OPT_END() 183 197 }; 184 198 ··· 194 204 195 205 argc = parse_options(argc, argv, options, probe_usage, 196 206 PARSE_OPT_STOP_AT_NON_OPTION); 197 - if (argc > 0) 207 + if (argc > 0) { 208 + if (strcmp(argv[0], "-") == 0) { 209 + pr_warning(" Error: '-' is not supported.\n"); 210 + usage_with_options(probe_usage, options); 211 + } 198 212 parse_probe_event_argv(argc, argv); 213 + } 199 214 200 - if ((session.nr_probe == 0 && !session.dellist && !listing)) 215 + if ((!session.nr_probe && !session.dellist && !session.list_events)) 201 216 usage_with_options(probe_usage, options); 202 217 203 - if (listing) { 218 + if 
(session.list_events) { 204 219 if (session.nr_probe != 0 || session.dellist) { 205 220 pr_warning(" Error: Don't use --list with" 206 221 " --add/--del.\n"); ··· 222 227 return 0; 223 228 } 224 229 230 + /* Initialize symbol maps for vmlinux */ 231 + symbol_conf.sort_by_name = true; 232 + if (symbol_conf.vmlinux_name == NULL) 233 + symbol_conf.try_vmlinux_path = true; 234 + if (symbol__init() < 0) 235 + die("Failed to init symbol map."); 236 + session.psession = perf_session__new(NULL, O_WRONLY, false); 237 + if (session.psession == NULL) 238 + die("Failed to init perf_session."); 239 + session.kmap = map_groups__find_by_name(&session.psession->kmaps, 240 + MAP__FUNCTION, 241 + "[kernel.kallsyms]"); 242 + if (!session.kmap) 243 + die("Could not find kernel map.\n"); 244 + 225 245 if (session.need_dwarf) 226 246 #ifdef NO_LIBDWARF 227 247 die("Debuginfo-analysis is not supported"); 228 248 #else /* !NO_LIBDWARF */ 229 249 pr_debug("Some probes require debuginfo.\n"); 230 250 231 - if (session.vmlinux) { 232 - pr_debug("Try to open %s.", session.vmlinux); 233 - fd = open(session.vmlinux, O_RDONLY); 234 - } else 235 - fd = open_default_vmlinux(); 251 + fd = open_vmlinux(); 236 252 if (fd < 0) { 237 253 if (session.need_dwarf) 238 254 die("Could not open debuginfo file."); ··· 261 255 262 256 lseek(fd, SEEK_SET, 0); 263 257 ret = find_probepoint(fd, pp); 264 - if (ret < 0) { 265 - if (session.need_dwarf) 266 - die("Could not analyze debuginfo."); 267 - 268 - pr_warning("An error occurred in debuginfo analysis. Try to use symbols.\n"); 269 - break; 258 + if (ret > 0) 259 + continue; 260 + if (ret == 0) { /* No error but failed to find probe point. */ 261 + synthesize_perf_probe_point(pp); 262 + die("Probe point '%s' not found. - probe not added.", 263 + pp->probes[0]); 270 264 } 271 - if (ret == 0) /* No error but failed to find probe point. 
*/ 272 - die("No probe point found."); 265 + /* Error path */ 266 + if (session.need_dwarf) { 267 + if (ret == -ENOENT) 268 + pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO=y.\n"); 269 + die("Could not analyze debuginfo."); 270 + } 271 + pr_debug("An error occurred in debuginfo analysis." 272 + " Try to use symbols.\n"); 273 + break; 273 274 } 274 275 close(fd); 275 276 ··· 289 276 if (pp->found) /* This probe is already found. */ 290 277 continue; 291 278 279 + evaluate_probe_point(pp); 292 280 ret = synthesize_trace_kprobe_event(pp); 293 281 if (ret == -E2BIG) 294 282 die("probe point definition becomes too long."); ··· 298 284 } 299 285 300 286 /* Settng up probe points */ 301 - add_trace_kprobe_events(session.probes, session.nr_probe); 287 + add_trace_kprobe_events(session.probes, session.nr_probe, 288 + session.force_add); 302 289 return 0; 303 290 } 304 291
+94 -49
tools/perf/builtin-record.c
··· 123 123 write_output(buf, size); 124 124 } 125 125 126 - static int process_synthesized_event(event_t *event) 126 + static int process_synthesized_event(event_t *event, 127 + struct perf_session *self __used) 127 128 { 128 129 write_event(event, event->header.size); 129 130 return 0; ··· 278 277 279 278 attr->mmap = track; 280 279 attr->comm = track; 281 - attr->inherit = (cpu < 0) && inherit; 280 + attr->inherit = inherit; 282 281 attr->disabled = 1; 283 282 284 283 try_again: ··· 402 401 perf_header__write(&session->header, output, true); 403 402 } 404 403 405 - static int __cmd_record(int argc, const char **argv) 404 + static int __cmd_record(int argc __used, const char **argv) 406 405 { 407 406 int i, counter; 408 407 struct stat st; ··· 410 409 int flags; 411 410 int err; 412 411 unsigned long waking = 0; 412 + int child_ready_pipe[2], go_pipe[2]; 413 + char buf; 413 414 414 415 page_size = sysconf(_SC_PAGE_SIZE); 415 416 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); ··· 422 419 signal(SIGCHLD, sig_handler); 423 420 signal(SIGINT, sig_handler); 424 421 422 + if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) { 423 + perror("failed to create pipes"); 424 + exit(-1); 425 + } 426 + 425 427 if (!stat(output_name, &st) && st.st_size) { 426 - if (!force && !append_file) { 427 - fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", 428 - output_name); 429 - exit(-1); 428 + if (!force) { 429 + if (!append_file) { 430 + pr_err("Error, output file %s exists, use -A " 431 + "to append or -f to overwrite.\n", 432 + output_name); 433 + exit(-1); 434 + } 435 + } else { 436 + char oldname[PATH_MAX]; 437 + snprintf(oldname, sizeof(oldname), "%s.old", 438 + output_name); 439 + unlink(oldname); 440 + rename(output_name, oldname); 430 441 } 431 442 } else { 432 443 append_file = 0; ··· 483 466 484 467 atexit(atexit_header); 485 468 486 - if (!system_wide) { 487 - pid = target_pid; 488 - if (pid == -1) 489 - pid = getpid(); 490 - 491 - 
open_counters(profile_cpu, pid); 492 - } else { 493 - if (profile_cpu != -1) { 494 - open_counters(profile_cpu, target_pid); 495 - } else { 496 - for (i = 0; i < nr_cpus; i++) 497 - open_counters(i, target_pid); 469 + if (target_pid == -1) { 470 + pid = fork(); 471 + if (pid < 0) { 472 + perror("failed to fork"); 473 + exit(-1); 498 474 } 475 + 476 + if (!pid) { 477 + close(child_ready_pipe[0]); 478 + close(go_pipe[1]); 479 + fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); 480 + 481 + /* 482 + * Do a dummy execvp to get the PLT entry resolved, 483 + * so we avoid the resolver overhead on the real 484 + * execvp call. 485 + */ 486 + execvp("", (char **)argv); 487 + 488 + /* 489 + * Tell the parent we're ready to go 490 + */ 491 + close(child_ready_pipe[1]); 492 + 493 + /* 494 + * Wait until the parent tells us to go. 495 + */ 496 + if (read(go_pipe[0], &buf, 1) == -1) 497 + perror("unable to read pipe"); 498 + 499 + execvp(argv[0], (char **)argv); 500 + 501 + perror(argv[0]); 502 + exit(-1); 503 + } 504 + 505 + child_pid = pid; 506 + 507 + if (!system_wide) 508 + target_pid = pid; 509 + 510 + close(child_ready_pipe[1]); 511 + close(go_pipe[0]); 512 + /* 513 + * wait for child to settle 514 + */ 515 + if (read(child_ready_pipe[0], &buf, 1) == -1) { 516 + perror("unable to read pipe"); 517 + exit(-1); 518 + } 519 + close(child_ready_pipe[0]); 520 + } 521 + 522 + 523 + if ((!system_wide && !inherit) || profile_cpu != -1) { 524 + open_counters(profile_cpu, target_pid); 525 + } else { 526 + for (i = 0; i < nr_cpus; i++) 527 + open_counters(i, target_pid); 499 528 } 500 529 501 530 if (file_new) { ··· 551 488 } 552 489 553 490 if (!system_wide) 554 - event__synthesize_thread(pid, process_synthesized_event); 491 + event__synthesize_thread(pid, process_synthesized_event, 492 + session); 555 493 else 556 - event__synthesize_threads(process_synthesized_event); 557 - 558 - if (target_pid == -1 && argc) { 559 - pid = fork(); 560 - if (pid < 0) 561 - die("failed to fork"); 562 - 563 - 
if (!pid) { 564 - if (execvp(argv[0], (char **)argv)) { 565 - perror(argv[0]); 566 - exit(-1); 567 - } 568 - } else { 569 - /* 570 - * Wait a bit for the execv'ed child to appear 571 - * and be updated in /proc 572 - * FIXME: Do you know a less heuristical solution? 573 - */ 574 - usleep(1000); 575 - event__synthesize_thread(pid, 576 - process_synthesized_event); 577 - } 578 - 579 - child_pid = pid; 580 - } 494 + event__synthesize_threads(process_synthesized_event, session); 581 495 582 496 if (realtime_prio) { 583 497 struct sched_param param; ··· 565 525 exit(-1); 566 526 } 567 527 } 528 + 529 + /* 530 + * Let the child rip 531 + */ 532 + close(go_pipe[1]); 568 533 569 534 for (;;) { 570 535 int hits = samples; ··· 665 620 { 666 621 int counter; 667 622 668 - symbol__init(0); 669 - 670 623 argc = parse_options(argc, argv, options, record_usage, 671 - PARSE_OPT_STOP_AT_NON_OPTION); 672 - if (!argc && target_pid == -1 && !system_wide) 624 + PARSE_OPT_STOP_AT_NON_OPTION); 625 + if (!argc && target_pid == -1 && (!system_wide || profile_cpu == -1)) 673 626 usage_with_options(record_usage, options); 627 + 628 + symbol__init(); 674 629 675 630 if (!nr_counters) { 676 631 nr_counters = 1;
+60 -665
tools/perf/builtin-report.c
··· 27 27 #include "util/parse-options.h" 28 28 #include "util/parse-events.h" 29 29 30 - #include "util/data_map.h" 31 30 #include "util/thread.h" 32 31 #include "util/sort.h" 33 32 #include "util/hist.h" 34 33 35 34 static char const *input_name = "perf.data"; 36 35 37 - static char *dso_list_str, *comm_list_str, *sym_list_str, 38 - *col_width_list_str; 39 - static struct strlist *dso_list, *comm_list, *sym_list; 40 - 41 36 static int force; 42 - 43 - static int full_paths; 44 - static int show_nr_samples; 45 37 46 38 static int show_threads; 47 39 static struct perf_read_values show_threads_values; ··· 41 49 static char default_pretty_printing_style[] = "normal"; 42 50 static char *pretty_printing_style = default_pretty_printing_style; 43 51 44 - static int exclude_other = 1; 45 - 46 52 static char callchain_default_opt[] = "fractal,0.5"; 47 53 48 - static struct perf_session *session; 49 - 50 - static u64 sample_type; 51 - 52 - struct symbol_conf symbol_conf; 53 - 54 - 55 - static size_t 56 - callchain__fprintf_left_margin(FILE *fp, int left_margin) 57 - { 58 - int i; 59 - int ret; 60 - 61 - ret = fprintf(fp, " "); 62 - 63 - for (i = 0; i < left_margin; i++) 64 - ret += fprintf(fp, " "); 65 - 66 - return ret; 67 - } 68 - 69 - static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, 70 - int left_margin) 71 - { 72 - int i; 73 - size_t ret = 0; 74 - 75 - ret += callchain__fprintf_left_margin(fp, left_margin); 76 - 77 - for (i = 0; i < depth; i++) 78 - if (depth_mask & (1 << i)) 79 - ret += fprintf(fp, "| "); 80 - else 81 - ret += fprintf(fp, " "); 82 - 83 - ret += fprintf(fp, "\n"); 84 - 85 - return ret; 86 - } 87 - static size_t 88 - ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, 89 - int depth_mask, int count, u64 total_samples, 90 - int hits, int left_margin) 91 - { 92 - int i; 93 - size_t ret = 0; 94 - 95 - ret += callchain__fprintf_left_margin(fp, left_margin); 96 - for (i = 0; i < depth; i++) { 97 - if 
(depth_mask & (1 << i)) 98 - ret += fprintf(fp, "|"); 99 - else 100 - ret += fprintf(fp, " "); 101 - if (!count && i == depth - 1) { 102 - double percent; 103 - 104 - percent = hits * 100.0 / total_samples; 105 - ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); 106 - } else 107 - ret += fprintf(fp, "%s", " "); 108 - } 109 - if (chain->sym) 110 - ret += fprintf(fp, "%s\n", chain->sym->name); 111 - else 112 - ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); 113 - 114 - return ret; 115 - } 116 - 117 - static struct symbol *rem_sq_bracket; 118 - static struct callchain_list rem_hits; 119 - 120 - static void init_rem_hits(void) 121 - { 122 - rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); 123 - if (!rem_sq_bracket) { 124 - fprintf(stderr, "Not enough memory to display remaining hits\n"); 125 - return; 126 - } 127 - 128 - strcpy(rem_sq_bracket->name, "[...]"); 129 - rem_hits.sym = rem_sq_bracket; 130 - } 131 - 132 - static size_t 133 - __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 134 - u64 total_samples, int depth, int depth_mask, 135 - int left_margin) 136 - { 137 - struct rb_node *node, *next; 138 - struct callchain_node *child; 139 - struct callchain_list *chain; 140 - int new_depth_mask = depth_mask; 141 - u64 new_total; 142 - u64 remaining; 143 - size_t ret = 0; 144 - int i; 145 - 146 - if (callchain_param.mode == CHAIN_GRAPH_REL) 147 - new_total = self->children_hit; 148 - else 149 - new_total = total_samples; 150 - 151 - remaining = new_total; 152 - 153 - node = rb_first(&self->rb_root); 154 - while (node) { 155 - u64 cumul; 156 - 157 - child = rb_entry(node, struct callchain_node, rb_node); 158 - cumul = cumul_hits(child); 159 - remaining -= cumul; 160 - 161 - /* 162 - * The depth mask manages the output of pipes that show 163 - * the depth. We don't want to keep the pipes of the current 164 - * level for the last child of this depth. 165 - * Except if we have remaining filtered hits. 
They will 166 - * supersede the last child 167 - */ 168 - next = rb_next(node); 169 - if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) 170 - new_depth_mask &= ~(1 << (depth - 1)); 171 - 172 - /* 173 - * But we keep the older depth mask for the line seperator 174 - * to keep the level link until we reach the last child 175 - */ 176 - ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, 177 - left_margin); 178 - i = 0; 179 - list_for_each_entry(chain, &child->val, list) { 180 - if (chain->ip >= PERF_CONTEXT_MAX) 181 - continue; 182 - ret += ipchain__fprintf_graph(fp, chain, depth, 183 - new_depth_mask, i++, 184 - new_total, 185 - cumul, 186 - left_margin); 187 - } 188 - ret += __callchain__fprintf_graph(fp, child, new_total, 189 - depth + 1, 190 - new_depth_mask | (1 << depth), 191 - left_margin); 192 - node = next; 193 - } 194 - 195 - if (callchain_param.mode == CHAIN_GRAPH_REL && 196 - remaining && remaining != new_total) { 197 - 198 - if (!rem_sq_bracket) 199 - return ret; 200 - 201 - new_depth_mask &= ~(1 << (depth - 1)); 202 - 203 - ret += ipchain__fprintf_graph(fp, &rem_hits, depth, 204 - new_depth_mask, 0, new_total, 205 - remaining, left_margin); 206 - } 207 - 208 - return ret; 209 - } 210 - 211 - 212 - static size_t 213 - callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 214 - u64 total_samples, int left_margin) 215 - { 216 - struct callchain_list *chain; 217 - bool printed = false; 218 - int i = 0; 219 - int ret = 0; 220 - 221 - list_for_each_entry(chain, &self->val, list) { 222 - if (chain->ip >= PERF_CONTEXT_MAX) 223 - continue; 224 - 225 - if (!i++ && sort__first_dimension == SORT_SYM) 226 - continue; 227 - 228 - if (!printed) { 229 - ret += callchain__fprintf_left_margin(fp, left_margin); 230 - ret += fprintf(fp, "|\n"); 231 - ret += callchain__fprintf_left_margin(fp, left_margin); 232 - ret += fprintf(fp, "---"); 233 - 234 - left_margin += 3; 235 - printed = true; 236 - } else 237 - ret += 
callchain__fprintf_left_margin(fp, left_margin); 238 - 239 - if (chain->sym) 240 - ret += fprintf(fp, " %s\n", chain->sym->name); 241 - else 242 - ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); 243 - } 244 - 245 - ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); 246 - 247 - return ret; 248 - } 249 - 250 - static size_t 251 - callchain__fprintf_flat(FILE *fp, struct callchain_node *self, 252 - u64 total_samples) 253 - { 254 - struct callchain_list *chain; 255 - size_t ret = 0; 256 - 257 - if (!self) 258 - return 0; 259 - 260 - ret += callchain__fprintf_flat(fp, self->parent, total_samples); 261 - 262 - 263 - list_for_each_entry(chain, &self->val, list) { 264 - if (chain->ip >= PERF_CONTEXT_MAX) 265 - continue; 266 - if (chain->sym) 267 - ret += fprintf(fp, " %s\n", chain->sym->name); 268 - else 269 - ret += fprintf(fp, " %p\n", 270 - (void *)(long)chain->ip); 271 - } 272 - 273 - return ret; 274 - } 275 - 276 - static size_t 277 - hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, 278 - u64 total_samples, int left_margin) 279 - { 280 - struct rb_node *rb_node; 281 - struct callchain_node *chain; 282 - size_t ret = 0; 283 - 284 - rb_node = rb_first(&self->sorted_chain); 285 - while (rb_node) { 286 - double percent; 287 - 288 - chain = rb_entry(rb_node, struct callchain_node, rb_node); 289 - percent = chain->hit * 100.0 / total_samples; 290 - switch (callchain_param.mode) { 291 - case CHAIN_FLAT: 292 - ret += percent_color_fprintf(fp, " %6.2f%%\n", 293 - percent); 294 - ret += callchain__fprintf_flat(fp, chain, total_samples); 295 - break; 296 - case CHAIN_GRAPH_ABS: /* Falldown */ 297 - case CHAIN_GRAPH_REL: 298 - ret += callchain__fprintf_graph(fp, chain, total_samples, 299 - left_margin); 300 - case CHAIN_NONE: 301 - default: 302 - break; 303 - } 304 - ret += fprintf(fp, "\n"); 305 - rb_node = rb_next(rb_node); 306 - } 307 - 308 - return ret; 309 - } 310 - 311 - static size_t 312 - hist_entry__fprintf(FILE *fp, 
struct hist_entry *self, u64 total_samples) 313 - { 314 - struct sort_entry *se; 315 - size_t ret; 316 - 317 - if (exclude_other && !self->parent) 318 - return 0; 319 - 320 - if (total_samples) 321 - ret = percent_color_fprintf(fp, 322 - field_sep ? "%.2f" : " %6.2f%%", 323 - (self->count * 100.0) / total_samples); 324 - else 325 - ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count); 326 - 327 - if (show_nr_samples) { 328 - if (field_sep) 329 - fprintf(fp, "%c%lld", *field_sep, self->count); 330 - else 331 - fprintf(fp, "%11lld", self->count); 332 - } 333 - 334 - list_for_each_entry(se, &hist_entry__sort_list, list) { 335 - if (se->elide) 336 - continue; 337 - 338 - fprintf(fp, "%s", field_sep ?: " "); 339 - ret += se->print(fp, self, se->width ? *se->width : 0); 340 - } 341 - 342 - ret += fprintf(fp, "\n"); 343 - 344 - if (callchain) { 345 - int left_margin = 0; 346 - 347 - if (sort__first_dimension == SORT_COMM) { 348 - se = list_first_entry(&hist_entry__sort_list, typeof(*se), 349 - list); 350 - left_margin = se->width ? 
*se->width : 0; 351 - left_margin -= thread__comm_len(self->thread); 352 - } 353 - 354 - hist_entry_callchain__fprintf(fp, self, total_samples, 355 - left_margin); 356 - } 357 - 358 - return ret; 359 - } 360 - 361 - /* 362 - * 363 - */ 364 - 365 - static void dso__calc_col_width(struct dso *self) 366 - { 367 - if (!col_width_list_str && !field_sep && 368 - (!dso_list || strlist__has_entry(dso_list, self->name))) { 369 - unsigned int slen = strlen(self->name); 370 - if (slen > dsos__col_width) 371 - dsos__col_width = slen; 372 - } 373 - 374 - self->slen_calculated = 1; 375 - } 376 - 377 - static void thread__comm_adjust(struct thread *self) 378 - { 379 - char *comm = self->comm; 380 - 381 - if (!col_width_list_str && !field_sep && 382 - (!comm_list || strlist__has_entry(comm_list, comm))) { 383 - unsigned int slen = strlen(comm); 384 - 385 - if (slen > comms__col_width) { 386 - comms__col_width = slen; 387 - threads__col_width = slen + 6; 388 - } 389 - } 390 - } 391 - 392 - static int thread__set_comm_adjust(struct thread *self, const char *comm) 393 - { 394 - int ret = thread__set_comm(self, comm); 395 - 396 - if (ret) 397 - return ret; 398 - 399 - thread__comm_adjust(self); 400 - 401 - return 0; 402 - } 403 - 404 - static int call__match(struct symbol *sym) 405 - { 406 - if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 407 - return 1; 408 - 409 - return 0; 410 - } 411 - 412 - static struct symbol **resolve_callchain(struct thread *thread, 413 - struct ip_callchain *chain, 414 - struct symbol **parent) 415 - { 416 - u8 cpumode = PERF_RECORD_MISC_USER; 417 - struct symbol **syms = NULL; 418 - unsigned int i; 419 - 420 - if (callchain) { 421 - syms = calloc(chain->nr, sizeof(*syms)); 422 - if (!syms) { 423 - fprintf(stderr, "Can't allocate memory for symbols\n"); 424 - exit(-1); 425 - } 426 - } 427 - 428 - for (i = 0; i < chain->nr; i++) { 429 - u64 ip = chain->ips[i]; 430 - struct addr_location al; 431 - 432 - if (ip >= PERF_CONTEXT_MAX) { 433 - 
switch (ip) { 434 - case PERF_CONTEXT_HV: 435 - cpumode = PERF_RECORD_MISC_HYPERVISOR; break; 436 - case PERF_CONTEXT_KERNEL: 437 - cpumode = PERF_RECORD_MISC_KERNEL; break; 438 - case PERF_CONTEXT_USER: 439 - cpumode = PERF_RECORD_MISC_USER; break; 440 - default: 441 - break; 442 - } 443 - continue; 444 - } 445 - 446 - thread__find_addr_location(thread, cpumode, MAP__FUNCTION, 447 - ip, &al, NULL); 448 - if (al.sym != NULL) { 449 - if (sort__has_parent && !*parent && 450 - call__match(al.sym)) 451 - *parent = al.sym; 452 - if (!callchain) 453 - break; 454 - syms[i] = al.sym; 455 - } 456 - } 457 - 458 - return syms; 459 - } 460 - 461 - /* 462 - * collect histogram counts 463 - */ 464 - 465 - static int hist_entry__add(struct addr_location *al, 466 - struct ip_callchain *chain, u64 count) 54 + static int perf_session__add_hist_entry(struct perf_session *self, 55 + struct addr_location *al, 56 + struct ip_callchain *chain, u64 count) 467 57 { 468 58 struct symbol **syms = NULL, *parent = NULL; 469 59 bool hit; 470 60 struct hist_entry *he; 471 61 472 - if ((sort__has_parent || callchain) && chain) 473 - syms = resolve_callchain(al->thread, chain, &parent); 474 - 475 - he = __hist_entry__add(al, parent, count, &hit); 62 + if ((sort__has_parent || symbol_conf.use_callchain) && chain) 63 + syms = perf_session__resolve_callchain(self, al->thread, 64 + chain, &parent); 65 + he = __perf_session__add_hist_entry(self, al, parent, count, &hit); 476 66 if (he == NULL) 477 67 return -ENOMEM; 478 68 479 69 if (hit) 480 70 he->count += count; 481 71 482 - if (callchain) { 72 + if (symbol_conf.use_callchain) { 483 73 if (!hit) 484 74 callchain_init(&he->callchain); 485 75 append_chain(&he->callchain, chain, syms); ··· 69 495 } 70 496 71 497 return 0; 72 - } 73 - 74 - static size_t output__fprintf(FILE *fp, u64 total_samples) 75 - { 76 - struct hist_entry *pos; 77 - struct sort_entry *se; 78 - struct rb_node *nd; 79 - size_t ret = 0; 80 - unsigned int width; 81 - char *col_width = 
col_width_list_str; 82 - int raw_printing_style; 83 - 84 - raw_printing_style = !strcmp(pretty_printing_style, "raw"); 85 - 86 - init_rem_hits(); 87 - 88 - fprintf(fp, "# Samples: %Ld\n", (u64)total_samples); 89 - fprintf(fp, "#\n"); 90 - 91 - fprintf(fp, "# Overhead"); 92 - if (show_nr_samples) { 93 - if (field_sep) 94 - fprintf(fp, "%cSamples", *field_sep); 95 - else 96 - fputs(" Samples ", fp); 97 - } 98 - list_for_each_entry(se, &hist_entry__sort_list, list) { 99 - if (se->elide) 100 - continue; 101 - if (field_sep) { 102 - fprintf(fp, "%c%s", *field_sep, se->header); 103 - continue; 104 - } 105 - width = strlen(se->header); 106 - if (se->width) { 107 - if (col_width_list_str) { 108 - if (col_width) { 109 - *se->width = atoi(col_width); 110 - col_width = strchr(col_width, ','); 111 - if (col_width) 112 - ++col_width; 113 - } 114 - } 115 - width = *se->width = max(*se->width, width); 116 - } 117 - fprintf(fp, " %*s", width, se->header); 118 - } 119 - fprintf(fp, "\n"); 120 - 121 - if (field_sep) 122 - goto print_entries; 123 - 124 - fprintf(fp, "# ........"); 125 - if (show_nr_samples) 126 - fprintf(fp, " .........."); 127 - list_for_each_entry(se, &hist_entry__sort_list, list) { 128 - unsigned int i; 129 - 130 - if (se->elide) 131 - continue; 132 - 133 - fprintf(fp, " "); 134 - if (se->width) 135 - width = *se->width; 136 - else 137 - width = strlen(se->header); 138 - for (i = 0; i < width; i++) 139 - fprintf(fp, "."); 140 - } 141 - fprintf(fp, "\n"); 142 - 143 - fprintf(fp, "#\n"); 144 - 145 - print_entries: 146 - for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { 147 - pos = rb_entry(nd, struct hist_entry, rb_node); 148 - ret += hist_entry__fprintf(fp, pos, total_samples); 149 - } 150 - 151 - if (sort_order == default_sort_order && 152 - parent_pattern == default_parent_pattern) { 153 - fprintf(fp, "#\n"); 154 - fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n"); 155 - fprintf(fp, "#\n"); 156 - } 157 - fprintf(fp, 
"\n"); 158 - 159 - free(rem_sq_bracket); 160 - 161 - if (show_threads) 162 - perf_read_values_display(fp, &show_threads_values, 163 - raw_printing_style); 164 - 165 - return ret; 166 498 } 167 499 168 500 static int validate_chain(struct ip_callchain *chain, event_t *event) ··· 84 604 return 0; 85 605 } 86 606 87 - static int process_sample_event(event_t *event) 607 + static int process_sample_event(event_t *event, struct perf_session *session) 88 608 { 89 - struct sample_data data; 90 - int cpumode; 609 + struct sample_data data = { .period = 1, }; 91 610 struct addr_location al; 92 - struct thread *thread; 93 611 94 - memset(&data, 0, sizeof(data)); 95 - data.period = 1; 96 - 97 - event__parse_sample(event, sample_type, &data); 612 + event__parse_sample(event, session->sample_type, &data); 98 613 99 614 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 100 615 event->header.misc, ··· 97 622 (void *)(long)data.ip, 98 623 (long long)data.period); 99 624 100 - if (sample_type & PERF_SAMPLE_CALLCHAIN) { 625 + if (session->sample_type & PERF_SAMPLE_CALLCHAIN) { 101 626 unsigned int i; 102 627 103 628 dump_printf("... chain: nr:%Lu\n", data.callchain->nr); ··· 115 640 } 116 641 } 117 642 118 - thread = threads__findnew(data.pid); 119 - if (thread == NULL) { 120 - pr_debug("problem processing %d event, skipping it.\n", 643 + if (event__preprocess_sample(event, session, &al, NULL) < 0) { 644 + fprintf(stderr, "problem processing %d event, skipping it.\n", 121 645 event->header.type); 122 646 return -1; 123 647 } 124 648 125 - dump_printf(" ... 
thread: %s:%d\n", thread->comm, thread->pid); 126 - 127 - if (comm_list && !strlist__has_entry(comm_list, thread->comm)) 649 + if (al.filtered) 128 650 return 0; 129 651 130 - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 131 - 132 - thread__find_addr_location(thread, cpumode, 133 - MAP__FUNCTION, data.ip, &al, NULL); 134 - /* 135 - * We have to do this here as we may have a dso with no symbol hit that 136 - * has a name longer than the ones with symbols sampled. 137 - */ 138 - if (al.map && !sort_dso.elide && !al.map->dso->slen_calculated) 139 - dso__calc_col_width(al.map->dso); 140 - 141 - if (dso_list && 142 - (!al.map || !al.map->dso || 143 - !(strlist__has_entry(dso_list, al.map->dso->short_name) || 144 - (al.map->dso->short_name != al.map->dso->long_name && 145 - strlist__has_entry(dso_list, al.map->dso->long_name))))) 146 - return 0; 147 - 148 - if (sym_list && al.sym && !strlist__has_entry(sym_list, al.sym->name)) 149 - return 0; 150 - 151 - if (hist_entry__add(&al, data.callchain, data.period)) { 652 + if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { 152 653 pr_debug("problem incrementing symbol count, skipping event\n"); 153 654 return -1; 154 655 } 155 656 156 - event__stats.total += data.period; 157 - 657 + session->events_stats.total += data.period; 158 658 return 0; 159 659 } 160 660 161 - static int process_comm_event(event_t *event) 162 - { 163 - struct thread *thread = threads__findnew(event->comm.pid); 164 - 165 - dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid); 166 - 167 - if (thread == NULL || 168 - thread__set_comm_adjust(thread, event->comm.comm)) { 169 - dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 170 - return -1; 171 - } 172 - 173 - return 0; 174 - } 175 - 176 - static int process_read_event(event_t *event) 661 + static int process_read_event(event_t *event, struct perf_session *session __used) 177 662 { 178 663 struct perf_event_attr *attr; 179 664 ··· 156 
721 return 0; 157 722 } 158 723 159 - static int sample_type_check(u64 type) 724 + static int sample_type_check(struct perf_session *session) 160 725 { 161 - sample_type = type; 162 - 163 - if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { 726 + if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) { 164 727 if (sort__has_parent) { 165 728 fprintf(stderr, "selected --sort parent, but no" 166 729 " callchain data. Did you call" 167 730 " perf record without -g?\n"); 168 731 return -1; 169 732 } 170 - if (callchain) { 733 + if (symbol_conf.use_callchain) { 171 734 fprintf(stderr, "selected -g but no callchain data." 172 735 " Did you call perf record without" 173 736 " -g?\n"); 174 737 return -1; 175 738 } 176 - } else if (callchain_param.mode != CHAIN_NONE && !callchain) { 177 - callchain = 1; 739 + } else if (callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { 740 + symbol_conf.use_callchain = true; 178 741 if (register_callchain_param(&callchain_param) < 0) { 179 742 fprintf(stderr, "Can't register callchain" 180 743 " params\n"); ··· 183 750 return 0; 184 751 } 185 752 186 - static struct perf_file_handler file_handler = { 753 + static struct perf_event_ops event_ops = { 187 754 .process_sample_event = process_sample_event, 188 755 .process_mmap_event = event__process_mmap, 189 - .process_comm_event = process_comm_event, 756 + .process_comm_event = event__process_comm, 190 757 .process_exit_event = event__process_task, 191 758 .process_fork_event = event__process_task, 192 759 .process_lost_event = event__process_lost, ··· 197 764 198 765 static int __cmd_report(void) 199 766 { 200 - struct thread *idle; 201 767 int ret; 768 + struct perf_session *session; 202 769 203 770 session = perf_session__new(input_name, O_RDONLY, force); 204 771 if (session == NULL) 205 772 return -ENOMEM; 206 773 207 - idle = register_idle_thread(); 208 - thread__comm_adjust(idle); 209 - 210 774 if (show_threads) 211 775 perf_read_values_init(&show_threads_values); 212 776 213 
- register_perf_file_handler(&file_handler); 214 - 215 - ret = perf_session__process_events(session, full_paths, 216 - &event__cwdlen, &event__cwd); 777 + ret = perf_session__process_events(session, &event_ops); 217 778 if (ret) 218 779 goto out_delete; 219 780 ··· 217 790 } 218 791 219 792 if (verbose > 3) 220 - threads__fprintf(stdout); 793 + perf_session__fprintf(session, stdout); 221 794 222 795 if (verbose > 2) 223 796 dsos__fprintf(stdout); 224 797 225 - collapse__resort(); 226 - output__resort(event__stats.total); 227 - output__fprintf(stdout, event__stats.total); 798 + perf_session__collapse_resort(session); 799 + perf_session__output_resort(session, session->events_stats.total); 800 + fprintf(stdout, "# Samples: %ld\n#\n", session->events_stats.total); 801 + perf_session__fprintf_hists(session, NULL, false, stdout); 802 + if (sort_order == default_sort_order && 803 + parent_pattern == default_parent_pattern) 804 + fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); 228 805 229 - if (show_threads) 806 + if (show_threads) { 807 + bool raw_printing_style = !strcmp(pretty_printing_style, "raw"); 808 + perf_read_values_display(stdout, &show_threads_values, 809 + raw_printing_style); 230 810 perf_read_values_destroy(&show_threads_values); 811 + } 231 812 out_delete: 232 813 perf_session__delete(session); 233 814 return ret; ··· 248 813 char *tok; 249 814 char *endptr; 250 815 251 - callchain = 1; 816 + symbol_conf.use_callchain = true; 252 817 253 818 if (!arg) 254 819 return 0; ··· 269 834 270 835 else if (!strncmp(tok, "none", strlen(arg))) { 271 836 callchain_param.mode = CHAIN_NONE; 272 - callchain = 0; 837 + symbol_conf.use_callchain = true; 273 838 274 839 return 0; 275 840 } ··· 312 877 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 313 878 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 314 879 "load module symbols - WARNING: use only with -k and LIVE kernel"), 315 - OPT_BOOLEAN('n', 
"show-nr-samples", &show_nr_samples, 880 + OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 316 881 "Show a column with the number of samples"), 317 882 OPT_BOOLEAN('T', "threads", &show_threads, 318 883 "Show per-thread event counters"), ··· 320 885 "pretty printing style key: normal raw"), 321 886 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 322 887 "sort by key(s): pid, comm, dso, symbol, parent"), 323 - OPT_BOOLEAN('P', "full-paths", &full_paths, 888 + OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, 324 889 "Don't shorten the pathnames taking into account the cwd"), 325 890 OPT_STRING('p', "parent", &parent_pattern, "regex", 326 891 "regex filter to identify parent, see: '--sort parent'"), 327 - OPT_BOOLEAN('x', "exclude-other", &exclude_other, 892 + OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, 328 893 "Only display entries with parent-match"), 329 894 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent", 330 895 "Display callchains using output_type and min percent threshold. 
" 331 896 "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt), 332 - OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]", 897 + OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 333 898 "only consider symbols in these dsos"), 334 - OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]", 899 + OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", 335 900 "only consider symbols in these comms"), 336 - OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]", 901 + OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 337 902 "only consider these symbols"), 338 - OPT_STRING('w', "column-widths", &col_width_list_str, 903 + OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, 339 904 "width[,width...]", 340 905 "don't try to adjust column width, use these fixed values"), 341 - OPT_STRING('t', "field-separator", &field_sep, "separator", 906 + OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", 342 907 "separator for columns, no spaces will be added between " 343 908 "columns '.' 
is reserved."), 344 909 OPT_END() 345 910 }; 346 911 347 - static void setup_sorting(void) 348 - { 349 - char *tmp, *tok, *str = strdup(sort_order); 350 - 351 - for (tok = strtok_r(str, ", ", &tmp); 352 - tok; tok = strtok_r(NULL, ", ", &tmp)) { 353 - if (sort_dimension__add(tok) < 0) { 354 - error("Unknown --sort key: `%s'", tok); 355 - usage_with_options(report_usage, options); 356 - } 357 - } 358 - 359 - free(str); 360 - } 361 - 362 - static void setup_list(struct strlist **list, const char *list_str, 363 - struct sort_entry *se, const char *list_name, 364 - FILE *fp) 365 - { 366 - if (list_str) { 367 - *list = strlist__new(true, list_str); 368 - if (!*list) { 369 - fprintf(stderr, "problems parsing %s list\n", 370 - list_name); 371 - exit(129); 372 - } 373 - if (strlist__nr_entries(*list) == 1) { 374 - fprintf(fp, "# %s: %s\n", list_name, 375 - strlist__entry(*list, 0)->s); 376 - se->elide = true; 377 - } 378 - } 379 - } 380 - 381 912 int cmd_report(int argc, const char **argv, const char *prefix __used) 382 913 { 383 - if (symbol__init(&symbol_conf) < 0) 384 - return -1; 385 - 386 914 argc = parse_options(argc, argv, options, report_usage, 0); 387 915 388 - setup_sorting(); 916 + setup_pager(); 917 + 918 + if (symbol__init() < 0) 919 + return -1; 920 + 921 + setup_sorting(report_usage, options); 389 922 390 923 if (parent_pattern != default_parent_pattern) { 391 924 sort_dimension__add("parent"); 392 925 sort_parent.elide = 1; 393 926 } else 394 - exclude_other = 0; 927 + symbol_conf.exclude_other = false; 395 928 396 929 /* 397 930 * Any (unrecognized) arguments left? ··· 367 964 if (argc) 368 965 usage_with_options(report_usage, options); 369 966 370 - setup_pager(); 371 - 372 - setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout); 373 - setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout); 374 - setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout); 375 - 376 - if (field_sep && *field_sep == '.') { 377 - fputs("'.' 
is the only non valid --field-separator argument\n", 378 - stderr); 379 - exit(129); 380 - } 967 + sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); 968 + sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); 969 + sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); 381 970 382 971 return __cmd_report(); 383 972 }
+50 -43
tools/perf/builtin-sched.c
··· 12 12 #include "util/trace-event.h" 13 13 14 14 #include "util/debug.h" 15 - #include "util/data_map.h" 16 15 17 16 #include <sys/prctl.h> 18 17 ··· 20 21 #include <math.h> 21 22 22 23 static char const *input_name = "perf.data"; 23 - 24 - static u64 sample_type; 25 24 26 25 static char default_sort_order[] = "avg, max, switch, runtime"; 27 26 static char *sort_order = default_sort_order; ··· 728 731 729 732 struct trace_sched_handler { 730 733 void (*switch_event)(struct trace_switch_event *, 734 + struct perf_session *, 731 735 struct event *, 732 736 int cpu, 733 737 u64 timestamp, 734 738 struct thread *thread); 735 739 736 740 void (*runtime_event)(struct trace_runtime_event *, 741 + struct perf_session *, 737 742 struct event *, 738 743 int cpu, 739 744 u64 timestamp, 740 745 struct thread *thread); 741 746 742 747 void (*wakeup_event)(struct trace_wakeup_event *, 748 + struct perf_session *, 743 749 struct event *, 744 750 int cpu, 745 751 u64 timestamp, ··· 755 755 struct thread *thread); 756 756 757 757 void (*migrate_task_event)(struct trace_migrate_task_event *, 758 + struct perf_session *session, 758 759 struct event *, 759 760 int cpu, 760 761 u64 timestamp, ··· 765 764 766 765 static void 767 766 replay_wakeup_event(struct trace_wakeup_event *wakeup_event, 767 + struct perf_session *session __used, 768 768 struct event *event, 769 769 int cpu __used, 770 770 u64 timestamp __used, ··· 792 790 793 791 static void 794 792 replay_switch_event(struct trace_switch_event *switch_event, 793 + struct perf_session *session __used, 795 794 struct event *event, 796 795 int cpu, 797 796 u64 timestamp, ··· 1026 1023 1027 1024 static void 1028 1025 latency_switch_event(struct trace_switch_event *switch_event, 1026 + struct perf_session *session, 1029 1027 struct event *event __used, 1030 1028 int cpu, 1031 1029 u64 timestamp, ··· 1050 1046 die("hm, delta: %Ld < 0 ?\n", delta); 1051 1047 1052 1048 1053 - sched_out = threads__findnew(switch_event->prev_pid); 1054 
- sched_in = threads__findnew(switch_event->next_pid); 1049 + sched_out = perf_session__findnew(session, switch_event->prev_pid); 1050 + sched_in = perf_session__findnew(session, switch_event->next_pid); 1055 1051 1056 1052 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); 1057 1053 if (!out_events) { ··· 1079 1075 1080 1076 static void 1081 1077 latency_runtime_event(struct trace_runtime_event *runtime_event, 1078 + struct perf_session *session, 1082 1079 struct event *event __used, 1083 1080 int cpu, 1084 1081 u64 timestamp, 1085 1082 struct thread *this_thread __used) 1086 1083 { 1087 - struct thread *thread = threads__findnew(runtime_event->pid); 1084 + struct thread *thread = perf_session__findnew(session, runtime_event->pid); 1088 1085 struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); 1089 1086 1090 1087 BUG_ON(cpu >= MAX_CPUS || cpu < 0); ··· 1102 1097 1103 1098 static void 1104 1099 latency_wakeup_event(struct trace_wakeup_event *wakeup_event, 1100 + struct perf_session *session, 1105 1101 struct event *__event __used, 1106 1102 int cpu __used, 1107 1103 u64 timestamp, ··· 1116 1110 if (!wakeup_event->success) 1117 1111 return; 1118 1112 1119 - wakee = threads__findnew(wakeup_event->pid); 1113 + wakee = perf_session__findnew(session, wakeup_event->pid); 1120 1114 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); 1121 1115 if (!atoms) { 1122 1116 thread_atoms_insert(wakee); ··· 1150 1144 1151 1145 static void 1152 1146 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, 1147 + struct perf_session *session, 1153 1148 struct event *__event __used, 1154 1149 int cpu __used, 1155 1150 u64 timestamp, ··· 1166 1159 if (profile_cpu == -1) 1167 1160 return; 1168 1161 1169 - migrant = threads__findnew(migrate_task_event->pid); 1162 + migrant = perf_session__findnew(session, migrate_task_event->pid); 1170 1163 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); 1171 1164 if 
(!atoms) { 1172 1165 thread_atoms_insert(migrant); ··· 1361 1354 static struct trace_sched_handler *trace_handler; 1362 1355 1363 1356 static void 1364 - process_sched_wakeup_event(void *data, 1357 + process_sched_wakeup_event(void *data, struct perf_session *session, 1365 1358 struct event *event, 1366 1359 int cpu __used, 1367 1360 u64 timestamp __used, ··· 1378 1371 FILL_FIELD(wakeup_event, cpu, event, data); 1379 1372 1380 1373 if (trace_handler->wakeup_event) 1381 - trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread); 1374 + trace_handler->wakeup_event(&wakeup_event, session, event, 1375 + cpu, timestamp, thread); 1382 1376 } 1383 1377 1384 1378 /* ··· 1397 1389 1398 1390 static void 1399 1391 map_switch_event(struct trace_switch_event *switch_event, 1392 + struct perf_session *session, 1400 1393 struct event *event __used, 1401 1394 int this_cpu, 1402 1395 u64 timestamp, ··· 1425 1416 die("hm, delta: %Ld < 0 ?\n", delta); 1426 1417 1427 1418 1428 - sched_out = threads__findnew(switch_event->prev_pid); 1429 - sched_in = threads__findnew(switch_event->next_pid); 1419 + sched_out = perf_session__findnew(session, switch_event->prev_pid); 1420 + sched_in = perf_session__findnew(session, switch_event->next_pid); 1430 1421 1431 1422 curr_thread[this_cpu] = sched_in; 1432 1423 ··· 1476 1467 1477 1468 1478 1469 static void 1479 - process_sched_switch_event(void *data, 1470 + process_sched_switch_event(void *data, struct perf_session *session, 1480 1471 struct event *event, 1481 1472 int this_cpu, 1482 1473 u64 timestamp __used, ··· 1503 1494 nr_context_switch_bugs++; 1504 1495 } 1505 1496 if (trace_handler->switch_event) 1506 - trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread); 1497 + trace_handler->switch_event(&switch_event, session, event, 1498 + this_cpu, timestamp, thread); 1507 1499 1508 1500 curr_pid[this_cpu] = switch_event.next_pid; 1509 1501 } 1510 1502 1511 1503 static void 1512 - 
process_sched_runtime_event(void *data, 1504 + process_sched_runtime_event(void *data, struct perf_session *session, 1513 1505 struct event *event, 1514 1506 int cpu __used, 1515 1507 u64 timestamp __used, ··· 1524 1514 FILL_FIELD(runtime_event, vruntime, event, data); 1525 1515 1526 1516 if (trace_handler->runtime_event) 1527 - trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread); 1517 + trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread); 1528 1518 } 1529 1519 1530 1520 static void ··· 1544 1534 FILL_FIELD(fork_event, child_pid, event, data); 1545 1535 1546 1536 if (trace_handler->fork_event) 1547 - trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread); 1537 + trace_handler->fork_event(&fork_event, event, 1538 + cpu, timestamp, thread); 1548 1539 } 1549 1540 1550 1541 static void ··· 1559 1548 } 1560 1549 1561 1550 static void 1562 - process_sched_migrate_task_event(void *data, 1551 + process_sched_migrate_task_event(void *data, struct perf_session *session, 1563 1552 struct event *event, 1564 1553 int cpu __used, 1565 1554 u64 timestamp __used, ··· 1575 1564 FILL_FIELD(migrate_task_event, cpu, event, data); 1576 1565 1577 1566 if (trace_handler->migrate_task_event) 1578 - trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread); 1567 + trace_handler->migrate_task_event(&migrate_task_event, session, 1568 + event, cpu, timestamp, thread); 1579 1569 } 1580 1570 1581 1571 static void 1582 - process_raw_event(event_t *raw_event __used, void *data, 1583 - int cpu, u64 timestamp, struct thread *thread) 1572 + process_raw_event(event_t *raw_event __used, struct perf_session *session, 1573 + void *data, int cpu, u64 timestamp, struct thread *thread) 1584 1574 { 1585 1575 struct event *event; 1586 1576 int type; ··· 1591 1579 event = trace_find_event(type); 1592 1580 1593 1581 if (!strcmp(event->name, "sched_switch")) 1594 - process_sched_switch_event(data, event, cpu, 
timestamp, thread); 1582 + process_sched_switch_event(data, session, event, cpu, timestamp, thread); 1595 1583 if (!strcmp(event->name, "sched_stat_runtime")) 1596 - process_sched_runtime_event(data, event, cpu, timestamp, thread); 1584 + process_sched_runtime_event(data, session, event, cpu, timestamp, thread); 1597 1585 if (!strcmp(event->name, "sched_wakeup")) 1598 - process_sched_wakeup_event(data, event, cpu, timestamp, thread); 1586 + process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); 1599 1587 if (!strcmp(event->name, "sched_wakeup_new")) 1600 - process_sched_wakeup_event(data, event, cpu, timestamp, thread); 1588 + process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); 1601 1589 if (!strcmp(event->name, "sched_process_fork")) 1602 1590 process_sched_fork_event(data, event, cpu, timestamp, thread); 1603 1591 if (!strcmp(event->name, "sched_process_exit")) 1604 1592 process_sched_exit_event(event, cpu, timestamp, thread); 1605 1593 if (!strcmp(event->name, "sched_migrate_task")) 1606 - process_sched_migrate_task_event(data, event, cpu, timestamp, thread); 1594 + process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread); 1607 1595 } 1608 1596 1609 - static int process_sample_event(event_t *event) 1597 + static int process_sample_event(event_t *event, struct perf_session *session) 1610 1598 { 1611 1599 struct sample_data data; 1612 1600 struct thread *thread; 1613 1601 1614 - if (!(sample_type & PERF_SAMPLE_RAW)) 1602 + if (!(session->sample_type & PERF_SAMPLE_RAW)) 1615 1603 return 0; 1616 1604 1617 1605 memset(&data, 0, sizeof(data)); ··· 1619 1607 data.cpu = -1; 1620 1608 data.period = -1; 1621 1609 1622 - event__parse_sample(event, sample_type, &data); 1610 + event__parse_sample(event, session->sample_type, &data); 1623 1611 1624 1612 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 1625 1613 event->header.misc, ··· 1627 1615 (void *)(long)data.ip, 1628 1616 (long long)data.period); 1629 1617 
1630 - thread = threads__findnew(data.pid); 1618 + thread = perf_session__findnew(session, data.pid); 1631 1619 if (thread == NULL) { 1632 1620 pr_debug("problem processing %d event, skipping it.\n", 1633 1621 event->header.type); ··· 1639 1627 if (profile_cpu != -1 && profile_cpu != (int)data.cpu) 1640 1628 return 0; 1641 1629 1642 - process_raw_event(event, data.raw_data, data.cpu, data.time, thread); 1630 + process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread); 1643 1631 1644 1632 return 0; 1645 1633 } 1646 1634 1647 - static int process_lost_event(event_t *event __used) 1635 + static int process_lost_event(event_t *event __used, 1636 + struct perf_session *session __used) 1648 1637 { 1649 1638 nr_lost_chunks++; 1650 1639 nr_lost_events += event->lost.lost; ··· 1653 1640 return 0; 1654 1641 } 1655 1642 1656 - static int sample_type_check(u64 type) 1643 + static int sample_type_check(struct perf_session *session __used) 1657 1644 { 1658 - sample_type = type; 1659 - 1660 - if (!(sample_type & PERF_SAMPLE_RAW)) { 1645 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 1661 1646 fprintf(stderr, 1662 1647 "No trace sample to read. 
Did you call perf record " 1663 1648 "without -R?"); ··· 1665 1654 return 0; 1666 1655 } 1667 1656 1668 - static struct perf_file_handler file_handler = { 1657 + static struct perf_event_ops event_ops = { 1669 1658 .process_sample_event = process_sample_event, 1670 1659 .process_comm_event = event__process_comm, 1671 1660 .process_lost_event = process_lost_event, ··· 1676 1665 { 1677 1666 int err; 1678 1667 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 1679 - 1680 1668 if (session == NULL) 1681 1669 return -ENOMEM; 1682 1670 1683 - register_idle_thread(); 1684 - register_perf_file_handler(&file_handler); 1685 - 1686 - err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 1671 + err = perf_session__process_events(session, &event_ops); 1687 1672 perf_session__delete(session); 1688 1673 return err; 1689 1674 } ··· 1911 1904 if (!strcmp(argv[0], "trace")) 1912 1905 return cmd_trace(argc, argv, prefix); 1913 1906 1914 - symbol__init(0); 1907 + symbol__init(); 1915 1908 if (!strncmp(argv[0], "rec", 3)) { 1916 1909 return __cmd_record(argc, argv); 1917 1910 } else if (!strncmp(argv[0], "lat", 3)) {
+24 -35
tools/perf/builtin-timechart.c
··· 30 30 #include "util/parse-options.h" 31 31 #include "util/parse-events.h" 32 32 #include "util/event.h" 33 - #include "util/data_map.h" 33 + #include "util/session.h" 34 34 #include "util/svghelper.h" 35 35 36 36 static char const *input_name = "perf.data"; 37 37 static char const *output_name = "output.svg"; 38 - 39 - 40 - static u64 sample_type; 41 38 42 39 static unsigned int numcpus; 43 40 static u64 min_freq; /* Lowest CPU frequency seen */ ··· 278 281 static u64 cpus_pstate_start_times[MAX_CPUS]; 279 282 static u64 cpus_pstate_state[MAX_CPUS]; 280 283 281 - static int 282 - process_comm_event(event_t *event) 284 + static int process_comm_event(event_t *event, struct perf_session *session __used) 283 285 { 284 286 pid_set_comm(event->comm.pid, event->comm.comm); 285 287 return 0; 286 288 } 287 - static int 288 - process_fork_event(event_t *event) 289 + 290 + static int process_fork_event(event_t *event, struct perf_session *session __used) 289 291 { 290 292 pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); 291 293 return 0; 292 294 } 293 295 294 - static int 295 - process_exit_event(event_t *event) 296 + static int process_exit_event(event_t *event, struct perf_session *session __used) 296 297 { 297 298 pid_exit(event->fork.pid, event->fork.time); 298 299 return 0; ··· 475 480 } 476 481 477 482 478 - static int 479 - process_sample_event(event_t *event) 483 + static int process_sample_event(event_t *event, struct perf_session *session) 480 484 { 481 485 struct sample_data data; 482 486 struct trace_entry *te; 483 487 484 488 memset(&data, 0, sizeof(data)); 485 489 486 - event__parse_sample(event, sample_type, &data); 490 + event__parse_sample(event, session->sample_type, &data); 487 491 488 - if (sample_type & PERF_SAMPLE_TIME) { 492 + if (session->sample_type & PERF_SAMPLE_TIME) { 489 493 if (!first_time || first_time > data.time) 490 494 first_time = data.time; 491 495 if (last_time < data.time) ··· 492 498 } 493 499 494 500 te = (void 
*)data.raw_data; 495 - if (sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) { 501 + if (session->sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) { 496 502 char *event_str; 497 503 struct power_entry *pe; 498 504 ··· 569 575 } 570 576 } 571 577 572 - static u64 sample_time(event_t *event) 578 + static u64 sample_time(event_t *event, const struct perf_session *session) 573 579 { 574 580 int cursor; 575 581 576 582 cursor = 0; 577 - if (sample_type & PERF_SAMPLE_IP) 583 + if (session->sample_type & PERF_SAMPLE_IP) 578 584 cursor++; 579 - if (sample_type & PERF_SAMPLE_TID) 585 + if (session->sample_type & PERF_SAMPLE_TID) 580 586 cursor++; 581 - if (sample_type & PERF_SAMPLE_TIME) 587 + if (session->sample_type & PERF_SAMPLE_TIME) 582 588 return event->sample.array[cursor]; 583 589 return 0; 584 590 } ··· 588 594 * We first queue all events, sorted backwards by insertion. 589 595 * The order will get flipped later. 590 596 */ 591 - static int 592 - queue_sample_event(event_t *event) 597 + static int queue_sample_event(event_t *event, struct perf_session *session) 593 598 { 594 599 struct sample_wrapper *copy, *prev; 595 600 int size; ··· 602 609 memset(copy, 0, size); 603 610 604 611 copy->next = NULL; 605 - copy->timestamp = sample_time(event); 612 + copy->timestamp = sample_time(event, session); 606 613 607 614 memcpy(&copy->data, event, event->sample.header.size); 608 615 ··· 1014 1021 svg_close(); 1015 1022 } 1016 1023 1017 - static void process_samples(void) 1024 + static void process_samples(struct perf_session *session) 1018 1025 { 1019 1026 struct sample_wrapper *cursor; 1020 1027 event_t *event; ··· 1025 1032 while (cursor) { 1026 1033 event = (void *)&cursor->data; 1027 1034 cursor = cursor->next; 1028 - process_sample_event(event); 1035 + process_sample_event(event, session); 1029 1036 } 1030 1037 } 1031 1038 1032 - static int sample_type_check(u64 type) 1039 + static int sample_type_check(struct perf_session *session) 1033 1040 { 1034 - sample_type = 
type; 1035 - 1036 - if (!(sample_type & PERF_SAMPLE_RAW)) { 1041 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 1037 1042 fprintf(stderr, "No trace samples found in the file.\n" 1038 1043 "Have you used 'perf timechart record' to record it?\n"); 1039 1044 return -1; ··· 1040 1049 return 0; 1041 1050 } 1042 1051 1043 - static struct perf_file_handler file_handler = { 1052 + static struct perf_event_ops event_ops = { 1044 1053 .process_comm_event = process_comm_event, 1045 1054 .process_fork_event = process_fork_event, 1046 1055 .process_exit_event = process_exit_event, ··· 1056 1065 if (session == NULL) 1057 1066 return -ENOMEM; 1058 1067 1059 - register_perf_file_handler(&file_handler); 1060 - 1061 - ret = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 1068 + ret = perf_session__process_events(session, &event_ops); 1062 1069 if (ret) 1063 1070 goto out_delete; 1064 1071 1065 - process_samples(); 1072 + process_samples(session); 1066 1073 1067 1074 end_sample_processing(); 1068 1075 ··· 1137 1148 1138 1149 int cmd_timechart(int argc, const char **argv, const char *prefix __used) 1139 1150 { 1140 - symbol__init(0); 1141 - 1142 1151 argc = parse_options(argc, argv, options, timechart_usage, 1143 1152 PARSE_OPT_STOP_AT_NON_OPTION); 1153 + 1154 + symbol__init(); 1144 1155 1145 1156 if (argc && !strncmp(argv[0], "rec", 3)) 1146 1157 return __cmd_record(argc, argv);
+27 -18
tools/perf/builtin-top.c
··· 20 20 21 21 #include "perf.h" 22 22 23 - #include "util/symbol.h" 24 23 #include "util/color.h" 24 + #include "util/session.h" 25 + #include "util/symbol.h" 25 26 #include "util/thread.h" 26 27 #include "util/util.h" 27 28 #include <linux/rbtree.h> ··· 80 79 static bool hide_kernel_symbols = false; 81 80 static bool hide_user_symbols = false; 82 81 static struct winsize winsize; 83 - struct symbol_conf symbol_conf; 84 82 85 83 /* 86 84 * Source ··· 926 926 return 0; 927 927 } 928 928 929 - static void event__process_sample(const event_t *self, int counter) 929 + static void event__process_sample(const event_t *self, 930 + struct perf_session *session, int counter) 930 931 { 931 932 u64 ip = self->ip.ip; 932 933 struct sym_entry *syme; ··· 947 946 return; 948 947 } 949 948 950 - if (event__preprocess_sample(self, &al, symbol_filter) < 0 || 951 - al.sym == NULL) 949 + if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 || 950 + al.sym == NULL || al.filtered) 952 951 return; 953 952 954 953 syme = symbol__priv(al.sym); ··· 966 965 } 967 966 } 968 967 969 - static int event__process(event_t *event) 968 + static int event__process(event_t *event, struct perf_session *session) 970 969 { 971 970 switch (event->header.type) { 972 971 case PERF_RECORD_COMM: 973 - event__process_comm(event); 972 + event__process_comm(event, session); 974 973 break; 975 974 case PERF_RECORD_MMAP: 976 - event__process_mmap(event); 975 + event__process_mmap(event, session); 977 976 break; 978 977 default: 979 978 break; ··· 1000 999 return head; 1001 1000 } 1002 1001 1003 - static void mmap_read_counter(struct mmap_data *md) 1002 + static void perf_session__mmap_read_counter(struct perf_session *self, 1003 + struct mmap_data *md) 1004 1004 { 1005 1005 unsigned int head = mmap_read_head(md); 1006 1006 unsigned int old = md->prev; ··· 1054 1052 } 1055 1053 1056 1054 if (event->header.type == PERF_RECORD_SAMPLE) 1057 - event__process_sample(event, md->counter); 1055 + 
event__process_sample(event, self, md->counter); 1058 1056 else 1059 - event__process(event); 1057 + event__process(event, self); 1060 1058 old += size; 1061 1059 } 1062 1060 ··· 1066 1064 static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; 1067 1065 static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; 1068 1066 1069 - static void mmap_read(void) 1067 + static void perf_session__mmap_read(struct perf_session *self) 1070 1068 { 1071 1069 int i, counter; 1072 1070 1073 1071 for (i = 0; i < nr_cpus; i++) { 1074 1072 for (counter = 0; counter < nr_counters; counter++) 1075 - mmap_read_counter(&mmap_array[i][counter]); 1073 + perf_session__mmap_read_counter(self, &mmap_array[i][counter]); 1076 1074 } 1077 1075 } 1078 1076 ··· 1157 1155 pthread_t thread; 1158 1156 int i, counter; 1159 1157 int ret; 1158 + /* 1159 + * FIXME: perf_session__new should allow passing a O_MMAP, so that all this 1160 + * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. 1161 + */ 1162 + struct perf_session *session = perf_session__new(NULL, O_WRONLY, false); 1163 + if (session == NULL) 1164 + return -ENOMEM; 1160 1165 1161 1166 if (target_pid != -1) 1162 - event__synthesize_thread(target_pid, event__process); 1167 + event__synthesize_thread(target_pid, event__process, session); 1163 1168 else 1164 - event__synthesize_threads(event__process); 1169 + event__synthesize_threads(event__process, session); 1165 1170 1166 1171 for (i = 0; i < nr_cpus; i++) { 1167 1172 group_fd = -1; ··· 1179 1170 /* Wait for a minimal set of events before starting the snapshot */ 1180 1171 poll(event_array, nr_poll, 100); 1181 1172 1182 - mmap_read(); 1173 + perf_session__mmap_read(session); 1183 1174 1184 1175 if (pthread_create(&thread, NULL, display_thread, NULL)) { 1185 1176 printf("Could not create display thread.\n"); ··· 1199 1190 while (1) { 1200 1191 int hits = samples; 1201 1192 1202 - mmap_read(); 1193 + perf_session__mmap_read(session); 1203 1194 1204 1195 if (hits == 
samples) 1205 1196 ret = poll(event_array, nr_poll, 100); ··· 1282 1273 (nr_counters + 1) * sizeof(unsigned long)); 1283 1274 if (symbol_conf.vmlinux_name == NULL) 1284 1275 symbol_conf.try_vmlinux_path = true; 1285 - if (symbol__init(&symbol_conf) < 0) 1276 + if (symbol__init() < 0) 1286 1277 return -1; 1287 1278 1288 1279 if (delay_secs < 1)
+306 -43
tools/perf/builtin-trace.c
··· 12 12 static char const *script_name; 13 13 static char const *generate_script_lang; 14 14 15 - static int default_start_script(const char *script __attribute((unused))) 15 + static int default_start_script(const char *script __unused, 16 + int argc __unused, 17 + const char **argv __unused) 16 18 { 17 19 return 0; 18 20 } ··· 24 22 return 0; 25 23 } 26 24 27 - static int default_generate_script(const char *outfile __attribute ((unused))) 25 + static int default_generate_script(const char *outfile __unused) 28 26 { 29 27 return 0; 30 28 } ··· 59 57 #include "util/debug.h" 60 58 61 59 #include "util/trace-event.h" 62 - #include "util/data_map.h" 63 60 #include "util/exec_cmd.h" 64 61 65 62 static char const *input_name = "perf.data"; 66 63 67 - static struct perf_session *session; 68 - static u64 sample_type; 69 - 70 - static int process_sample_event(event_t *event) 64 + static int process_sample_event(event_t *event, struct perf_session *session) 71 65 { 72 66 struct sample_data data; 73 67 struct thread *thread; ··· 73 75 data.cpu = -1; 74 76 data.period = 1; 75 77 76 - event__parse_sample(event, sample_type, &data); 78 + event__parse_sample(event, session->sample_type, &data); 77 79 78 80 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 79 81 event->header.misc, ··· 81 83 (void *)(long)data.ip, 82 84 (long long)data.period); 83 85 84 - thread = threads__findnew(event->ip.pid); 86 + thread = perf_session__findnew(session, event->ip.pid); 85 87 if (thread == NULL) { 86 88 pr_debug("problem processing %d event, skipping it.\n", 87 89 event->header.type); 88 90 return -1; 89 91 } 90 92 91 - if (sample_type & PERF_SAMPLE_RAW) { 93 + if (session->sample_type & PERF_SAMPLE_RAW) { 92 94 /* 93 95 * FIXME: better resolve from pid from the struct trace_entry 94 96 * field, although it should be the same than this perf ··· 98 100 data.raw_size, 99 101 data.time, thread->comm); 100 102 } 101 - event__stats.total += data.period; 102 103 104 + session->events_stats.total += 
data.period; 103 105 return 0; 104 106 } 105 107 106 - static int sample_type_check(u64 type) 108 + static int sample_type_check(struct perf_session *session) 107 109 { 108 - sample_type = type; 109 - 110 - if (!(sample_type & PERF_SAMPLE_RAW)) { 110 + if (!(session->sample_type & PERF_SAMPLE_RAW)) { 111 111 fprintf(stderr, 112 112 "No trace sample to read. Did you call perf record " 113 113 "without -R?"); ··· 115 119 return 0; 116 120 } 117 121 118 - static struct perf_file_handler file_handler = { 122 + static struct perf_event_ops event_ops = { 119 123 .process_sample_event = process_sample_event, 120 124 .process_comm_event = event__process_comm, 121 125 .sample_type_check = sample_type_check, 122 126 }; 123 127 124 - static int __cmd_trace(void) 128 + static int __cmd_trace(struct perf_session *session) 125 129 { 126 - int err; 127 - 128 - session = perf_session__new(input_name, O_RDONLY, 0); 129 - if (session == NULL) 130 - return -ENOMEM; 131 - 132 - register_idle_thread(); 133 - register_perf_file_handler(&file_handler); 134 - 135 - err = perf_session__process_events(session, 0, &event__cwdlen, &event__cwd); 136 - perf_session__delete(session); 137 - return err; 130 + return perf_session__process_events(session, &event_ops); 138 131 } 139 132 140 133 struct script_spec { ··· 274 289 return 0; 275 290 } 276 291 292 + #define for_each_lang(scripts_dir, lang_dirent, lang_next) \ 293 + while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \ 294 + lang_next) \ 295 + if (lang_dirent.d_type == DT_DIR && \ 296 + (strcmp(lang_dirent.d_name, ".")) && \ 297 + (strcmp(lang_dirent.d_name, ".."))) 298 + 299 + #define for_each_script(lang_dir, script_dirent, script_next) \ 300 + while (!readdir_r(lang_dir, &script_dirent, &script_next) && \ 301 + script_next) \ 302 + if (script_dirent.d_type != DT_DIR) 303 + 304 + 305 + #define RECORD_SUFFIX "-record" 306 + #define REPORT_SUFFIX "-report" 307 + 308 + struct script_desc { 309 + struct list_head node; 310 + char 
*name; 311 + char *half_liner; 312 + char *args; 313 + }; 314 + 315 + LIST_HEAD(script_descs); 316 + 317 + static struct script_desc *script_desc__new(const char *name) 318 + { 319 + struct script_desc *s = zalloc(sizeof(*s)); 320 + 321 + if (s != NULL) 322 + s->name = strdup(name); 323 + 324 + return s; 325 + } 326 + 327 + static void script_desc__delete(struct script_desc *s) 328 + { 329 + free(s->name); 330 + free(s); 331 + } 332 + 333 + static void script_desc__add(struct script_desc *s) 334 + { 335 + list_add_tail(&s->node, &script_descs); 336 + } 337 + 338 + static struct script_desc *script_desc__find(const char *name) 339 + { 340 + struct script_desc *s; 341 + 342 + list_for_each_entry(s, &script_descs, node) 343 + if (strcasecmp(s->name, name) == 0) 344 + return s; 345 + return NULL; 346 + } 347 + 348 + static struct script_desc *script_desc__findnew(const char *name) 349 + { 350 + struct script_desc *s = script_desc__find(name); 351 + 352 + if (s) 353 + return s; 354 + 355 + s = script_desc__new(name); 356 + if (!s) 357 + goto out_delete_desc; 358 + 359 + script_desc__add(s); 360 + 361 + return s; 362 + 363 + out_delete_desc: 364 + script_desc__delete(s); 365 + 366 + return NULL; 367 + } 368 + 369 + static char *ends_with(char *str, const char *suffix) 370 + { 371 + size_t suffix_len = strlen(suffix); 372 + char *p = str; 373 + 374 + if (strlen(str) > suffix_len) { 375 + p = str + strlen(str) - suffix_len; 376 + if (!strncmp(p, suffix, suffix_len)) 377 + return p; 378 + } 379 + 380 + return NULL; 381 + } 382 + 383 + static char *ltrim(char *str) 384 + { 385 + int len = strlen(str); 386 + 387 + while (len && isspace(*str)) { 388 + len--; 389 + str++; 390 + } 391 + 392 + return str; 393 + } 394 + 395 + static int read_script_info(struct script_desc *desc, const char *filename) 396 + { 397 + char line[BUFSIZ], *p; 398 + FILE *fp; 399 + 400 + fp = fopen(filename, "r"); 401 + if (!fp) 402 + return -1; 403 + 404 + while (fgets(line, sizeof(line), fp)) { 405 + p 
= ltrim(line); 406 + if (strlen(p) == 0) 407 + continue; 408 + if (*p != '#') 409 + continue; 410 + p++; 411 + if (strlen(p) && *p == '!') 412 + continue; 413 + 414 + p = ltrim(p); 415 + if (strlen(p) && p[strlen(p) - 1] == '\n') 416 + p[strlen(p) - 1] = '\0'; 417 + 418 + if (!strncmp(p, "description:", strlen("description:"))) { 419 + p += strlen("description:"); 420 + desc->half_liner = strdup(ltrim(p)); 421 + continue; 422 + } 423 + 424 + if (!strncmp(p, "args:", strlen("args:"))) { 425 + p += strlen("args:"); 426 + desc->args = strdup(ltrim(p)); 427 + continue; 428 + } 429 + } 430 + 431 + fclose(fp); 432 + 433 + return 0; 434 + } 435 + 436 + static int list_available_scripts(const struct option *opt __used, 437 + const char *s __used, int unset __used) 438 + { 439 + struct dirent *script_next, *lang_next, script_dirent, lang_dirent; 440 + char scripts_path[MAXPATHLEN]; 441 + DIR *scripts_dir, *lang_dir; 442 + char script_path[MAXPATHLEN]; 443 + char lang_path[MAXPATHLEN]; 444 + struct script_desc *desc; 445 + char first_half[BUFSIZ]; 446 + char *script_root; 447 + char *str; 448 + 449 + snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); 450 + 451 + scripts_dir = opendir(scripts_path); 452 + if (!scripts_dir) 453 + return -1; 454 + 455 + for_each_lang(scripts_dir, lang_dirent, lang_next) { 456 + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, 457 + lang_dirent.d_name); 458 + lang_dir = opendir(lang_path); 459 + if (!lang_dir) 460 + continue; 461 + 462 + for_each_script(lang_dir, script_dirent, script_next) { 463 + script_root = strdup(script_dirent.d_name); 464 + str = ends_with(script_root, REPORT_SUFFIX); 465 + if (str) { 466 + *str = '\0'; 467 + desc = script_desc__findnew(script_root); 468 + snprintf(script_path, MAXPATHLEN, "%s/%s", 469 + lang_path, script_dirent.d_name); 470 + read_script_info(desc, script_path); 471 + } 472 + free(script_root); 473 + } 474 + } 475 + 476 + fprintf(stdout, "List of available trace scripts:\n"); 
477 + list_for_each_entry(desc, &script_descs, node) { 478 + sprintf(first_half, "%s %s", desc->name, 479 + desc->args ? desc->args : ""); 480 + fprintf(stdout, " %-36s %s\n", first_half, 481 + desc->half_liner ? desc->half_liner : ""); 482 + } 483 + 484 + exit(0); 485 + } 486 + 487 + static char *get_script_path(const char *script_root, const char *suffix) 488 + { 489 + struct dirent *script_next, *lang_next, script_dirent, lang_dirent; 490 + char scripts_path[MAXPATHLEN]; 491 + char script_path[MAXPATHLEN]; 492 + DIR *scripts_dir, *lang_dir; 493 + char lang_path[MAXPATHLEN]; 494 + char *str, *__script_root; 495 + char *path = NULL; 496 + 497 + snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); 498 + 499 + scripts_dir = opendir(scripts_path); 500 + if (!scripts_dir) 501 + return NULL; 502 + 503 + for_each_lang(scripts_dir, lang_dirent, lang_next) { 504 + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, 505 + lang_dirent.d_name); 506 + lang_dir = opendir(lang_path); 507 + if (!lang_dir) 508 + continue; 509 + 510 + for_each_script(lang_dir, script_dirent, script_next) { 511 + __script_root = strdup(script_dirent.d_name); 512 + str = ends_with(__script_root, suffix); 513 + if (str) { 514 + *str = '\0'; 515 + if (strcmp(__script_root, script_root)) 516 + continue; 517 + snprintf(script_path, MAXPATHLEN, "%s/%s", 518 + lang_path, script_dirent.d_name); 519 + path = strdup(script_path); 520 + free(__script_root); 521 + break; 522 + } 523 + free(__script_root); 524 + } 525 + } 526 + 527 + return path; 528 + } 529 + 277 530 static const char * const annotate_usage[] = { 278 531 "perf trace [<options>] <command>", 279 532 NULL ··· 522 299 "dump raw trace in ASCII"), 523 300 OPT_BOOLEAN('v', "verbose", &verbose, 524 301 "be more verbose (show symbol address, etc)"), 525 - OPT_BOOLEAN('l', "latency", &latency_format, 302 + OPT_BOOLEAN('L', "Latency", &latency_format, 526 303 "show latency attributes (irqs/preemption disabled, etc)"), 304 + 
OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts", 305 + list_available_scripts), 527 306 OPT_CALLBACK('s', "script", NULL, "name", 528 307 "script file name (lang:script name, script name, or *)", 529 308 parse_scriptname), ··· 537 312 538 313 int cmd_trace(int argc, const char **argv, const char *prefix __used) 539 314 { 540 - int err; 315 + struct perf_session *session; 316 + const char *suffix = NULL; 317 + const char **__argv; 318 + char *script_path; 319 + int i, err; 541 320 542 - symbol__init(0); 321 + if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) { 322 + if (argc < 3) { 323 + fprintf(stderr, 324 + "Please specify a record script\n"); 325 + return -1; 326 + } 327 + suffix = RECORD_SUFFIX; 328 + } 329 + 330 + if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) { 331 + if (argc < 3) { 332 + fprintf(stderr, 333 + "Please specify a report script\n"); 334 + return -1; 335 + } 336 + suffix = REPORT_SUFFIX; 337 + } 338 + 339 + if (suffix) { 340 + script_path = get_script_path(argv[2], suffix); 341 + if (!script_path) { 342 + fprintf(stderr, "script not found\n"); 343 + return -1; 344 + } 345 + 346 + __argv = malloc((argc + 1) * sizeof(const char *)); 347 + __argv[0] = "/bin/sh"; 348 + __argv[1] = script_path; 349 + for (i = 3; i < argc; i++) 350 + __argv[i - 1] = argv[i]; 351 + __argv[argc - 1] = NULL; 352 + 353 + execvp("/bin/sh", (char **)__argv); 354 + exit(-1); 355 + } 543 356 544 357 setup_scripting(); 545 358 546 - argc = parse_options(argc, argv, options, annotate_usage, 0); 547 - if (argc) { 548 - /* 549 - * Special case: if there's an argument left then assume tha 550 - * it's a symbol filter: 551 - */ 552 - if (argc > 1) 553 - usage_with_options(annotate_usage, options); 554 - } 359 + argc = parse_options(argc, argv, options, annotate_usage, 360 + PARSE_OPT_STOP_AT_NON_OPTION); 555 361 362 + if (symbol__init() < 0) 363 + return -1; 556 364 setup_pager(); 365 + 366 + session = perf_session__new(input_name, 
O_RDONLY, 0); 367 + if (session == NULL) 368 + return -ENOMEM; 557 369 558 370 if (generate_script_lang) { 559 371 struct stat perf_stat; ··· 624 362 } 625 363 626 364 if (script_name) { 627 - err = scripting_ops->start_script(script_name); 365 + err = scripting_ops->start_script(script_name, argc, argv); 628 366 if (err) 629 367 goto out; 630 368 } 631 369 632 - err = __cmd_trace(); 370 + err = __cmd_trace(session); 633 371 372 + perf_session__delete(session); 634 373 cleanup_scripting(); 635 374 out: 636 375 return err;
+1
tools/perf/builtin.h
··· 17 17 extern int cmd_annotate(int argc, const char **argv, const char *prefix); 18 18 extern int cmd_bench(int argc, const char **argv, const char *prefix); 19 19 extern int cmd_buildid_list(int argc, const char **argv, const char *prefix); 20 + extern int cmd_diff(int argc, const char **argv, const char *prefix); 20 21 extern int cmd_help(int argc, const char **argv, const char *prefix); 21 22 extern int cmd_sched(int argc, const char **argv, const char *prefix); 22 23 extern int cmd_list(int argc, const char **argv, const char *prefix);
+1
tools/perf/command-list.txt
··· 5 5 perf-annotate mainporcelain common 6 6 perf-bench mainporcelain common 7 7 perf-buildid-list mainporcelain common 8 + perf-diff mainporcelain common 8 9 perf-list mainporcelain common 9 10 perf-sched mainporcelain common 10 11 perf-record mainporcelain common
+1
tools/perf/perf.c
··· 286 286 const char *cmd = argv[0]; 287 287 static struct cmd_struct commands[] = { 288 288 { "buildid-list", cmd_buildid_list, 0 }, 289 + { "diff", cmd_diff, 0 }, 289 290 { "help", cmd_help, 0 }, 290 291 { "list", cmd_list, 0 }, 291 292 { "record", cmd_record, 0 },
+1
tools/perf/scripts/perl/bin/check-perf-trace-report
··· 1 1 #!/bin/bash 2 + # description: useless but exhaustive test script 2 3 perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl 3 4 4 5
+3 -1
tools/perf/scripts/perl/bin/rw-by-file-report
··· 1 1 #!/bin/bash 2 - perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl 2 + # description: r/w activity for a program, by file 3 + # args: <comm> 4 + perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $1 3 5 4 6 5 7
+1
tools/perf/scripts/perl/bin/rw-by-pid-report
··· 1 1 #!/bin/bash 2 + # description: system-wide r/w activity 2 3 perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl 3 4 4 5
+1
tools/perf/scripts/perl/bin/wakeup-latency-report
··· 1 1 #!/bin/bash 2 + # description: system-wide min/max/avg wakeup latency 2 3 perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl 3 4 4 5
+1
tools/perf/scripts/perl/bin/workqueue-stats-report
··· 1 1 #!/bin/bash 2 + # description: workqueue stats (ins/exe/create/destroy) 2 3 perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl 3 4 4 5
+3 -2
tools/perf/scripts/perl/rw-by-file.pl
··· 18 18 use Perf::Trace::Core; 19 19 use Perf::Trace::Util; 20 20 21 - # change this to the comm of the program you're interested in 22 - my $for_comm = "perf"; 21 + my $usage = "perf trace -s rw-by-file.pl <comm>\n"; 22 + 23 + my $for_comm = shift or die $usage; 23 24 24 25 my %reads; 25 26 my %writes;
+53 -43
tools/perf/util/data_map.c
··· 1 - #include "data_map.h" 2 1 #include "symbol.h" 3 2 #include "util.h" 4 3 #include "debug.h" 4 + #include "thread.h" 5 + #include "session.h" 5 6 6 - 7 - static struct perf_file_handler *curr_handler; 8 - static unsigned long mmap_window = 32; 9 - static char __cwd[PATH_MAX]; 10 - 11 - static int process_event_stub(event_t *event __used) 7 + static int process_event_stub(event_t *event __used, 8 + struct perf_session *session __used) 12 9 { 13 10 dump_printf(": unhandled!\n"); 14 11 return 0; 15 12 } 16 13 17 - void register_perf_file_handler(struct perf_file_handler *handler) 14 + static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) 18 15 { 19 16 if (!handler->process_sample_event) 20 17 handler->process_sample_event = process_event_stub; ··· 31 34 handler->process_throttle_event = process_event_stub; 32 35 if (!handler->process_unthrottle_event) 33 36 handler->process_unthrottle_event = process_event_stub; 34 - 35 - curr_handler = handler; 36 37 } 37 38 38 39 static const char *event__name[] = { ··· 56 61 event__name[i], event__total[i]); 57 62 } 58 63 59 - static int 60 - process_event(event_t *event, unsigned long offset, unsigned long head) 64 + static int process_event(event_t *event, struct perf_session *session, 65 + struct perf_event_ops *ops, 66 + unsigned long offset, unsigned long head) 61 67 { 62 68 trace_event(event); 63 69 ··· 73 77 74 78 switch (event->header.type) { 75 79 case PERF_RECORD_SAMPLE: 76 - return curr_handler->process_sample_event(event); 80 + return ops->process_sample_event(event, session); 77 81 case PERF_RECORD_MMAP: 78 - return curr_handler->process_mmap_event(event); 82 + return ops->process_mmap_event(event, session); 79 83 case PERF_RECORD_COMM: 80 - return curr_handler->process_comm_event(event); 84 + return ops->process_comm_event(event, session); 81 85 case PERF_RECORD_FORK: 82 - return curr_handler->process_fork_event(event); 86 + return ops->process_fork_event(event, session); 83 87 case 
PERF_RECORD_EXIT: 84 - return curr_handler->process_exit_event(event); 88 + return ops->process_exit_event(event, session); 85 89 case PERF_RECORD_LOST: 86 - return curr_handler->process_lost_event(event); 90 + return ops->process_lost_event(event, session); 87 91 case PERF_RECORD_READ: 88 - return curr_handler->process_read_event(event); 92 + return ops->process_read_event(event, session); 89 93 case PERF_RECORD_THROTTLE: 90 - return curr_handler->process_throttle_event(event); 94 + return ops->process_throttle_event(event, session); 91 95 case PERF_RECORD_UNTHROTTLE: 92 - return curr_handler->process_unthrottle_event(event); 96 + return ops->process_unthrottle_event(event, session); 93 97 default: 94 - curr_handler->total_unknown++; 98 + ops->total_unknown++; 95 99 return -1; 96 100 } 97 101 } ··· 125 129 return err; 126 130 } 127 131 132 + static struct thread *perf_session__register_idle_thread(struct perf_session *self) 133 + { 134 + struct thread *thread = perf_session__findnew(self, 0); 135 + 136 + if (!thread || thread__set_comm(thread, "swapper")) { 137 + pr_err("problem inserting idle task.\n"); 138 + thread = NULL; 139 + } 140 + 141 + return thread; 142 + } 143 + 128 144 int perf_session__process_events(struct perf_session *self, 129 - int full_paths, int *cwdlen, char **cwd) 145 + struct perf_event_ops *ops) 130 146 { 131 147 int err; 132 148 unsigned long head, shift; 133 149 unsigned long offset = 0; 134 150 size_t page_size; 135 - u64 sample_type; 136 151 event_t *event; 137 152 uint32_t size; 138 153 char *buf; 139 154 140 - if (curr_handler == NULL) { 141 - pr_debug("Forgot to register perf file handler\n"); 142 - return -EINVAL; 143 - } 155 + if (perf_session__register_idle_thread(self) == NULL) 156 + return -ENOMEM; 157 + 158 + perf_event_ops__fill_defaults(ops); 144 159 145 160 page_size = getpagesize(); 146 161 147 162 head = self->header.data_offset; 148 - sample_type = perf_header__sample_type(&self->header); 163 + self->sample_type = 
perf_header__sample_type(&self->header); 149 164 150 165 err = -EINVAL; 151 - if (curr_handler->sample_type_check && 152 - curr_handler->sample_type_check(sample_type) < 0) 166 + if (ops->sample_type_check && ops->sample_type_check(self) < 0) 153 167 goto out_err; 154 168 155 - if (!full_paths) { 156 - if (getcwd(__cwd, sizeof(__cwd)) == NULL) { 157 - pr_err("failed to get the current directory\n"); 169 + if (!ops->full_paths) { 170 + char bf[PATH_MAX]; 171 + 172 + if (getcwd(bf, sizeof(bf)) == NULL) { 158 173 err = -errno; 174 + out_getcwd_err: 175 + pr_err("failed to get the current directory\n"); 159 176 goto out_err; 160 177 } 161 - *cwd = __cwd; 162 - *cwdlen = strlen(*cwd); 163 - } else { 164 - *cwd = NULL; 165 - *cwdlen = 0; 178 + self->cwd = strdup(bf); 179 + if (self->cwd == NULL) { 180 + err = -ENOMEM; 181 + goto out_getcwd_err; 182 + } 183 + self->cwdlen = strlen(self->cwd); 166 184 } 167 185 168 186 shift = page_size * (head / page_size); ··· 184 174 head -= shift; 185 175 186 176 remap: 187 - buf = mmap(NULL, page_size * mmap_window, PROT_READ, 177 + buf = mmap(NULL, page_size * self->mmap_window, PROT_READ, 188 178 MAP_SHARED, self->fd, offset); 189 179 if (buf == MAP_FAILED) { 190 180 pr_err("failed to mmap file\n"); ··· 199 189 if (!size) 200 190 size = 8; 201 191 202 - if (head + event->header.size >= page_size * mmap_window) { 192 + if (head + event->header.size >= page_size * self->mmap_window) { 203 193 int munmap_ret; 204 194 205 195 shift = page_size * (head / page_size); 206 196 207 - munmap_ret = munmap(buf, page_size * mmap_window); 197 + munmap_ret = munmap(buf, page_size * self->mmap_window); 208 198 assert(munmap_ret == 0); 209 199 210 200 offset += shift; ··· 219 209 (void *)(long)event->header.size, 220 210 event->header.type); 221 211 222 - if (!size || process_event(event, offset, head) < 0) { 212 + if (!size || process_event(event, self, ops, offset, head) < 0) { 223 213 224 214 dump_printf("%p [%p]: skipping unknown header type: 
%d\n", 225 215 (void *)(offset + head),
-29
tools/perf/util/data_map.h
··· 1 - #ifndef __PERF_DATAMAP_H 2 - #define __PERF_DATAMAP_H 3 - 4 - #include "event.h" 5 - #include "header.h" 6 - #include "session.h" 7 - 8 - typedef int (*event_type_handler_t)(event_t *); 9 - 10 - struct perf_file_handler { 11 - event_type_handler_t process_sample_event; 12 - event_type_handler_t process_mmap_event; 13 - event_type_handler_t process_comm_event; 14 - event_type_handler_t process_fork_event; 15 - event_type_handler_t process_exit_event; 16 - event_type_handler_t process_lost_event; 17 - event_type_handler_t process_read_event; 18 - event_type_handler_t process_throttle_event; 19 - event_type_handler_t process_unthrottle_event; 20 - int (*sample_type_check)(u64 sample_type); 21 - unsigned long total_unknown; 22 - }; 23 - 24 - void register_perf_file_handler(struct perf_file_handler *handler); 25 - int perf_session__process_events(struct perf_session *self, 26 - int full_paths, int *cwdlen, char **cwd); 27 - int perf_header__read_build_ids(int input, u64 offset, u64 file_size); 28 - 29 - #endif
+111 -35
tools/perf/util/event.c
··· 1 1 #include <linux/types.h> 2 2 #include "event.h" 3 3 #include "debug.h" 4 + #include "session.h" 5 + #include "sort.h" 4 6 #include "string.h" 7 + #include "strlist.h" 5 8 #include "thread.h" 6 9 7 10 static pid_t event__synthesize_comm(pid_t pid, int full, 8 - int (*process)(event_t *event)) 11 + int (*process)(event_t *event, 12 + struct perf_session *session), 13 + struct perf_session *session) 9 14 { 10 15 event_t ev; 11 16 char filename[PATH_MAX]; ··· 59 54 if (!full) { 60 55 ev.comm.tid = pid; 61 56 62 - process(&ev); 57 + process(&ev, session); 63 58 goto out_fclose; 64 59 } 65 60 ··· 77 72 78 73 ev.comm.tid = pid; 79 74 80 - process(&ev); 75 + process(&ev, session); 81 76 } 82 77 closedir(tasks); 83 78 ··· 91 86 } 92 87 93 88 static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, 94 - int (*process)(event_t *event)) 89 + int (*process)(event_t *event, 90 + struct perf_session *session), 91 + struct perf_session *session) 95 92 { 96 93 char filename[PATH_MAX]; 97 94 FILE *fp; ··· 148 141 ev.mmap.pid = tgid; 149 142 ev.mmap.tid = pid; 150 143 151 - process(&ev); 144 + process(&ev, session); 152 145 } 153 146 } 154 147 ··· 156 149 return 0; 157 150 } 158 151 159 - int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)) 152 + int event__synthesize_thread(pid_t pid, 153 + int (*process)(event_t *event, 154 + struct perf_session *session), 155 + struct perf_session *session) 160 156 { 161 - pid_t tgid = event__synthesize_comm(pid, 1, process); 157 + pid_t tgid = event__synthesize_comm(pid, 1, process, session); 162 158 if (tgid == -1) 163 159 return -1; 164 - return event__synthesize_mmap_events(pid, tgid, process); 160 + return event__synthesize_mmap_events(pid, tgid, process, session); 165 161 } 166 162 167 - void event__synthesize_threads(int (*process)(event_t *event)) 163 + void event__synthesize_threads(int (*process)(event_t *event, 164 + struct perf_session *session), 165 + struct perf_session *session) 168 166 { 169 167 
DIR *proc; 170 168 struct dirent dirent, *next; ··· 183 171 if (*end) /* only interested in proper numerical dirents */ 184 172 continue; 185 173 186 - event__synthesize_thread(pid, process); 174 + event__synthesize_thread(pid, process, session); 187 175 } 188 176 189 177 closedir(proc); 190 178 } 191 179 192 - char *event__cwd; 193 - int event__cwdlen; 194 - 195 - struct events_stats event__stats; 196 - 197 - int event__process_comm(event_t *self) 180 + static void thread__comm_adjust(struct thread *self) 198 181 { 199 - struct thread *thread = threads__findnew(self->comm.pid); 182 + char *comm = self->comm; 183 + 184 + if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && 185 + (!symbol_conf.comm_list || 186 + strlist__has_entry(symbol_conf.comm_list, comm))) { 187 + unsigned int slen = strlen(comm); 188 + 189 + if (slen > comms__col_width) { 190 + comms__col_width = slen; 191 + threads__col_width = slen + 6; 192 + } 193 + } 194 + } 195 + 196 + static int thread__set_comm_adjust(struct thread *self, const char *comm) 197 + { 198 + int ret = thread__set_comm(self, comm); 199 + 200 + if (ret) 201 + return ret; 202 + 203 + thread__comm_adjust(self); 204 + 205 + return 0; 206 + } 207 + 208 + int event__process_comm(event_t *self, struct perf_session *session) 209 + { 210 + struct thread *thread = perf_session__findnew(session, self->comm.pid); 200 211 201 212 dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid); 202 213 203 - if (thread == NULL || thread__set_comm(thread, self->comm.comm)) { 214 + if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) { 204 215 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 205 216 return -1; 206 217 } ··· 231 196 return 0; 232 197 } 233 198 234 - int event__process_lost(event_t *self) 199 + int event__process_lost(event_t *self, struct perf_session *session) 235 200 { 236 201 dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); 237 - event__stats.lost += 
self->lost.lost; 202 + session->events_stats.lost += self->lost.lost; 238 203 return 0; 239 204 } 240 205 241 - int event__process_mmap(event_t *self) 206 + int event__process_mmap(event_t *self, struct perf_session *session) 242 207 { 243 - struct thread *thread = threads__findnew(self->mmap.pid); 208 + struct thread *thread = perf_session__findnew(session, self->mmap.pid); 244 209 struct map *map = map__new(&self->mmap, MAP__FUNCTION, 245 - event__cwd, event__cwdlen); 210 + session->cwd, session->cwdlen); 246 211 247 212 dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", 248 213 self->mmap.pid, self->mmap.tid, ··· 259 224 return 0; 260 225 } 261 226 262 - int event__process_task(event_t *self) 227 + int event__process_task(event_t *self, struct perf_session *session) 263 228 { 264 - struct thread *thread = threads__findnew(self->fork.pid); 265 - struct thread *parent = threads__findnew(self->fork.ppid); 229 + struct thread *thread = perf_session__findnew(session, self->fork.pid); 230 + struct thread *parent = perf_session__findnew(session, self->fork.ppid); 266 231 267 232 dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, 268 233 self->fork.ppid, self->fork.ptid); ··· 284 249 return 0; 285 250 } 286 251 287 - void thread__find_addr_location(struct thread *self, u8 cpumode, 252 + void thread__find_addr_location(struct thread *self, 253 + struct perf_session *session, u8 cpumode, 288 254 enum map_type type, u64 addr, 289 255 struct addr_location *al, 290 256 symbol_filter_t filter) ··· 297 261 298 262 if (cpumode & PERF_RECORD_MISC_KERNEL) { 299 263 al->level = 'k'; 300 - mg = kmaps; 264 + mg = &session->kmaps; 301 265 } else if (cpumode & PERF_RECORD_MISC_USER) 302 266 al->level = '.'; 303 267 else { ··· 318 282 * "[vdso]" dso, but for now lets use the old trick of looking 319 283 * in the whole kernel symbol list. 
320 284 */ 321 - if ((long long)al->addr < 0 && mg != kmaps) { 322 - mg = kmaps; 285 + if ((long long)al->addr < 0 && mg != &session->kmaps) { 286 + mg = &session->kmaps; 323 287 goto try_again; 324 288 } 325 289 al->sym = NULL; 326 290 } else { 327 291 al->addr = al->map->map_ip(al->map, al->addr); 328 - al->sym = map__find_symbol(al->map, al->addr, filter); 292 + al->sym = map__find_symbol(al->map, session, al->addr, filter); 329 293 } 330 294 } 331 295 332 - int event__preprocess_sample(const event_t *self, struct addr_location *al, 333 - symbol_filter_t filter) 296 + static void dso__calc_col_width(struct dso *self) 297 + { 298 + if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && 299 + (!symbol_conf.dso_list || 300 + strlist__has_entry(symbol_conf.dso_list, self->name))) { 301 + unsigned int slen = strlen(self->name); 302 + if (slen > dsos__col_width) 303 + dsos__col_width = slen; 304 + } 305 + 306 + self->slen_calculated = 1; 307 + } 308 + 309 + int event__preprocess_sample(const event_t *self, struct perf_session *session, 310 + struct addr_location *al, symbol_filter_t filter) 334 311 { 335 312 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 336 - struct thread *thread = threads__findnew(self->ip.pid); 313 + struct thread *thread = perf_session__findnew(session, self->ip.pid); 337 314 338 315 if (thread == NULL) 339 316 return -1; 340 317 318 + if (symbol_conf.comm_list && 319 + !strlist__has_entry(symbol_conf.comm_list, thread->comm)) 320 + goto out_filtered; 321 + 341 322 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); 342 323 343 - thread__find_addr_location(thread, cpumode, MAP__FUNCTION, 324 + thread__find_addr_location(thread, session, cpumode, MAP__FUNCTION, 344 325 self->ip.ip, al, filter); 345 326 dump_printf(" ...... dso: %s\n", 346 327 al->map ? al->map->dso->long_name : 347 328 al->level == 'H' ? 
"[hypervisor]" : "<not found>"); 329 + /* 330 + * We have to do this here as we may have a dso with no symbol hit that 331 + * has a name longer than the ones with symbols sampled. 332 + */ 333 + if (al->map && !sort_dso.elide && !al->map->dso->slen_calculated) 334 + dso__calc_col_width(al->map->dso); 335 + 336 + if (symbol_conf.dso_list && 337 + (!al->map || !al->map->dso || 338 + !(strlist__has_entry(symbol_conf.dso_list, al->map->dso->short_name) || 339 + (al->map->dso->short_name != al->map->dso->long_name && 340 + strlist__has_entry(symbol_conf.dso_list, al->map->dso->long_name))))) 341 + goto out_filtered; 342 + 343 + if (symbol_conf.sym_list && al->sym && 344 + !strlist__has_entry(symbol_conf.sym_list, al->sym->name)) 345 + goto out_filtered; 346 + 347 + al->filtered = false; 348 + return 0; 349 + 350 + out_filtered: 351 + al->filtered = true; 348 352 return 0; 349 353 } 350 354
+21 -15
tools/perf/util/event.h
··· 149 149 struct map *map__clone(struct map *self); 150 150 int map__overlap(struct map *l, struct map *r); 151 151 size_t map__fprintf(struct map *self, FILE *fp); 152 - struct symbol *map__find_symbol(struct map *self, u64 addr, 153 - symbol_filter_t filter); 152 + 153 + struct perf_session; 154 + 155 + int map__load(struct map *self, struct perf_session *session, 156 + symbol_filter_t filter); 157 + struct symbol *map__find_symbol(struct map *self, struct perf_session *session, 158 + u64 addr, symbol_filter_t filter); 154 159 struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 160 + struct perf_session *session, 155 161 symbol_filter_t filter); 156 162 void map__fixup_start(struct map *self); 157 163 void map__fixup_end(struct map *self); 158 164 159 - int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); 160 - void event__synthesize_threads(int (*process)(event_t *event)); 165 + int event__synthesize_thread(pid_t pid, 166 + int (*process)(event_t *event, 167 + struct perf_session *session), 168 + struct perf_session *session); 169 + void event__synthesize_threads(int (*process)(event_t *event, 170 + struct perf_session *session), 171 + struct perf_session *session); 161 172 162 - extern char *event__cwd; 163 - extern int event__cwdlen; 164 - extern struct events_stats event__stats; 165 - extern unsigned long event__total[PERF_RECORD_MAX]; 166 - 167 - int event__process_comm(event_t *self); 168 - int event__process_lost(event_t *self); 169 - int event__process_mmap(event_t *self); 170 - int event__process_task(event_t *self); 173 + int event__process_comm(event_t *self, struct perf_session *session); 174 + int event__process_lost(event_t *self, struct perf_session *session); 175 + int event__process_mmap(event_t *self, struct perf_session *session); 176 + int event__process_task(event_t *self, struct perf_session *session); 171 177 172 178 struct addr_location; 173 - int event__preprocess_sample(const event_t *self, 
struct addr_location *al, 174 - symbol_filter_t filter); 179 + int event__preprocess_sample(const event_t *self, struct perf_session *session, 180 + struct addr_location *al, symbol_filter_t filter); 175 181 int event__parse_sample(event_t *event, u64 type, struct sample_data *data); 176 182 177 183 #endif /* __PERF_RECORD_H */
+1 -1
tools/perf/util/header.c
··· 8 8 #include "header.h" 9 9 #include "../perf.h" 10 10 #include "trace-event.h" 11 + #include "session.h" 11 12 #include "symbol.h" 12 - #include "data_map.h" 13 13 #include "debug.h" 14 14 15 15 /*
+489 -29
tools/perf/util/hist.c
··· 1 1 #include "hist.h" 2 - 3 - struct rb_root hist; 4 - struct rb_root collapse_hists; 5 - struct rb_root output_hists; 6 - int callchain; 2 + #include "session.h" 3 + #include "sort.h" 4 + #include <math.h> 7 5 8 6 struct callchain_param callchain_param = { 9 7 .mode = CHAIN_GRAPH_REL, ··· 12 14 * histogram, sorted on item, collects counts 13 15 */ 14 16 15 - struct hist_entry *__hist_entry__add(struct addr_location *al, 16 - struct symbol *sym_parent, 17 - u64 count, bool *hit) 17 + struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, 18 + struct addr_location *al, 19 + struct symbol *sym_parent, 20 + u64 count, bool *hit) 18 21 { 19 - struct rb_node **p = &hist.rb_node; 22 + struct rb_node **p = &self->hists.rb_node; 20 23 struct rb_node *parent = NULL; 21 24 struct hist_entry *he; 22 25 struct hist_entry entry = { ··· 53 54 return NULL; 54 55 *he = entry; 55 56 rb_link_node(&he->rb_node, parent, p); 56 - rb_insert_color(&he->rb_node, &hist); 57 + rb_insert_color(&he->rb_node, &self->hists); 57 58 *hit = false; 58 59 return he; 59 60 } ··· 101 102 * collapse the histogram 102 103 */ 103 104 104 - void collapse__insert_entry(struct hist_entry *he) 105 + static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he) 105 106 { 106 - struct rb_node **p = &collapse_hists.rb_node; 107 + struct rb_node **p = &root->rb_node; 107 108 struct rb_node *parent = NULL; 108 109 struct hist_entry *iter; 109 110 int64_t cmp; ··· 127 128 } 128 129 129 130 rb_link_node(&he->rb_node, parent, p); 130 - rb_insert_color(&he->rb_node, &collapse_hists); 131 + rb_insert_color(&he->rb_node, root); 131 132 } 132 133 133 - void collapse__resort(void) 134 + void perf_session__collapse_resort(struct perf_session *self) 134 135 { 136 + struct rb_root tmp; 135 137 struct rb_node *next; 136 138 struct hist_entry *n; 137 139 138 140 if (!sort__need_collapse) 139 141 return; 140 142 141 - next = rb_first(&hist); 143 + tmp = RB_ROOT; 144 + next = 
rb_first(&self->hists); 145 + 142 146 while (next) { 143 147 n = rb_entry(next, struct hist_entry, rb_node); 144 148 next = rb_next(&n->rb_node); 145 149 146 - rb_erase(&n->rb_node, &hist); 147 - collapse__insert_entry(n); 150 + rb_erase(&n->rb_node, &self->hists); 151 + collapse__insert_entry(&tmp, n); 148 152 } 153 + 154 + self->hists = tmp; 149 155 } 150 156 151 157 /* 152 158 * reverse the map, sort on count. 153 159 */ 154 160 155 - void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) 161 + static void perf_session__insert_output_hist_entry(struct rb_root *root, 162 + struct hist_entry *he, 163 + u64 min_callchain_hits) 156 164 { 157 - struct rb_node **p = &output_hists.rb_node; 165 + struct rb_node **p = &root->rb_node; 158 166 struct rb_node *parent = NULL; 159 167 struct hist_entry *iter; 160 168 161 - if (callchain) 169 + if (symbol_conf.use_callchain) 162 170 callchain_param.sort(&he->sorted_chain, &he->callchain, 163 171 min_callchain_hits, &callchain_param); 164 172 ··· 180 174 } 181 175 182 176 rb_link_node(&he->rb_node, parent, p); 183 - rb_insert_color(&he->rb_node, &output_hists); 177 + rb_insert_color(&he->rb_node, root); 184 178 } 185 179 186 - void output__resort(u64 total_samples) 180 + void perf_session__output_resort(struct perf_session *self, u64 total_samples) 187 181 { 182 + struct rb_root tmp; 188 183 struct rb_node *next; 189 184 struct hist_entry *n; 190 - struct rb_root *tree = &hist; 191 185 u64 min_callchain_hits; 192 186 193 187 min_callchain_hits = 194 188 total_samples * (callchain_param.min_percent / 100); 195 189 196 - if (sort__need_collapse) 197 - tree = &collapse_hists; 198 - 199 - next = rb_first(tree); 190 + tmp = RB_ROOT; 191 + next = rb_first(&self->hists); 200 192 201 193 while (next) { 202 194 n = rb_entry(next, struct hist_entry, rb_node); 203 195 next = rb_next(&n->rb_node); 204 196 205 - rb_erase(&n->rb_node, tree); 206 - output__insert_entry(n, min_callchain_hits); 197 + rb_erase(&n->rb_node, 
&self->hists); 198 + perf_session__insert_output_hist_entry(&tmp, n, 199 + min_callchain_hits); 207 200 } 201 + 202 + self->hists = tmp; 203 + } 204 + 205 + static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) 206 + { 207 + int i; 208 + int ret = fprintf(fp, " "); 209 + 210 + for (i = 0; i < left_margin; i++) 211 + ret += fprintf(fp, " "); 212 + 213 + return ret; 214 + } 215 + 216 + static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, 217 + int left_margin) 218 + { 219 + int i; 220 + size_t ret = callchain__fprintf_left_margin(fp, left_margin); 221 + 222 + for (i = 0; i < depth; i++) 223 + if (depth_mask & (1 << i)) 224 + ret += fprintf(fp, "| "); 225 + else 226 + ret += fprintf(fp, " "); 227 + 228 + ret += fprintf(fp, "\n"); 229 + 230 + return ret; 231 + } 232 + 233 + static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, 234 + int depth, int depth_mask, int count, 235 + u64 total_samples, int hits, 236 + int left_margin) 237 + { 238 + int i; 239 + size_t ret = 0; 240 + 241 + ret += callchain__fprintf_left_margin(fp, left_margin); 242 + for (i = 0; i < depth; i++) { 243 + if (depth_mask & (1 << i)) 244 + ret += fprintf(fp, "|"); 245 + else 246 + ret += fprintf(fp, " "); 247 + if (!count && i == depth - 1) { 248 + double percent; 249 + 250 + percent = hits * 100.0 / total_samples; 251 + ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); 252 + } else 253 + ret += fprintf(fp, "%s", " "); 254 + } 255 + if (chain->sym) 256 + ret += fprintf(fp, "%s\n", chain->sym->name); 257 + else 258 + ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); 259 + 260 + return ret; 261 + } 262 + 263 + static struct symbol *rem_sq_bracket; 264 + static struct callchain_list rem_hits; 265 + 266 + static void init_rem_hits(void) 267 + { 268 + rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); 269 + if (!rem_sq_bracket) { 270 + fprintf(stderr, "Not enough memory to display remaining hits\n"); 271 + return; 272 + } 
273 + 274 + strcpy(rem_sq_bracket->name, "[...]"); 275 + rem_hits.sym = rem_sq_bracket; 276 + } 277 + 278 + static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 279 + u64 total_samples, int depth, 280 + int depth_mask, int left_margin) 281 + { 282 + struct rb_node *node, *next; 283 + struct callchain_node *child; 284 + struct callchain_list *chain; 285 + int new_depth_mask = depth_mask; 286 + u64 new_total; 287 + u64 remaining; 288 + size_t ret = 0; 289 + int i; 290 + 291 + if (callchain_param.mode == CHAIN_GRAPH_REL) 292 + new_total = self->children_hit; 293 + else 294 + new_total = total_samples; 295 + 296 + remaining = new_total; 297 + 298 + node = rb_first(&self->rb_root); 299 + while (node) { 300 + u64 cumul; 301 + 302 + child = rb_entry(node, struct callchain_node, rb_node); 303 + cumul = cumul_hits(child); 304 + remaining -= cumul; 305 + 306 + /* 307 + * The depth mask manages the output of pipes that show 308 + * the depth. We don't want to keep the pipes of the current 309 + * level for the last child of this depth. 310 + * Except if we have remaining filtered hits. 
They will 311 + * supersede the last child 312 + */ 313 + next = rb_next(node); 314 + if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) 315 + new_depth_mask &= ~(1 << (depth - 1)); 316 + 317 + /* 318 + * But we keep the older depth mask for the line seperator 319 + * to keep the level link until we reach the last child 320 + */ 321 + ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, 322 + left_margin); 323 + i = 0; 324 + list_for_each_entry(chain, &child->val, list) { 325 + if (chain->ip >= PERF_CONTEXT_MAX) 326 + continue; 327 + ret += ipchain__fprintf_graph(fp, chain, depth, 328 + new_depth_mask, i++, 329 + new_total, 330 + cumul, 331 + left_margin); 332 + } 333 + ret += __callchain__fprintf_graph(fp, child, new_total, 334 + depth + 1, 335 + new_depth_mask | (1 << depth), 336 + left_margin); 337 + node = next; 338 + } 339 + 340 + if (callchain_param.mode == CHAIN_GRAPH_REL && 341 + remaining && remaining != new_total) { 342 + 343 + if (!rem_sq_bracket) 344 + return ret; 345 + 346 + new_depth_mask &= ~(1 << (depth - 1)); 347 + 348 + ret += ipchain__fprintf_graph(fp, &rem_hits, depth, 349 + new_depth_mask, 0, new_total, 350 + remaining, left_margin); 351 + } 352 + 353 + return ret; 354 + } 355 + 356 + static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 357 + u64 total_samples, int left_margin) 358 + { 359 + struct callchain_list *chain; 360 + bool printed = false; 361 + int i = 0; 362 + int ret = 0; 363 + 364 + list_for_each_entry(chain, &self->val, list) { 365 + if (chain->ip >= PERF_CONTEXT_MAX) 366 + continue; 367 + 368 + if (!i++ && sort__first_dimension == SORT_SYM) 369 + continue; 370 + 371 + if (!printed) { 372 + ret += callchain__fprintf_left_margin(fp, left_margin); 373 + ret += fprintf(fp, "|\n"); 374 + ret += callchain__fprintf_left_margin(fp, left_margin); 375 + ret += fprintf(fp, "---"); 376 + 377 + left_margin += 3; 378 + printed = true; 379 + } else 380 + ret += callchain__fprintf_left_margin(fp, 
left_margin); 381 + 382 + if (chain->sym) 383 + ret += fprintf(fp, " %s\n", chain->sym->name); 384 + else 385 + ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); 386 + } 387 + 388 + ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); 389 + 390 + return ret; 391 + } 392 + 393 + static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self, 394 + u64 total_samples) 395 + { 396 + struct callchain_list *chain; 397 + size_t ret = 0; 398 + 399 + if (!self) 400 + return 0; 401 + 402 + ret += callchain__fprintf_flat(fp, self->parent, total_samples); 403 + 404 + 405 + list_for_each_entry(chain, &self->val, list) { 406 + if (chain->ip >= PERF_CONTEXT_MAX) 407 + continue; 408 + if (chain->sym) 409 + ret += fprintf(fp, " %s\n", chain->sym->name); 410 + else 411 + ret += fprintf(fp, " %p\n", 412 + (void *)(long)chain->ip); 413 + } 414 + 415 + return ret; 416 + } 417 + 418 + static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, 419 + u64 total_samples, int left_margin) 420 + { 421 + struct rb_node *rb_node; 422 + struct callchain_node *chain; 423 + size_t ret = 0; 424 + 425 + rb_node = rb_first(&self->sorted_chain); 426 + while (rb_node) { 427 + double percent; 428 + 429 + chain = rb_entry(rb_node, struct callchain_node, rb_node); 430 + percent = chain->hit * 100.0 / total_samples; 431 + switch (callchain_param.mode) { 432 + case CHAIN_FLAT: 433 + ret += percent_color_fprintf(fp, " %6.2f%%\n", 434 + percent); 435 + ret += callchain__fprintf_flat(fp, chain, total_samples); 436 + break; 437 + case CHAIN_GRAPH_ABS: /* Falldown */ 438 + case CHAIN_GRAPH_REL: 439 + ret += callchain__fprintf_graph(fp, chain, total_samples, 440 + left_margin); 441 + case CHAIN_NONE: 442 + default: 443 + break; 444 + } 445 + ret += fprintf(fp, "\n"); 446 + rb_node = rb_next(rb_node); 447 + } 448 + 449 + return ret; 450 + } 451 + 452 + static size_t hist_entry__fprintf(struct hist_entry *self, 453 + struct perf_session *session, 454 + 
struct perf_session *pair_session, 455 + bool show_displacement, 456 + long displacement, FILE *fp) 457 + { 458 + struct sort_entry *se; 459 + u64 count, total; 460 + const char *sep = symbol_conf.field_sep; 461 + size_t ret; 462 + 463 + if (symbol_conf.exclude_other && !self->parent) 464 + return 0; 465 + 466 + if (pair_session) { 467 + count = self->pair ? self->pair->count : 0; 468 + total = pair_session->events_stats.total; 469 + } else { 470 + count = self->count; 471 + total = session->events_stats.total; 472 + } 473 + 474 + if (total) 475 + ret = percent_color_fprintf(fp, sep ? "%.2f" : " %6.2f%%", 476 + (count * 100.0) / total); 477 + else 478 + ret = fprintf(fp, sep ? "%lld" : "%12lld ", count); 479 + 480 + if (symbol_conf.show_nr_samples) { 481 + if (sep) 482 + fprintf(fp, "%c%lld", *sep, count); 483 + else 484 + fprintf(fp, "%11lld", count); 485 + } 486 + 487 + if (pair_session) { 488 + char bf[32]; 489 + double old_percent = 0, new_percent = 0, diff; 490 + 491 + if (total > 0) 492 + old_percent = (count * 100.0) / total; 493 + if (session->events_stats.total > 0) 494 + new_percent = (self->count * 100.0) / session->events_stats.total; 495 + 496 + diff = new_percent - old_percent; 497 + 498 + if (fabs(diff) >= 0.01) 499 + snprintf(bf, sizeof(bf), "%+4.2F%%", diff); 500 + else 501 + snprintf(bf, sizeof(bf), " "); 502 + 503 + if (sep) 504 + ret += fprintf(fp, "%c%s", *sep, bf); 505 + else 506 + ret += fprintf(fp, "%11.11s", bf); 507 + 508 + if (show_displacement) { 509 + if (displacement) 510 + snprintf(bf, sizeof(bf), "%+4ld", displacement); 511 + else 512 + snprintf(bf, sizeof(bf), " "); 513 + 514 + if (sep) 515 + fprintf(fp, "%c%s", *sep, bf); 516 + else 517 + fprintf(fp, "%6.6s", bf); 518 + } 519 + } 520 + 521 + list_for_each_entry(se, &hist_entry__sort_list, list) { 522 + if (se->elide) 523 + continue; 524 + 525 + fprintf(fp, "%s", sep ?: " "); 526 + ret += se->print(fp, self, se->width ? 
*se->width : 0); 527 + } 528 + 529 + ret += fprintf(fp, "\n"); 530 + 531 + if (symbol_conf.use_callchain) { 532 + int left_margin = 0; 533 + 534 + if (sort__first_dimension == SORT_COMM) { 535 + se = list_first_entry(&hist_entry__sort_list, typeof(*se), 536 + list); 537 + left_margin = se->width ? *se->width : 0; 538 + left_margin -= thread__comm_len(self->thread); 539 + } 540 + 541 + hist_entry_callchain__fprintf(fp, self, session->events_stats.total, 542 + left_margin); 543 + } 544 + 545 + return ret; 546 + } 547 + 548 + size_t perf_session__fprintf_hists(struct perf_session *self, 549 + struct perf_session *pair, 550 + bool show_displacement, FILE *fp) 551 + { 552 + struct sort_entry *se; 553 + struct rb_node *nd; 554 + size_t ret = 0; 555 + unsigned long position = 1; 556 + long displacement = 0; 557 + unsigned int width; 558 + const char *sep = symbol_conf.field_sep; 559 + char *col_width = symbol_conf.col_width_list_str; 560 + 561 + init_rem_hits(); 562 + 563 + fprintf(fp, "# %s", pair ? 
"Baseline" : "Overhead"); 564 + 565 + if (symbol_conf.show_nr_samples) { 566 + if (sep) 567 + fprintf(fp, "%cSamples", *sep); 568 + else 569 + fputs(" Samples ", fp); 570 + } 571 + 572 + if (pair) { 573 + if (sep) 574 + ret += fprintf(fp, "%cDelta", *sep); 575 + else 576 + ret += fprintf(fp, " Delta "); 577 + 578 + if (show_displacement) { 579 + if (sep) 580 + ret += fprintf(fp, "%cDisplacement", *sep); 581 + else 582 + ret += fprintf(fp, " Displ"); 583 + } 584 + } 585 + 586 + list_for_each_entry(se, &hist_entry__sort_list, list) { 587 + if (se->elide) 588 + continue; 589 + if (sep) { 590 + fprintf(fp, "%c%s", *sep, se->header); 591 + continue; 592 + } 593 + width = strlen(se->header); 594 + if (se->width) { 595 + if (symbol_conf.col_width_list_str) { 596 + if (col_width) { 597 + *se->width = atoi(col_width); 598 + col_width = strchr(col_width, ','); 599 + if (col_width) 600 + ++col_width; 601 + } 602 + } 603 + width = *se->width = max(*se->width, width); 604 + } 605 + fprintf(fp, " %*s", width, se->header); 606 + } 607 + fprintf(fp, "\n"); 608 + 609 + if (sep) 610 + goto print_entries; 611 + 612 + fprintf(fp, "# ........"); 613 + if (symbol_conf.show_nr_samples) 614 + fprintf(fp, " .........."); 615 + if (pair) { 616 + fprintf(fp, " .........."); 617 + if (show_displacement) 618 + fprintf(fp, " ....."); 619 + } 620 + list_for_each_entry(se, &hist_entry__sort_list, list) { 621 + unsigned int i; 622 + 623 + if (se->elide) 624 + continue; 625 + 626 + fprintf(fp, " "); 627 + if (se->width) 628 + width = *se->width; 629 + else 630 + width = strlen(se->header); 631 + for (i = 0; i < width; i++) 632 + fprintf(fp, "."); 633 + } 634 + 635 + fprintf(fp, "\n#\n"); 636 + 637 + print_entries: 638 + for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { 639 + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 640 + 641 + if (show_displacement) { 642 + if (h->pair != NULL) 643 + displacement = ((long)h->pair->position - 644 + (long)position); 645 + else 646 + 
displacement = 0; 647 + ++position; 648 + } 649 + ret += hist_entry__fprintf(h, self, pair, show_displacement, 650 + displacement, fp); 651 + } 652 + 653 + free(rem_sq_bracket); 654 + 655 + return ret; 208 656 }
+16 -39
tools/perf/util/hist.h
··· 1 1 #ifndef __PERF_HIST_H 2 2 #define __PERF_HIST_H 3 - #include "../builtin.h" 4 3 5 - #include "util.h" 6 - 7 - #include "color.h" 8 - #include <linux/list.h> 9 - #include "cache.h" 10 - #include <linux/rbtree.h> 11 - #include "symbol.h" 12 - #include "string.h" 4 + #include <linux/types.h> 13 5 #include "callchain.h" 14 - #include "strlist.h" 15 - #include "values.h" 16 6 17 - #include "../perf.h" 18 - #include "debug.h" 19 - #include "header.h" 20 - 21 - #include "parse-options.h" 22 - #include "parse-events.h" 23 - 24 - #include "thread.h" 25 - #include "sort.h" 26 - 27 - extern struct rb_root hist; 28 - extern struct rb_root collapse_hists; 29 - extern struct rb_root output_hists; 30 - extern int callchain; 31 7 extern struct callchain_param callchain_param; 32 - extern unsigned long total; 33 - extern unsigned long total_mmap; 34 - extern unsigned long total_comm; 35 - extern unsigned long total_fork; 36 - extern unsigned long total_unknown; 37 - extern unsigned long total_lost; 38 8 39 - struct hist_entry *__hist_entry__add(struct addr_location *al, 40 - struct symbol *parent, 41 - u64 count, bool *hit); 9 + struct perf_session; 10 + struct hist_entry; 11 + struct addr_location; 12 + struct symbol; 13 + 14 + struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, 15 + struct addr_location *al, 16 + struct symbol *parent, 17 + u64 count, bool *hit); 42 18 extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); 43 19 extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); 44 - extern void hist_entry__free(struct hist_entry *); 45 - extern void collapse__insert_entry(struct hist_entry *); 46 - extern void collapse__resort(void); 47 - extern void output__insert_entry(struct hist_entry *, u64); 48 - extern void output__resort(u64); 20 + void hist_entry__free(struct hist_entry *); 49 21 22 + void perf_session__output_resort(struct perf_session *self, u64 total_samples); 23 + void 
perf_session__collapse_resort(struct perf_session *self); 24 + size_t perf_session__fprintf_hists(struct perf_session *self, 25 + struct perf_session *pair, 26 + bool show_displacement, FILE *fp); 50 27 #endif /* __PERF_HIST_H */
+12 -6
tools/perf/util/map.c
··· 104 104 105 105 #define DSO__DELETED "(deleted)" 106 106 107 - static int map__load(struct map *self, symbol_filter_t filter) 107 + int map__load(struct map *self, struct perf_session *session, 108 + symbol_filter_t filter) 108 109 { 109 110 const char *name = self->dso->long_name; 110 - int nr = dso__load(self->dso, self, filter); 111 + int nr; 111 112 113 + if (dso__loaded(self->dso, self->type)) 114 + return 0; 115 + 116 + nr = dso__load(self->dso, self, session, filter); 112 117 if (nr < 0) { 113 118 if (self->dso->has_build_id) { 114 119 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; ··· 148 143 return 0; 149 144 } 150 145 151 - struct symbol *map__find_symbol(struct map *self, u64 addr, 152 - symbol_filter_t filter) 146 + struct symbol *map__find_symbol(struct map *self, struct perf_session *session, 147 + u64 addr, symbol_filter_t filter) 153 148 { 154 - if (!dso__loaded(self->dso, self->type) && map__load(self, filter) < 0) 149 + if (map__load(self, session, filter) < 0) 155 150 return NULL; 156 151 157 152 return dso__find_symbol(self->dso, self->type, addr); 158 153 } 159 154 160 155 struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 156 + struct perf_session *session, 161 157 symbol_filter_t filter) 162 158 { 163 - if (!dso__loaded(self->dso, self->type) && map__load(self, filter) < 0) 159 + if (map__load(self, session, filter) < 0) 164 160 return NULL; 165 161 166 162 if (!dso__sorted_by_name(self->dso, self->type))
+141 -66
tools/perf/util/probe-event.c
··· 69 69 char c, nc = 0; 70 70 /* 71 71 * <Syntax> 72 - * perf probe SRC:LN 73 - * perf probe FUNC[+OFFS|%return][@SRC] 72 + * perf probe [EVENT=]SRC:LN 73 + * perf probe [EVENT=]FUNC[+OFFS|%return][@SRC] 74 + * 75 + * TODO:Group name support 74 76 */ 77 + 78 + ptr = strchr(arg, '='); 79 + if (ptr) { /* Event name */ 80 + *ptr = '\0'; 81 + tmp = ptr + 1; 82 + ptr = strchr(arg, ':'); 83 + if (ptr) /* Group name is not supported yet. */ 84 + semantic_error("Group name is not supported yet."); 85 + pp->event = strdup(arg); 86 + arg = tmp; 87 + } 75 88 76 89 ptr = strpbrk(arg, ":+@%"); 77 90 if (ptr) { ··· 163 150 } 164 151 165 152 /* Parse perf-probe event definition */ 166 - int parse_perf_probe_event(const char *str, struct probe_point *pp) 153 + void parse_perf_probe_event(const char *str, struct probe_point *pp, 154 + bool *need_dwarf) 167 155 { 168 156 char **argv; 169 - int argc, i, need_dwarf = 0; 157 + int argc, i; 158 + 159 + *need_dwarf = false; 170 160 171 161 argv = argv_split(str, &argc); 172 162 if (!argv) ··· 180 164 /* Parse probe point */ 181 165 parse_perf_probe_probepoint(argv[0], pp); 182 166 if (pp->file || pp->line) 183 - need_dwarf = 1; 167 + *need_dwarf = true; 184 168 185 169 /* Copy arguments and ensure return probe has no C argument */ 186 170 pp->nr_args = argc - 1; ··· 193 177 if (pp->retprobe) 194 178 semantic_error("You can't specify local" 195 179 " variable for kretprobe"); 196 - need_dwarf = 1; 180 + *need_dwarf = true; 197 181 } 198 182 } 199 183 200 184 argv_free(argv); 201 - return need_dwarf; 202 185 } 203 186 204 187 /* Parse kprobe_events event into struct probe_point */ 205 - void parse_trace_kprobe_event(const char *str, char **group, char **event, 206 - struct probe_point *pp) 188 + void parse_trace_kprobe_event(const char *str, struct probe_point *pp) 207 189 { 208 190 char pr; 209 191 char *p; ··· 217 203 218 204 /* Scan event and group name. 
*/ 219 205 ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", 220 - &pr, (float *)(void *)group, (float *)(void *)event); 206 + &pr, (float *)(void *)&pp->group, 207 + (float *)(void *)&pp->event); 221 208 if (ret != 3) 222 209 semantic_error("Failed to parse event name: %s", argv[0]); 223 - pr_debug("Group:%s Event:%s probe:%c\n", *group, *event, pr); 224 - 225 - if (!pp) 226 - goto end; 210 + pr_debug("Group:%s Event:%s probe:%c\n", pp->group, pp->event, pr); 227 211 228 212 pp->retprobe = (pr == 'r'); 229 213 230 214 /* Scan function name and offset */ 231 - ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, &pp->offset); 215 + ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, 216 + &pp->offset); 232 217 if (ret == 1) 233 218 pp->offset = 0; 234 219 ··· 246 233 die("Failed to copy argument."); 247 234 } 248 235 249 - end: 250 236 argv_free(argv); 251 237 } 252 238 253 - int synthesize_perf_probe_event(struct probe_point *pp) 239 + /* Synthesize only probe point (not argument) */ 240 + int synthesize_perf_probe_point(struct probe_point *pp) 254 241 { 255 242 char *buf; 256 243 char offs[64] = "", line[64] = ""; 257 - int i, len, ret; 244 + int ret; 258 245 259 246 pp->probes[0] = buf = zalloc(MAX_CMDLEN); 260 247 if (!buf) ··· 275 262 offs, pp->retprobe ? 
"%return" : "", line); 276 263 else 277 264 ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line); 278 - if (ret <= 0) 279 - goto error; 280 - len = ret; 265 + if (ret <= 0) { 266 + error: 267 + free(pp->probes[0]); 268 + pp->probes[0] = NULL; 269 + } 270 + return ret; 271 + } 281 272 273 + int synthesize_perf_probe_event(struct probe_point *pp) 274 + { 275 + char *buf; 276 + int i, len, ret; 277 + 278 + len = synthesize_perf_probe_point(pp); 279 + if (len < 0) 280 + return 0; 281 + 282 + buf = pp->probes[0]; 282 283 for (i = 0; i < pp->nr_args; i++) { 283 284 ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", 284 285 pp->args[i]); ··· 305 278 return pp->found; 306 279 error: 307 280 free(pp->probes[0]); 281 + pp->probes[0] = NULL; 308 282 309 283 return ret; 310 284 } ··· 335 307 return pp->found; 336 308 error: 337 309 free(pp->probes[0]); 310 + pp->probes[0] = NULL; 338 311 339 312 return ret; 340 313 } ··· 395 366 { 396 367 int i; 397 368 369 + if (pp->event) 370 + free(pp->event); 371 + if (pp->group) 372 + free(pp->group); 398 373 if (pp->function) 399 374 free(pp->function); 400 375 if (pp->file) ··· 413 380 } 414 381 415 382 /* Show an event */ 416 - static void show_perf_probe_event(const char *group, const char *event, 417 - const char *place, struct probe_point *pp) 383 + static void show_perf_probe_event(const char *event, const char *place, 384 + struct probe_point *pp) 418 385 { 419 - int i; 386 + int i, ret; 420 387 char buf[128]; 421 388 422 - e_snprintf(buf, 128, "%s:%s", group, event); 389 + ret = e_snprintf(buf, 128, "%s:%s", pp->group, event); 390 + if (ret < 0) 391 + die("Failed to copy event: %s", strerror(-ret)); 423 392 printf(" %-40s (on %s", buf, place); 424 393 425 394 if (pp->nr_args > 0) { ··· 435 400 /* List up current perf-probe events */ 436 401 void show_perf_probe_events(void) 437 402 { 438 - unsigned int i; 439 - int fd, nr; 440 - char *group, *event; 403 + int fd; 441 404 struct probe_point pp; 442 405 struct strlist 
*rawlist; 443 406 struct str_node *ent; ··· 444 411 rawlist = get_trace_kprobe_event_rawlist(fd); 445 412 close(fd); 446 413 447 - for (i = 0; i < strlist__nr_entries(rawlist); i++) { 448 - ent = strlist__entry(rawlist, i); 449 - parse_trace_kprobe_event(ent->s, &group, &event, &pp); 414 + strlist__for_each(ent, rawlist) { 415 + parse_trace_kprobe_event(ent->s, &pp); 450 416 /* Synthesize only event probe point */ 451 - nr = pp.nr_args; 452 - pp.nr_args = 0; 453 - synthesize_perf_probe_event(&pp); 454 - pp.nr_args = nr; 417 + synthesize_perf_probe_point(&pp); 455 418 /* Show an event */ 456 - show_perf_probe_event(group, event, pp.probes[0], &pp); 457 - free(group); 458 - free(event); 419 + show_perf_probe_event(pp.event, pp.probes[0], &pp); 459 420 clear_probe_point(&pp); 460 421 } 461 422 ··· 459 432 /* Get current perf-probe event names */ 460 433 static struct strlist *get_perf_event_names(int fd, bool include_group) 461 434 { 462 - unsigned int i; 463 - char *group, *event; 464 435 char buf[128]; 465 436 struct strlist *sl, *rawlist; 466 437 struct str_node *ent; 438 + struct probe_point pp; 467 439 440 + memset(&pp, 0, sizeof(pp)); 468 441 rawlist = get_trace_kprobe_event_rawlist(fd); 469 442 470 443 sl = strlist__new(true, NULL); 471 - for (i = 0; i < strlist__nr_entries(rawlist); i++) { 472 - ent = strlist__entry(rawlist, i); 473 - parse_trace_kprobe_event(ent->s, &group, &event, NULL); 444 + strlist__for_each(ent, rawlist) { 445 + parse_trace_kprobe_event(ent->s, &pp); 474 446 if (include_group) { 475 - if (e_snprintf(buf, 128, "%s:%s", group, event) < 0) 447 + if (e_snprintf(buf, 128, "%s:%s", pp.group, 448 + pp.event) < 0) 476 449 die("Failed to copy group:event name."); 477 450 strlist__add(sl, buf); 478 451 } else 479 - strlist__add(sl, event); 480 - free(group); 481 - free(event); 452 + strlist__add(sl, pp.event); 453 + clear_probe_point(&pp); 482 454 } 483 455 484 456 strlist__delete(rawlist); ··· 496 470 } 497 471 498 472 static void 
get_new_event_name(char *buf, size_t len, const char *base, 499 - struct strlist *namelist) 473 + struct strlist *namelist, bool allow_suffix) 500 474 { 501 475 int i, ret; 502 476 ··· 506 480 die("snprintf() failed: %s", strerror(-ret)); 507 481 if (!strlist__has_entry(namelist, buf)) 508 482 return; 483 + 484 + if (!allow_suffix) { 485 + pr_warning("Error: event \"%s\" already exists. " 486 + "(Use -f to force duplicates.)\n", base); 487 + die("Can't add new event."); 488 + } 509 489 510 490 /* Try to add suffix */ 511 491 for (i = 1; i < MAX_EVENT_INDEX; i++) { ··· 525 493 die("Too many events are on the same function."); 526 494 } 527 495 528 - void add_trace_kprobe_events(struct probe_point *probes, int nr_probes) 496 + void add_trace_kprobe_events(struct probe_point *probes, int nr_probes, 497 + bool force_add) 529 498 { 530 499 int i, j, fd; 531 500 struct probe_point *pp; 532 501 char buf[MAX_CMDLEN]; 533 502 char event[64]; 534 503 struct strlist *namelist; 504 + bool allow_suffix; 535 505 536 506 fd = open_kprobe_events(O_RDWR, O_APPEND); 537 507 /* Get current event names */ ··· 541 507 542 508 for (j = 0; j < nr_probes; j++) { 543 509 pp = probes + j; 510 + if (!pp->event) 511 + pp->event = strdup(pp->function); 512 + if (!pp->group) 513 + pp->group = strdup(PERFPROBE_GROUP); 514 + DIE_IF(!pp->event || !pp->group); 515 + /* If force_add is true, suffix search is allowed */ 516 + allow_suffix = force_add; 544 517 for (i = 0; i < pp->found; i++) { 545 518 /* Get an unused new event name */ 546 - get_new_event_name(event, 64, pp->function, namelist); 519 + get_new_event_name(event, 64, pp->event, namelist, 520 + allow_suffix); 547 521 snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n", 548 522 pp->retprobe ? 
'r' : 'p', 549 - PERFPROBE_GROUP, event, 523 + pp->group, event, 550 524 pp->probes[i]); 551 525 write_trace_kprobe_event(fd, buf); 552 526 printf("Added new event:\n"); 553 527 /* Get the first parameter (probe-point) */ 554 528 sscanf(pp->probes[i], "%s", buf); 555 - show_perf_probe_event(PERFPROBE_GROUP, event, 556 - buf, pp); 529 + show_perf_probe_event(event, buf, pp); 557 530 /* Add added event name to namelist */ 558 531 strlist__add(namelist, event); 532 + /* 533 + * Probes after the first probe which comes from same 534 + * user input are always allowed to add suffix, because 535 + * there might be several addresses corresponding to 536 + * one code line. 537 + */ 538 + allow_suffix = true; 559 539 } 560 540 } 561 541 /* Show how to use the event. */ ··· 580 532 close(fd); 581 533 } 582 534 535 + static void __del_trace_kprobe_event(int fd, struct str_node *ent) 536 + { 537 + char *p; 538 + char buf[128]; 539 + 540 + /* Convert from perf-probe event to trace-kprobe event */ 541 + if (e_snprintf(buf, 128, "-:%s", ent->s) < 0) 542 + die("Failed to copy event."); 543 + p = strchr(buf + 2, ':'); 544 + if (!p) 545 + die("Internal error: %s should have ':' but not.", ent->s); 546 + *p = '/'; 547 + 548 + write_trace_kprobe_event(fd, buf); 549 + printf("Remove event: %s\n", ent->s); 550 + } 551 + 583 552 static void del_trace_kprobe_event(int fd, const char *group, 584 553 const char *event, struct strlist *namelist) 585 554 { 586 555 char buf[128]; 556 + struct str_node *ent, *n; 557 + int found = 0; 587 558 588 559 if (e_snprintf(buf, 128, "%s:%s", group, event) < 0) 589 560 die("Failed to copy event."); 590 - if (!strlist__has_entry(namelist, buf)) { 591 - pr_warning("Warning: event \"%s\" is not found.\n", buf); 592 - return; 593 - } 594 - /* Convert from perf-probe event to trace-kprobe event */ 595 - if (e_snprintf(buf, 128, "-:%s/%s", group, event) < 0) 596 - die("Failed to copy event."); 597 561 598 - write_trace_kprobe_event(fd, buf); 599 - printf("Remove 
event: %s:%s\n", group, event); 562 + if (strpbrk(buf, "*?")) { /* Glob-exp */ 563 + strlist__for_each_safe(ent, n, namelist) 564 + if (strglobmatch(ent->s, buf)) { 565 + found++; 566 + __del_trace_kprobe_event(fd, ent); 567 + strlist__remove(namelist, ent); 568 + } 569 + } else { 570 + ent = strlist__find(namelist, buf); 571 + if (ent) { 572 + found++; 573 + __del_trace_kprobe_event(fd, ent); 574 + strlist__remove(namelist, ent); 575 + } 576 + } 577 + if (found == 0) 578 + pr_info("Info: event \"%s\" does not exist, could not remove it.\n", buf); 600 579 } 601 580 602 581 void del_trace_kprobe_events(struct strlist *dellist) 603 582 { 604 583 int fd; 605 - unsigned int i; 606 584 const char *group, *event; 607 585 char *p, *str; 608 586 struct str_node *ent; ··· 638 564 /* Get current event names */ 639 565 namelist = get_perf_event_names(fd, true); 640 566 641 - for (i = 0; i < strlist__nr_entries(dellist); i++) { 642 - ent = strlist__entry(dellist, i); 567 + strlist__for_each(ent, dellist) { 643 568 str = strdup(ent->s); 644 569 if (!str) 645 570 die("Failed to copy event."); 571 + pr_debug("Parsing: %s\n", str); 646 572 p = strchr(str, ':'); 647 573 if (p) { 648 574 group = str; 649 575 *p = '\0'; 650 576 event = p + 1; 651 577 } else { 652 - group = PERFPROBE_GROUP; 578 + group = "*"; 653 579 event = str; 654 580 } 581 + pr_debug("Group: %s, Event: %s\n", group, event); 655 582 del_trace_kprobe_event(fd, group, event, namelist); 656 583 free(str); 657 584 }
+7 -4
tools/perf/util/probe-event.h
··· 1 1 #ifndef _PROBE_EVENT_H 2 2 #define _PROBE_EVENT_H 3 3 4 + #include <stdbool.h> 4 5 #include "probe-finder.h" 5 6 #include "strlist.h" 6 7 7 - extern int parse_perf_probe_event(const char *str, struct probe_point *pp); 8 + extern void parse_perf_probe_event(const char *str, struct probe_point *pp, 9 + bool *need_dwarf); 10 + extern int synthesize_perf_probe_point(struct probe_point *pp); 8 11 extern int synthesize_perf_probe_event(struct probe_point *pp); 9 - extern void parse_trace_kprobe_event(const char *str, char **group, 10 - char **event, struct probe_point *pp); 12 + extern void parse_trace_kprobe_event(const char *str, struct probe_point *pp); 11 13 extern int synthesize_trace_kprobe_event(struct probe_point *pp); 12 - extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes); 14 + extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes, 15 + bool force_add); 13 16 extern void del_trace_kprobe_events(struct strlist *dellist); 14 17 extern void show_perf_probe_events(void); 15 18
+1 -3
tools/perf/util/probe-finder.c
··· 687 687 struct probe_finder pf = {.pp = pp}; 688 688 689 689 ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); 690 - if (ret != DW_DLV_OK) { 691 - pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO.\n"); 690 + if (ret != DW_DLV_OK) 692 691 return -ENOENT; 693 - } 694 692 695 693 pp->found = 0; 696 694 while (++cu_number) {
+3
tools/perf/util/probe-finder.h
··· 12 12 } 13 13 14 14 struct probe_point { 15 + char *event; /* Event name */ 16 + char *group; /* Event group */ 17 + 15 18 /* Inputs */ 16 19 char *file; /* File name */ 17 20 int line; /* Line number */
+77 -7
tools/perf/util/session.c
··· 4 4 #include <sys/types.h> 5 5 6 6 #include "session.h" 7 + #include "sort.h" 7 8 #include "util.h" 8 9 9 10 static int perf_session__open(struct perf_session *self, bool force) ··· 51 50 52 51 struct perf_session *perf_session__new(const char *filename, int mode, bool force) 53 52 { 54 - size_t len = strlen(filename) + 1; 53 + size_t len = filename ? strlen(filename) + 1 : 0; 55 54 struct perf_session *self = zalloc(sizeof(*self) + len); 56 55 57 56 if (self == NULL) 58 57 goto out; 59 58 60 59 if (perf_header__init(&self->header) < 0) 61 - goto out_delete; 60 + goto out_free; 62 61 63 62 memcpy(self->filename, filename, len); 63 + self->threads = RB_ROOT; 64 + self->last_match = NULL; 65 + self->mmap_window = 32; 66 + self->cwd = NULL; 67 + self->cwdlen = 0; 68 + map_groups__init(&self->kmaps); 64 69 65 - if (mode == O_RDONLY && perf_session__open(self, force) < 0) { 66 - perf_session__delete(self); 67 - self = NULL; 68 - } 70 + if (perf_session__create_kernel_maps(self) < 0) 71 + goto out_delete; 72 + 73 + if (mode == O_RDONLY && perf_session__open(self, force) < 0) 74 + goto out_delete; 69 75 out: 70 76 return self; 71 - out_delete: 77 + out_free: 72 78 free(self); 79 + return NULL; 80 + out_delete: 81 + perf_session__delete(self); 73 82 return NULL; 74 83 } 75 84 ··· 87 76 { 88 77 perf_header__exit(&self->header); 89 78 close(self->fd); 79 + free(self->cwd); 90 80 free(self); 81 + } 82 + 83 + static bool symbol__match_parent_regex(struct symbol *sym) 84 + { 85 + if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 86 + return 1; 87 + 88 + return 0; 89 + } 90 + 91 + struct symbol **perf_session__resolve_callchain(struct perf_session *self, 92 + struct thread *thread, 93 + struct ip_callchain *chain, 94 + struct symbol **parent) 95 + { 96 + u8 cpumode = PERF_RECORD_MISC_USER; 97 + struct symbol **syms = NULL; 98 + unsigned int i; 99 + 100 + if (symbol_conf.use_callchain) { 101 + syms = calloc(chain->nr, sizeof(*syms)); 102 + if (!syms) { 103 + 
fprintf(stderr, "Can't allocate memory for symbols\n"); 104 + exit(-1); 105 + } 106 + } 107 + 108 + for (i = 0; i < chain->nr; i++) { 109 + u64 ip = chain->ips[i]; 110 + struct addr_location al; 111 + 112 + if (ip >= PERF_CONTEXT_MAX) { 113 + switch (ip) { 114 + case PERF_CONTEXT_HV: 115 + cpumode = PERF_RECORD_MISC_HYPERVISOR; break; 116 + case PERF_CONTEXT_KERNEL: 117 + cpumode = PERF_RECORD_MISC_KERNEL; break; 118 + case PERF_CONTEXT_USER: 119 + cpumode = PERF_RECORD_MISC_USER; break; 120 + default: 121 + break; 122 + } 123 + continue; 124 + } 125 + 126 + thread__find_addr_location(thread, self, cpumode, 127 + MAP__FUNCTION, ip, &al, NULL); 128 + if (al.sym != NULL) { 129 + if (sort__has_parent && !*parent && 130 + symbol__match_parent_regex(al.sym)) 131 + *parent = al.sym; 132 + if (!symbol_conf.use_callchain) 133 + break; 134 + syms[i] = al.sym; 135 + } 136 + } 137 + 138 + return syms; 91 139 }
+45
tools/perf/util/session.h
··· 1 1 #ifndef __PERF_SESSION_H 2 2 #define __PERF_SESSION_H 3 3 4 + #include "event.h" 4 5 #include "header.h" 6 + #include "thread.h" 7 + #include <linux/rbtree.h> 8 + #include "../../../include/linux/perf_event.h" 9 + 10 + struct ip_callchain; 11 + struct thread; 12 + struct symbol; 5 13 6 14 struct perf_session { 7 15 struct perf_header header; 8 16 unsigned long size; 17 + unsigned long mmap_window; 18 + struct map_groups kmaps; 19 + struct rb_root threads; 20 + struct thread *last_match; 21 + struct events_stats events_stats; 22 + unsigned long event_total[PERF_RECORD_MAX]; 23 + struct rb_root hists; 24 + u64 sample_type; 9 25 int fd; 26 + int cwdlen; 27 + char *cwd; 10 28 char filename[0]; 29 + }; 30 + 31 + typedef int (*event_op)(event_t *self, struct perf_session *session); 32 + 33 + struct perf_event_ops { 34 + event_op process_sample_event; 35 + event_op process_mmap_event; 36 + event_op process_comm_event; 37 + event_op process_fork_event; 38 + event_op process_exit_event; 39 + event_op process_lost_event; 40 + event_op process_read_event; 41 + event_op process_throttle_event; 42 + event_op process_unthrottle_event; 43 + int (*sample_type_check)(struct perf_session *session); 44 + unsigned long total_unknown; 45 + bool full_paths; 11 46 }; 12 47 13 48 struct perf_session *perf_session__new(const char *filename, int mode, bool force); 14 49 void perf_session__delete(struct perf_session *self); 50 + 51 + int perf_session__process_events(struct perf_session *self, 52 + struct perf_event_ops *event_ops); 53 + 54 + struct symbol **perf_session__resolve_callchain(struct perf_session *self, 55 + struct thread *thread, 56 + struct ip_callchain *chain, 57 + struct symbol **parent); 58 + 59 + int perf_header__read_build_ids(int input, u64 offset, u64 file_size); 15 60 16 61 #endif /* __PERF_SESSION_H */
+26
tools/perf/util/sort.c
··· 288 288 289 289 return -ESRCH; 290 290 } 291 + 292 + void setup_sorting(const char * const usagestr[], const struct option *opts) 293 + { 294 + char *tmp, *tok, *str = strdup(sort_order); 295 + 296 + for (tok = strtok_r(str, ", ", &tmp); 297 + tok; tok = strtok_r(NULL, ", ", &tmp)) { 298 + if (sort_dimension__add(tok) < 0) { 299 + error("Unknown --sort key: `%s'", tok); 300 + usage_with_options(usagestr, opts); 301 + } 302 + } 303 + 304 + free(str); 305 + } 306 + 307 + void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 308 + const char *list_name, FILE *fp) 309 + { 310 + if (list && strlist__nr_entries(list) == 1) { 311 + if (fp != NULL) 312 + fprintf(fp, "# %s: %s\n", list_name, 313 + strlist__entry(list, 0)->s); 314 + self->elide = true; 315 + } 316 + }
+10 -2
tools/perf/util/sort.h
··· 49 49 struct symbol *sym; 50 50 u64 ip; 51 51 char level; 52 - struct symbol *parent; 52 + struct symbol *parent; 53 53 struct callchain_node callchain; 54 - struct rb_root sorted_chain; 54 + union { 55 + unsigned long position; 56 + struct hist_entry *pair; 57 + struct rb_root sorted_chain; 58 + }; 55 59 }; 56 60 57 61 enum sort_type { ··· 85 81 extern struct sort_entry sort_thread; 86 82 extern struct list_head hist_entry__sort_list; 87 83 84 + void setup_sorting(const char * const usagestr[], const struct option *opts); 85 + 88 86 extern int repsep_fprintf(FILE *fp, const char *fmt, ...); 89 87 extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); 90 88 extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); ··· 101 95 extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); 102 96 extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); 103 97 extern int sort_dimension__add(const char *); 98 + void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 99 + const char *list_name, FILE *fp); 104 100 105 101 #endif /* __PERF_SORT_H */
+25
tools/perf/util/string.c
··· 226 226 argv_free(argv); 227 227 return NULL; 228 228 } 229 + 230 + /* Glob expression pattern matching */ 231 + bool strglobmatch(const char *str, const char *pat) 232 + { 233 + while (*str && *pat && *pat != '*') { 234 + if (*pat == '?') { 235 + str++; 236 + pat++; 237 + } else 238 + if (*str++ != *pat++) 239 + return false; 240 + } 241 + /* Check wild card */ 242 + if (*pat == '*') { 243 + while (*pat == '*') 244 + pat++; 245 + if (!*pat) /* Tail wild card matches all */ 246 + return true; 247 + while (*str) 248 + if (strglobmatch(str++, pat)) 249 + return true; 250 + } 251 + return !*str && !*pat; 252 + } 253 +
+2
tools/perf/util/string.h
··· 1 1 #ifndef __PERF_STRING_H_ 2 2 #define __PERF_STRING_H_ 3 3 4 + #include <stdbool.h> 4 5 #include "types.h" 5 6 6 7 int hex2u64(const char *ptr, u64 *val); ··· 9 8 s64 perf_atoll(const char *str); 10 9 char **argv_split(const char *str, int *argcp); 11 10 void argv_free(char **argv); 11 + bool strglobmatch(const char *str, const char *pat); 12 12 13 13 #define _STR(x) #x 14 14 #define STR(x) _STR(x)
+3 -3
tools/perf/util/strlist.c
··· 102 102 str_node__delete(sn, self->dupstr); 103 103 } 104 104 105 - bool strlist__has_entry(struct strlist *self, const char *entry) 105 + struct str_node *strlist__find(struct strlist *self, const char *entry) 106 106 { 107 107 struct rb_node **p = &self->entries.rb_node; 108 108 struct rb_node *parent = NULL; ··· 120 120 else if (rc < 0) 121 121 p = &(*p)->rb_right; 122 122 else 123 - return true; 123 + return sn; 124 124 } 125 125 126 - return false; 126 + return NULL; 127 127 } 128 128 129 129 static int strlist__parse_list_entry(struct strlist *self, const char *s)
+40 -1
tools/perf/util/strlist.h
··· 23 23 int strlist__add(struct strlist *self, const char *str); 24 24 25 25 struct str_node *strlist__entry(const struct strlist *self, unsigned int idx); 26 - bool strlist__has_entry(struct strlist *self, const char *entry); 26 + struct str_node *strlist__find(struct strlist *self, const char *entry); 27 + 28 + static inline bool strlist__has_entry(struct strlist *self, const char *entry) 29 + { 30 + return strlist__find(self, entry) != NULL; 31 + } 27 32 28 33 static inline bool strlist__empty(const struct strlist *self) 29 34 { ··· 39 34 { 40 35 return self->nr_entries; 41 36 } 37 + 38 + /* For strlist iteration */ 39 + static inline struct str_node *strlist__first(struct strlist *self) 40 + { 41 + struct rb_node *rn = rb_first(&self->entries); 42 + return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 43 + } 44 + static inline struct str_node *strlist__next(struct str_node *sn) 45 + { 46 + struct rb_node *rn; 47 + if (!sn) 48 + return NULL; 49 + rn = rb_next(&sn->rb_node); 50 + return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 51 + } 52 + 53 + /** 54 + * strlist_for_each - iterate over a strlist 55 + * @pos: the &struct str_node to use as a loop cursor. 56 + * @self: the &struct strlist for loop. 57 + */ 58 + #define strlist__for_each(pos, self) \ 59 + for (pos = strlist__first(self); pos; pos = strlist__next(pos)) 60 + 61 + /** 62 + * strlist_for_each_safe - iterate over a strlist safe against removal of 63 + * str_node 64 + * @pos: the &struct str_node to use as a loop cursor. 65 + * @n: another &struct str_node to use as temporary storage. 66 + * @self: the &struct strlist for loop. 67 + */ 68 + #define strlist__for_each_safe(pos, n, self) \ 69 + for (pos = strlist__first(self), n = strlist__next(pos); pos;\ 70 + pos = n, n = strlist__next(n)) 42 71 43 72 int strlist__parse_list(struct strlist *self, const char *s); 44 73 #endif /* __PERF_STRLIST_H */
+87 -57
tools/perf/util/symbol.c
··· 1 1 #include "util.h" 2 2 #include "../perf.h" 3 + #include "session.h" 4 + #include "sort.h" 3 5 #include "string.h" 4 6 #include "symbol.h" 5 7 #include "thread.h" ··· 33 31 static void dsos__add(struct list_head *head, struct dso *dso); 34 32 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 35 33 static int dso__load_kernel_sym(struct dso *self, struct map *map, 36 - struct map_groups *mg, symbol_filter_t filter); 37 - unsigned int symbol__priv_size; 34 + struct perf_session *session, symbol_filter_t filter); 38 35 static int vmlinux_path__nr_entries; 39 36 static char **vmlinux_path; 40 37 41 - static struct symbol_conf symbol_conf__defaults = { 38 + struct symbol_conf symbol_conf = { 39 + .exclude_other = true, 42 40 .use_modules = true, 43 41 .try_vmlinux_path = true, 44 42 }; 45 - 46 - static struct map_groups kmaps_mem; 47 - struct map_groups *kmaps = &kmaps_mem; 48 43 49 44 bool dso__loaded(const struct dso *self, enum map_type type) 50 45 { ··· 131 132 static struct symbol *symbol__new(u64 start, u64 len, const char *name) 132 133 { 133 134 size_t namelen = strlen(name) + 1; 134 - struct symbol *self = zalloc(symbol__priv_size + 135 + struct symbol *self = zalloc(symbol_conf.priv_size + 135 136 sizeof(*self) + namelen); 136 137 if (self == NULL) 137 138 return NULL; 138 139 139 - if (symbol__priv_size) 140 - self = ((void *)self) + symbol__priv_size; 140 + if (symbol_conf.priv_size) 141 + self = ((void *)self) + symbol_conf.priv_size; 141 142 142 143 self->start = start; 143 144 self->end = len ? start + len - 1 : start; ··· 151 152 152 153 static void symbol__delete(struct symbol *self) 153 154 { 154 - free(((void *)self) - symbol__priv_size); 155 + free(((void *)self) - symbol_conf.priv_size); 155 156 } 156 157 157 158 static size_t symbol__fprintf(struct symbol *self, FILE *fp) ··· 455 456 * the original ELF section names vmlinux have. 
456 457 */ 457 458 static int dso__split_kallsyms(struct dso *self, struct map *map, 458 - struct map_groups *mg, symbol_filter_t filter) 459 + struct perf_session *session, symbol_filter_t filter) 459 460 { 460 461 struct map *curr_map = map; 461 462 struct symbol *pos; ··· 472 473 473 474 module = strchr(pos->name, '\t'); 474 475 if (module) { 475 - if (!mg->use_modules) 476 + if (!symbol_conf.use_modules) 476 477 goto discard_symbol; 477 478 478 479 *module++ = '\0'; 479 480 480 481 if (strcmp(self->name, module)) { 481 - curr_map = map_groups__find_by_name(mg, map->type, module); 482 + curr_map = map_groups__find_by_name(&session->kmaps, map->type, module); 482 483 if (curr_map == NULL) { 483 484 pr_debug("/proc/{kallsyms,modules} " 484 485 "inconsistency!\n"); ··· 509 510 } 510 511 511 512 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 512 - map_groups__insert(mg, curr_map); 513 + map_groups__insert(&session->kmaps, curr_map); 513 514 ++kernel_range; 514 515 } 515 516 ··· 530 531 531 532 532 533 static int dso__load_kallsyms(struct dso *self, struct map *map, 533 - struct map_groups *mg, symbol_filter_t filter) 534 + struct perf_session *session, symbol_filter_t filter) 534 535 { 535 536 if (dso__load_all_kallsyms(self, map) < 0) 536 537 return -1; ··· 538 539 symbols__fixup_end(&self->symbols[map->type]); 539 540 self->origin = DSO__ORIG_KERNEL; 540 541 541 - return dso__split_kallsyms(self, map, mg, filter); 542 - } 543 - 544 - size_t kernel_maps__fprintf(FILE *fp) 545 - { 546 - size_t printed = fprintf(fp, "Kernel maps:\n"); 547 - printed += map_groups__fprintf_maps(kmaps, fp); 548 - return printed + fprintf(fp, "END kernel maps\n"); 542 + return dso__split_kallsyms(self, map, session, filter); 549 543 } 550 544 551 545 static int dso__load_perf_map(struct dso *self, struct map *map, ··· 865 873 } 866 874 867 875 static int dso__load_sym(struct dso *self, struct map *map, 868 - struct map_groups *mg, const char *name, int fd, 876 + struct 
perf_session *session, const char *name, int fd, 869 877 symbol_filter_t filter, int kernel, int kmodule) 870 878 { 871 879 struct map *curr_map = map; ··· 969 977 snprintf(dso_name, sizeof(dso_name), 970 978 "%s%s", self->short_name, section_name); 971 979 972 - curr_map = map_groups__find_by_name(mg, map->type, dso_name); 980 + curr_map = map_groups__find_by_name(&session->kmaps, map->type, dso_name); 973 981 if (curr_map == NULL) { 974 982 u64 start = sym.st_value; 975 983 ··· 988 996 curr_map->map_ip = identity__map_ip; 989 997 curr_map->unmap_ip = identity__map_ip; 990 998 curr_dso->origin = DSO__ORIG_KERNEL; 991 - map_groups__insert(kmaps, curr_map); 999 + map_groups__insert(&session->kmaps, curr_map); 992 1000 dsos__add(&dsos__kernel, curr_dso); 993 1001 } else 994 1002 curr_dso = curr_map->dso; ··· 1203 1211 return origin[self->origin]; 1204 1212 } 1205 1213 1206 - int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) 1214 + int dso__load(struct dso *self, struct map *map, struct perf_session *session, 1215 + symbol_filter_t filter) 1207 1216 { 1208 1217 int size = PATH_MAX; 1209 1218 char *name; ··· 1215 1222 dso__set_loaded(self, map->type); 1216 1223 1217 1224 if (self->kernel) 1218 - return dso__load_kernel_sym(self, map, kmaps, filter); 1225 + return dso__load_kernel_sym(self, map, session, filter); 1219 1226 1220 1227 name = malloc(size); 1221 1228 if (!name) ··· 1316 1323 return NULL; 1317 1324 } 1318 1325 1319 - static int dsos__set_modules_path_dir(char *dirname) 1326 + static int perf_session__set_modules_path_dir(struct perf_session *self, char *dirname) 1320 1327 { 1321 1328 struct dirent *dent; 1322 1329 DIR *dir = opendir(dirname); ··· 1336 1343 1337 1344 snprintf(path, sizeof(path), "%s/%s", 1338 1345 dirname, dent->d_name); 1339 - if (dsos__set_modules_path_dir(path) < 0) 1346 + if (perf_session__set_modules_path_dir(self, path) < 0) 1340 1347 goto failure; 1341 1348 } else { 1342 1349 char *dot = strrchr(dent->d_name, 
'.'), ··· 1350 1357 (int)(dot - dent->d_name), dent->d_name); 1351 1358 1352 1359 strxfrchar(dso_name, '-', '_'); 1353 - map = map_groups__find_by_name(kmaps, MAP__FUNCTION, dso_name); 1360 + map = map_groups__find_by_name(&self->kmaps, MAP__FUNCTION, dso_name); 1354 1361 if (map == NULL) 1355 1362 continue; 1356 1363 ··· 1370 1377 return -1; 1371 1378 } 1372 1379 1373 - static int dsos__set_modules_path(void) 1380 + static int perf_session__set_modules_path(struct perf_session *self) 1374 1381 { 1375 1382 struct utsname uts; 1376 1383 char modules_path[PATH_MAX]; ··· 1381 1388 snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", 1382 1389 uts.release); 1383 1390 1384 - return dsos__set_modules_path_dir(modules_path); 1391 + return perf_session__set_modules_path_dir(self, modules_path); 1385 1392 } 1386 1393 1387 1394 /* ··· 1403 1410 return self; 1404 1411 } 1405 1412 1406 - static int map_groups__create_module_maps(struct map_groups *self) 1413 + static int perf_session__create_module_maps(struct perf_session *self) 1407 1414 { 1408 1415 char *line = NULL; 1409 1416 size_t n; ··· 1460 1467 dso->has_build_id = true; 1461 1468 1462 1469 dso->origin = DSO__ORIG_KMODULE; 1463 - map_groups__insert(self, map); 1470 + map_groups__insert(&self->kmaps, map); 1464 1471 dsos__add(&dsos__kernel, dso); 1465 1472 } 1466 1473 1467 1474 free(line); 1468 1475 fclose(file); 1469 1476 1470 - return dsos__set_modules_path(); 1477 + return perf_session__set_modules_path(self); 1471 1478 1472 1479 out_delete_line: 1473 1480 free(line); ··· 1476 1483 } 1477 1484 1478 1485 static int dso__load_vmlinux(struct dso *self, struct map *map, 1479 - struct map_groups *mg, 1486 + struct perf_session *session, 1480 1487 const char *vmlinux, symbol_filter_t filter) 1481 1488 { 1482 1489 int err = -1, fd; ··· 1510 1517 return -1; 1511 1518 1512 1519 dso__set_loaded(self, map->type); 1513 - err = dso__load_sym(self, map, mg, self->long_name, fd, filter, 1, 0); 1520 + err = 
dso__load_sym(self, map, session, self->long_name, fd, filter, 1, 0); 1514 1521 close(fd); 1515 1522 1516 1523 return err; 1517 1524 } 1518 1525 1519 1526 static int dso__load_kernel_sym(struct dso *self, struct map *map, 1520 - struct map_groups *mg, symbol_filter_t filter) 1527 + struct perf_session *session, symbol_filter_t filter) 1521 1528 { 1522 1529 int err; 1523 1530 bool is_kallsyms; ··· 1527 1534 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1528 1535 vmlinux_path__nr_entries); 1529 1536 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1530 - err = dso__load_vmlinux(self, map, mg, 1537 + err = dso__load_vmlinux(self, map, session, 1531 1538 vmlinux_path[i], filter); 1532 1539 if (err > 0) { 1533 1540 pr_debug("Using %s for symbols\n", ··· 1543 1550 if (is_kallsyms) 1544 1551 goto do_kallsyms; 1545 1552 1546 - err = dso__load_vmlinux(self, map, mg, self->long_name, filter); 1553 + err = dso__load_vmlinux(self, map, session, self->long_name, filter); 1547 1554 if (err <= 0) { 1548 1555 pr_info("The file %s cannot be used, " 1549 1556 "trying to use /proc/kallsyms...", self->long_name); 1550 1557 do_kallsyms: 1551 - err = dso__load_kallsyms(self, map, mg, filter); 1558 + err = dso__load_kallsyms(self, map, session, filter); 1552 1559 if (err > 0 && !is_kallsyms) 1553 1560 dso__set_long_name(self, strdup("[kernel.kallsyms]")); 1554 1561 } ··· 1741 1748 return -1; 1742 1749 } 1743 1750 1744 - int symbol__init(struct symbol_conf *conf) 1751 + static int setup_list(struct strlist **list, const char *list_str, 1752 + const char *list_name) 1745 1753 { 1746 - const struct symbol_conf *pconf = conf ?: &symbol_conf__defaults; 1754 + if (list_str == NULL) 1755 + return 0; 1747 1756 1757 + *list = strlist__new(true, list_str); 1758 + if (!*list) { 1759 + pr_err("problems parsing %s list\n", list_name); 1760 + return -1; 1761 + } 1762 + return 0; 1763 + } 1764 + 1765 + int symbol__init(void) 1766 + { 1748 1767 elf_version(EV_CURRENT); 1749 - 
symbol__priv_size = pconf->priv_size; 1750 - if (pconf->sort_by_name) 1751 - symbol__priv_size += (sizeof(struct symbol_name_rb_node) - 1752 - sizeof(struct symbol)); 1753 - map_groups__init(kmaps); 1768 + if (symbol_conf.sort_by_name) 1769 + symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 1770 + sizeof(struct symbol)); 1754 1771 1755 - if (pconf->try_vmlinux_path && vmlinux_path__init() < 0) 1772 + if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0) 1756 1773 return -1; 1757 1774 1758 - if (map_groups__create_kernel_maps(kmaps, pconf->vmlinux_name) < 0) { 1759 - vmlinux_path__exit(); 1775 + if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { 1776 + pr_err("'.' is the only non valid --field-separator argument\n"); 1760 1777 return -1; 1761 1778 } 1762 1779 1763 - kmaps->use_modules = pconf->use_modules; 1764 - if (pconf->use_modules && map_groups__create_module_maps(kmaps) < 0) 1765 - pr_debug("Failed to load list of modules in use, " 1766 - "continuing...\n"); 1780 + if (setup_list(&symbol_conf.dso_list, 1781 + symbol_conf.dso_list_str, "dso") < 0) 1782 + return -1; 1783 + 1784 + if (setup_list(&symbol_conf.comm_list, 1785 + symbol_conf.comm_list_str, "comm") < 0) 1786 + goto out_free_dso_list; 1787 + 1788 + if (setup_list(&symbol_conf.sym_list, 1789 + symbol_conf.sym_list_str, "symbol") < 0) 1790 + goto out_free_comm_list; 1791 + 1792 + return 0; 1793 + 1794 + out_free_dso_list: 1795 + strlist__delete(symbol_conf.dso_list); 1796 + out_free_comm_list: 1797 + strlist__delete(symbol_conf.comm_list); 1798 + return -1; 1799 + } 1800 + 1801 + int perf_session__create_kernel_maps(struct perf_session *self) 1802 + { 1803 + if (map_groups__create_kernel_maps(&self->kmaps, 1804 + symbol_conf.vmlinux_name) < 0) 1805 + return -1; 1806 + 1807 + if (symbol_conf.use_modules && 1808 + perf_session__create_module_maps(self) < 0) 1809 + pr_debug("Failed to load list of modules for session %s, " 1810 + "continuing...\n", self->filename); 1767 1811 
/* 1768 1812 * Now that we have all the maps created, just set the ->end of them: 1769 1813 */ 1770 - map_groups__fixup_end(kmaps); 1814 + map_groups__fixup_end(&self->kmaps); 1771 1815 return 0; 1772 1816 }
+24 -10
tools/perf/util/symbol.h
··· 49 49 char name[0]; 50 50 }; 51 51 52 + struct strlist; 53 + 52 54 struct symbol_conf { 53 55 unsigned short priv_size; 54 56 bool try_vmlinux_path, 55 57 use_modules, 56 - sort_by_name; 57 - const char *vmlinux_name; 58 + sort_by_name, 59 + show_nr_samples, 60 + use_callchain, 61 + exclude_other; 62 + const char *vmlinux_name, 63 + *field_sep; 64 + char *dso_list_str, 65 + *comm_list_str, 66 + *sym_list_str, 67 + *col_width_list_str; 68 + struct strlist *dso_list, 69 + *comm_list, 70 + *sym_list; 58 71 }; 59 72 60 - extern unsigned int symbol__priv_size; 73 + extern struct symbol_conf symbol_conf; 61 74 62 75 static inline void *symbol__priv(struct symbol *self) 63 76 { 64 - return ((void *)self) - symbol__priv_size; 77 + return ((void *)self) - symbol_conf.priv_size; 65 78 } 66 79 67 80 struct addr_location { ··· 83 70 struct symbol *sym; 84 71 u64 addr; 85 72 char level; 73 + bool filtered; 86 74 }; 87 75 88 76 struct dso { ··· 112 98 113 99 void dso__sort_by_name(struct dso *self, enum map_type type); 114 100 101 + struct perf_session; 102 + 115 103 struct dso *dsos__findnew(const char *name); 116 - int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); 104 + int dso__load(struct dso *self, struct map *map, struct perf_session *session, 105 + symbol_filter_t filter); 117 106 void dsos__fprintf(FILE *fp); 118 107 size_t dsos__fprintf_buildid(FILE *fp); 119 108 ··· 133 116 bool dsos__read_build_ids(void); 134 117 int build_id__sprintf(u8 *self, int len, char *bf); 135 118 136 - size_t kernel_maps__fprintf(FILE *fp); 119 + int symbol__init(void); 120 + int perf_session__create_kernel_maps(struct perf_session *self); 137 121 138 - int symbol__init(struct symbol_conf *conf); 139 - 140 - struct map_groups; 141 - struct map_groups *kmaps; 142 122 extern struct list_head dsos__user, dsos__kernel; 143 123 extern struct dso *vdso; 144 124 #endif /* __PERF_SYMBOL */
+12 -25
tools/perf/util/thread.c
··· 2 2 #include <stdlib.h> 3 3 #include <stdio.h> 4 4 #include <string.h> 5 + #include "session.h" 5 6 #include "thread.h" 6 7 #include "util.h" 7 8 #include "debug.h" 8 - 9 - static struct rb_root threads; 10 - static struct thread *last_match; 11 9 12 10 void map_groups__init(struct map_groups *self) 13 11 { ··· 120 122 map_groups__fprintf(&self->mg, fp); 121 123 } 122 124 123 - struct thread *threads__findnew(pid_t pid) 125 + struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) 124 126 { 125 - struct rb_node **p = &threads.rb_node; 127 + struct rb_node **p = &self->threads.rb_node; 126 128 struct rb_node *parent = NULL; 127 129 struct thread *th; 128 130 ··· 131 133 * so most of the time we dont have to look up 132 134 * the full rbtree: 133 135 */ 134 - if (last_match && last_match->pid == pid) 135 - return last_match; 136 + if (self->last_match && self->last_match->pid == pid) 137 + return self->last_match; 136 138 137 139 while (*p != NULL) { 138 140 parent = *p; 139 141 th = rb_entry(parent, struct thread, rb_node); 140 142 141 143 if (th->pid == pid) { 142 - last_match = th; 144 + self->last_match = th; 143 145 return th; 144 146 } 145 147 ··· 152 154 th = thread__new(pid); 153 155 if (th != NULL) { 154 156 rb_link_node(&th->rb_node, parent, p); 155 - rb_insert_color(&th->rb_node, &threads); 156 - last_match = th; 157 + rb_insert_color(&th->rb_node, &self->threads); 158 + self->last_match = th; 157 159 } 158 160 159 161 return th; 160 - } 161 - 162 - struct thread *register_idle_thread(void) 163 - { 164 - struct thread *thread = threads__findnew(0); 165 - 166 - if (!thread || thread__set_comm(thread, "swapper")) { 167 - fprintf(stderr, "problem inserting idle task.\n"); 168 - exit(-1); 169 - } 170 - 171 - return thread; 172 162 } 173 163 174 164 static void map_groups__remove_overlappings(struct map_groups *self, ··· 267 281 return 0; 268 282 } 269 283 270 - size_t threads__fprintf(FILE *fp) 284 + size_t perf_session__fprintf(struct 
perf_session *self, FILE *fp) 271 285 { 272 286 size_t ret = 0; 273 287 struct rb_node *nd; 274 288 275 - for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { 289 + for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) { 276 290 struct thread *pos = rb_entry(nd, struct thread, rb_node); 277 291 278 292 ret += thread__fprintf(pos, fp); ··· 282 296 } 283 297 284 298 struct symbol *map_groups__find_symbol(struct map_groups *self, 299 + struct perf_session *session, 285 300 enum map_type type, u64 addr, 286 301 symbol_filter_t filter) 287 302 { 288 303 struct map *map = map_groups__find(self, type, addr); 289 304 290 305 if (map != NULL) 291 - return map__find_symbol(map, map->map_ip(map, addr), filter); 306 + return map__find_symbol(map, session, map->map_ip(map, addr), filter); 292 307 293 308 return NULL; 294 309 }
+8 -8
tools/perf/util/thread.h
··· 8 8 struct map_groups { 9 9 struct rb_root maps[MAP__NR_TYPES]; 10 10 struct list_head removed_maps[MAP__NR_TYPES]; 11 - bool use_modules; 12 11 }; 13 12 14 13 struct thread { ··· 22 23 void map_groups__init(struct map_groups *self); 23 24 int thread__set_comm(struct thread *self, const char *comm); 24 25 int thread__comm_len(struct thread *self); 25 - struct thread *threads__findnew(pid_t pid); 26 - struct thread *register_idle_thread(void); 26 + struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); 27 27 void thread__insert_map(struct thread *self, struct map *map); 28 28 int thread__fork(struct thread *self, struct thread *parent); 29 29 size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp); 30 - size_t threads__fprintf(FILE *fp); 30 + size_t perf_session__fprintf(struct perf_session *self, FILE *fp); 31 31 32 32 void maps__insert(struct rb_root *maps, struct map *map); 33 33 struct map *maps__find(struct rb_root *maps, u64 addr); ··· 48 50 return self ? 
map_groups__find(&self->mg, type, addr) : NULL; 49 51 } 50 52 51 - void thread__find_addr_location(struct thread *self, u8 cpumode, 53 + void thread__find_addr_location(struct thread *self, 54 + struct perf_session *session, u8 cpumode, 52 55 enum map_type type, u64 addr, 53 56 struct addr_location *al, 54 57 symbol_filter_t filter); 55 58 struct symbol *map_groups__find_symbol(struct map_groups *self, 59 + struct perf_session *session, 56 60 enum map_type type, u64 addr, 57 61 symbol_filter_t filter); 58 62 59 63 static inline struct symbol * 60 - map_groups__find_function(struct map_groups *self, u64 addr, 61 - symbol_filter_t filter) 64 + map_groups__find_function(struct map_groups *self, struct perf_session *session, 65 + u64 addr, symbol_filter_t filter) 62 66 { 63 - return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter); 67 + return map_groups__find_symbol(self, session, MAP__FUNCTION, addr, filter); 64 68 } 65 69 66 70 struct map *map_groups__find_by_name(struct map_groups *self,
+31 -11
tools/perf/util/trace-event-perl.c
··· 267 267 } 268 268 269 269 static void perl_process_event(int cpu, void *data, 270 - int size __attribute((unused)), 270 + int size __unused, 271 271 unsigned long long nsecs, char *comm) 272 272 { 273 273 struct format_field *field; ··· 359 359 /* 360 360 * Start trace script 361 361 */ 362 - static int perl_start_script(const char *script) 362 + static int perl_start_script(const char *script, int argc, const char **argv) 363 363 { 364 - const char *command_line[2] = { "", NULL }; 364 + const char **command_line; 365 + int i, err = 0; 365 366 367 + command_line = malloc((argc + 2) * sizeof(const char *)); 368 + command_line[0] = ""; 366 369 command_line[1] = script; 370 + for (i = 2; i < argc + 2; i++) 371 + command_line[i] = argv[i - 2]; 367 372 368 373 my_perl = perl_alloc(); 369 374 perl_construct(my_perl); 370 375 371 - if (perl_parse(my_perl, xs_init, 2, (char **)command_line, 372 - (char **)NULL)) 373 - return -1; 376 + if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, 377 + (char **)NULL)) { 378 + err = -1; 379 + goto error; 380 + } 374 381 375 - perl_run(my_perl); 376 - if (SvTRUE(ERRSV)) 377 - return -1; 382 + if (perl_run(my_perl)) { 383 + err = -1; 384 + goto error; 385 + } 386 + 387 + if (SvTRUE(ERRSV)) { 388 + err = -1; 389 + goto error; 390 + } 378 391 379 392 run_start_sub(); 380 393 394 + free(command_line); 381 395 fprintf(stderr, "perf trace started with Perl script %s\n\n", script); 382 - 383 396 return 0; 397 + error: 398 + perl_free(my_perl); 399 + free(command_line); 400 + 401 + return err; 384 402 } 385 403 386 404 /* ··· 597 579 "\n etc.\n"); 598 580 } 599 581 600 - static int perl_start_script_unsupported(const char *script __unused) 582 + static int perl_start_script_unsupported(const char *script __unused, 583 + int argc __unused, 584 + const char **argv __unused) 601 585 { 602 586 print_unsupported_msg(); 603 587
+1 -1
tools/perf/util/trace-event.h
··· 270 270 271 271 struct scripting_ops { 272 272 const char *name; 273 - int (*start_script) (const char *); 273 + int (*start_script) (const char *script, int argc, const char **argv); 274 274 int (*stop_script) (void); 275 275 void (*process_event) (int cpu, void *data, int size, 276 276 unsigned long long nsecs, char *comm);