Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf, x86: Complain louder about BIOSen corrupting CPU/PMU state and continue
perf, x86: P4 PMU - Read proper MSR register to catch unflagged overflows
perf symbols: Look at .dynsym again if .symtab not found
perf build-id: Add quirk to deal with perf.data file format breakage
perf session: Pass evsel in event_ops->sample()
perf: Better fit max unprivileged mlock pages for tools needs
perf_events: Fix stale ->cgrp pointer in update_cgrp_time_from_cpuctx()
perf top: Fix uninitialized 'counter' variable
tracing: Fix set_ftrace_filter probe function display
perf, x86: Fix Intel fixed counters base initialization

+168 -69
+8 -3
arch/x86/kernel/cpu/perf_event.c
···
         return true;

 bios_fail:
-        printk(KERN_CONT "Broken BIOS detected, using software events only.\n");
+        /*
+         * We still allow the PMU driver to operate:
+         */
+        printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
         printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
-        return false;
+
+        return true;

 msr_fail:
         printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
+
         return false;
 }
···
                 hwc->event_base = 0;
         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
+                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
         } else {
                 hwc->config_base = x86_pmu_config_addr(hwc->idx);
                 hwc->event_base  = x86_pmu_event_addr(hwc->idx);
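
Side note on the last hunk: the Intel fixed-function counters sit in consecutive MSRs starting at MSR_ARCH_PERFMON_FIXED_CTR0 (0x309), while the generic index space places them at X86_PMC_IDX_FIXED (32) and up, so event_base must add the (idx - X86_PMC_IDX_FIXED) offset back in. A minimal standalone sketch of that mapping (the constant values are from the x86 perf headers of this era; fixed_ctr_msr() is a made-up helper for illustration only):

#include <stdio.h>

/* Values as defined in the x86 perf headers of this era */
#define MSR_ARCH_PERFMON_FIXED_CTR0     0x309
#define X86_PMC_IDX_FIXED               32

/* Hypothetical helper: map a generic counter index to its fixed-counter MSR */
static unsigned int fixed_ctr_msr(int idx)
{
        /* fixed counter N lives at 0x309 + N */
        return MSR_ARCH_PERFMON_FIXED_CTR0 + (idx - X86_PMC_IDX_FIXED);
}

int main(void)
{
        /* idx 32, 33, 34 -> MSR 0x309, 0x30a, 0x30b (FIXED_CTR0..2) */
        for (int idx = X86_PMC_IDX_FIXED; idx < X86_PMC_IDX_FIXED + 3; idx++)
                printf("idx %d -> MSR 0x%x\n", idx, fixed_ctr_msr(idx));
        return 0;
}

Running it prints 0x309, 0x30a and 0x30b for the three fixed counters, which is what the corrected event_base initialization now encodes.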
+1
arch/x86/kernel/cpu/perf_event_p4.c
···
          * the counter has reached zero value and continued counting before
          * real NMI signal was received:
          */
+        rdmsrl(hwc->event_base, v);
         if (!(v & ARCH_P4_UNFLAGGED_BIT))
                 return 1;

-2
include/linux/perf_event.h
···
         struct list_head                rotation_list;
         int                             jiffies_interval;
         struct pmu                      *active_pmu;
-#ifdef CONFIG_CGROUP_PERF
         struct perf_cgroup              *cgrp;
-#endif
 };

 struct perf_output_handle {
+13 -2
kernel/perf_event.c
···
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;

-int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
+/* Minimum for 128 pages + 1 for the user control page */
+int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */

 /*
  * max perf event sample rate
···
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+        struct perf_cpu_context *cpuctx;
         /*
          * We can have double detach due to exit/hot-unplug + close.
          */
···

         event->attach_state &= ~PERF_ATTACH_CONTEXT;

-        if (is_cgroup_event(event))
+        if (is_cgroup_event(event)) {
                 ctx->nr_cgroups--;
+                cpuctx = __get_cpu_context(ctx);
+                /*
+                 * if there are no more cgroup events
+                 * then clear cgrp to avoid stale pointer
+                 * in update_cgrp_time_from_cpuctx()
+                 */
+                if (!ctx->nr_cgroups)
+                        cpuctx->cgrp = NULL;
+        }

         ctx->nr_events--;
         if (event->attr.inherit_stat)
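
The new default follows from what the tools map by default: 128 data pages plus the one user control page, at 4 kB each. A quick standalone check of that arithmetic (4 kB pages assumed):

#include <stdio.h>

int main(void)
{
        const unsigned int page_kb    = 4;      /* assumed 4 kB pages */
        const unsigned int data_pages = 128;    /* what the tools mmap by default */
        const unsigned int ctl_pages  = 1;      /* the user control page */

        /* (128 + 1) pages * 4 kB = 516 kB of 'free' mlock budget per user */
        printf("%u kB\n", (data_pages + ctl_pages) * page_kb);
        return 0;
}

That (128 + 1) * 4 kB = 516 kB is where the new sysctl default comes from.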
+1 -2
kernel/trace/ftrace.c
···
                 return t_hash_next(m, pos);

         (*pos)++;
-        iter->pos = *pos;
+        iter->pos = iter->func_pos = *pos;

         if (iter->flags & FTRACE_ITER_PRINTALL)
                 return t_hash_start(m, pos);
···
         if (!rec)
                 return t_hash_start(m, pos);

-        iter->func_pos = *pos;
         iter->func = rec;

         return iter;
+4 -14
tools/perf/builtin-annotate.c
···

 static int perf_evlist__add_sample(struct perf_evlist *evlist,
                                    struct perf_sample *sample,
+                                   struct perf_evsel *evsel,
                                    struct addr_location *al)
 {
-        struct perf_evsel *evsel;
         struct hist_entry *he;
         int ret;

···
                         symbol__delete(al->sym);
                 }
                 return 0;
-        }
-
-        evsel = perf_evlist__id2evsel(evlist, sample->id);
-        if (evsel == NULL) {
-                /*
-                 * FIXME: Propagate this back, but at least we're in a builtin,
-                 * where exit() is allowed. ;-)
-                 */
-                ui__warning("Invalid %s file, contains samples with id not in "
-                            "its header!\n", input_name);
-                exit_browser(0);
-                exit(1);
         }

         he = __hists__add_entry(&evsel->hists, al, NULL, 1);
···

 static int process_sample_event(union perf_event *event,
                                 struct perf_sample *sample,
+                                struct perf_evsel *evsel,
                                 struct perf_session *session)
 {
         struct addr_location al;
···
                 return -1;
         }

-        if (!al.filtered && perf_evlist__add_sample(session->evlist, sample, &al)) {
+        if (!al.filtered &&
+            perf_evlist__add_sample(session->evlist, sample, evsel, &al)) {
                 pr_warning("problem incrementing symbol count, "
                            "skipping event\n");
                 return -1;
+1
tools/perf/builtin-diff.c
···

 static int diff__process_sample_event(union perf_event *event,
                                       struct perf_sample *sample,
+                                      struct perf_evsel *evsel __used,
                                       struct perf_session *session)
 {
         struct addr_location al;
+10 -1
tools/perf/builtin-inject.c
···
         return perf_event__repipe_synth(event, session);
 }

+static int perf_event__repipe_sample(union perf_event *event,
+                                     struct perf_sample *sample __used,
+                                     struct perf_evsel *evsel __used,
+                                     struct perf_session *session)
+{
+        return perf_event__repipe_synth(event, session);
+}
+
 static int perf_event__repipe_mmap(union perf_event *event,
                                    struct perf_sample *sample,
                                    struct perf_session *session)
···

 static int perf_event__inject_buildid(union perf_event *event,
                                       struct perf_sample *sample,
+                                      struct perf_evsel *evsel __used,
                                       struct perf_session *session)
 {
         struct addr_location al;
···
 }

 struct perf_event_ops inject_ops = {
-        .sample         = perf_event__repipe,
+        .sample         = perf_event__repipe_sample,
         .mmap           = perf_event__repipe,
         .comm           = perf_event__repipe,
         .fork           = perf_event__repipe,
+1
tools/perf/builtin-kmem.c
···

 static int process_sample_event(union perf_event *event,
                                 struct perf_sample *sample,
+                                struct perf_evsel *evsel __used,
                                 struct perf_session *session)
 {
         struct thread *thread = perf_session__findnew(session, event->ip.pid);
+3 -1
tools/perf/builtin-lock.c
···
                 die("Unknown type of information\n");
 }

-static int process_sample_event(union perf_event *event, struct perf_sample *sample,
+static int process_sample_event(union perf_event *event,
+                                struct perf_sample *sample,
+                                struct perf_evsel *evsel __used,
                                 struct perf_session *s)
 {
         struct thread *thread = perf_session__findnew(s, sample->tid);
+4 -15
tools/perf/builtin-report.c
···

 static int perf_session__add_hist_entry(struct perf_session *session,
                                         struct addr_location *al,
-                                        struct perf_sample *sample)
+                                        struct perf_sample *sample,
+                                        struct perf_evsel *evsel)
 {
         struct symbol *parent = NULL;
         int err = 0;
         struct hist_entry *he;
-        struct perf_evsel *evsel;

         if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
                 err = perf_session__resolve_callchain(session, al->thread,
                                                       sample->callchain, &parent);
                 if (err)
                         return err;
-        }
-
-        evsel = perf_evlist__id2evsel(session->evlist, sample->id);
-        if (evsel == NULL) {
-                /*
-                 * FIXME: Propagate this back, but at least we're in a builtin,
-                 * where exit() is allowed. ;-)
-                 */
-                ui__warning("Invalid %s file, contains samples with id %" PRIu64 " not in "
-                            "its header!\n", input_name, sample->id);
-                exit_browser(0);
-                exit(1);
         }

         he = __hists__add_entry(&evsel->hists, al, parent, sample->period);
···

 static int process_sample_event(union perf_event *event,
                                 struct perf_sample *sample,
+                                struct perf_evsel *evsel,
                                 struct perf_session *session)
 {
         struct addr_location al;
···
         if (al.filtered || (hide_unresolved && al.sym == NULL))
                 return 0;

-        if (perf_session__add_hist_entry(session, &al, sample)) {
+        if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
                 pr_debug("problem incrementing symbol period, skipping event\n");
                 return -1;
         }
+1
tools/perf/builtin-sched.c
···

 static int process_sample_event(union perf_event *event,
                                 struct perf_sample *sample,
+                                struct perf_evsel *evsel __used,
                                 struct perf_session *session)
 {
         struct thread *thread;
+4 -11
tools/perf/builtin-script.c
···

 static void process_event(union perf_event *event __unused,
                           struct perf_sample *sample,
+                          struct perf_evsel *evsel,
                           struct perf_session *session,
                           struct thread *thread)
 {
-        struct perf_event_attr *attr;
-        struct perf_evsel *evsel;
-
-        evsel = perf_evlist__id2evsel(session->evlist, sample->id);
-        if (evsel == NULL) {
-                pr_err("Invalid data. Contains samples with id not in "
-                       "its header!\n");
-                return;
-        }
-        attr = &evsel->attr;
+        struct perf_event_attr *attr = &evsel->attr;

         if (output_fields[attr->type] == 0)
                 return;
···

 static int process_sample_event(union perf_event *event,
                                 struct perf_sample *sample,
+                                struct perf_evsel *evsel,
                                 struct perf_session *session)
 {
         struct thread *thread = perf_session__findnew(session, event->ip.pid);
···
                 last_timestamp = sample->time;
                 return 0;
         }
-        scripting_ops->process_event(event, sample, session, thread);
+        scripting_ops->process_event(event, sample, evsel, session, thread);

         session->hists.stats.total_period += sample->period;
         return 0;
+11
tools/perf/builtin-timechart.c
···

 static int process_sample_event(union perf_event *event __used,
                                 struct perf_sample *sample,
+                                struct perf_evsel *evsel __used,
                                 struct perf_session *session)
 {
         struct trace_entry *te;
···
                 struct power_entry_old *peo;
                 peo = (void *)te;
 #endif
+                /*
+                 * FIXME: use evsel, it's already mapped from id to perf_evsel,
+                 * remove perf_header__find_event infrastructure bits.
+                 * Mapping all these "power:cpu_idle" strings to the tracepoint
+                 * ID and then just comparing against evsel->attr.config.
+                 *
+                 * e.g.:
+                 *
+                 * if (evsel->attr.config == power_cpu_idle_id)
+                 */
                 event_str = perf_header__find_event(te->type);

                 if (!event_str)
+3 -1
tools/perf/builtin-top.c
···
                 break;
         case 'E':
                 if (top.evlist->nr_entries > 1) {
-                        int counter;
+                        /* Select 0 as the default event: */
+                        int counter = 0;
+
                         fprintf(stderr, "\nAvailable events:");

                         list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
+1
tools/perf/util/build-id.c
···

 static int build_id__mark_dso_hit(union perf_event *event,
                                   struct perf_sample *sample __used,
+                                  struct perf_evsel *evsel __used,
                                   struct perf_session *session)
 {
         struct addr_location al;
+56 -1
tools/perf/util/header.c
···
         return err;
 }

+static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
+                                                 int input, u64 offset, u64 size)
+{
+        struct perf_session *session = container_of(header, struct perf_session, header);
+        struct {
+                struct perf_event_header   header;
+                u8                         build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+                char                       filename[0];
+        } old_bev;
+        struct build_id_event bev;
+        char filename[PATH_MAX];
+        u64 limit = offset + size;
+
+        while (offset < limit) {
+                ssize_t len;
+
+                if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
+                        return -1;
+
+                if (header->needs_swap)
+                        perf_event_header__bswap(&old_bev.header);
+
+                len = old_bev.header.size - sizeof(old_bev);
+                if (read(input, filename, len) != len)
+                        return -1;
+
+                bev.header = old_bev.header;
+                bev.pid    = 0;
+                memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
+                __event_process_build_id(&bev, filename, session);
+
+                offset += bev.header.size;
+        }
+
+        return 0;
+}
+
 static int perf_header__read_build_ids(struct perf_header *header,
                                        int input, u64 offset, u64 size)
 {
         struct perf_session *session = container_of(header, struct perf_session, header);
         struct build_id_event bev;
         char filename[PATH_MAX];
-        u64 limit = offset + size;
+        u64 limit = offset + size, orig_offset = offset;
         int err = -1;

         while (offset < limit) {
···
                 len = bev.header.size - sizeof(bev);
                 if (read(input, filename, len) != len)
                         goto out;
+                /*
+                 * The a1645ce1 changeset:
+                 *
+                 * "perf: 'perf kvm' tool for monitoring guest performance from host"
+                 *
+                 * Added a field to struct build_id_event that broke the file
+                 * format.
+                 *
+                 * Since the kernel build-id is the first entry, process the
+                 * table using the old format if the well known
+                 * '[kernel.kallsyms]' string for the kernel build-id has the
+                 * first 4 characters chopped off (where the pid_t sits).
+                 */
+                if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
+                        if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
+                                return -1;
+                        return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
+                }

                 __event_process_build_id(&bev, filename, session);

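
Why the "nel.kallsyms]" comparison works: the a1645ce1 change grew the on-disk record by a 4-byte pid field, so parsing an old-format table with the new struct starts the filename 4 bytes too late, turning "[kernel.kallsyms]" into "nel.kallsyms]". A standalone sketch with simplified stand-in structs (local copies for illustration only, not the actual perf headers):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-ins for the on-disk layouts (BUILD_ID_SIZE=20, aligned to 24) */
struct old_bev {                                        /* pre-a1645ce1 */
        uint32_t type; uint16_t misc; uint16_t size;    /* perf_event_header */
        uint8_t  build_id[24];
        /* filename follows */
};

struct new_bev {                                        /* post-a1645ce1 */
        uint32_t type; uint16_t misc; uint16_t size;
        int32_t  pid;                                   /* the field that broke the format */
        uint8_t  build_id[24];
        /* filename follows */
};

int main(void)
{
        unsigned char record[64] = { 0 };
        const char *name = "[kernel.kallsyms]";

        /* Build an old-format record: filename starts right after old_bev */
        memcpy(record + sizeof(struct old_bev), name, strlen(name) + 1);

        /* Parse it with the new layout: the filename pointer lands 4 bytes too far */
        const char *parsed = (const char *)(record + sizeof(struct new_bev));

        printf("sizeof(old)=%zu sizeof(new)=%zu\n",
               sizeof(struct old_bev), sizeof(struct new_bev));
        printf("filename seen by new parser: \"%s\"\n", parsed);
        printf("quirk check matches: %s\n",
               memcmp(parsed, "nel.kallsyms]", 13) == 0 ? "yes" : "no");
        return 0;
}

This prints sizeof(old)=32, sizeof(new)=36 and the shifted name, which is exactly the 13-byte string the quirk test looks for before falling back to the old-format reader.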
+1
tools/perf/util/hist.h
···
         u32 nr_events[PERF_RECORD_HEADER_MAX];
         u32 nr_unknown_events;
         u32 nr_invalid_chains;
+        u32 nr_unknown_id;
 };

 enum hist_column {
+1
tools/perf/util/scripting-engines/trace-event-perl.c
···

 static void perl_process_event(union perf_event *pevent __unused,
                                struct perf_sample *sample,
+                               struct perf_evsel *evsel,
                                struct perf_session *session __unused,
                                struct thread *thread)
 {
+1
tools/perf/util/scripting-engines/trace-event-python.c
···

 static void python_process_event(union perf_event *pevent __unused,
                                  struct perf_sample *sample,
+                                 struct perf_evsel *evsel __unused,
                                  struct perf_session *session __unused,
                                  struct thread *thread)
 {
+23 -2
tools/perf/util/session.c
···
         return 0;
 }

+static int process_event_sample_stub(union perf_event *event __used,
+                                     struct perf_sample *sample __used,
+                                     struct perf_evsel *evsel __used,
+                                     struct perf_session *session __used)
+{
+        dump_printf(": unhandled!\n");
+        return 0;
+}
+
 static int process_event_stub(union perf_event *event __used,
                               struct perf_sample *sample __used,
                               struct perf_session *session __used)
···
 static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
 {
         if (handler->sample == NULL)
-                handler->sample = process_event_stub;
+                handler->sample = process_event_sample_stub;
         if (handler->mmap == NULL)
                 handler->mmap = process_event_stub;
         if (handler->comm == NULL)
···
                                        struct perf_event_ops *ops,
                                        u64 file_offset)
 {
+        struct perf_evsel *evsel;
+
         dump_event(session, event, file_offset, sample);

         switch (event->header.type) {
         case PERF_RECORD_SAMPLE:
                 dump_sample(session, event, sample);
-                return ops->sample(event, sample, session);
+                evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+                if (evsel == NULL) {
+                        ++session->hists.stats.nr_unknown_id;
+                        return -1;
+                }
+                return ops->sample(event, sample, evsel, session);
         case PERF_RECORD_MMAP:
                 return ops->mmap(event, sample, session);
         case PERF_RECORD_COMM:
···
                             "If that is not the case, consider "
                             "reporting to linux-kernel@vger.kernel.org.\n\n",
                             session->hists.stats.nr_unknown_events);
+        }
+
+        if (session->hists.stats.nr_unknown_id != 0) {
+                ui__warning("%u samples with id not present in the header\n",
+                            session->hists.stats.nr_unknown_id);
         }

         if (session->hists.stats.nr_invalid_chains != 0) {
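
The net effect of the ops->sample() change: the id-to-evsel lookup that annotate, report and script used to do by hand now happens once in the session layer, and samples whose id is not in the header are counted instead of killing the tool. A standalone model of that dispatch (all names here are illustrative stand-ins, not the perf tool's API):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-ins for evsel, sample and the stats counter */
struct evsel  { uint64_t id; const char *name; };
struct sample { uint64_t id; uint64_t period; };
struct stats  { unsigned int nr_unknown_id; };

/* The handler now receives the resolved evsel, like event_ops->sample() */
typedef int (*sample_fn)(struct sample *sample, struct evsel *evsel);

static struct evsel *id2evsel(struct evsel *evlist, size_t n, uint64_t id)
{
        for (size_t i = 0; i < n; i++)
                if (evlist[i].id == id)
                        return &evlist[i];
        return NULL;                    /* id not present in the "header" */
}

static int deliver_sample(struct evsel *evlist, size_t n, struct stats *st,
                          struct sample *sample, sample_fn handler)
{
        struct evsel *evsel = id2evsel(evlist, n, sample->id);

        if (evsel == NULL) {            /* count it, do not exit the tool */
                st->nr_unknown_id++;
                return -1;
        }
        return handler(sample, evsel);
}

static int print_sample(struct sample *sample, struct evsel *evsel)
{
        printf("%s: period %llu\n", evsel->name,
               (unsigned long long)sample->period);
        return 0;
}

int main(void)
{
        struct evsel evlist[] = { { 1, "cycles" }, { 2, "instructions" } };
        struct stats st = { 0 };
        struct sample good = { 2, 1000 }, bad = { 99, 1 };

        deliver_sample(evlist, 2, &st, &good, print_sample);
        deliver_sample(evlist, 2, &st, &bad, print_sample);
        printf("%u samples with id not present in the header\n", st.nr_unknown_id);
        return 0;
}

The design point is that handlers can now assume a valid evsel, while the session keeps the nr_unknown_id statistic for the end-of-run warning.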
+5 -2
tools/perf/util/session.h
···
         char filename[0];
 };

+struct perf_evsel;
 struct perf_event_ops;

+typedef int (*event_sample)(union perf_event *event, struct perf_sample *sample,
+                            struct perf_evsel *evsel, struct perf_session *session);
 typedef int (*event_op)(union perf_event *self, struct perf_sample *sample,
                         struct perf_session *session);
 typedef int (*event_synth_op)(union perf_event *self,
···
                          struct perf_event_ops *ops);

 struct perf_event_ops {
-        event_op        sample,
-                        mmap,
+        event_sample    sample;
+        event_op        mmap,
                         comm,
                         fork,
                         exit,
+13 -12
tools/perf/util/symbol.c
···
          * On the first pass, only load images if they have a full symtab.
          * Failing that, do a second pass where we accept .dynsym also
          */
-        for (self->symtab_type = SYMTAB__BUILD_ID_CACHE, want_symtab = 1;
+        want_symtab = 1;
+restart:
+        for (self->symtab_type = SYMTAB__BUILD_ID_CACHE;
              self->symtab_type != SYMTAB__NOT_FOUND;
              self->symtab_type++) {
                 switch (self->symtab_type) {
···
                         snprintf(name, size, "%s%s", symbol_conf.symfs,
                                  self->long_name);
                         break;
-
-                default:
-                        /*
-                         * If we wanted a full symtab but no image had one,
-                         * relax our requirements and repeat the search.
-                         */
-                        if (want_symtab) {
-                                want_symtab = 0;
-                                self->symtab_type = SYMTAB__BUILD_ID_CACHE;
-                        } else
-                                continue;
+                default:;
                 }

                 /* Name is now the name of the next image to try */
···
                         ret += nr_plt;
                         break;
                 }
+        }
+
+        /*
+         * If we wanted a full symtab but no image had one,
+         * relax our requirements and repeat the search.
+         */
+        if (ret <= 0 && want_symtab) {
+                want_symtab = 0;
+                goto restart;
         }

         free(name);
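
The restructuring above is the classic two-pass search: try every candidate under a strict requirement first, and only if nothing matched, relax the requirement and restart. A tiny generic sketch of that control flow (illustrative only, unrelated to the ELF specifics):

#include <stdio.h>

/* Pretend "images": only the last one loads once the requirement is relaxed */
static int try_load(int idx, int want_symtab)
{
        /* image 2 only has .dynsym, so it fails while a full symtab is required */
        if (idx == 2 && !want_symtab)
                return 1;       /* loaded something */
        return 0;
}

int main(void)
{
        int ret = 0, want_symtab = 1;
restart:
        for (int idx = 0; idx < 3; idx++) {
                ret = try_load(idx, want_symtab);
                if (ret > 0)
                        break;
        }

        /* Nothing matched with the strict requirement: relax it and retry once */
        if (ret <= 0 && want_symtab) {
                want_symtab = 0;
                goto restart;
        }

        printf("ret=%d want_symtab=%d\n", ret, want_symtab);   /* ret=1 want_symtab=0 */
        return 0;
}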
+1
tools/perf/util/trace-event-scripting.c
···

 static void process_event_unsupported(union perf_event *event __unused,
                                       struct perf_sample *sample __unused,
+                                      struct perf_evsel *evsel __unused,
                                       struct perf_session *session __unused,
                                       struct thread *thread __unused)
 {
+1
tools/perf/util/trace-event.h
···
         int (*stop_script) (void);
         void (*process_event) (union perf_event *event,
                                struct perf_sample *sample,
+                               struct perf_evsel *evsel,
                                struct perf_session *session,
                                struct thread *thread);
         int (*generate_script) (const char *outfile);