Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-core-for-mingo-20160310' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

- Implement 'perf stat --metric-only' (Andi Kleen)

- Fix perf script python database export crash (Chris Phlipot)

Infrastructure changes:

- perf top/report --hierarchy assorted fixes for problems introduced in this
perf/core cycle (Namhyung Kim)

- Support '~' operation in libtraceevent (Steven Rostedt)

Build fixes:

- Fix building of jitdump on opensuse and ubuntu systems when the DWARF
devel files are not installed (Arnaldo Carvalho de Melo)

- Do not try building jitdump on unsupported arches (Jiri Olsa)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+542 -146
+6
tools/lib/traceevent/event-parse.c
··· 2398 2398 break; 2399 2399 *val = left + right; 2400 2400 break; 2401 + case '~': 2402 + ret = arg_num_eval(arg->op.right, &right); 2403 + if (!ret) 2404 + break; 2405 + *val = ~right; 2406 + break; 2401 2407 default: 2402 2408 do_warning("unknown op '%s'", arg->op.op); 2403 2409 ret = 0;
+27
tools/perf/Documentation/perf-stat.txt
··· 147 147 The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. 148 148 example: 'perf stat -I 1000 -e cycles -a sleep 5' 149 149 150 + --metric-only:: 151 + Only print computed metrics. Print them in a single line. 152 + Don't show any raw values. Not supported with --per-thread. 153 + 150 154 --per-socket:: 151 155 Aggregate counts per processor socket for system-wide mode measurements. This 152 156 is a useful mode to detect imbalance between sockets. To enable this mode, ··· 222 218 27075259 cache misses # 3.335 M/sec 223 219 224 220 Wall-clock time elapsed: 719.554352 msecs 221 + 222 + CSV FORMAT 223 + ---------- 224 + 225 + With -x, perf stat is able to output a not-quite-CSV format output 226 + Commas in the output are not put into "". To make it easy to parse 227 + it is recommended to use a different character like -x \; 228 + 229 + The fields are in this order: 230 + 231 + - optional usec time stamp in fractions of second (with -I xxx) 232 + - optional CPU, core, or socket identifier 233 + - optional number of logical CPUs aggregated 234 + - counter value 235 + - unit of the counter value or empty 236 + - event name 237 + - run time of counter 238 + - percentage of measurement time the counter was running 239 + - optional variance if multiple values are collected with -r 240 + - optional metric value 241 + - optional unit of metric 242 + 243 + Additional metrics may be printed with all earlier fields being empty. 225 244 226 245 SEE ALSO 227 246 --------
+1
tools/perf/arch/arm/Makefile
··· 1 1 ifndef NO_DWARF 2 2 PERF_HAVE_DWARF_REGS := 1 3 3 endif 4 + PERF_HAVE_JITDUMP := 1
+1
tools/perf/arch/arm64/Makefile
··· 1 1 ifndef NO_DWARF 2 2 PERF_HAVE_DWARF_REGS := 1 3 3 endif 4 + PERF_HAVE_JITDUMP := 1
+1
tools/perf/arch/powerpc/Makefile
··· 3 3 endif 4 4 5 5 HAVE_KVM_STAT_SUPPORT := 1 6 + PERF_HAVE_JITDUMP := 1
+1
tools/perf/arch/x86/Makefile
··· 3 3 endif 4 4 HAVE_KVM_STAT_SUPPORT := 1 5 5 PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 6 + PERF_HAVE_JITDUMP := 1
+7 -5
tools/perf/builtin-inject.c
··· 73 73 return perf_event__repipe_synth(tool, event); 74 74 } 75 75 76 - #ifdef HAVE_LIBELF_SUPPORT 76 + #ifdef HAVE_JITDUMP 77 77 static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused, 78 78 union perf_event *event __maybe_unused, 79 79 struct ordered_events *oe __maybe_unused) ··· 245 245 return err; 246 246 } 247 247 248 - #ifdef HAVE_LIBELF_SUPPORT 248 + #ifdef HAVE_JITDUMP 249 249 static int perf_event__jit_repipe_mmap(struct perf_tool *tool, 250 250 union perf_event *event, 251 251 struct perf_sample *sample, ··· 283 283 return err; 284 284 } 285 285 286 - #ifdef HAVE_LIBELF_SUPPORT 286 + #ifdef HAVE_JITDUMP 287 287 static int perf_event__jit_repipe_mmap2(struct perf_tool *tool, 288 288 union perf_event *event, 289 289 struct perf_sample *sample, ··· 778 778 OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat, 779 779 "Merge sched-stat and sched-switch for getting events " 780 780 "where and how long tasks slept"), 781 + #ifdef HAVE_JITDUMP 781 782 OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"), 783 + #endif 782 784 OPT_INCR('v', "verbose", &verbose, 783 785 "be more verbose (show build ids, etc)"), 784 786 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file", ··· 797 795 "perf inject [<options>]", 798 796 NULL 799 797 }; 800 - #ifndef HAVE_LIBELF_SUPPORT 798 + #ifndef HAVE_JITDUMP 801 799 set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true); 802 800 #endif 803 801 argc = parse_options(argc, argv, options, inject_usage, 0); ··· 835 833 inject.tool.ordered_events = true; 836 834 inject.tool.ordering_requires_timestamps = true; 837 835 } 838 - #ifdef HAVE_LIBELF_SUPPORT 836 + #ifdef HAVE_JITDUMP 839 837 if (inject.jit_mode) { 840 838 inject.tool.mmap2 = perf_event__jit_repipe_mmap2; 841 839 inject.tool.mmap = perf_event__jit_repipe_mmap;
+232 -12
tools/perf/builtin-stat.c
··· 122 122 static unsigned int initial_delay = 0; 123 123 static unsigned int unit_width = 4; /* strlen("unit") */ 124 124 static bool forever = false; 125 + static bool metric_only = false; 125 126 static struct timespec ref_time; 126 127 static struct cpu_map *aggr_map; 127 128 static aggr_get_id_t aggr_get_id; ··· 828 827 fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit); 829 828 } 830 829 830 + #define METRIC_ONLY_LEN 20 831 + 832 + /* Filter out some columns that don't work well in metrics only mode */ 833 + 834 + static bool valid_only_metric(const char *unit) 835 + { 836 + if (!unit) 837 + return false; 838 + if (strstr(unit, "/sec") || 839 + strstr(unit, "hz") || 840 + strstr(unit, "Hz") || 841 + strstr(unit, "CPUs utilized")) 842 + return false; 843 + return true; 844 + } 845 + 846 + static const char *fixunit(char *buf, struct perf_evsel *evsel, 847 + const char *unit) 848 + { 849 + if (!strncmp(unit, "of all", 6)) { 850 + snprintf(buf, 1024, "%s %s", perf_evsel__name(evsel), 851 + unit); 852 + return buf; 853 + } 854 + return unit; 855 + } 856 + 857 + static void print_metric_only(void *ctx, const char *color, const char *fmt, 858 + const char *unit, double val) 859 + { 860 + struct outstate *os = ctx; 861 + FILE *out = os->fh; 862 + int n; 863 + char buf[1024]; 864 + unsigned mlen = METRIC_ONLY_LEN; 865 + 866 + if (!valid_only_metric(unit)) 867 + return; 868 + unit = fixunit(buf, os->evsel, unit); 869 + if (color) 870 + n = color_fprintf(out, color, fmt, val); 871 + else 872 + n = fprintf(out, fmt, val); 873 + if (n > METRIC_ONLY_LEN) 874 + n = METRIC_ONLY_LEN; 875 + if (mlen < strlen(unit)) 876 + mlen = strlen(unit) + 1; 877 + fprintf(out, "%*s", mlen - n, ""); 878 + } 879 + 880 + static void print_metric_only_csv(void *ctx, const char *color __maybe_unused, 881 + const char *fmt, 882 + const char *unit, double val) 883 + { 884 + struct outstate *os = ctx; 885 + FILE *out = os->fh; 886 + char buf[64], *vals, *ends; 887 + char tbuf[1024]; 888 + 
889 + if (!valid_only_metric(unit)) 890 + return; 891 + unit = fixunit(tbuf, os->evsel, unit); 892 + snprintf(buf, sizeof buf, fmt, val); 893 + vals = buf; 894 + while (isspace(*vals)) 895 + vals++; 896 + ends = vals; 897 + while (isdigit(*ends) || *ends == '.') 898 + ends++; 899 + *ends = 0; 900 + fprintf(out, "%s%s", vals, csv_sep); 901 + } 902 + 903 + static void new_line_metric(void *ctx __maybe_unused) 904 + { 905 + } 906 + 907 + static void print_metric_header(void *ctx, const char *color __maybe_unused, 908 + const char *fmt __maybe_unused, 909 + const char *unit, double val __maybe_unused) 910 + { 911 + struct outstate *os = ctx; 912 + char tbuf[1024]; 913 + 914 + if (!valid_only_metric(unit)) 915 + return; 916 + unit = fixunit(tbuf, os->evsel, unit); 917 + if (csv_output) 918 + fprintf(os->fh, "%s%s", unit, csv_sep); 919 + else 920 + fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit); 921 + } 922 + 831 923 static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg) 832 924 { 833 925 FILE *output = stat_config.output; ··· 1015 921 print_metric_t pm = print_metric_std; 1016 922 void (*nl)(void *); 1017 923 1018 - nl = new_line_std; 924 + if (metric_only) { 925 + nl = new_line_metric; 926 + if (csv_output) 927 + pm = print_metric_only_csv; 928 + else 929 + pm = print_metric_only; 930 + } else 931 + nl = new_line_std; 1019 932 1020 - if (csv_output) { 933 + if (csv_output && !metric_only) { 1021 934 static int aggr_fields[] = { 1022 935 [AGGR_GLOBAL] = 0, 1023 936 [AGGR_THREAD] = 1, ··· 1041 940 os.nfields++; 1042 941 } 1043 942 if (run == 0 || ena == 0 || counter->counts->scaled == -1) { 943 + if (metric_only) { 944 + pm(&os, NULL, "", "", 0); 945 + return; 946 + } 1044 947 aggr_printout(counter, id, nr); 1045 948 1046 949 fprintf(stat_config.output, "%*s%s", ··· 1073 968 return; 1074 969 } 1075 970 1076 - if (nsec_counter(counter)) 971 + if (metric_only) 972 + /* nothing */; 973 + else if (nsec_counter(counter)) 1077 974 nsec_printout(id, 
nr, counter, uval); 1078 975 else 1079 976 abs_printout(id, nr, counter, uval); ··· 1084 977 out.new_line = nl; 1085 978 out.ctx = &os; 1086 979 1087 - if (csv_output) { 980 + if (csv_output && !metric_only) { 1088 981 print_noise(counter, noise); 1089 982 print_running(run, ena); 1090 983 } ··· 1092 985 perf_stat__print_shadow_stats(counter, uval, 1093 986 first_shadow_cpu(counter, id), 1094 987 &out); 1095 - if (!csv_output) { 988 + if (!csv_output && !metric_only) { 1096 989 print_noise(counter, noise); 1097 990 print_running(run, ena); 1098 991 } ··· 1128 1021 int cpu, s, s2, id, nr; 1129 1022 double uval; 1130 1023 u64 ena, run, val; 1024 + bool first; 1131 1025 1132 1026 if (!(aggr_map || aggr_get_id)) 1133 1027 return; 1134 1028 1135 1029 aggr_update_shadow(); 1136 1030 1031 + /* 1032 + * With metric_only everything is on a single line. 1033 + * Without each counter has its own line. 1034 + */ 1137 1035 for (s = 0; s < aggr_map->nr; s++) { 1036 + if (prefix && metric_only) 1037 + fprintf(output, "%s", prefix); 1038 + 1138 1039 id = aggr_map->map[s]; 1040 + first = true; 1139 1041 evlist__for_each(evsel_list, counter) { 1140 1042 val = ena = run = 0; 1141 1043 nr = 0; ··· 1157 1041 run += perf_counts(counter->counts, cpu, 0)->run; 1158 1042 nr++; 1159 1043 } 1160 - if (prefix) 1044 + if (first && metric_only) { 1045 + first = false; 1046 + aggr_printout(counter, id, nr); 1047 + } 1048 + if (prefix && !metric_only) 1161 1049 fprintf(output, "%s", prefix); 1162 1050 1163 1051 uval = val * counter->scale; 1164 1052 printout(id, nr, counter, uval, prefix, run, ena, 1.0); 1165 - fputc('\n', output); 1053 + if (!metric_only) 1054 + fputc('\n', output); 1166 1055 } 1056 + if (metric_only) 1057 + fputc('\n', output); 1167 1058 } 1168 1059 } 1169 1060 ··· 1215 1092 avg_enabled = avg_stats(&ps->res_stats[1]); 1216 1093 avg_running = avg_stats(&ps->res_stats[2]); 1217 1094 1218 - if (prefix) 1095 + if (prefix && !metric_only) 1219 1096 fprintf(output, "%s", prefix); 
1220 1097 1221 1098 uval = avg * counter->scale; 1222 1099 printout(-1, 0, counter, uval, prefix, avg_running, avg_enabled, avg); 1223 - fprintf(output, "\n"); 1100 + if (!metric_only) 1101 + fprintf(output, "\n"); 1224 1102 } 1225 1103 1226 1104 /* ··· 1250 1126 } 1251 1127 } 1252 1128 1129 + static void print_no_aggr_metric(char *prefix) 1130 + { 1131 + int cpu; 1132 + int nrcpus = 0; 1133 + struct perf_evsel *counter; 1134 + u64 ena, run, val; 1135 + double uval; 1136 + 1137 + nrcpus = evsel_list->cpus->nr; 1138 + for (cpu = 0; cpu < nrcpus; cpu++) { 1139 + bool first = true; 1140 + 1141 + if (prefix) 1142 + fputs(prefix, stat_config.output); 1143 + evlist__for_each(evsel_list, counter) { 1144 + if (first) { 1145 + aggr_printout(counter, cpu, 0); 1146 + first = false; 1147 + } 1148 + val = perf_counts(counter->counts, cpu, 0)->val; 1149 + ena = perf_counts(counter->counts, cpu, 0)->ena; 1150 + run = perf_counts(counter->counts, cpu, 0)->run; 1151 + 1152 + uval = val * counter->scale; 1153 + printout(cpu, 0, counter, uval, prefix, run, ena, 1.0); 1154 + } 1155 + fputc('\n', stat_config.output); 1156 + } 1157 + } 1158 + 1159 + static int aggr_header_lens[] = { 1160 + [AGGR_CORE] = 18, 1161 + [AGGR_SOCKET] = 12, 1162 + [AGGR_NONE] = 6, 1163 + [AGGR_THREAD] = 24, 1164 + [AGGR_GLOBAL] = 0, 1165 + }; 1166 + 1167 + static void print_metric_headers(char *prefix) 1168 + { 1169 + struct perf_stat_output_ctx out; 1170 + struct perf_evsel *counter; 1171 + struct outstate os = { 1172 + .fh = stat_config.output 1173 + }; 1174 + 1175 + if (prefix) 1176 + fprintf(stat_config.output, "%s", prefix); 1177 + 1178 + if (!csv_output) 1179 + fprintf(stat_config.output, "%*s", 1180 + aggr_header_lens[stat_config.aggr_mode], ""); 1181 + 1182 + /* Print metrics headers only */ 1183 + evlist__for_each(evsel_list, counter) { 1184 + os.evsel = counter; 1185 + out.ctx = &os; 1186 + out.print_metric = print_metric_header; 1187 + out.new_line = new_line_metric; 1188 + os.evsel = counter; 1189 
+ perf_stat__print_shadow_stats(counter, 0, 1190 + 0, 1191 + &out); 1192 + } 1193 + fputc('\n', stat_config.output); 1194 + } 1195 + 1253 1196 static void print_interval(char *prefix, struct timespec *ts) 1254 1197 { 1255 1198 FILE *output = stat_config.output; ··· 1324 1133 1325 1134 sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep); 1326 1135 1327 - if (num_print_interval == 0 && !csv_output) { 1136 + if (num_print_interval == 0 && !csv_output && !metric_only) { 1328 1137 switch (stat_config.aggr_mode) { 1329 1138 case AGGR_SOCKET: 1330 1139 fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit"); ··· 1411 1220 else 1412 1221 print_header(argc, argv); 1413 1222 1223 + if (metric_only) { 1224 + static int num_print_iv; 1225 + 1226 + if (num_print_iv == 0) 1227 + print_metric_headers(prefix); 1228 + if (num_print_iv++ == 25) 1229 + num_print_iv = 0; 1230 + if (stat_config.aggr_mode == AGGR_GLOBAL && prefix) 1231 + fprintf(stat_config.output, "%s", prefix); 1232 + } 1233 + 1414 1234 switch (stat_config.aggr_mode) { 1415 1235 case AGGR_CORE: 1416 1236 case AGGR_SOCKET: ··· 1434 1232 case AGGR_GLOBAL: 1435 1233 evlist__for_each(evsel_list, counter) 1436 1234 print_counter_aggr(counter, prefix); 1235 + if (metric_only) 1236 + fputc('\n', stat_config.output); 1437 1237 break; 1438 1238 case AGGR_NONE: 1439 - evlist__for_each(evsel_list, counter) 1440 - print_counter(counter, prefix); 1239 + if (metric_only) 1240 + print_no_aggr_metric(prefix); 1241 + else { 1242 + evlist__for_each(evsel_list, counter) 1243 + print_counter(counter, prefix); 1244 + } 1441 1245 break; 1442 1246 case AGGR_UNSET: 1443 1247 default: ··· 1564 1356 "aggregate counts per thread", AGGR_THREAD), 1565 1357 OPT_UINTEGER('D', "delay", &initial_delay, 1566 1358 "ms to wait before starting measurement after program start"), 1359 + OPT_BOOLEAN(0, "metric-only", &metric_only, 1360 + "Only print computed metrics. 
No raw values"), 1567 1361 OPT_END() 1568 1362 }; 1569 1363 ··· 2204 1994 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2205 1995 parse_options_usage(stat_usage, stat_options, "o", 1); 2206 1996 parse_options_usage(NULL, stat_options, "log-fd", 0); 1997 + goto out; 1998 + } 1999 + 2000 + if (metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2001 + fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2002 + goto out; 2003 + } 2004 + 2005 + if (metric_only && run_count > 1) { 2006 + fprintf(stderr, "--metric-only is not supported with -r\n"); 2207 2007 goto out; 2208 2008 } 2209 2009
+7
tools/perf/config/Makefile
··· 328 328 endif # NO_LIBBPF 329 329 endif # NO_LIBELF 330 330 331 + ifdef PERF_HAVE_JITDUMP 332 + ifndef NO_DWARF 333 + $(call detected,CONFIG_JITDUMP) 334 + CFLAGS += -DHAVE_JITDUMP 335 + endif 336 + endif 337 + 331 338 ifeq ($(ARCH),powerpc) 332 339 ifndef NO_DWARF 333 340 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
+49 -24
tools/perf/ui/browsers/hists.c
··· 1928 1928 1929 1929 static int hist_browser__fprintf_hierarchy_entry(struct hist_browser *browser, 1930 1930 struct hist_entry *he, 1931 - FILE *fp, int level, 1932 - int nr_sort_keys) 1931 + FILE *fp, int level) 1933 1932 { 1934 1933 char s[8192]; 1935 1934 int printed = 0; ··· 1938 1939 .size = sizeof(s), 1939 1940 }; 1940 1941 struct perf_hpp_fmt *fmt; 1942 + struct perf_hpp_list_node *fmt_node; 1941 1943 bool first = true; 1942 1944 int ret; 1943 - int hierarchy_indent = nr_sort_keys * HIERARCHY_INDENT; 1945 + int hierarchy_indent = (he->hists->nr_hpp_node - 2) * HIERARCHY_INDENT; 1944 1946 1945 1947 printed = fprintf(fp, "%*s", level * HIERARCHY_INDENT, ""); 1946 1948 1947 1949 folded_sign = hist_entry__folded(he); 1948 1950 printed += fprintf(fp, "%c", folded_sign); 1949 1951 1950 - hists__for_each_format(he->hists, fmt) { 1951 - if (perf_hpp__should_skip(fmt, he->hists)) 1952 - continue; 1953 - 1954 - if (perf_hpp__is_sort_entry(fmt) || 1955 - perf_hpp__is_dynamic_entry(fmt)) 1956 - break; 1957 - 1952 + /* the first hpp_list_node is for overhead columns */ 1953 + fmt_node = list_first_entry(&he->hists->hpp_formats, 1954 + struct perf_hpp_list_node, list); 1955 + perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) { 1958 1956 if (!first) { 1959 1957 ret = scnprintf(hpp.buf, hpp.size, " "); 1960 1958 advance_hpp(&hpp, ret); ··· 1988 1992 struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries), 1989 1993 browser->min_pcnt); 1990 1994 int printed = 0; 1991 - int nr_sort = browser->hists->nr_sort_keys; 1992 1995 1993 1996 while (nd) { 1994 1997 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); ··· 1995 2000 if (symbol_conf.report_hierarchy) { 1996 2001 printed += hist_browser__fprintf_hierarchy_entry(browser, 1997 2002 h, fp, 1998 - h->depth, 1999 - nr_sort); 2003 + h->depth); 2000 2004 } else { 2001 2005 printed += hist_browser__fprintf_entry(browser, h, fp); 2002 2006 } ··· 2136 2142 if (hists->uid_filter_str) 2137 2143 
printed += snprintf(bf + printed, size - printed, 2138 2144 ", UID: %s", hists->uid_filter_str); 2139 - if (thread) 2140 - printed += scnprintf(bf + printed, size - printed, 2145 + if (thread) { 2146 + if (sort__has_thread) { 2147 + printed += scnprintf(bf + printed, size - printed, 2141 2148 ", Thread: %s(%d)", 2142 2149 (thread->comm_set ? thread__comm_str(thread) : ""), 2143 2150 thread->tid); 2151 + } else { 2152 + printed += scnprintf(bf + printed, size - printed, 2153 + ", Thread: %s", 2154 + (thread->comm_set ? thread__comm_str(thread) : "")); 2155 + } 2156 + } 2144 2157 if (dso) 2145 2158 printed += scnprintf(bf + printed, size - printed, 2146 2159 ", DSO: %s", dso->short_name); ··· 2322 2321 { 2323 2322 struct thread *thread = act->thread; 2324 2323 2324 + if ((!sort__has_thread && !sort__has_comm) || thread == NULL) 2325 + return 0; 2326 + 2325 2327 if (browser->hists->thread_filter) { 2326 2328 pstack__remove(browser->pstack, &browser->hists->thread_filter); 2327 2329 perf_hpp__set_elide(HISTC_THREAD, false); 2328 2330 thread__zput(browser->hists->thread_filter); 2329 2331 ui_helpline__pop(); 2330 2332 } else { 2331 - ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"", 2332 - thread->comm_set ? thread__comm_str(thread) : "", 2333 - thread->tid); 2333 + if (sort__has_thread) { 2334 + ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"", 2335 + thread->comm_set ? thread__comm_str(thread) : "", 2336 + thread->tid); 2337 + } else { 2338 + ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s thread\"", 2339 + thread->comm_set ? 
thread__comm_str(thread) : ""); 2340 + } 2341 + 2334 2342 browser->hists->thread_filter = thread__get(thread); 2335 2343 perf_hpp__set_elide(HISTC_THREAD, false); 2336 2344 pstack__push(browser->pstack, &browser->hists->thread_filter); ··· 2354 2344 add_thread_opt(struct hist_browser *browser, struct popup_action *act, 2355 2345 char **optstr, struct thread *thread) 2356 2346 { 2357 - if (!sort__has_thread || thread == NULL) 2347 + int ret; 2348 + 2349 + if ((!sort__has_thread && !sort__has_comm) || thread == NULL) 2358 2350 return 0; 2359 2351 2360 - if (asprintf(optstr, "Zoom %s %s(%d) thread", 2361 - browser->hists->thread_filter ? "out of" : "into", 2362 - thread->comm_set ? thread__comm_str(thread) : "", 2363 - thread->tid) < 0) 2352 + if (sort__has_thread) { 2353 + ret = asprintf(optstr, "Zoom %s %s(%d) thread", 2354 + browser->hists->thread_filter ? "out of" : "into", 2355 + thread->comm_set ? thread__comm_str(thread) : "", 2356 + thread->tid); 2357 + } else { 2358 + ret = asprintf(optstr, "Zoom %s %s thread", 2359 + browser->hists->thread_filter ? "out of" : "into", 2360 + thread->comm_set ? thread__comm_str(thread) : ""); 2361 + } 2362 + if (ret < 0) 2364 2363 return 0; 2365 2364 2366 2365 act->thread = thread; ··· 2381 2362 do_zoom_dso(struct hist_browser *browser, struct popup_action *act) 2382 2363 { 2383 2364 struct map *map = act->ms.map; 2365 + 2366 + if (!sort__has_dso || map == NULL) 2367 + return 0; 2384 2368 2385 2369 if (browser->hists->dso_filter) { 2386 2370 pstack__remove(browser->pstack, &browser->hists->dso_filter); ··· 2536 2514 static int 2537 2515 do_zoom_socket(struct hist_browser *browser, struct popup_action *act) 2538 2516 { 2517 + if (!sort__has_socket || act->socket < 0) 2518 + return 0; 2519 + 2539 2520 if (browser->hists->socket_filter > -1) { 2540 2521 pstack__remove(browser->pstack, &browser->hists->socket_filter); 2541 2522 browser->hists->socket_filter = -1;
-3
tools/perf/ui/hist.c
··· 515 515 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, 516 516 struct perf_hpp_fmt *format) 517 517 { 518 - if (perf_hpp__is_sort_entry(format) || perf_hpp__is_dynamic_entry(format)) 519 - list->nr_sort_keys++; 520 - 521 518 list_add_tail(&format->sort_list, &list->sorts); 522 519 } 523 520
+3
tools/perf/util/Build
··· 107 107 libperf-$(CONFIG_ZLIB) += zlib.o 108 108 libperf-$(CONFIG_LZMA) += lzma.o 109 109 libperf-y += demangle-java.o 110 + 111 + ifdef CONFIG_JITDUMP 110 112 libperf-$(CONFIG_LIBELF) += jitdump.o 111 113 libperf-$(CONFIG_LIBELF) += genelf.o 112 114 libperf-$(CONFIG_LIBELF) += genelf_debug.o 115 + endif 113 116 114 117 CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" 115 118 # avoid compiler warnings in 32-bit mode
+2 -4
tools/perf/util/evsel.h
··· 93 93 const char *unit; 94 94 struct event_format *tp_format; 95 95 off_t id_offset; 96 - union { 97 - void *priv; 98 - u64 db_id; 99 - }; 96 + void *priv; 97 + u64 db_id; 100 98 struct cgroup_sel *cgrp; 101 99 void *handler; 102 100 struct cpu_map *cpus;
+131 -13
tools/perf/util/hist.c
··· 1087 1087 */ 1088 1088 1089 1089 static void hists__apply_filters(struct hists *hists, struct hist_entry *he); 1090 + static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he, 1091 + enum hist_filter type); 1092 + 1093 + typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt); 1094 + 1095 + static bool check_thread_entry(struct perf_hpp_fmt *fmt) 1096 + { 1097 + return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt); 1098 + } 1099 + 1100 + static void hist_entry__check_and_remove_filter(struct hist_entry *he, 1101 + enum hist_filter type, 1102 + fmt_chk_fn check) 1103 + { 1104 + struct perf_hpp_fmt *fmt; 1105 + bool type_match = false; 1106 + struct hist_entry *parent = he->parent_he; 1107 + 1108 + switch (type) { 1109 + case HIST_FILTER__THREAD: 1110 + if (symbol_conf.comm_list == NULL && 1111 + symbol_conf.pid_list == NULL && 1112 + symbol_conf.tid_list == NULL) 1113 + return; 1114 + break; 1115 + case HIST_FILTER__DSO: 1116 + if (symbol_conf.dso_list == NULL) 1117 + return; 1118 + break; 1119 + case HIST_FILTER__SYMBOL: 1120 + if (symbol_conf.sym_list == NULL) 1121 + return; 1122 + break; 1123 + case HIST_FILTER__PARENT: 1124 + case HIST_FILTER__GUEST: 1125 + case HIST_FILTER__HOST: 1126 + case HIST_FILTER__SOCKET: 1127 + default: 1128 + return; 1129 + } 1130 + 1131 + /* if it's filtered by own fmt, it has to have filter bits */ 1132 + perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1133 + if (check(fmt)) { 1134 + type_match = true; 1135 + break; 1136 + } 1137 + } 1138 + 1139 + if (type_match) { 1140 + /* 1141 + * If the filter is for current level entry, propagate 1142 + * filter marker to parents. The marker bit was 1143 + * already set by default so it only needs to clear 1144 + * non-filtered entries. 
1145 + */ 1146 + if (!(he->filtered & (1 << type))) { 1147 + while (parent) { 1148 + parent->filtered &= ~(1 << type); 1149 + parent = parent->parent_he; 1150 + } 1151 + } 1152 + } else { 1153 + /* 1154 + * If current entry doesn't have matching formats, set 1155 + * filter marker for upper level entries. it will be 1156 + * cleared if its lower level entries is not filtered. 1157 + * 1158 + * For lower-level entries, it inherits parent's 1159 + * filter bit so that lower level entries of a 1160 + * non-filtered entry won't set the filter marker. 1161 + */ 1162 + if (parent == NULL) 1163 + he->filtered |= (1 << type); 1164 + else 1165 + he->filtered |= (parent->filtered & (1 << type)); 1166 + } 1167 + } 1168 + 1169 + static void hist_entry__apply_hierarchy_filters(struct hist_entry *he) 1170 + { 1171 + hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD, 1172 + check_thread_entry); 1173 + 1174 + hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO, 1175 + perf_hpp__is_dso_entry); 1176 + 1177 + hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL, 1178 + perf_hpp__is_sym_entry); 1179 + 1180 + hists__apply_filters(he->hists, he); 1181 + } 1090 1182 1091 1183 static struct hist_entry *hierarchy_insert_entry(struct hists *hists, 1092 1184 struct rb_root *root, 1093 1185 struct hist_entry *he, 1186 + struct hist_entry *parent_he, 1094 1187 struct perf_hpp_list *hpp_list) 1095 1188 { 1096 1189 struct rb_node **p = &root->rb_node; ··· 1218 1125 if (new == NULL) 1219 1126 return NULL; 1220 1127 1221 - hists__apply_filters(hists, new); 1222 1128 hists->nr_entries++; 1223 1129 1224 1130 /* save related format list for output */ 1225 1131 new->hpp_list = hpp_list; 1132 + new->parent_he = parent_he; 1133 + 1134 + hist_entry__apply_hierarchy_filters(new); 1226 1135 1227 1136 /* some fields are now passed to 'new' */ 1228 1137 perf_hpp_list__for_each_sort_list(hpp_list, fmt) { ··· 1265 1170 continue; 1266 1171 1267 1172 /* insert copy of 'he' for each fmt into 
the hierarchy */ 1268 - new_he = hierarchy_insert_entry(hists, root, he, &node->hpp); 1173 + new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp); 1269 1174 if (new_he == NULL) { 1270 1175 ret = -1; 1271 1176 break; 1272 1177 } 1273 1178 1274 1179 root = &new_he->hroot_in; 1275 - new_he->parent_he = parent; 1276 1180 new_he->depth = depth++; 1277 1181 parent = new_he; 1278 1182 } ··· 1453 1359 hists->stats.total_period += h->stat.period; 1454 1360 } 1455 1361 1362 + static void hierarchy_recalc_total_periods(struct hists *hists) 1363 + { 1364 + struct rb_node *node; 1365 + struct hist_entry *he; 1366 + 1367 + node = rb_first(&hists->entries); 1368 + 1369 + hists->stats.total_period = 0; 1370 + hists->stats.total_non_filtered_period = 0; 1371 + 1372 + /* 1373 + * recalculate total period using top-level entries only 1374 + * since lower level entries only see non-filtered entries 1375 + * but upper level entries have sum of both entries. 1376 + */ 1377 + while (node) { 1378 + he = rb_entry(node, struct hist_entry, rb_node); 1379 + node = rb_next(node); 1380 + 1381 + hists->stats.total_period += he->stat.period; 1382 + if (!he->filtered) 1383 + hists->stats.total_non_filtered_period += he->stat.period; 1384 + } 1385 + } 1386 + 1456 1387 static void hierarchy_insert_output_entry(struct rb_root *root, 1457 1388 struct hist_entry *he) 1458 1389 { ··· 1542 1423 1543 1424 continue; 1544 1425 } 1545 - 1546 - /* only update stat for leaf entries to avoid duplication */ 1547 - hists__inc_stats(hists, he); 1548 - if (!he->filtered) 1549 - hists__calc_col_len(hists, he); 1550 1426 1551 1427 if (!use_callchain) 1552 1428 continue; ··· 1622 1508 hists__reset_col_len(hists); 1623 1509 1624 1510 if (symbol_conf.report_hierarchy) { 1625 - return hists__hierarchy_output_resort(hists, prog, 1626 - &hists->entries_collapsed, 1627 - &hists->entries, 1628 - min_callchain_hits, 1629 - use_callchain); 1511 + hists__hierarchy_output_resort(hists, prog, 1512 + 
&hists->entries_collapsed, 1513 + &hists->entries, 1514 + min_callchain_hits, 1515 + use_callchain); 1516 + hierarchy_recalc_total_periods(hists); 1517 + return; 1630 1518 } 1631 1519 1632 1520 if (sort__need_collapse) ··· 1948 1832 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING); 1949 1833 } 1950 1834 } 1835 + 1836 + hierarchy_recalc_total_periods(hists); 1951 1837 1952 1838 /* 1953 1839 * resort output after applying a new filter since filter in a lower
+4 -2
tools/perf/util/hist.h
··· 79 79 int socket_filter; 80 80 struct perf_hpp_list *hpp_list; 81 81 struct list_head hpp_formats; 82 - int nr_sort_keys; 83 82 int nr_hpp_node; 84 83 }; 85 84 ··· 240 241 struct perf_hpp_list { 241 242 struct list_head fields; 242 243 struct list_head sorts; 243 - int nr_sort_keys; 244 244 }; 245 245 246 246 extern struct perf_hpp_list perf_hpp_list; ··· 316 318 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt); 317 319 bool perf_hpp__is_srcline_entry(struct perf_hpp_fmt *fmt); 318 320 bool perf_hpp__is_srcfile_entry(struct perf_hpp_fmt *fmt); 321 + bool perf_hpp__is_thread_entry(struct perf_hpp_fmt *fmt); 322 + bool perf_hpp__is_comm_entry(struct perf_hpp_fmt *fmt); 323 + bool perf_hpp__is_dso_entry(struct perf_hpp_fmt *fmt); 324 + bool perf_hpp__is_sym_entry(struct perf_hpp_fmt *fmt); 319 325 320 326 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt); 321 327
+2 -2
tools/perf/util/pmu.c
··· 98 98 char scale[128]; 99 99 int fd, ret = -1; 100 100 char path[PATH_MAX]; 101 - const char *lc; 101 + char *lc; 102 102 103 103 snprintf(path, PATH_MAX, "%s/%s.scale", dir, name); 104 104 ··· 146 146 /* restore locale */ 147 147 setlocale(LC_NUMERIC, lc); 148 148 149 - free((char *) lc); 149 + free(lc); 150 150 151 151 ret = 0; 152 152 error:
+67 -80
tools/perf/util/sort.c
··· 27 27 int sort__has_dso = 0; 28 28 int sort__has_socket = 0; 29 29 int sort__has_thread = 0; 30 + int sort__has_comm = 0; 30 31 enum sort_mode sort__mode = SORT_MODE__NORMAL; 31 32 32 33 /* ··· 1489 1488 return format->header == __sort__hpp_header; 1490 1489 } 1491 1490 1492 - bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt) 1493 - { 1494 - struct hpp_sort_entry *hse; 1495 - 1496 - if (!perf_hpp__is_sort_entry(fmt)) 1497 - return false; 1498 - 1499 - hse = container_of(fmt, struct hpp_sort_entry, hpp); 1500 - return hse->se == &sort_trace; 1491 + #define MK_SORT_ENTRY_CHK(key) \ 1492 + bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1493 + { \ 1494 + struct hpp_sort_entry *hse; \ 1495 + \ 1496 + if (!perf_hpp__is_sort_entry(fmt)) \ 1497 + return false; \ 1498 + \ 1499 + hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1500 + return hse->se == &sort_ ## key ; \ 1501 1501 } 1502 1502 1503 - bool perf_hpp__is_srcline_entry(struct perf_hpp_fmt *fmt) 1504 - { 1505 - struct hpp_sort_entry *hse; 1503 + MK_SORT_ENTRY_CHK(trace) 1504 + MK_SORT_ENTRY_CHK(srcline) 1505 + MK_SORT_ENTRY_CHK(srcfile) 1506 + MK_SORT_ENTRY_CHK(thread) 1507 + MK_SORT_ENTRY_CHK(comm) 1508 + MK_SORT_ENTRY_CHK(dso) 1509 + MK_SORT_ENTRY_CHK(sym) 1506 1510 1507 - if (!perf_hpp__is_sort_entry(fmt)) 1508 - return false; 1509 - 1510 - hse = container_of(fmt, struct hpp_sort_entry, hpp); 1511 - return hse->se == &sort_srcline; 1512 - } 1513 - 1514 - bool perf_hpp__is_srcfile_entry(struct perf_hpp_fmt *fmt) 1515 - { 1516 - struct hpp_sort_entry *hse; 1517 - 1518 - if (!perf_hpp__is_sort_entry(fmt)) 1519 - return false; 1520 - 1521 - hse = container_of(fmt, struct hpp_sort_entry, hpp); 1522 - return hse->se == &sort_srcfile; 1523 - } 1524 1511 1525 1512 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1526 1513 { ··· 1591 1602 { 1592 1603 struct perf_hpp_fmt *fmt; 1593 1604 struct hpp_sort_entry *hse; 1605 + int ret = -1; 1606 + int r; 1594 1607 
1595 - fmt = he->fmt; 1596 - if (fmt == NULL || !perf_hpp__is_sort_entry(fmt)) 1597 - return -1; 1608 + perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1609 + if (!perf_hpp__is_sort_entry(fmt)) 1610 + continue; 1598 1611 1599 - hse = container_of(fmt, struct hpp_sort_entry, hpp); 1600 - if (hse->se->se_filter == NULL) 1601 - return -1; 1612 + hse = container_of(fmt, struct hpp_sort_entry, hpp); 1613 + if (hse->se->se_filter == NULL) 1614 + continue; 1602 1615 1603 - return hse->se->se_filter(he, type, arg); 1616 + /* 1617 + * hist entry is filtered if any of sort key in the hpp list 1618 + * is applied. But it should skip non-matched filter types. 1619 + */ 1620 + r = hse->se->se_filter(he, type, arg); 1621 + if (r >= 0) { 1622 + if (ret < 0) 1623 + ret = 0; 1624 + ret |= r; 1625 + } 1626 + } 1627 + 1628 + return ret; 1604 1629 } 1605 1630 1606 - static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, int level) 1631 + static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 1632 + struct perf_hpp_list *list, 1633 + int level) 1607 1634 { 1608 1635 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 1609 1636 1610 1637 if (hse == NULL) 1611 1638 return -1; 1612 1639 1613 - perf_hpp__register_sort_field(&hse->hpp); 1640 + perf_hpp_list__register_sort_field(list, &hse->hpp); 1614 1641 return 0; 1615 1642 } 1616 1643 1617 - static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list, 1618 - struct sort_dimension *sd) 1644 + static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 1645 + struct perf_hpp_list *list) 1619 1646 { 1620 1647 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 1621 1648 ··· 2152 2147 return ret; 2153 2148 } 2154 2149 2155 - static int __sort_dimension__add(struct sort_dimension *sd, int level) 2150 + static int __sort_dimension__add(struct sort_dimension *sd, 2151 + struct perf_hpp_list *list, 2152 + int level) 2156 2153 { 2157 2154 if (sd->taken) 2158 2155 
return 0; 2159 2156 2160 - if (__sort_dimension__add_hpp_sort(sd, level) < 0) 2157 + if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 2161 2158 return -1; 2162 2159 2163 2160 if (sd->entry->se_collapse) ··· 2170 2163 return 0; 2171 2164 } 2172 2165 2173 - static int __hpp_dimension__add(struct hpp_dimension *hd, int level) 2166 + static int __hpp_dimension__add(struct hpp_dimension *hd, 2167 + struct perf_hpp_list *list, 2168 + int level) 2174 2169 { 2175 2170 struct perf_hpp_fmt *fmt; 2176 2171 ··· 2184 2175 return -1; 2185 2176 2186 2177 hd->taken = 1; 2187 - perf_hpp__register_sort_field(fmt); 2178 + perf_hpp_list__register_sort_field(list, fmt); 2188 2179 return 0; 2189 2180 } 2190 2181 ··· 2194 2185 if (sd->taken) 2195 2186 return 0; 2196 2187 2197 - if (__sort_dimension__add_hpp_output(list, sd) < 0) 2188 + if (__sort_dimension__add_hpp_output(sd, list) < 0) 2198 2189 return -1; 2199 2190 2200 2191 sd->taken = 1; ··· 2224 2215 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 2225 2216 } 2226 2217 2227 - static int sort_dimension__add(const char *tok, struct perf_evlist *evlist, 2218 + static int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 2219 + struct perf_evlist *evlist __maybe_unused, 2228 2220 int level) 2229 2221 { 2230 2222 unsigned int i; ··· 2263 2253 sort__has_socket = 1; 2264 2254 } else if (sd->entry == &sort_thread) { 2265 2255 sort__has_thread = 1; 2256 + } else if (sd->entry == &sort_comm) { 2257 + sort__has_comm = 1; 2266 2258 } 2267 2259 2268 - return __sort_dimension__add(sd, level); 2260 + return __sort_dimension__add(sd, list, level); 2269 2261 } 2270 2262 2271 2263 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { ··· 2276 2264 if (strncasecmp(tok, hd->name, strlen(tok))) 2277 2265 continue; 2278 2266 2279 - return __hpp_dimension__add(hd, level); 2267 + return __hpp_dimension__add(hd, list, level); 2280 2268 } 2281 2269 2282 2270 for (i = 0; i < 
ARRAY_SIZE(bstack_sort_dimensions); i++) { ··· 2291 2279 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 2292 2280 sort__has_sym = 1; 2293 2281 2294 - __sort_dimension__add(sd, level); 2282 + __sort_dimension__add(sd, list, level); 2295 2283 return 0; 2296 2284 } 2297 2285 ··· 2307 2295 if (sd->entry == &sort_mem_daddr_sym) 2308 2296 sort__has_sym = 1; 2309 2297 2310 - __sort_dimension__add(sd, level); 2298 + __sort_dimension__add(sd, list, level); 2311 2299 return 0; 2312 2300 } 2313 2301 ··· 2317 2305 return -ESRCH; 2318 2306 } 2319 2307 2320 - static int setup_sort_list(char *str, struct perf_evlist *evlist) 2308 + static int setup_sort_list(struct perf_hpp_list *list, char *str, 2309 + struct perf_evlist *evlist) 2321 2310 { 2322 2311 char *tmp, *tok; 2323 2312 int ret = 0; ··· 2345 2332 } 2346 2333 2347 2334 if (*tok) { 2348 - ret = sort_dimension__add(tok, evlist, level); 2335 + ret = sort_dimension__add(list, tok, evlist, level); 2349 2336 if (ret == -EINVAL) { 2350 2337 error("Invalid --sort key: `%s'", tok); 2351 2338 break; ··· 2493 2480 } 2494 2481 } 2495 2482 2496 - ret = setup_sort_list(str, evlist); 2483 + ret = setup_sort_list(&perf_hpp_list, str, evlist); 2497 2484 2498 2485 free(str); 2499 2486 return ret; ··· 2706 2693 return ret; 2707 2694 } 2708 2695 2709 - static void evlist__set_hists_nr_sort_keys(struct perf_evlist *evlist) 2710 - { 2711 - struct perf_evsel *evsel; 2712 - 2713 - evlist__for_each(evlist, evsel) { 2714 - struct perf_hpp_fmt *fmt; 2715 - struct hists *hists = evsel__hists(evsel); 2716 - 2717 - hists->nr_sort_keys = perf_hpp_list.nr_sort_keys; 2718 - 2719 - /* 2720 - * If dynamic entries were used, it might add multiple 2721 - * entries to each evsel for a single field name. Set 2722 - * actual number of sort keys for each hists. 
2723 - */ 2724 - perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) { 2725 - if (perf_hpp__is_dynamic_entry(fmt) && 2726 - !perf_hpp__defined_dynamic_entry(fmt, hists)) 2727 - hists->nr_sort_keys--; 2728 - } 2729 - } 2730 - } 2731 - 2732 2696 int setup_sorting(struct perf_evlist *evlist) 2733 2697 { 2734 2698 int err; ··· 2715 2725 return err; 2716 2726 2717 2727 if (parent_pattern != default_parent_pattern) { 2718 - err = sort_dimension__add("parent", evlist, -1); 2728 + err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 2719 2729 if (err < 0) 2720 2730 return err; 2721 2731 } 2722 - 2723 - if (evlist != NULL) 2724 - evlist__set_hists_nr_sort_keys(evlist); 2725 2732 2726 2733 reset_dimensions(); 2727 2734
+1 -1
tools/perf/util/sort.h
··· 37 37 extern int sort__has_sym; 38 38 extern int sort__has_socket; 39 39 extern int sort__has_thread; 40 + extern int sort__has_comm; 40 41 extern enum sort_mode sort__mode; 41 42 extern struct sort_entry sort_comm; 42 43 extern struct sort_entry sort_dso; ··· 130 129 void *raw_data; 131 130 u32 raw_size; 132 131 void *trace_output; 133 - struct perf_hpp_fmt *fmt; 134 132 struct perf_hpp_list *hpp_list; 135 133 struct hist_entry *parent_he; 136 134 union {