Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-tools-fixes-for-v6.0-2022-08-27' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tools fixes from Arnaldo Carvalho de Melo:

- Fixup setup of weak groups when using 'perf stat --repeat', add a
'perf test' for it.

- Fix memory leaks in 'perf sched record' detected with
-fsanitize=address.

- Fix build when PYTHON_CONFIG is user supplied.

- Capitalize topdown metrics' names in 'perf stat', so that the output,
sometimes parsed, matches the Intel SDM docs.

- Make sure the documentation for the save_type filter about Intel
systems with Arch LBR support (12th-Gen+ client or 4th-Gen Xeon+
server) reflects recent related kernel changes.

- Fix 'perf record' man page formatting of description of support to
hybrid systems.

- Update arm64's KVM header from the kernel sources.

* tag 'perf-tools-fixes-for-v6.0-2022-08-27' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
perf stat: Capitalize topdown metrics' names
perf docs: Update the documentation for the save_type filter
perf sched: Fix memory leaks in __cmd_record detected with -fsanitize=address
perf record: Fix manpage formatting of description of support to hybrid systems
perf test: Stat test for repeat with a weak group
perf stat: Clear evsel->reset_group for each stat run
tools kvm headers arm64: Update KVM header from the kernel sources
perf python: Fix build when PYTHON_CONFIG is user supplied

+61 -32
+4 -2
tools/arch/arm64/include/uapi/asm/kvm.h
··· 75 75 76 76 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ 77 77 #define KVM_ARM_DEVICE_TYPE_SHIFT 0 78 - #define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) 78 + #define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \ 79 + KVM_ARM_DEVICE_TYPE_SHIFT) 79 80 #define KVM_ARM_DEVICE_ID_SHIFT 16 80 - #define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) 81 + #define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \ 82 + KVM_ARM_DEVICE_ID_SHIFT) 81 83 82 84 /* Supported device IDs */ 83 85 #define KVM_ARM_DEVICE_VGIC_V2 0
-10
tools/perf/Documentation/intel-hybrid.txt
··· 21 21 22 22 It indicates cpu0-cpu15 are core cpus and cpu16-cpu23 are atom cpus. 23 23 24 - Quickstart 25 - 26 - List hybrid event 27 - ----------------- 28 - 29 24 As before, use perf-list to list the symbolic event. 30 25 31 26 perf list ··· 35 40 be supported. 36 41 37 42 Enable hybrid event with a specific pmu 38 - --------------------------------------- 39 43 40 44 To enable a core only event or atom only event, following syntax is supported: 41 45 ··· 47 53 perf stat -e cpu_core/cycles/ 48 54 49 55 Create two events for one hardware event automatically 50 - ------------------------------------------------------ 51 56 52 57 When creating one event and the event is available on both atom and core, 53 58 two events are created automatically. One is for atom, the other is for ··· 125 132 The first 'cycles' is core event, the second 'cycles' is atom event. 126 133 127 134 Thread mode example: 128 - -------------------- 129 135 130 136 perf-stat reports the scaled counts for hybrid event and with a percentage 131 137 displayed. The percentage is the event's running time/enabling time. ··· 168 176 604,097,080 cpu_atom/cycles/ (99.57%) 169 177 170 178 perf-record: 171 - ------------ 172 179 173 180 If there is no '-e' specified in perf record, on hybrid platform, 174 181 it creates two default 'cycles' and adds them to event list. One 175 182 is for core, the other is for atom. 176 183 177 184 perf-stat: 178 - ---------- 179 185 180 186 If there is no '-e' specified in perf stat, on hybrid platform, 181 187 besides of software events, following events are created and
+5 -2
tools/perf/Documentation/perf-record.txt
··· 397 397 - abort_tx: only when the target is a hardware transaction abort 398 398 - cond: conditional branches 399 399 - save_type: save branch type during sampling in case binary is not available later 400 + For the platforms with Intel Arch LBR support (12th-Gen+ client or 401 + 4th-Gen Xeon+ server), the save branch type is unconditionally enabled 402 + when the taken branch stack sampling is enabled. 400 403 401 404 + 402 405 The option requires at least one branch type among any, any_call, any_ret, ind_call, cond. ··· 760 757 defaults to CPU layout. Masks defined or provided by the option value are 761 758 filtered through the mask provided by -C option. 762 759 763 - include::intel-hybrid.txt[] 764 - 765 760 --debuginfod[=URLs]:: 766 761 Specify debuginfod URL to be used when cacheing perf.data binaries, 767 762 it follows the same syntax as the DEBUGINFOD_URLS variable, like: ··· 778 777 Note that BPF can collect stack traces using frame pointer ("fp") 779 778 only, as of now. So the applications built without the frame 780 779 pointer might see bogus addresses. 780 + 781 + include::intel-hybrid.txt[] 781 782 782 783 SEE ALSO 783 784 --------
+1 -1
tools/perf/Makefile.config
··· 265 265 # defined. get-executable-or-default fails with an error if the first argument is supplied but 266 266 # doesn't exist. 267 267 override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO)) 268 - override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_AUTO))) 268 + override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_CONFIG))) 269 269 270 270 grep-libs = $(filter -l%,$(1)) 271 271 strip-libs = $(filter-out -l%,$(1))
+19 -5
tools/perf/builtin-sched.c
··· 3355 3355 static int __cmd_record(int argc, const char **argv) 3356 3356 { 3357 3357 unsigned int rec_argc, i, j; 3358 - const char **rec_argv; 3358 + char **rec_argv; 3359 + const char **rec_argv_copy; 3359 3360 const char * const record_args[] = { 3360 3361 "record", 3361 3362 "-a", ··· 3385 3384 ARRAY_SIZE(schedstat_args) : 0; 3386 3385 3387 3386 struct tep_event *waking_event; 3387 + int ret; 3388 3388 3389 3389 /* 3390 3390 * +2 for either "-e", "sched:sched_wakeup" or ··· 3393 3391 */ 3394 3392 rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1; 3395 3393 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3396 - 3397 3394 if (rec_argv == NULL) 3398 3395 return -ENOMEM; 3396 + rec_argv_copy = calloc(rec_argc + 1, sizeof(char *)); 3397 + if (rec_argv_copy == NULL) { 3398 + free(rec_argv); 3399 + return -ENOMEM; 3400 + } 3399 3401 3400 3402 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3401 3403 rec_argv[i] = strdup(record_args[i]); 3402 3404 3403 - rec_argv[i++] = "-e"; 3405 + rec_argv[i++] = strdup("-e"); 3404 3406 waking_event = trace_event__tp_format("sched", "sched_waking"); 3405 3407 if (!IS_ERR(waking_event)) 3406 3408 rec_argv[i++] = strdup("sched:sched_waking"); ··· 3415 3409 rec_argv[i++] = strdup(schedstat_args[j]); 3416 3410 3417 3411 for (j = 1; j < (unsigned int)argc; j++, i++) 3418 - rec_argv[i] = argv[j]; 3412 + rec_argv[i] = strdup(argv[j]); 3419 3413 3420 3414 BUG_ON(i != rec_argc); 3421 3415 3422 - return cmd_record(i, rec_argv); 3416 + memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc); 3417 + ret = cmd_record(rec_argc, rec_argv_copy); 3418 + 3419 + for (i = 0; i < rec_argc; i++) 3420 + free(rec_argv[i]); 3421 + free(rec_argv); 3422 + free(rec_argv_copy); 3423 + 3424 + return ret; 3423 3425 } 3424 3426 3425 3427 int cmd_sched(int argc, const char **argv)
+1
tools/perf/builtin-stat.c
··· 826 826 } 827 827 828 828 evlist__for_each_entry(evsel_list, counter) { 829 + counter->reset_group = false; 829 830 if (bpf_counter__load(counter, &target)) 830 831 return -1; 831 832 if (!evsel__is_bpf(counter))
+19
tools/perf/tests/shell/stat.sh
··· 28 28 echo "stat record and report test [Success]" 29 29 } 30 30 31 + test_stat_repeat_weak_groups() { 32 + echo "stat repeat weak groups test" 33 + if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \ 34 + true 2>&1 | grep -q 'seconds time elapsed' 35 + then 36 + echo "stat repeat weak groups test [Skipped event parsing failed]" 37 + return 38 + fi 39 + if ! perf stat -r2 -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}:W' \ 40 + true > /dev/null 2>&1 41 + then 42 + echo "stat repeat weak groups test [Failed]" 43 + err=1 44 + return 45 + fi 46 + echo "stat repeat weak groups test [Success]" 47 + } 48 + 31 49 test_topdown_groups() { 32 50 # Topdown events must be grouped with the slots event first. Test that 33 51 # parse-events reorders this. ··· 93 75 94 76 test_default_stat 95 77 test_stat_record_report 78 + test_stat_repeat_weak_groups 96 79 test_topdown_groups 97 80 test_topdown_weak_groups 98 81 exit $err
+12 -12
tools/perf/util/stat-shadow.c
··· 1193 1193 &rsd); 1194 1194 if (retiring > 0.7) 1195 1195 color = PERF_COLOR_GREEN; 1196 - print_metric(config, ctxp, color, "%8.1f%%", "retiring", 1196 + print_metric(config, ctxp, color, "%8.1f%%", "Retiring", 1197 1197 retiring * 100.); 1198 1198 } else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) && 1199 1199 full_td(cpu_map_idx, st, &rsd)) { ··· 1202 1202 &rsd); 1203 1203 if (fe_bound > 0.2) 1204 1204 color = PERF_COLOR_RED; 1205 - print_metric(config, ctxp, color, "%8.1f%%", "frontend bound", 1205 + print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound", 1206 1206 fe_bound * 100.); 1207 1207 } else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) && 1208 1208 full_td(cpu_map_idx, st, &rsd)) { ··· 1211 1211 &rsd); 1212 1212 if (be_bound > 0.2) 1213 1213 color = PERF_COLOR_RED; 1214 - print_metric(config, ctxp, color, "%8.1f%%", "backend bound", 1214 + print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound", 1215 1215 be_bound * 100.); 1216 1216 } else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) && 1217 1217 full_td(cpu_map_idx, st, &rsd)) { ··· 1220 1220 &rsd); 1221 1221 if (bad_spec > 0.1) 1222 1222 color = PERF_COLOR_RED; 1223 - print_metric(config, ctxp, color, "%8.1f%%", "bad speculation", 1223 + print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation", 1224 1224 bad_spec * 100.); 1225 1225 } else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) && 1226 1226 full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { ··· 1234 1234 1235 1235 if (retiring > 0.7 && heavy_ops > 0.1) 1236 1236 color = PERF_COLOR_GREEN; 1237 - print_metric(config, ctxp, color, "%8.1f%%", "heavy operations", 1237 + print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations", 1238 1238 heavy_ops * 100.); 1239 1239 if (retiring > 0.7 && light_ops > 0.6) 1240 1240 color = PERF_COLOR_GREEN; 1241 1241 else 1242 1242 color = NULL; 1243 - print_metric(config, ctxp, color, "%8.1f%%", "light operations", 1243 + print_metric(config, ctxp, color, 
"%8.1f%%", "Light Operations", 1244 1244 light_ops * 100.); 1245 1245 } else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) && 1246 1246 full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { ··· 1254 1254 1255 1255 if (bad_spec > 0.1 && br_mis > 0.05) 1256 1256 color = PERF_COLOR_RED; 1257 - print_metric(config, ctxp, color, "%8.1f%%", "branch mispredict", 1257 + print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict", 1258 1258 br_mis * 100.); 1259 1259 if (bad_spec > 0.1 && m_clears > 0.05) 1260 1260 color = PERF_COLOR_RED; 1261 1261 else 1262 1262 color = NULL; 1263 - print_metric(config, ctxp, color, "%8.1f%%", "machine clears", 1263 + print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears", 1264 1264 m_clears * 100.); 1265 1265 } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) && 1266 1266 full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { ··· 1274 1274 1275 1275 if (fe_bound > 0.2 && fetch_lat > 0.15) 1276 1276 color = PERF_COLOR_RED; 1277 - print_metric(config, ctxp, color, "%8.1f%%", "fetch latency", 1277 + print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency", 1278 1278 fetch_lat * 100.); 1279 1279 if (fe_bound > 0.2 && fetch_bw > 0.1) 1280 1280 color = PERF_COLOR_RED; 1281 1281 else 1282 1282 color = NULL; 1283 - print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth", 1283 + print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth", 1284 1284 fetch_bw * 100.); 1285 1285 } else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) && 1286 1286 full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { ··· 1294 1294 1295 1295 if (be_bound > 0.2 && mem_bound > 0.2) 1296 1296 color = PERF_COLOR_RED; 1297 - print_metric(config, ctxp, color, "%8.1f%%", "memory bound", 1297 + print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound", 1298 1298 mem_bound * 100.); 1299 1299 if (be_bound > 0.2 && core_bound > 0.1) 1300 1300 color = PERF_COLOR_RED; 1301 1301 else 1302 1302 color = NULL; 
1303 - print_metric(config, ctxp, color, "%8.1f%%", "Core bound", 1303 + print_metric(config, ctxp, color, "%8.1f%%", "Core Bound", 1304 1304 core_bound * 100.); 1305 1305 } else if (evsel->metric_expr) { 1306 1306 generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,