Merge tag 'perf-urgent-for-mingo-5.1-20190416' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes from Arnaldo Carvalho de Melo:

core:

Mao Han:

- Use hweight64() instead of hweight_long(attr.sample_regs_user) when parsing
samples, this is what the kernel uses and fixes the problem in 32-bit
architectures such as C-SKY that have more than 32 registers that can come
in a sample.

perf stat:

Jiri Olsa:

- Disable DIR_FORMAT feature for 'perf stat record', fixing an assert()
failure.

Intel PT:

Adrian Hunter:

- Fix use of parent_id in calls_view in export-to-sqlite.py.

BPF:

Gustavo A. R. Silva:

- Fix lock/unlock imbalances when processing BPF/BTF info, found by the
  Coverity tool.

libtraceevent:

Rikard Falkeborn:

- Fix missing equality check for strcmp(), detected by the cppcheck tool.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+22 -17
+1 -1
tools/lib/traceevent/event-parse.c
··· 2233 2233 return val & 0xffffffff; 2234 2234 2235 2235 if (strcmp(type, "u64") == 0 || 2236 - strcmp(type, "s64")) 2236 + strcmp(type, "s64") == 0) 2237 2237 return val; 2238 2238 2239 2239 if (strcmp(type, "s8") == 0)
+1
tools/perf/builtin-stat.c
··· 1308 1308 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1309 1309 perf_header__set_feat(&session->header, feat); 1310 1310 1311 + perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1311 1312 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1312 1313 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1313 1314 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
+1 -1
tools/perf/scripts/python/export-to-sqlite.py
··· 331 331 'return_id,' 332 332 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' 333 333 'parent_call_path_id,' 334 - 'parent_id' 334 + 'calls.parent_id' 335 335 ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') 336 336 337 337 do_query(query, 'CREATE VIEW samples_view AS '
+6 -6
tools/perf/util/evsel.c
··· 2368 2368 if (data->user_regs.abi) { 2369 2369 u64 mask = evsel->attr.sample_regs_user; 2370 2370 2371 - sz = hweight_long(mask) * sizeof(u64); 2371 + sz = hweight64(mask) * sizeof(u64); 2372 2372 OVERFLOW_CHECK(array, sz, max_size); 2373 2373 data->user_regs.mask = mask; 2374 2374 data->user_regs.regs = (u64 *)array; ··· 2424 2424 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { 2425 2425 u64 mask = evsel->attr.sample_regs_intr; 2426 2426 2427 - sz = hweight_long(mask) * sizeof(u64); 2427 + sz = hweight64(mask) * sizeof(u64); 2428 2428 OVERFLOW_CHECK(array, sz, max_size); 2429 2429 data->intr_regs.mask = mask; 2430 2430 data->intr_regs.regs = (u64 *)array; ··· 2552 2552 if (type & PERF_SAMPLE_REGS_USER) { 2553 2553 if (sample->user_regs.abi) { 2554 2554 result += sizeof(u64); 2555 - sz = hweight_long(sample->user_regs.mask) * sizeof(u64); 2555 + sz = hweight64(sample->user_regs.mask) * sizeof(u64); 2556 2556 result += sz; 2557 2557 } else { 2558 2558 result += sizeof(u64); ··· 2580 2580 if (type & PERF_SAMPLE_REGS_INTR) { 2581 2581 if (sample->intr_regs.abi) { 2582 2582 result += sizeof(u64); 2583 - sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); 2583 + sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 2584 2584 result += sz; 2585 2585 } else { 2586 2586 result += sizeof(u64); ··· 2710 2710 if (type & PERF_SAMPLE_REGS_USER) { 2711 2711 if (sample->user_regs.abi) { 2712 2712 *array++ = sample->user_regs.abi; 2713 - sz = hweight_long(sample->user_regs.mask) * sizeof(u64); 2713 + sz = hweight64(sample->user_regs.mask) * sizeof(u64); 2714 2714 memcpy(array, sample->user_regs.regs, sz); 2715 2715 array = (void *)array + sz; 2716 2716 } else { ··· 2746 2746 if (type & PERF_SAMPLE_REGS_INTR) { 2747 2747 if (sample->intr_regs.abi) { 2748 2748 *array++ = sample->intr_regs.abi; 2749 - sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); 2749 + sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 2750 2750 memcpy(array, sample->intr_regs.regs, 
sz); 2751 2751 array = (void *)array + sz; 2752 2752 } else {
+13 -9
tools/perf/util/header.c
··· 2606 2606 perf_env__insert_bpf_prog_info(env, info_node); 2607 2607 } 2608 2608 2609 + up_write(&env->bpf_progs.lock); 2609 2610 return 0; 2610 2611 out: 2611 2612 free(info_linear); ··· 2624 2623 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) 2625 2624 { 2626 2625 struct perf_env *env = &ff->ph->env; 2626 + struct btf_node *node = NULL; 2627 2627 u32 count, i; 2628 + int err = -1; 2628 2629 2629 2630 if (ff->ph->needs_swap) { 2630 2631 pr_warning("interpreting btf from systems with endianity is not yet supported\n"); ··· 2639 2636 down_write(&env->bpf_progs.lock); 2640 2637 2641 2638 for (i = 0; i < count; ++i) { 2642 - struct btf_node *node; 2643 2639 u32 id, data_size; 2644 2640 2645 2641 if (do_read_u32(ff, &id)) 2646 - return -1; 2642 + goto out; 2647 2643 if (do_read_u32(ff, &data_size)) 2648 - return -1; 2644 + goto out; 2649 2645 2650 2646 node = malloc(sizeof(struct btf_node) + data_size); 2651 2647 if (!node) 2652 - return -1; 2648 + goto out; 2653 2649 2654 2650 node->id = id; 2655 2651 node->data_size = data_size; 2656 2652 2657 - if (__do_read(ff, node->data, data_size)) { 2658 - free(node); 2659 - return -1; 2660 - } 2653 + if (__do_read(ff, node->data, data_size)) 2654 + goto out; 2661 2655 2662 2656 perf_env__insert_btf(env, node); 2657 + node = NULL; 2663 2658 } 2664 2659 2660 + err = 0; 2661 + out: 2665 2662 up_write(&env->bpf_progs.lock); 2666 - return 0; 2663 + free(node); 2664 + return err; 2667 2665 } 2668 2666 2669 2667 struct feature_ops {