Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf parse-events: Add init and exit to parse_events_error

parse_events() may succeed but leave string memory allocations reachable
in the error.

Add an init/exit that must be called to initialize and clean up the
error. This fixes a leak in metricgroup parse_ids.

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20211107090002.3784612-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Ian Rogers and committed by
Arnaldo Carvalho de Melo
07eafd4e 6c191289

+74 -51
+2 -1
tools/perf/arch/powerpc/util/kvm-stat.c
··· 113 113 struct parse_events_error err; 114 114 int ret; 115 115 116 - bzero(&err, sizeof(err)); 116 + parse_events_error__init(&err); 117 117 ret = parse_events(evlist, str, &err); 118 118 if (err.str) 119 119 parse_events_error__print(&err, "tracepoint"); 120 + parse_events_error__exit(&err); 120 121 return ret; 121 122 } 122 123
+4 -2
tools/perf/bench/evlist-open-close.c
··· 78 78 79 79 static struct evlist *bench__create_evlist(char *evstr) 80 80 { 81 - struct parse_events_error err = { .idx = 0, }; 81 + struct parse_events_error err; 82 82 struct evlist *evlist = evlist__new(); 83 83 int ret; 84 84 ··· 87 87 return NULL; 88 88 } 89 89 90 + parse_events_error__init(&err); 90 91 ret = parse_events(evlist, evstr, &err); 91 92 if (ret) { 92 93 parse_events_error__print(&err, evstr); 94 + parse_events_error__exit(&err); 93 95 pr_err("Run 'perf list' for a list of valid events\n"); 94 96 ret = 1; 95 97 goto out_delete_evlist; 96 98 } 97 - 99 + parse_events_error__exit(&err); 98 100 ret = evlist__create_maps(evlist, &opts.target); 99 101 if (ret < 0) { 100 102 pr_err("Not enough memory to create thread/cpu maps\n");
+22 -16
tools/perf/builtin-stat.c
··· 1750 1750 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1751 1751 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1752 1752 }; 1753 - struct parse_events_error errinfo; 1754 - 1755 1753 /* Set attrs if no event is selected and !null_run: */ 1756 1754 if (stat_config.null_run) 1757 1755 return 0; 1758 1756 1759 - bzero(&errinfo, sizeof(errinfo)); 1760 1757 if (transaction_run) { 1758 + struct parse_events_error errinfo; 1761 1759 /* Handle -T as -M transaction. Once platform specific metrics 1762 1760 * support has been added to the json files, all architectures 1763 1761 * will use this approach. To determine transaction support ··· 1770 1772 &stat_config.metric_events); 1771 1773 } 1772 1774 1775 + parse_events_error__init(&errinfo); 1773 1776 if (pmu_have_event("cpu", "cycles-ct") && 1774 1777 pmu_have_event("cpu", "el-start")) 1775 1778 err = parse_events(evsel_list, transaction_attrs, ··· 1782 1783 if (err) { 1783 1784 fprintf(stderr, "Cannot set up transaction events\n"); 1784 1785 parse_events_error__print(&errinfo, transaction_attrs); 1785 - return -1; 1786 1786 } 1787 - return 0; 1787 + parse_events_error__exit(&errinfo); 1788 + return err ? -1 : 0;
1788 1789 } 1789 1790 1790 1791 if (smi_cost) { 1792 + struct parse_events_error errinfo; 1791 1793 int smi; 1792 1794 1793 1795 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { ··· 1804 1804 smi_reset = true; 1805 1805 } 1806 1806 1807 - if (pmu_have_event("msr", "aperf") && 1808 - pmu_have_event("msr", "smi")) { 1809 - if (!force_metric_only) 1810 - stat_config.metric_only = true; 1811 - err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1812 - } else { 1807 + if (!pmu_have_event("msr", "aperf") || 1808 + !pmu_have_event("msr", "smi")) { 1813 1809 fprintf(stderr, "To measure SMI cost, it needs " 1814 1810 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1815 - parse_events_error__print(&errinfo, smi_cost_attrs); 1816 1811 return -1; 1817 1812 } 1813 + if (!force_metric_only) 1814 + stat_config.metric_only = true; 1815 + 1816 + parse_events_error__init(&errinfo); 1817 + err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1818 1818 if (err) { 1819 1819 parse_events_error__print(&errinfo, smi_cost_attrs); 1820 1820 fprintf(stderr, "Cannot set up SMI cost events\n"); 1821 - return -1; 1822 1821 } 1823 - return 0; 1822 + parse_events_error__exit(&errinfo); 1823 + return err ? -1 : 0;
1824 1824 } 1825 1825 1826 1826 if (topdown_run) { ··· 1875 1875 return -1; 1876 1876 } 1877 1877 if (topdown_attrs[0] && str) { 1878 + struct parse_events_error errinfo; 1878 1879 if (warn) 1879 1880 arch_topdown_group_warn(); 1880 1881 setup_metrics: 1882 + parse_events_error__init(&errinfo); 1881 1883 err = parse_events(evsel_list, str, &errinfo); 1882 1884 if (err) { 1883 1885 fprintf(stderr, 1884 1886 "Cannot set up top down events %s: %d\n", 1885 1887 str, err); 1886 1888 parse_events_error__print(&errinfo, str); 1889 + parse_events_error__exit(&errinfo); 1887 1890 free(str); 1888 1891 return -1; 1889 1892 } 1893 + parse_events_error__exit(&errinfo); 1890 1894 } else { 1891 1895 fprintf(stderr, "System does not support topdown\n"); 1892 1896 return -1; ··· 1900 1896 1901 1897 if (!evsel_list->core.nr_entries) { 1902 1898 if (perf_pmu__has_hybrid()) { 1899 + struct parse_events_error errinfo; 1903 1900 const char *hybrid_str = "cycles,instructions,branches,branch-misses"; 1904 1901 1905 1902 if (target__has_cpu(&target)) ··· 1911 1906 return -1; 1912 1907 } 1913 1908 1909 + parse_events_error__init(&errinfo); 1914 1910 err = parse_events(evsel_list, hybrid_str, &errinfo); 1915 1911 if (err) { 1916 1912 fprintf(stderr, 1917 1913 "Cannot set up hybrid events %s: %d\n", 1918 1914 hybrid_str, err); 1919 1915 parse_events_error__print(&errinfo, hybrid_str); 1920 - return -1; 1921 1916 } 1922 - return err; 1917 + parse_events_error__exit(&errinfo); 1918 + return err ? -1 : 0; 1923 1919 } 1924 1920 1925 1921 if (target__has_cpu(&target))
+7 -10
tools/perf/builtin-trace.c
··· 3063 3063 struct parse_events_error err; 3064 3064 int ret; 3065 3065 3066 - bzero(&err, sizeof(err)); 3066 + parse_events_error__init(&err); 3067 3067 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3068 - if (ret) { 3069 - free(err.str); 3070 - free(err.help); 3071 - free(err.first_str); 3072 - free(err.first_help); 3068 + parse_events_error__exit(&err); 3069 + if (ret) 3073 3070 return false; 3074 - } 3075 3071 3076 3072 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3077 3073 if (!strstarts(evsel__name(evsel), "probe:vfs_getname")) ··· 4921 4925 if (trace.perfconfig_events != NULL) { 4922 4926 struct parse_events_error parse_err; 4923 4927 4924 - bzero(&parse_err, sizeof(parse_err)); 4928 + parse_events_error__init(&parse_err); 4925 4929 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err); 4926 - if (err) { 4930 + if (err) 4927 4931 parse_events_error__print(&parse_err, trace.perfconfig_events); 4932 + parse_events_error__exit(&parse_err); 4933 + if (err) 4928 4934 goto out; 4929 - } 4930 4935 } 4931 4936 4932 4937 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
+2 -1
tools/perf/tests/backward-ring-buffer.c
··· 115 115 goto out_delete_evlist; 116 116 } 117 117 118 - bzero(&parse_error, sizeof(parse_error)); 118 + parse_events_error__init(&parse_error); 119 119 /* 120 120 * Set backward bit, ring buffer should be writing from end. Record 121 121 * it in aux evlist 122 122 */ 123 123 err = parse_events(evlist, "syscalls:sys_enter_prctl/overwrite/", &parse_error); 124 + parse_events_error__exit(&parse_error); 124 125 if (err) { 125 126 pr_debug("Failed to parse tracepoint event, try use root\n"); 126 127 ret = TEST_SKIP;
+2 -1
tools/perf/tests/bpf.c
··· 123 123 struct parse_events_state parse_state; 124 124 struct parse_events_error parse_error; 125 125 126 - bzero(&parse_error, sizeof(parse_error)); 126 + parse_events_error__init(&parse_error); 127 127 bzero(&parse_state, sizeof(parse_state)); 128 128 parse_state.error = &parse_error; 129 129 INIT_LIST_HEAD(&parse_state.list); 130 130 131 131 err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL); 132 + parse_events_error__exit(&parse_error); 132 133 if (err || list_empty(&parse_state.list)) { 133 134 pr_debug("Failed to add events selected by BPF\n"); 134 135 return TEST_FAIL;
+2
tools/perf/tests/expand-cgroup.c
··· 124 124 evlist = evlist__new(); 125 125 TEST_ASSERT_VAL("failed to get evlist", evlist); 126 126 127 + parse_events_error__init(&err); 127 128 ret = parse_events(evlist, event_str, &err); 128 129 if (ret < 0) { 129 130 pr_debug("failed to parse event '%s', err %d, str '%s'\n", ··· 136 135 rblist__init(&metric_events); 137 136 ret = test_expand_events(evlist, &metric_events); 138 137 out: 138 + parse_events_error__exit(&err); 139 139 evlist__delete(evlist); 140 140 return ret; 141 141 }
+2 -2
tools/perf/tests/parse-events.c
··· 2045 2045 struct evlist *evlist; 2046 2046 int ret; 2047 2047 2048 - bzero(&err, sizeof(err)); 2049 2048 if (e->valid && !e->valid()) { 2050 2049 pr_debug("... SKIP"); 2051 2050 return 0; ··· 2054 2055 if (evlist == NULL) 2055 2056 return -ENOMEM; 2056 2057 2058 + parse_events_error__init(&err); 2057 2059 ret = parse_events(evlist, e->name, &err); 2058 2060 if (ret) { 2059 2061 pr_debug("failed to parse event '%s', err %d, str '%s'\n", ··· 2063 2063 } else { 2064 2064 ret = e->check(evlist); 2065 2065 } 2066 - 2066 + parse_events_error__exit(&err); 2067 2067 evlist__delete(evlist); 2068 2068 2069 2069 return ret;
+10 -12
tools/perf/tests/pmu-events.c
··· 787 787 788 788 static int check_parse_cpu(const char *id, bool same_cpu, const struct pmu_event *pe) 789 789 { 790 - struct parse_events_error error = { .idx = 0, }; 790 + struct parse_events_error error; 791 + int ret; 791 792 792 - int ret = check_parse_id(id, &error, NULL); 793 + parse_events_error__init(&error); 794 + ret = check_parse_id(id, &error, NULL); 793 795 if (ret && same_cpu) { 794 796 pr_warning("Parse event failed metric '%s' id '%s' expr '%s'\n", 795 797 pe->metric_name, id, pe->metric_expr); ··· 802 800 id, pe->metric_name, pe->metric_expr); 803 801 ret = 0; 804 802 } 805 - free(error.str); 806 - free(error.help); 807 - free(error.first_str); 808 - free(error.first_help); 803 + parse_events_error__exit(&error); 809 804 return ret; 810 805 } 811 806 812 807 static int check_parse_fake(const char *id) 813 808 { 814 - struct parse_events_error error = { .idx = 0, }; 815 - int ret = check_parse_id(id, &error, &perf_pmu__fake); 809 + struct parse_events_error error; 810 + int ret; 816 811 817 - free(error.str); 818 - free(error.help); 819 - free(error.first_str); 820 - free(error.first_help); 812 + parse_events_error__init(&error); 813 + ret = check_parse_id(id, &error, &perf_pmu__fake); 814 + parse_events_error__exit(&error); 821 815 return ret; 822 816 } 823 817
+2
tools/perf/tests/topology.c
··· 49 49 50 50 session->evlist = evlist__new(); 51 51 TEST_ASSERT_VAL("can't get evlist", session->evlist); 52 + parse_events_error__init(&err); 52 53 parse_events(session->evlist, "cpu_core/cycles/", &err); 54 + parse_events_error__exit(&err); 53 55 } 54 56 55 57 perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
+2 -1
tools/perf/util/metricgroup.c
··· 1339 1339 goto err_out; 1340 1340 } 1341 1341 pr_debug("Parsing metric events '%s'\n", events.buf); 1342 - bzero(&parse_error, sizeof(parse_error)); 1342 + parse_events_error__init(&parse_error); 1343 1343 ret = __parse_events(parsed_evlist, events.buf, &parse_error, fake_pmu); 1344 1344 if (ret) { 1345 1345 parse_events_error__print(&parse_error, events.buf); ··· 1352 1352 *out_evlist = parsed_evlist; 1353 1353 parsed_evlist = NULL; 1354 1354 err_out: 1355 + parse_events_error__exit(&parse_error); 1355 1356 evlist__delete(parsed_evlist); 1356 1357 strbuf_release(&events); 1357 1358 return ret;
+15 -5
tools/perf/util/parse-events.c
··· 2301 2301 return ret; 2302 2302 } 2303 2303 2304 + void parse_events_error__init(struct parse_events_error *err) 2305 + { 2306 + bzero(err, sizeof(*err)); 2307 + } 2308 + 2309 + void parse_events_error__exit(struct parse_events_error *err) 2310 + { 2311 + zfree(&err->str); 2312 + zfree(&err->help); 2313 + zfree(&err->first_str); 2314 + zfree(&err->first_help); 2315 + } 2316 + 2304 2317 void parse_events_error__handle(struct parse_events_error *err, int idx, 2305 2318 char *str, char *help) 2306 2319 { ··· 2418 2405 return; 2419 2406 2420 2407 __parse_events_error__print(err->idx, err->str, err->help, event); 2421 - zfree(&err->str); 2422 - zfree(&err->help); 2423 2408 2424 2409 if (err->num_errors > 1) { 2425 2410 fputs("\nInitial error:\n", stderr); 2426 2411 __parse_events_error__print(err->first_idx, err->first_str, 2427 2412 err->first_help, event); 2428 - zfree(&err->first_str); 2429 - zfree(&err->first_help); 2430 2413 } 2431 2414 } 2432 2415 ··· 2435 2426 struct parse_events_error err; 2436 2427 int ret; 2437 2428 2438 - bzero(&err, sizeof(err)); 2429 + parse_events_error__init(&err); 2439 2430 ret = parse_events(evlist, str, &err); 2440 2431 2441 2432 if (ret) { 2442 2433 parse_events_error__print(&err, str); 2443 2434 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2444 2435 } 2436 + parse_events_error__exit(&err); 2445 2437 2446 2438 return ret; 2447 2439 }
+2
tools/perf/util/parse-events.h
··· 242 242 int valid_event_mount(const char *eventfs); 243 243 char *parse_events_formats_error_string(char *additional_terms); 244 244 245 + void parse_events_error__init(struct parse_events_error *err); 246 + void parse_events_error__exit(struct parse_events_error *err); 245 247 void parse_events_error__handle(struct parse_events_error *err, int idx, 246 248 char *str, char *help); 247 249 void parse_events_error__print(struct parse_events_error *err,