Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf tools: Experiment with cppcheck

Experimenting a bit using cppcheck[1], a static checker brought to my
attention by Colin, reducing the scope of some variables, reducing the
number of source code lines in the process:

$ cppcheck --enable=style tools/perf/util/thread.c
Checking tools/perf/util/thread.c...
[tools/perf/util/thread.c:17]: (style) The scope of the variable 'leader' can be reduced.
[tools/perf/util/thread.c:133]: (style) The scope of the variable 'err' can be reduced.
[tools/perf/util/thread.c:273]: (style) The scope of the variable 'err' can be reduced.

Will continue later, but these are already useful, keep them.

1: https://sourceforge.net/p/cppcheck/wiki/Home/

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-ixws7lbycihhpmq9cc949ti6@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+13 -20
+5 -7
tools/perf/util/evlist.c
··· 384 384 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, 385 385 struct perf_evsel *evsel, int cpu) 386 386 { 387 - int thread, err; 387 + int thread; 388 388 int nr_threads = perf_evlist__nr_threads(evlist, evsel); 389 389 390 390 if (!evsel->fd) 391 391 return -EINVAL; 392 392 393 393 for (thread = 0; thread < nr_threads; thread++) { 394 - err = ioctl(FD(evsel, cpu, thread), 395 - PERF_EVENT_IOC_ENABLE, 0); 394 + int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 396 395 if (err) 397 396 return err; 398 397 } ··· 402 403 struct perf_evsel *evsel, 403 404 int thread) 404 405 { 405 - int cpu, err; 406 + int cpu; 406 407 int nr_cpus = cpu_map__nr(evlist->cpus); 407 408 408 409 if (!evsel->fd) 409 410 return -EINVAL; 410 411 411 412 for (cpu = 0; cpu < nr_cpus; cpu++) { 412 - err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 413 + int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 413 414 if (err) 414 415 return err; 415 416 } ··· 1605 1606 struct perf_evsel *evsel; 1606 1607 int ncpus = cpu_map__nr(evlist->cpus); 1607 1608 int nthreads = thread_map__nr(evlist->threads); 1608 - int n; 1609 1609 1610 1610 evlist__for_each_entry_reverse(evlist, evsel) { 1611 - n = evsel->cpus ? evsel->cpus->nr : ncpus; 1611 + int n = evsel->cpus ? evsel->cpus->nr : ncpus; 1612 1612 perf_evsel__close(evsel, n, nthreads); 1613 1613 } 1614 1614 }
+1 -2
tools/perf/util/evsel.c
··· 985 985 986 986 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) 987 987 { 988 - int cpu, thread; 989 - 990 988 if (evsel->system_wide) 991 989 nthreads = 1; 992 990 993 991 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); 994 992 995 993 if (evsel->fd) { 994 + int cpu, thread; 996 995 for (cpu = 0; cpu < ncpus; cpu++) { 997 996 for (thread = 0; thread < nthreads; thread++) { 998 997 FD(evsel, cpu, thread) = -1;
+3 -3
tools/perf/util/machine.c
··· 1745 1745 int max_stack) 1746 1746 { 1747 1747 struct ip_callchain *chain = sample->callchain; 1748 - int chain_nr = min(max_stack, (int)chain->nr); 1748 + int chain_nr = min(max_stack, (int)chain->nr), i; 1749 1749 u8 cpumode = PERF_RECORD_MISC_USER; 1750 - int i, j, err; 1751 1750 u64 ip; 1752 1751 1753 1752 for (i = 0; i < chain_nr; i++) { ··· 1757 1758 /* LBR only affects the user callchain */ 1758 1759 if (i != chain_nr) { 1759 1760 struct branch_stack *lbr_stack = sample->branch_stack; 1760 - int lbr_nr = lbr_stack->nr; 1761 + int lbr_nr = lbr_stack->nr, j; 1761 1762 /* 1762 1763 * LBR callstack can only get user call chain. 1763 1764 * The mix_chain_nr is kernel call chain ··· 1771 1772 int mix_chain_nr = i + 1 + lbr_nr + 1; 1772 1773 1773 1774 for (j = 0; j < mix_chain_nr; j++) { 1775 + int err; 1774 1776 if (callchain_param.order == ORDER_CALLEE) { 1775 1777 if (j < i + 1) 1776 1778 ip = chain->ips[j];
+1 -2
tools/perf/util/strbuf.h
··· 66 66 int strbuf_grow(struct strbuf *buf, size_t); 67 67 68 68 static inline int strbuf_setlen(struct strbuf *sb, size_t len) { 69 - int ret; 70 69 if (!sb->alloc) { 71 - ret = strbuf_grow(sb, 0); 70 + int ret = strbuf_grow(sb, 0); 72 71 if (ret) 73 72 return ret; 74 73 }
+3 -6
tools/perf/util/thread.c
··· 14 14 15 15 int thread__init_map_groups(struct thread *thread, struct machine *machine) 16 16 { 17 - struct thread *leader; 18 17 pid_t pid = thread->pid_; 19 18 20 19 if (pid == thread->tid || pid == -1) { 21 20 thread->mg = map_groups__new(machine); 22 21 } else { 23 - leader = __machine__findnew_thread(machine, pid, pid); 22 + struct thread *leader = __machine__findnew_thread(machine, pid, pid); 24 23 if (leader) { 25 24 thread->mg = map_groups__get(leader->mg); 26 25 thread__put(leader); ··· 129 130 bool exec) 130 131 { 131 132 struct comm *new, *curr = thread__comm(thread); 132 - int err; 133 133 134 134 /* Override the default :tid entry */ 135 135 if (!thread->comm_set) { 136 - err = comm__override(curr, str, timestamp, exec); 136 + int err = comm__override(curr, str, timestamp, exec); 137 137 if (err) 138 138 return err; 139 139 } else { ··· 268 270 269 271 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp) 270 272 { 271 - int err; 272 - 273 273 if (parent->comm_set) { 274 274 const char *comm = thread__comm_str(parent); 275 + int err; 275 276 if (!comm) 276 277 return -ENOMEM; 277 278 err = thread__set_comm(thread, comm, timestamp);