Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf mmap: Simplify perf_mmap__read_init()

It isn't necessary to pass the 'start', 'end' and 'overwrite' arguments
to perf_mmap__read_init(). The data is stored in the struct perf_mmap.

Remove these now-redundant parameters.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1520350567-80082-8-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Kan Liang and committed by
Arnaldo Carvalho de Melo
b9bae2c8 0019dc87

+18 -40
+1 -2
tools/perf/arch/x86/tests/perf-time-to-tsc.c
··· 61 61 u64 test_tsc, comm1_tsc, comm2_tsc; 62 62 u64 test_time, comm1_time = 0, comm2_time = 0; 63 63 struct perf_mmap *md; 64 - u64 end, start; 65 64 66 65 threads = thread_map__new(-1, getpid(), UINT_MAX); 67 66 CHECK_NOT_NULL__(threads); ··· 111 112 112 113 for (i = 0; i < evlist->nr_mmaps; i++) { 113 114 md = &evlist->mmap[i]; 114 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 115 + if (perf_mmap__read_init(md) < 0) 115 116 continue; 116 117 117 118 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/builtin-kvm.c
··· 746 746 struct perf_evlist *evlist = kvm->evlist; 747 747 union perf_event *event; 748 748 struct perf_mmap *md; 749 - u64 end, start; 750 749 u64 timestamp; 751 750 s64 n = 0; 752 751 int err; 753 752 754 753 *mmap_time = ULLONG_MAX; 755 754 md = &evlist->mmap[idx]; 756 - err = perf_mmap__read_init(md, false, &start, &end); 755 + err = perf_mmap__read_init(md); 757 756 if (err < 0) 758 757 return (err == -EAGAIN) ? 0 : -1; 759 758
+1 -2
tools/perf/builtin-top.c
··· 817 817 struct perf_session *session = top->session; 818 818 union perf_event *event; 819 819 struct machine *machine; 820 - u64 end, start; 821 820 int ret; 822 821 823 822 md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx]; 824 - if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0) 823 + if (perf_mmap__read_init(md) < 0) 825 824 return; 826 825 827 826 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/builtin-trace.c
··· 2503 2503 for (i = 0; i < evlist->nr_mmaps; i++) { 2504 2504 union perf_event *event; 2505 2505 struct perf_mmap *md; 2506 - u64 end, start; 2507 2506 2508 2507 md = &evlist->mmap[i]; 2509 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 2508 + if (perf_mmap__read_init(md) < 0) 2510 2509 continue; 2511 2510 2512 2511 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/backward-ring-buffer.c
··· 33 33 for (i = 0; i < evlist->nr_mmaps; i++) { 34 34 struct perf_mmap *map = &evlist->overwrite_mmap[i]; 35 35 union perf_event *event; 36 - u64 start, end; 37 36 38 - perf_mmap__read_init(map, true, &start, &end); 37 + perf_mmap__read_init(map); 39 38 while ((event = perf_mmap__read_event(map)) != NULL) { 40 39 const u32 type = event->header.type; 41 40
+1 -2
tools/perf/tests/bpf.c
··· 177 177 for (i = 0; i < evlist->nr_mmaps; i++) { 178 178 union perf_event *event; 179 179 struct perf_mmap *md; 180 - u64 end, start; 181 180 182 181 md = &evlist->mmap[i]; 183 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 182 + if (perf_mmap__read_init(md) < 0) 184 183 continue; 185 184 186 185 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/code-reading.c
··· 410 410 { 411 411 union perf_event *event; 412 412 struct perf_mmap *md; 413 - u64 end, start; 414 413 int i, ret; 415 414 416 415 for (i = 0; i < evlist->nr_mmaps; i++) { 417 416 md = &evlist->mmap[i]; 418 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 417 + if (perf_mmap__read_init(md) < 0) 419 418 continue; 420 419 421 420 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/keep-tracking.c
··· 28 28 { 29 29 union perf_event *event; 30 30 struct perf_mmap *md; 31 - u64 end, start; 32 31 int i, found; 33 32 34 33 found = 0; 35 34 for (i = 0; i < evlist->nr_mmaps; i++) { 36 35 md = &evlist->mmap[i]; 37 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 36 + if (perf_mmap__read_init(md) < 0) 38 37 continue; 39 38 while ((event = perf_mmap__read_event(md)) != NULL) { 40 39 if (event->header.type == PERF_RECORD_COMM &&
+1 -2
tools/perf/tests/mmap-basic.c
··· 39 39 struct perf_evsel *evsels[nsyscalls], *evsel; 40 40 char sbuf[STRERR_BUFSIZE]; 41 41 struct perf_mmap *md; 42 - u64 end, start; 43 42 44 43 threads = thread_map__new(-1, getpid(), UINT_MAX); 45 44 if (threads == NULL) { ··· 108 109 } 109 110 110 111 md = &evlist->mmap[0]; 111 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 112 + if (perf_mmap__read_init(md) < 0) 112 113 goto out_init; 113 114 114 115 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/openat-syscall-tp-fields.c
··· 87 87 for (i = 0; i < evlist->nr_mmaps; i++) { 88 88 union perf_event *event; 89 89 struct perf_mmap *md; 90 - u64 end, start; 91 90 92 91 md = &evlist->mmap[i]; 93 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 92 + if (perf_mmap__read_init(md) < 0) 94 93 continue; 95 94 96 95 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/perf-record.c
··· 165 165 for (i = 0; i < evlist->nr_mmaps; i++) { 166 166 union perf_event *event; 167 167 struct perf_mmap *md; 168 - u64 end, start; 169 168 170 169 md = &evlist->mmap[i]; 171 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 170 + if (perf_mmap__read_init(md) < 0) 172 171 continue; 173 172 174 173 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/sw-clock.c
··· 40 40 struct cpu_map *cpus; 41 41 struct thread_map *threads; 42 42 struct perf_mmap *md; 43 - u64 end, start; 44 43 45 44 attr.sample_freq = 500; 46 45 ··· 95 96 perf_evlist__disable(evlist); 96 97 97 98 md = &evlist->mmap[0]; 98 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 99 + if (perf_mmap__read_init(md) < 0) 99 100 goto out_init; 100 101 101 102 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/switch-tracking.c
··· 259 259 LIST_HEAD(events); 260 260 struct event_node *events_array, *node; 261 261 struct perf_mmap *md; 262 - u64 end, start; 263 262 int i, ret; 264 263 265 264 for (i = 0; i < evlist->nr_mmaps; i++) { 266 265 md = &evlist->mmap[i]; 267 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 266 + if (perf_mmap__read_init(md) < 0) 268 267 continue; 269 268 270 269 while ((event = perf_mmap__read_event(md)) != NULL) {
+1 -2
tools/perf/tests/task-exit.c
··· 48 48 struct cpu_map *cpus; 49 49 struct thread_map *threads; 50 50 struct perf_mmap *md; 51 - u64 end, start; 52 51 53 52 signal(SIGCHLD, sig_handler); 54 53 ··· 112 113 113 114 retry: 114 115 md = &evlist->mmap[0]; 115 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 116 + if (perf_mmap__read_init(md) < 0) 116 117 goto out_init; 117 118 118 119 while ((event = perf_mmap__read_event(md)) != NULL) {
+2 -8
tools/perf/util/mmap.c
··· 235 235 /* 236 236 * Report the start and end of the available data in ringbuffer 237 237 */ 238 - int perf_mmap__read_init(struct perf_mmap *md, bool overwrite, 239 - u64 *startp, u64 *endp) 238 + int perf_mmap__read_init(struct perf_mmap *md) 240 239 { 241 240 u64 head = perf_mmap__read_head(md); 242 241 u64 old = md->prev; 243 242 unsigned char *data = md->base + page_size; 244 243 unsigned long size; 245 244 246 - *startp = overwrite ? head : old; 247 - *endp = overwrite ? old : head; 248 245 md->start = md->overwrite ? head : old; 249 246 md->end = md->overwrite ? old : head; 250 247 ··· 264 267 */ 265 268 if (overwrite_rb_find_range(data, md->mask, head, &md->start, &md->end)) 266 269 return -EINVAL; 267 - *startp = md->start; 268 - *endp = md->end; 269 270 } 270 271 271 272 return 0; ··· 273 278 int push(void *to, void *buf, size_t size)) 274 279 { 275 280 u64 head = perf_mmap__read_head(md); 276 - u64 end, start; 277 281 unsigned char *data = md->base + page_size; 278 282 unsigned long size; 279 283 void *buf; 280 284 int rc = 0; 281 285 282 - rc = perf_mmap__read_init(md, md->overwrite, &start, &end); 286 + rc = perf_mmap__read_init(md); 283 287 if (rc < 0) 284 288 return (rc == -EAGAIN) ? 0 : -1; 285 289
+1 -2
tools/perf/util/mmap.h
··· 96 96 97 97 size_t perf_mmap__mmap_len(struct perf_mmap *map); 98 98 99 - int perf_mmap__read_init(struct perf_mmap *md, bool overwrite, 100 - u64 *startp, u64 *endp); 99 + int perf_mmap__read_init(struct perf_mmap *md); 101 100 void perf_mmap__read_done(struct perf_mmap *map); 102 101 #endif /*__PERF_MMAP_H */
+1 -2
tools/perf/util/python.c
··· 984 984 int sample_id_all = 1, cpu; 985 985 static char *kwlist[] = { "cpu", "sample_id_all", NULL }; 986 986 struct perf_mmap *md; 987 - u64 end, start; 988 987 int err; 989 988 990 989 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, ··· 991 992 return NULL; 992 993 993 994 md = &evlist->mmap[cpu]; 994 - if (perf_mmap__read_init(md, false, &start, &end) < 0) 995 + if (perf_mmap__read_init(md) < 0) 995 996 goto end; 996 997 997 998 event = perf_mmap__read_event(md);