Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf evlist: Map backward events to backward_mmap

In perf_evlist__mmap_per_evsel(), select backward_mmap for backward
events. Utilize new perf_mmap APIs. Dynamically alloc backward_mmap.

Remove useless functions.

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: He Kuang <hekuang@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-9-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Authored by Wang Nan and committed by Arnaldo Carvalho de Melo.
078c3386 b2cb615d

+29 -29
+2 -2
tools/perf/tests/backward-ring-buffer.c
··· 31 31 for (i = 0; i < evlist->nr_mmaps; i++) { 32 32 union perf_event *event; 33 33 34 - perf_evlist__mmap_read_catchup(evlist, i); 35 - while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) { 34 + perf_mmap__read_catchup(&evlist->backward_mmap[i]); 35 + while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) { 36 36 const u32 type = event->header.type; 37 37 38 38 switch (type) {
+27 -27
tools/perf/util/evlist.c
··· 27 27 #include <linux/log2.h> 28 28 #include <linux/err.h> 29 29 30 - static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx); 31 30 static void perf_mmap__munmap(struct perf_mmap *map); 32 31 static void perf_mmap__put(struct perf_mmap *map); 33 32 ··· 691 692 { 692 693 int i; 693 694 695 + if (!evlist->backward_mmap) 696 + return 0; 697 + 694 698 for (i = 0; i < evlist->nr_mmaps; i++) { 695 - int fd = evlist->mmap[i].fd; 699 + int fd = evlist->backward_mmap[i].fd; 696 700 int err; 697 701 698 702 if (fd < 0) ··· 906 904 perf_mmap__munmap(md); 907 905 } 908 906 909 - static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx) 910 - { 911 - perf_mmap__get(&evlist->mmap[idx]); 912 - } 913 - 914 - static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx) 915 - { 916 - perf_mmap__put(&evlist->mmap[idx]); 917 - } 918 - 919 907 void perf_mmap__consume(struct perf_mmap *md, bool overwrite) 920 908 { 921 909 if (!overwrite) { ··· 1041 1049 return 0; 1042 1050 } 1043 1051 1044 - static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx, 1045 - struct mmap_params *mp, int fd) 1046 - { 1047 - return perf_mmap__mmap(&evlist->mmap[idx], mp, fd); 1048 - } 1049 - 1050 1052 static bool 1051 1053 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, 1052 1054 struct perf_evsel *evsel) ··· 1052 1066 1053 1067 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, 1054 1068 struct mmap_params *mp, int cpu, 1055 - int thread, int *output) 1069 + int thread, int *_output, int *_output_backward) 1056 1070 { 1057 1071 struct perf_evsel *evsel; 1058 1072 int revent; 1059 1073 1060 1074 evlist__for_each_entry(evlist, evsel) { 1075 + struct perf_mmap *maps = evlist->mmap; 1076 + int *output = _output; 1061 1077 int fd; 1062 1078 1063 - if (!!evsel->attr.write_backward != (evlist->overwrite && evlist->backward)) 1064 - continue; 1079 + if (evsel->attr.write_backward) { 1080 + output = _output_backward; 
1081 + maps = evlist->backward_mmap; 1082 + 1083 + if (!maps) { 1084 + maps = perf_evlist__alloc_mmap(evlist); 1085 + if (!maps) 1086 + return -1; 1087 + evlist->backward_mmap = maps; 1088 + } 1089 + } 1065 1090 1066 1091 if (evsel->system_wide && thread) 1067 1092 continue; ··· 1081 1084 1082 1085 if (*output == -1) { 1083 1086 *output = fd; 1084 - if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0) 1087 + 1088 + if (perf_mmap__mmap(&maps[idx], mp, *output) < 0) 1085 1089 return -1; 1086 1090 } else { 1087 1091 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) 1088 1092 return -1; 1089 1093 1090 - perf_evlist__mmap_get(evlist, idx); 1094 + perf_mmap__get(&maps[idx]); 1091 1095 } 1092 1096 1093 1097 revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0; ··· 1101 1103 * Therefore don't add it for polling. 1102 1104 */ 1103 1105 if (!evsel->system_wide && 1104 - __perf_evlist__add_pollfd(evlist, fd, &evlist->mmap[idx], revent) < 0) { 1105 - perf_evlist__mmap_put(evlist, idx); 1106 + __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) { 1107 + perf_mmap__put(&maps[idx]); 1106 1108 return -1; 1107 1109 } 1108 1110 ··· 1128 1130 pr_debug2("perf event ring buffer mmapped per cpu\n"); 1129 1131 for (cpu = 0; cpu < nr_cpus; cpu++) { 1130 1132 int output = -1; 1133 + int output_backward = -1; 1131 1134 1132 1135 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, 1133 1136 true); 1134 1137 1135 1138 for (thread = 0; thread < nr_threads; thread++) { 1136 1139 if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, 1137 - thread, &output)) 1140 + thread, &output, &output_backward)) 1138 1141 goto out_unmap; 1139 1142 } 1140 1143 } ··· 1156 1157 pr_debug2("perf event ring buffer mmapped per thread\n"); 1157 1158 for (thread = 0; thread < nr_threads; thread++) { 1158 1159 int output = -1; 1160 + int output_backward = -1; 1159 1161 1160 1162 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, 1161 1163 false); 1162 1164 1163 1165 if 
(perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, 1164 - &output)) 1166 + &output, &output_backward)) 1165 1167 goto out_unmap; 1166 1168 } 1167 1169