Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libperf: Switch cpu to more accurate cpu_map_idx

Modify variable names and adopt perf_cpu_map__for_each_cpu() in
perf_evsel__open().

Renaming is done by looking for consistency in API usage.

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Clarke <pc@us.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki Poulose <suzuki.poulose@arm.com>
Cc: Vineet Singh <vineet.singh@intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: zhengjun.xing@intel.com
Link: https://lore.kernel.org/r/20220105061351.120843-28-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Ian Rogers and committed by
Arnaldo Carvalho de Melo
7e3d1784 2ca0a371

+50 -48
+45 -43
tools/lib/perf/evsel.c
··· 43 43 free(evsel); 44 44 } 45 45 46 - #define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y)) 47 - #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL) 46 + #define FD(_evsel, _cpu_map_idx, _thread) \ 47 + ((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread)) 48 + #define MMAP(_evsel, _cpu_map_idx, _thread) \ 49 + (_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \ 50 + : NULL) 48 51 49 52 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) 50 53 { 51 54 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); 52 55 53 56 if (evsel->fd) { 54 - int cpu, thread; 55 - for (cpu = 0; cpu < ncpus; cpu++) { 57 + int idx, thread; 58 + 59 + for (idx = 0; idx < ncpus; idx++) { 56 60 for (thread = 0; thread < nthreads; thread++) { 57 - int *fd = FD(evsel, cpu, thread); 61 + int *fd = FD(evsel, idx, thread); 58 62 59 63 if (fd) 60 64 *fd = -1; ··· 84 80 return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); 85 81 } 86 82 87 - static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd) 83 + static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd) 88 84 { 89 85 struct perf_evsel *leader = evsel->leader; 90 86 int *fd; ··· 101 97 if (!leader->fd) 102 98 return -ENOTCONN; 103 99 104 - fd = FD(leader, cpu, thread); 100 + fd = FD(leader, cpu_map_idx, thread); 105 101 if (fd == NULL || *fd == -1) 106 102 return -EBADF; 107 103 ··· 113 109 int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, 114 110 struct perf_thread_map *threads) 115 111 { 116 - int cpu, thread, err = 0; 112 + int cpu, idx, thread, err = 0; 117 113 118 114 if (cpus == NULL) { 119 115 static struct perf_cpu_map *empty_cpu_map; ··· 143 139 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) 144 140 return -ENOMEM; 145 141 146 - for (cpu = 0; cpu < cpus->nr; cpu++) { 142 + 
perf_cpu_map__for_each_cpu(cpu, idx, cpus) { 147 143 for (thread = 0; thread < threads->nr; thread++) { 148 144 int fd, group_fd, *evsel_fd; 149 145 150 - evsel_fd = FD(evsel, cpu, thread); 146 + evsel_fd = FD(evsel, idx, thread); 151 147 if (evsel_fd == NULL) 152 148 return -EINVAL; 153 149 154 - err = get_group_fd(evsel, cpu, thread, &group_fd); 150 + err = get_group_fd(evsel, idx, thread, &group_fd); 155 151 if (err < 0) 156 152 return err; 157 153 158 154 fd = sys_perf_event_open(&evsel->attr, 159 155 threads->map[thread].pid, 160 - cpus->map[cpu], group_fd, 0); 156 + cpu, group_fd, 0); 161 157 162 158 if (fd < 0) 163 159 return -errno; ··· 169 165 return err; 170 166 } 171 167 172 - static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu) 168 + static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx) 173 169 { 174 170 int thread; 175 171 176 172 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { 177 - int *fd = FD(evsel, cpu, thread); 173 + int *fd = FD(evsel, cpu_map_idx, thread); 178 174 179 175 if (fd && *fd >= 0) { 180 176 close(*fd); ··· 185 181 186 182 void perf_evsel__close_fd(struct perf_evsel *evsel) 187 183 { 188 - int cpu; 189 - 190 - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) 191 - perf_evsel__close_fd_cpu(evsel, cpu); 184 + for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++) 185 + perf_evsel__close_fd_cpu(evsel, idx); 192 186 } 193 187 194 188 void perf_evsel__free_fd(struct perf_evsel *evsel) ··· 204 202 perf_evsel__free_fd(evsel); 205 203 } 206 204 207 - void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu) 205 + void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx) 208 206 { 209 207 if (evsel->fd == NULL) 210 208 return; 211 209 212 - perf_evsel__close_fd_cpu(evsel, cpu); 210 + perf_evsel__close_fd_cpu(evsel, cpu_map_idx); 213 211 } 214 212 215 213 void perf_evsel__munmap(struct perf_evsel *evsel) 216 214 { 217 - int cpu, thread; 215 + int idx, thread; 
218 216 219 217 if (evsel->fd == NULL || evsel->mmap == NULL) 220 218 return; 221 219 222 - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { 220 + for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) { 223 221 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 224 - int *fd = FD(evsel, cpu, thread); 222 + int *fd = FD(evsel, idx, thread); 225 223 226 224 if (fd == NULL || *fd < 0) 227 225 continue; 228 226 229 - perf_mmap__munmap(MMAP(evsel, cpu, thread)); 227 + perf_mmap__munmap(MMAP(evsel, idx, thread)); 230 228 } 231 229 } 232 230 ··· 236 234 237 235 int perf_evsel__mmap(struct perf_evsel *evsel, int pages) 238 236 { 239 - int ret, cpu, thread; 237 + int ret, idx, thread; 240 238 struct perf_mmap_param mp = { 241 239 .prot = PROT_READ | PROT_WRITE, 242 240 .mask = (pages * page_size) - 1, ··· 248 246 if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0) 249 247 return -ENOMEM; 250 248 251 - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { 249 + for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) { 252 250 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 253 - int *fd = FD(evsel, cpu, thread); 251 + int *fd = FD(evsel, idx, thread); 254 252 struct perf_mmap *map; 255 253 256 254 if (fd == NULL || *fd < 0) 257 255 continue; 258 256 259 - map = MMAP(evsel, cpu, thread); 257 + map = MMAP(evsel, idx, thread); 260 258 perf_mmap__init(map, NULL, false, NULL); 261 259 262 - ret = perf_mmap__mmap(map, &mp, *fd, cpu); 260 + ret = perf_mmap__mmap(map, &mp, *fd, idx); 263 261 if (ret) { 264 262 perf_evsel__munmap(evsel); 265 263 return ret; ··· 270 268 return 0; 271 269 } 272 270 273 - void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread) 271 + void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread) 274 272 { 275 - int *fd = FD(evsel, cpu, thread); 273 + int *fd = FD(evsel, cpu_map_idx, thread); 276 274 277 - if (fd == NULL || *fd < 0 || 
MMAP(evsel, cpu, thread) == NULL) 275 + if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL) 278 276 return NULL; 279 277 280 - return MMAP(evsel, cpu, thread)->base; 278 + return MMAP(evsel, cpu_map_idx, thread)->base; 281 279 } 282 280 283 281 int perf_evsel__read_size(struct perf_evsel *evsel) ··· 305 303 return size; 306 304 } 307 305 308 - int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, 306 + int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread, 309 307 struct perf_counts_values *count) 310 308 { 311 309 size_t size = perf_evsel__read_size(evsel); 312 - int *fd = FD(evsel, cpu, thread); 310 + int *fd = FD(evsel, cpu_map_idx, thread); 313 311 314 312 memset(count, 0, sizeof(*count)); 315 313 316 314 if (fd == NULL || *fd < 0) 317 315 return -EINVAL; 318 316 319 - if (MMAP(evsel, cpu, thread) && 320 - !perf_mmap__read_self(MMAP(evsel, cpu, thread), count)) 317 + if (MMAP(evsel, cpu_map_idx, thread) && 318 + !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count)) 321 319 return 0; 322 320 323 321 if (readn(*fd, count->values, size) <= 0) ··· 328 326 329 327 static int perf_evsel__run_ioctl(struct perf_evsel *evsel, 330 328 int ioc, void *arg, 331 - int cpu) 329 + int cpu_map_idx) 332 330 { 333 331 int thread; 334 332 335 333 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 336 334 int err; 337 - int *fd = FD(evsel, cpu, thread); 335 + int *fd = FD(evsel, cpu_map_idx, thread); 338 336 339 337 if (fd == NULL || *fd < 0) 340 338 return -1; ··· 348 346 return 0; 349 347 } 350 348 351 - int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu) 349 + int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx) 352 350 { 353 - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu); 351 + return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx); 354 352 } 355 353 356 354 int perf_evsel__enable(struct perf_evsel *evsel) ··· 363 361 return 
err; 364 362 } 365 363 366 - int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu) 364 + int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx) 367 365 { 368 - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu); 366 + return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx); 369 367 } 370 368 371 369 int perf_evsel__disable(struct perf_evsel *evsel)
+5 -5
tools/lib/perf/include/perf/evsel.h
··· 28 28 LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, 29 29 struct perf_thread_map *threads); 30 30 LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel); 31 - LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu); 31 + LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx); 32 32 LIBPERF_API int perf_evsel__mmap(struct perf_evsel *evsel, int pages); 33 33 LIBPERF_API void perf_evsel__munmap(struct perf_evsel *evsel); 34 - LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread); 35 - LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, 34 + LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread); 35 + LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread, 36 36 struct perf_counts_values *count); 37 37 LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel); 38 - LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu); 38 + LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx); 39 39 LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel); 40 - LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu); 40 + LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx); 41 41 LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); 42 42 LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); 43 43 LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);