// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>

void perf_evlist__init(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		/* No PMU cpus, or the user gave an explicit cpu list. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
		/* Per-thread evsel on an "any cpu" (empty) evlist map. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		/* Otherwise keep the cpus the PMU advertised. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;
	__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

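/*
 * Typical lifecycle (an illustrative sketch, not part of the original file;
 * error handling omitted), built entirely from functions defined below:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add(evlist, evsel);		// one evsel or more
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__delete(evlist);			// closes, purges, frees
 */
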
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is noticed here, so there's no need to check on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

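/*
 * Illustrative iteration idiom (sketch): the perf_evlist__for_each_evsel()
 * macro in <perf/evlist.h> expands to roughly this loop.
 *
 *	struct perf_evsel *evsel = NULL;
 *
 *	while ((evsel = perf_evlist__next(evlist, evsel)) != NULL)
 *		use(evsel);
 */
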
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or the other of the maps isn't
	 * being changed, i.e. don't put it. Note we are assuming the maps
	 * that are being applied are brand new and evlist is taking
	 * ownership of the original reference count of 1. If that is not
	 * the case it is up to the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	if (!evlist->all_cpus && cpus)
		evlist->all_cpus = perf_cpu_map__get(cpus);

	perf_evlist__propagate_maps(evlist);
}

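/*
 * Illustrative caller pattern (a sketch, assuming the usual libperf map
 * constructors): hand freshly created maps to the evlist, which takes over
 * their initial reference as described above.
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);	// all online CPUs
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *	perf_thread_map__set_pid(threads, 0, getpid());
 *	perf_evlist__set_maps(evlist, cpus, threads);
 */
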
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

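	/*
	 * Expected read() layout for a non-group counter (an illustrative
	 * note, following the perf_event_open(2) read format):
	 *
	 *	u64 value;
	 *	u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
	 *	u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
	 *	u64 id;			// if PERF_FORMAT_ID
	 *
	 * hence id_idx starts at 1 and is bumped once per time field below.
	 */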
	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

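/*
 * Worked example (illustrative): with 4 CPUs and 3 threads, one system-wide
 * evsel contributes 4 descriptors and one regular evsel contributes
 * 4 * 3 = 12, so the loop above asks for 16 pollfd slots in total.
 */
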
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist,
						 bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = perf_thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, int cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite)
{
	int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Redirect this evsel's output into the shared ring. */
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		if (!evsel->system_wide &&
		    perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, thread, false);

		if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
				   &output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int cpu, thread;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, cpu, true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
	if (perf_cpu_map__empty(evlist->cpus))
		nr_mmaps = perf_thread_map__nr(evlist->threads);

	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	struct perf_evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->cpus;
	const struct perf_thread_map *threads = evlist->threads;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

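/*
 * Worked sizing example (illustrative): with pages = 4 and a 4096-byte page,
 * mmap_len = (4 + 1) * 4096 = 20480 bytes, i.e. one control page plus a
 * 16KiB data area, and perf_evlist__mmap_ops() above derives the wrap mask
 * mp->mask = 20480 - 4096 - 1 = 0x3fff for that data area.
 */
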
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
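
/*
 * Illustrative read loop (a sketch, assuming the public libperf mmap API in
 * <perf/mmap.h>): the perf_evlist__for_each_mmap() macro in <perf/evlist.h>
 * expands to roughly the outer while loop below.
 *
 *	struct perf_mmap *map = NULL;
 *	union perf_event *event;
 *
 *	while ((map = perf_evlist__next_mmap(evlist, map, false)) != NULL) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			// handle event->header.type here
 *			perf_mmap__consume(map);
 *		}
 *		perf_mmap__read_done(map);
 *	}
 */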