/* tools/lib/perf/evsel.c — Linux v5.5-rc1 */
1// SPDX-License-Identifier: GPL-2.0 2#include <errno.h> 3#include <unistd.h> 4#include <sys/syscall.h> 5#include <perf/evsel.h> 6#include <perf/cpumap.h> 7#include <perf/threadmap.h> 8#include <linux/list.h> 9#include <internal/evsel.h> 10#include <linux/zalloc.h> 11#include <stdlib.h> 12#include <internal/xyarray.h> 13#include <internal/cpumap.h> 14#include <internal/threadmap.h> 15#include <internal/lib.h> 16#include <linux/string.h> 17#include <sys/ioctl.h> 18 19void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr) 20{ 21 INIT_LIST_HEAD(&evsel->node); 22 evsel->attr = *attr; 23} 24 25struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) 26{ 27 struct perf_evsel *evsel = zalloc(sizeof(*evsel)); 28 29 if (evsel != NULL) 30 perf_evsel__init(evsel, attr); 31 32 return evsel; 33} 34 35void perf_evsel__delete(struct perf_evsel *evsel) 36{ 37 free(evsel); 38} 39 40#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y)) 41 42int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) 43{ 44 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); 45 46 if (evsel->fd) { 47 int cpu, thread; 48 for (cpu = 0; cpu < ncpus; cpu++) { 49 for (thread = 0; thread < nthreads; thread++) { 50 FD(evsel, cpu, thread) = -1; 51 } 52 } 53 } 54 55 return evsel->fd != NULL ? 
0 : -ENOMEM; 56} 57 58static int 59sys_perf_event_open(struct perf_event_attr *attr, 60 pid_t pid, int cpu, int group_fd, 61 unsigned long flags) 62{ 63 return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); 64} 65 66int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, 67 struct perf_thread_map *threads) 68{ 69 int cpu, thread, err = 0; 70 71 if (cpus == NULL) { 72 static struct perf_cpu_map *empty_cpu_map; 73 74 if (empty_cpu_map == NULL) { 75 empty_cpu_map = perf_cpu_map__dummy_new(); 76 if (empty_cpu_map == NULL) 77 return -ENOMEM; 78 } 79 80 cpus = empty_cpu_map; 81 } 82 83 if (threads == NULL) { 84 static struct perf_thread_map *empty_thread_map; 85 86 if (empty_thread_map == NULL) { 87 empty_thread_map = perf_thread_map__new_dummy(); 88 if (empty_thread_map == NULL) 89 return -ENOMEM; 90 } 91 92 threads = empty_thread_map; 93 } 94 95 if (evsel->fd == NULL && 96 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) 97 return -ENOMEM; 98 99 for (cpu = 0; cpu < cpus->nr; cpu++) { 100 for (thread = 0; thread < threads->nr; thread++) { 101 int fd; 102 103 fd = sys_perf_event_open(&evsel->attr, 104 threads->map[thread].pid, 105 cpus->map[cpu], -1, 0); 106 107 if (fd < 0) 108 return -errno; 109 110 FD(evsel, cpu, thread) = fd; 111 } 112 } 113 114 return err; 115} 116 117void perf_evsel__close_fd(struct perf_evsel *evsel) 118{ 119 int cpu, thread; 120 121 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) 122 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { 123 if (FD(evsel, cpu, thread) >= 0) 124 close(FD(evsel, cpu, thread)); 125 FD(evsel, cpu, thread) = -1; 126 } 127} 128 129void perf_evsel__free_fd(struct perf_evsel *evsel) 130{ 131 xyarray__delete(evsel->fd); 132 evsel->fd = NULL; 133} 134 135void perf_evsel__close(struct perf_evsel *evsel) 136{ 137 if (evsel->fd == NULL) 138 return; 139 140 perf_evsel__close_fd(evsel); 141 perf_evsel__free_fd(evsel); 142} 143 144int perf_evsel__read_size(struct 
perf_evsel *evsel) 145{ 146 u64 read_format = evsel->attr.read_format; 147 int entry = sizeof(u64); /* value */ 148 int size = 0; 149 int nr = 1; 150 151 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 152 size += sizeof(u64); 153 154 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 155 size += sizeof(u64); 156 157 if (read_format & PERF_FORMAT_ID) 158 entry += sizeof(u64); 159 160 if (read_format & PERF_FORMAT_GROUP) { 161 nr = evsel->nr_members; 162 size += sizeof(u64); 163 } 164 165 size += entry * nr; 166 return size; 167} 168 169int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, 170 struct perf_counts_values *count) 171{ 172 size_t size = perf_evsel__read_size(evsel); 173 174 memset(count, 0, sizeof(*count)); 175 176 if (FD(evsel, cpu, thread) < 0) 177 return -EINVAL; 178 179 if (readn(FD(evsel, cpu, thread), count->values, size) <= 0) 180 return -errno; 181 182 return 0; 183} 184 185static int perf_evsel__run_ioctl(struct perf_evsel *evsel, 186 int ioc, void *arg) 187{ 188 int cpu, thread; 189 190 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { 191 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 192 int fd = FD(evsel, cpu, thread), 193 err = ioctl(fd, ioc, arg); 194 195 if (err) 196 return err; 197 } 198 } 199 200 return 0; 201} 202 203int perf_evsel__enable(struct perf_evsel *evsel) 204{ 205 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0); 206} 207 208int perf_evsel__disable(struct perf_evsel *evsel) 209{ 210 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0); 211} 212 213int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter) 214{ 215 return perf_evsel__run_ioctl(evsel, 216 PERF_EVENT_IOC_SET_FILTER, 217 (void *)filter); 218} 219 220struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) 221{ 222 return evsel->cpus; 223} 224 225struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel) 226{ 227 return evsel->threads; 228} 229 230struct 
perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel) 231{ 232 return &evsel->attr; 233} 234 235int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) 236{ 237 if (ncpus == 0 || nthreads == 0) 238 return 0; 239 240 if (evsel->system_wide) 241 nthreads = 1; 242 243 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); 244 if (evsel->sample_id == NULL) 245 return -ENOMEM; 246 247 evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); 248 if (evsel->id == NULL) { 249 xyarray__delete(evsel->sample_id); 250 evsel->sample_id = NULL; 251 return -ENOMEM; 252 } 253 254 return 0; 255} 256 257void perf_evsel__free_id(struct perf_evsel *evsel) 258{ 259 xyarray__delete(evsel->sample_id); 260 evsel->sample_id = NULL; 261 zfree(&evsel->id); 262 evsel->ids = 0; 263}