Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

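/*
 * Length of the whole mapping: the data ring itself (mask + 1 bytes, a
 * power of two) plus the leading control page (struct perf_event_mmap_page).
 * Event data therefore starts at map->base + page_size.
 */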
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
        return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
                                         u64 start, u64 end, u64 *prev)
{
        unsigned char *data = map->base + page_size;
        union perf_event *event = NULL;
        int diff = end - start;

        if (check_messup) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the 'end', we got messed up.
                 *
                 * In either case, truncate and restart at 'end'.
                 */
                if (diff > map->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * 'end' points to a known good entry, start there.
                         */
                        start = end;
                        diff = 0;
                }
        }

        if (diff >= (int)sizeof(event->header)) {
                size_t size;

                event = (union perf_event *)&data[start & map->mask];
                size = event->header.size;

                if (size < sizeof(event->header) || diff < (int)size) {
                        event = NULL;
                        goto broken_event;
                }

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
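                /*
                 * Example (illustrative numbers): with a 64KiB ring
                 * (mask + 1 == 0x10000), an event at (start & mask) == 0xfff0
                 * with size 0x28 crosses the end of the buffer. The copy loop
                 * below first moves the 0x10 bytes up to the buffer end, then
                 * the remaining 0x18 bytes from offset 0, so the record ends
                 * up contiguous in map->event_copy.
                 */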
                if ((start & map->mask) + size != ((start + size) & map->mask)) {
                        unsigned int offset = start;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = map->event_copy;

                        do {
                                cpy = min(map->mask + 1 - (offset & map->mask), len);
                                memcpy(dst, &data[offset & map->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = (union perf_event *)map->event_copy;
                }

                start += size;
        }

broken_event:
        if (prev)
                *prev = start;

        return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
{
        u64 head;
        u64 old = map->prev;

        /*
         * Check if event was unmapped due to a POLLHUP/POLLERR.
         */
        if (!refcount_read(&map->refcnt))
                return NULL;

        head = perf_mmap__read_head(map);

        return perf_mmap__read(map, check_messup, old, head, &map->prev);
}
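
/*
 * Typical consumer loop (illustrative sketch only; real tools normally go
 * through the evlist-level wrappers rather than calling this directly, and
 * 'overwrite' mirrors the evlist's overwrite setting):
 *
 *      union perf_event *event;
 *
 *      while ((event = perf_mmap__read_forward(map, overwrite)) != NULL) {
 *              deliver(event);                     -- hypothetical handler
 *              perf_mmap__consume(map, overwrite);
 *      }
 */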

union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
{
        u64 head, end;
        u64 start = map->prev;

        /*
         * Check if event was unmapped due to a POLLHUP/POLLERR.
         */
        if (!refcount_read(&map->refcnt))
                return NULL;

        head = perf_mmap__read_head(map);
        if (!head)
                return NULL;

        /*
         * The 'head' pointer starts at 0 and the kernel subtracts the size
         * of each record from it on every write, so 'head' is effectively
         * negative. 'end' is built manually by adding the ring-buffer size
         * to 'head', meaning the valid data we can read spans the whole
         * ring buffer. If 'end' would be positive, the ring buffer has not
         * been completely filled yet, so we must clamp 'end' to 0.
         *
         * However, since both 'head' and 'end' are unsigned, we can't
         * simply compare 'end' against 0. Instead we compare '-head' with
         * the size of the ring buffer, where '-head' is the number of
         * bytes the kernel has written to the ring buffer.
         */
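        /*
         * Example (illustrative numbers): with a 64KiB ring
         * (mask + 1 == 0x10000), after the kernel has written 0x3000 bytes
         * head == (u64)-0x3000, so -head == 0x3000 < 0x10000 and 'end' is
         * set to 0, i.e. read everything back to the start of the buffer.
         * Once a full 64KiB or more has been written, end = head + 0x10000
         * limits the read to the one ring's worth of data that is still
         * valid.
         */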
        if (-head < (u64)(map->mask + 1))
                end = 0;
        else
                end = head + map->mask + 1;

        return perf_mmap__read(map, false, start, end, &map->prev);
}

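/*
 * Snap the read position (map->prev) to the current head so the next read
 * starts from the most recently written data; typically used with
 * overwrite/backward rings before draining them.
 */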
void perf_mmap__read_catchup(struct perf_mmap *map)
{
        u64 head;

        if (!refcount_read(&map->refcnt))
                return;

        head = perf_mmap__read_head(map);
        map->prev = head;
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
        return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
        refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
        BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

        if (refcount_dec_and_test(&map->refcnt))
                perf_mmap__munmap(map);
}

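/*
 * Mark everything read so far as consumed. In non-overwrite mode this
 * publishes map->prev as the new tail so the kernel may reuse that space;
 * in overwrite mode the tail is never written. Also drops the final
 * reference once the map has been emptied after a POLLHUP/POLLERR.
 */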
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
        if (!overwrite) {
                u64 old = map->prev;

                perf_mmap__write_tail(map, old);
        }

        if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
                perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
                               struct auxtrace_mmap_params *mp __maybe_unused,
                               void *userpg __maybe_unused,
                               int fd __maybe_unused)
{
        return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
                                       off_t auxtrace_offset __maybe_unused,
                                       unsigned int auxtrace_pages __maybe_unused,
                                       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
                                          struct perf_evlist *evlist __maybe_unused,
                                          int idx __maybe_unused,
                                          bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
        if (map->base != NULL) {
                munmap(map->base, perf_mmap__mmap_len(map));
                map->base = NULL;
                map->fd = -1;
                refcount_set(&map->refcnt, 0);
        }
        auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
        /*
         * The last one will be done at perf_evlist__mmap_consume(), so that we
         * make sure we don't prevent tools from consuming every last event in
         * the ring buffer.
         *
         * I.e. we can get the POLLHUP meaning that the fd doesn't exist
         * anymore, but the last events for it are still in the ring buffer,
         * waiting to be consumed.
         *
         * Tools can choose to ignore this at their own discretion, but the
         * evlist layer can't just drop it when filtering events in
         * perf_evlist__filter_pollfd().
         */
        refcount_set(&map->refcnt, 2);
        map->prev = 0;
        map->mask = mp->mask;
        map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
                         MAP_SHARED, fd, 0);
        if (map->base == MAP_FAILED) {
                pr_debug2("failed to mmap perf event ring buffer, error %d\n",
                          errno);
                map->base = NULL;
                return -1;
        }
        map->fd = fd;

        if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
                                &mp->auxtrace_mp, map->base, fd))
                return -1;

        return 0;
}

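/*
 * Walk the record headers of an overwrite (backward) ring forward from
 * 'head' until the walk wraps a full buffer or hits a zero-sized header;
 * the data between 'start' (== head) and the resulting 'end' is what can
 * still be read.
 */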
static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
        struct perf_event_header *pheader;
        u64 evt_head = head;
        int size = mask + 1;

        pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
        pheader = (struct perf_event_header *)(buf + (head & mask));
        *start = head;
        while (true) {
                if (evt_head - head >= (unsigned int)size) {
                        pr_debug("Finished reading backward ring buffer: rewind\n");
                        if (evt_head - head > (unsigned int)size)
                                evt_head -= pheader->size;
                        *end = evt_head;
                        return 0;
                }

                pheader = (struct perf_event_header *)(buf + (evt_head & mask));

                if (pheader->size == 0) {
                        pr_debug("Finished reading backward ring buffer: get start\n");
                        *end = evt_head;
                        return 0;
                }

                evt_head += pheader->size;
                pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
        }
        WARN_ONCE(1, "Shouldn't get here\n");
        return -1;
}

static int rb_find_range(void *data, int mask, u64 head, u64 old,
                         u64 *start, u64 *end, bool backward)
{
        if (!backward) {
                *start = old;
                *end = head;
                return 0;
        }

        return backward_rb_find_range(data, mask, head, start, end);
}

int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
                    void *to, int push(void *to, void *buf, size_t size))
{
        u64 head = perf_mmap__read_head(md);
        u64 old = md->prev;
        u64 end = head, start = old;
        unsigned char *data = md->base + page_size;
        unsigned long size;
        void *buf;
        int rc = 0;

        if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
                return -1;

        if (start == end)
                return 0;

        size = end - start;
        if (size > (unsigned long)(md->mask) + 1) {
                WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

                md->prev = head;
                perf_mmap__consume(md, overwrite || backward);
                return 0;
        }

        if ((start & md->mask) + size != (end & md->mask)) {
                buf = &data[start & md->mask];
                size = md->mask + 1 - (start & md->mask);
                start += size;

                if (push(to, buf, size) < 0) {
                        rc = -1;
                        goto out;
                }
        }

        buf = &data[start & md->mask];
        size = end - start;
        start += size;

        if (push(to, buf, size) < 0) {
                rc = -1;
                goto out;
        }

        md->prev = head;
        perf_mmap__consume(md, overwrite || backward);
out:
        return rc;
}
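
/*
 * Illustrative caller sketch for perf_mmap__push() (names here are
 * hypothetical; perf record drives this with its own callback that appends
 * the pushed bytes to the perf.data output):
 *
 *      static int write_chunk(void *to, void *buf, size_t size)
 *      {
 *              struct my_output *out = to;
 *
 *              return my_output_write(out, buf, size) < 0 ? -1 : 0;
 *      }
 *
 *      ...
 *      if (perf_mmap__push(map, false, false, &out, write_chunk) < 0)
 *              ...handle the error...
 */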