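/*
 * Check that the object code read for sampled addresses via perf's DSO
 * reading matches the object code read via objdump for the same addresses.
 */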
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
#include "event.h"
#include "thread.h"

#include "tests.h"

#include "sane_ctype.h"

#define BUFSZ	1024
#define READLEN	128

struct state {
	u64 done[1024];
	size_t done_cnt;
};

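/* Convert a hex digit to its value; callers guarantee isxdigit(c) */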
static unsigned int hex(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return c - 'A' + 10;
}

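/*
 * Decode one whitespace-delimited chunk of hex byte pairs from an objdump
 * line into *buf, advancing *line, *buf and *buf_len as bytes are stored.
 * Returns the number of bytes decoded.
 */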
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	size_t bytes_read = 0;
	unsigned char *chunk_start = *buf;

	/* Read bytes */
	while (*buf_len > 0) {
		char c1, c2;

		/* Get 2 hex digits */
		c1 = *(*line)++;
		if (!isxdigit(c1))
			break;
		c2 = *(*line)++;
		if (!isxdigit(c2))
			break;

		/* Store byte and advance buf */
		**buf = (hex(c1) << 4) | hex(c2);
		(*buf)++;
		(*buf_len)--;
		bytes_read++;

		/* End of chunk? */
		if (isspace(**line))
			break;
	}

	/*
	 * objdump will display the raw insn as LE if the code endianness
	 * is LE and bytes_per_chunk > 1. In that case, reverse the chunk
	 * we just read.
	 *
	 * See disassemble_bytes() in binutils/objdump.c for details on
	 * how objdump chooses the display endianness.
	 */
	if (bytes_read > 1 && !bigendian()) {
		unsigned char *chunk_end = chunk_start + bytes_read - 1;
		unsigned char tmp;

		while (chunk_start < chunk_end) {
			tmp = *chunk_start;
			*chunk_start = *chunk_end;
			*chunk_end = tmp;
			chunk_start++;
			chunk_end--;
		}
	}

	return bytes_read;
}

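/*
 * Decode the hex bytes from a single line of objdump disassembly output
 * into buf. Returns the number of bytes decoded.
 */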
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	const char *p;
	size_t ret, bytes_read = 0;

	/* Skip to a colon */
	p = strchr(line, ':');
	if (!p)
		return 0;
	p++;

	/* Skip initial spaces */
	while (*p) {
		if (!isspace(*p))
			break;
		p++;
	}

	do {
		ret = read_objdump_chunk(&p, &buf, &buf_len);
		bytes_read += ret;
		p++;
	} while (ret > 0);

	/* Return the number of successfully read bytes */
	return bytes_read;
}

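/*
 * Read objdump's disassembly from f, copying the decoded bytes into buf at
 * the offset implied by each line's address. On return, *len holds the
 * number of bytes that could not be read.
 */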
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	size_t line_len = 0, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* Read objdump data into temporary buffer */
		read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/*
		 * Copy from the temporary buffer to 'buf' according to
		 * the address on the current objdump line.
		 */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* *len returns the number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}

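/*
 * Run objdump over [addr, addr + len) of filename and capture the raw
 * instruction bytes into buf. Returns 0 on success, the number of missing
 * bytes if objdump produced too few, or -1 on error.
 */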
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
		       filename);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		pr_debug("objdump read too few bytes: %zu\n", len);
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}

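/* Hex-dump buf to the debug log, 16 bytes per line */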
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		pr_debug("0x%02x ", buf[i]);
		if (i % 16 == 15)
			pr_debug("\n");
	}
	pr_debug("\n");
}

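/*
 * Resolve addr to a map and DSO, read up to len bytes of object code both
 * through perf's DSO data cache and through objdump, and check that the two
 * reads are identical.
 */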
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread, struct state *state)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ];
	unsigned char buf2[BUFSZ];
	size_t ret_len;
	u64 objdump_addr;
	int ret;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al);
	if (!al.map || !al.map->dso) {
		pr_debug("thread__find_addr_map failed\n");
		return -1;
	}

	pr_debug("File is: %s\n", al.map->dso->long_name);

	if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(al.map->dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		return 0;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > al.map->end)
		len = al.map->end - addr;

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		return -1;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that. See map__rip_2objdump() for details.
	 */
	if (map__load(al.map))
		return -1;

	/* objdump struggles with kcore - try each map only once */
	if (dso__is_kcore(al.map->dso)) {
		size_t d;

		for (d = 0; d < state->done_cnt; d++) {
			if (state->done[d] == al.map->start) {
				pr_debug("kcore map tested already");
				pr_debug(" - skipping\n");
				return 0;
			}
		}
		if (state->done_cnt >= ARRAY_SIZE(state->done)) {
			pr_debug("Too many kcore maps - skipping\n");
			return 0;
		}
		state->done[state->done_cnt++] = al.map->start;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(al.map->dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
				return 0;
			} else {
				return -1;
			}
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		return -1;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		return -1;
	}
	pr_debug("Bytes read match those read by objdump\n");

	return 0;
}

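/*
 * Parse a sample event, resolve its thread, and verify the object code at
 * the sampled IP.
 */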
static int process_sample_event(struct machine *machine,
				struct perf_evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	int ret;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
	thread__put(thread);
	return ret;
}

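/*
 * Dispatch one event: samples are checked, throttle events are ignored, and
 * everything else updates the machine state.
 */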
static int process_event(struct machine *machine, struct perf_evlist *evlist,
			 union perf_event *event, struct state *state)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(machine, evlist, event, state);

	if (event->header.type == PERF_RECORD_THROTTLE ||
	    event->header.type == PERF_RECORD_UNTHROTTLE)
		return 0;

	if (event->header.type < PERF_RECORD_MAX) {
		int ret;

		ret = machine__process_event(machine, event, NULL);
		if (ret < 0)
			pr_debug("machine__process_event failed, event type %u\n",
				 event->header.type);
		return ret;
	}

	return 0;
}

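/* Drain every mmap'd ring buffer, processing each event in turn */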
static int process_events(struct machine *machine, struct perf_evlist *evlist,
			  struct state *state)
{
	union perf_event *event;
	int i, ret;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			ret = process_event(machine, evlist, event, state);
			perf_evlist__mmap_consume(evlist, i);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int comp(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

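/* Burn user-space CPU cycles by sorting a reversed array */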
static void do_sort_something(void)
{
	int buf[40960], i;

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
		buf[i] = ARRAY_SIZE(buf) - i - 1;

	qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
		if (buf[i] != i) {
			pr_debug("qsort failed\n");
			break;
		}
	}
}

static void sort_something(void)
{
	int i;

	for (i = 0; i < 10; i++)
		do_sort_something();
}

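/* Generate kernel activity via repeated pipe(2) system calls */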
static void syscall_something(void)
{
	int pipefd[2];
	int i;

	for (i = 0; i < 1000; i++) {
		if (pipe(pipefd) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(pipefd[1]);
		close(pipefd[0]);
	}
}

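/* Generate filesystem activity by repeatedly creating and removing a file */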
static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	FILE *f;
	int i;

	for (i = 0; i < 1000; i++) {
		f = fopen(test_file_name, "w+");
		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}

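/*
 * The workload: a mix of filesystem, user-space and syscall activity so
 * that samples land in a variety of maps.
 */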
static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}

enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};

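/*
 * Record the workload with cycles events, then check that the object code
 * for each sampled address matches what objdump reads from the same file.
 * If try_kcore is set, force kallsyms so that the kernel is read via kcore.
 */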
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages    = UINT_MAX,
		.user_freq     = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq          = 500,
		.target        = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* The second time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine, false, 500);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts, NULL);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by evlist
				 * and will be freed by the following
				 * perf_evlist__set_maps() call. Get a reference
				 * to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose > 0) {
				char errbuf[512];
				perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}

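/*
 * Test entry point: run once with the default kernel object (vmlinux if
 * available), then again forcing kcore. Each of the "no ..." outcomes
 * returns 0 after logging the reason.
 */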
int test__code_reading(int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}