Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.0-rc4 (110 lines, 2.8 kB)
1#include "evsel.h" 2#include "tests.h" 3#include "thread_map.h" 4#include "cpumap.h" 5#include "debug.h" 6 7int test__open_syscall_event_on_all_cpus(void) 8{ 9 int err = -1, fd, cpu; 10 struct cpu_map *cpus; 11 struct perf_evsel *evsel; 12 unsigned int nr_open_calls = 111, i; 13 cpu_set_t cpu_set; 14 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 15 char sbuf[STRERR_BUFSIZE]; 16 17 if (threads == NULL) { 18 pr_debug("thread_map__new\n"); 19 return -1; 20 } 21 22 cpus = cpu_map__new(NULL); 23 if (cpus == NULL) { 24 pr_debug("cpu_map__new\n"); 25 goto out_thread_map_delete; 26 } 27 28 CPU_ZERO(&cpu_set); 29 30 evsel = perf_evsel__newtp("syscalls", "sys_enter_open"); 31 if (evsel == NULL) { 32 pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); 33 goto out_thread_map_delete; 34 } 35 36 if (perf_evsel__open(evsel, cpus, threads) < 0) { 37 pr_debug("failed to open counter: %s, " 38 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 39 strerror_r(errno, sbuf, sizeof(sbuf))); 40 goto out_evsel_delete; 41 } 42 43 for (cpu = 0; cpu < cpus->nr; ++cpu) { 44 unsigned int ncalls = nr_open_calls + cpu; 45 /* 46 * XXX eventually lift this restriction in a way that 47 * keeps perf building on older glibc installations 48 * without CPU_ALLOC. 1024 cpus in 2010 still seems 49 * a reasonable upper limit tho :-) 50 */ 51 if (cpus->map[cpu] >= CPU_SETSIZE) { 52 pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); 53 continue; 54 } 55 56 CPU_SET(cpus->map[cpu], &cpu_set); 57 if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { 58 pr_debug("sched_setaffinity() failed on CPU %d: %s ", 59 cpus->map[cpu], 60 strerror_r(errno, sbuf, sizeof(sbuf))); 61 goto out_close_fd; 62 } 63 for (i = 0; i < ncalls; ++i) { 64 fd = open("/etc/passwd", O_RDONLY); 65 close(fd); 66 } 67 CPU_CLR(cpus->map[cpu], &cpu_set); 68 } 69 70 /* 71 * Here we need to explicitely preallocate the counts, as if 72 * we use the auto allocation it will allocate just for 1 cpu, 73 * as we start by cpu 0. 74 */ 75 if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { 76 pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); 77 goto out_close_fd; 78 } 79 80 err = 0; 81 82 for (cpu = 0; cpu < cpus->nr; ++cpu) { 83 unsigned int expected; 84 85 if (cpus->map[cpu] >= CPU_SETSIZE) 86 continue; 87 88 if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { 89 pr_debug("perf_evsel__read_on_cpu\n"); 90 err = -1; 91 break; 92 } 93 94 expected = nr_open_calls + cpu; 95 if (evsel->counts->cpu[cpu].val != expected) { 96 pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", 97 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); 98 err = -1; 99 } 100 } 101 102 perf_evsel__free_counts(evsel); 103out_close_fd: 104 perf_evsel__close_fd(evsel, 1, threads->nr); 105out_evsel_delete: 106 perf_evsel__delete(evsel); 107out_thread_map_delete: 108 thread_map__delete(threads); 109 return err; 110}