// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include <internal/lib.h>
#include "util.h"

#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
	PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
	PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
	PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
	PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
	PyUnicode_FromString(arg)
#define _PyUnicode_FromFormat(...) \
	PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
	PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Avoid bringing in event parsing.
 */
int parse_event(struct evlist *evlist __maybe_unused, const char *str __maybe_unused)
{
	return 0;
}

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
{
	return 0;
}

/*
 * Stub these so as not to drag in util/env.c.
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
	return NULL;
}

// This one is a bit easier and wouldn't drag in too much, but leave it as a stub since we need it here.
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
	return NULL;
}

/*
 * These are needed so as not to drag in the PMU bandwagon (jevents generated
 * pmu_sys_event_tables, etc). evsel__find_pmu() is used so far just for
 * per-PMU perf_event_attr.exclude_guest handling, which is not really needed
 * for the known perf python binding use cases; revisit if this becomes
 * necessary.
 */
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
{
	return NULL;
}

int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
	return EOF;
}

int perf_pmus__num_core_pmus(void)
{
	return 1;
}

bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
{
	return false;
}

/*
 * Stub this here so as not to drag in util/metricgroup.c.
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	return 0;
}

/*
 * Stub this here so as not to drag in util/trace-event-info.c.
 */
char *tracepoint_id_to_name(u64 config)
{
	return NULL;
}

/*
 * XXX: All these evsel destructors need a better mechanism, like a linked
 * list of destructors registered when the relevant code is actually used,
 * instead of adding more and more calls to perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag in the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
	return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

// Stubs so as not to drag in util/bpf-filter.c.
#ifdef HAVE_BPF_SKEL
int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
{
	return 0;
}

int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
{
	return 0;
}
#endif

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}

	return ret;
}
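
/*
 * As in util/debug.c, eprintf() only prints when the verbosity passed in
 * 'var' (normally the 'verbose' global above) is at least 'level', so the
 * pr_debug()-style call sites in the objects linked into the binding keep
 * working.
 */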

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }
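
/*
 * Both helpers above build a PyMemberDef entry whose offset is computed by
 * adding the offset of the embedded C object inside struct pyrf_event
 * ('event' or 'sample') to the offset of the requested member within it, so
 * e.g. member_def(perf_record_mmap, pid, T_UINT, "event pid") exposes
 * event.mmap.pid as the 'pid' attribute of the Python event object.
 */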

struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;
	struct perf_sample sample;
	union perf_event event;
};

#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
	sample_member_def(sample_pid, pid, T_INT, "event pid"), \
	sample_member_def(sample_tid, tid, T_INT, "event tid"), \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
			 "filename: %s }",
		     pevent->event.mmap.pid, pevent->event.mmap.tid,
		     pevent->event.mmap.start, pevent->event.mmap.len,
		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.mmap_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_mmap_event__doc,
	.tp_members = pyrf_mmap_event__members,
	.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};


static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				     "ptid: %u, time: %" PRI_lu64 "}",
				     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				     pevent->event.fork.pid,
				     pevent->event.fork.ppid,
				     pevent->event.fork.tid,
				     pevent->event.fork.ptid,
				     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.task_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_task_event__doc,
	.tp_members = pyrf_task_event__members,
	.tp_repr = (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				     pevent->event.comm.pid,
				     pevent->event.comm.tid,
				     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.comm_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_comm_event__doc,
	.tp_members = pyrf_comm_event__members,
	.tp_repr = (reprfunc)pyrf_comm_event__repr,
};


static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);

	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				     ", stream_id: %" PRI_lu64 " }",
				     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.throttle_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_throttle_event__doc,
	.tp_members = pyrf_throttle_event__members,
	.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
			 "lost: %#" PRI_lx64 " }",
		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.lost_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_lost_event__doc,
	.tp_members = pyrf_lost_event__members,
	.tp_repr = (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				     pevent->event.read.pid,
				     pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.read_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_read_event__doc,
	.tp_members = pyrf_read_event__members,
	.tp_repr = (reprfunc)pyrf_read_event__repr,
};


static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}


#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			val = tep_read_number(pevent, data + offset, len);
			offset = val;
			len = offset >> 16;
			offset &= 0xffff;
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.sample_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_sample_event__doc,
	.tp_members = pyrf_sample_event__members,
	.tp_repr = (reprfunc)pyrf_sample_event__repr,
	.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};


static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
		     pevent->event.context_switch.next_prev_pid,
		     pevent->event.context_switch.next_prev_tid,
		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.context_switch_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_context_switch_event__doc,
	.tp_members = pyrf_context_switch_event__members,
	.tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};


static int pyrf_event__setup_types(void)
{
	int err;
	pyrf_mmap_event__type.tp_new =
	pyrf_task_event__type.tp_new =
	pyrf_comm_event__type.tp_new =
	pyrf_lost_event__type.tp_new =
	pyrf_read_event__type.tp_new =
	pyrf_sample_event__type.tp_new =
	pyrf_context_switch_event__type.tp_new =
	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
	err = PyType_Ready(&pyrf_mmap_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_lost_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_task_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_comm_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_throttle_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_read_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_sample_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_context_switch_event__type);
	if (err < 0)
		goto out;
out:
	return err;
}

static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};

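/*
 * Map a raw PERF_RECORD_* type to one of the Python event types defined
 * above. pyrf_event__new() below only accepts the types present in this
 * table: MMAP..SAMPLE plus the two context switch records; anything else
 * makes it return NULL.
 */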
static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if ((event->header.type < PERF_RECORD_MMAP ||
	     event->header.type > PERF_RECORD_SAMPLE) &&
	    !(event->header.type == PERF_RECORD_SWITCH ||
	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}

struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item   = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.cpu_map",
	.tp_basicsize = sizeof(struct pyrf_cpu_map),
	.tp_dealloc = (destructor)pyrf_cpu_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_cpu_map__doc,
	.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
	.tp_init = (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}

struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item   = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.thread_map",
	.tp_basicsize = sizeof(struct pyrf_thread_map),
	.tp_dealloc = (destructor)pyrf_thread_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_thread_map__doc,
	.tp_as_sequence = &pyrf_thread_map__sequence_methods,
	.tp_init = (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}

struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};

static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		NULL
	};
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/* union... */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled = disabled;
	attr.inherit = inherit;
	attr.pinned = pinned;
	attr.exclusive = exclusive;
	attr.exclude_user = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv = exclude_hv;
	attr.exclude_idle = exclude_idle;
	attr.mmap = mmap;
	attr.context_switch = context_switch;
	attr.comm = comm;
	attr.freq = freq;
	attr.inherit_stat = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task = task;
	attr.watermark = watermark;
	attr.precise_ip = precise_ip;
	attr.mmap_data = mmap_data;
	attr.sample_id_all = sample_id_all;
	attr.size = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}
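
/*
 * Note: with no keyword arguments an evsel defaults to the hardware cycles
 * event (PERF_COUNT_HW_CPU_CYCLES) sampling PERIOD and TID, per the
 * perf_event_attr initializer at the top of pyrf_evsel__init().
 */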

static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel; to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");

static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evsel",
	.tp_basicsize = sizeof(struct pyrf_evsel),
	.tp_dealloc = (destructor)pyrf_evsel__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_evsel__doc,
	.tp_methods = pyrf_evsel__methods,
	.tp_init = (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}

struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}
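
/*
 * Note: the 'overwrite' keyword is parsed above but currently unused; only
 * 'pages' is passed on to evlist__mmap().
 */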

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	return PyErr_NoMemory();
}



static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu.cpu == cpu)
			return md;
	}

	return NULL;
}
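
/*
 * read_on_cpu() below walks the per-cpu ring buffers that evlist.mmap() set
 * up (found via get_md() above), so the evlist must be mmap'ed before events
 * can be read this way.
 */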

static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return NULL;

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we parsed it out. */
		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name  = "mmap",
		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name  = "poll",
		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name  = "get_pollfd",
		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name  = "add",
		.ml_meth  = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name  = "read_on_cpu",
		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
	struct pyrf_evlist *pevlist = (void *)obj;

	return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries)
		return NULL;

	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item   = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evlist",
	.tp_basicsize = sizeof(struct pyrf_evlist),
	.tp_dealloc = (destructor)pyrf_evlist__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence = &pyrf_evlist__sequence_methods,
	.tp_doc = pyrf_evlist__doc,
	.tp_methods = pyrf_evlist__methods,
	.tp_init = (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}
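
/*
 * A rough usage sketch of the resulting 'perf' module (see
 * tools/perf/python/twatch.py in the tree for a complete example); the
 * event selection below is only illustrative:
 *
 *	import perf
 *
 *	cpus = perf.cpu_map()
 *	threads = perf.thread_map()
 *	evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *			   config = perf.COUNT_SW_DUMMY,
 *			   task = 1, comm = 1,
 *			   wakeup_events = 1, watermark = 1,
 *			   sample_id_all = 1,
 *			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *	evsel.open(cpus = cpus, threads = threads)
 *
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if not event:
 *				continue
 *			print(event)
 */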

#define PERF_CONST(name) { #name, PERF_##name }

static struct {
	const char *name;
	int value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};

static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
	return NULL;
#else
	struct tep_event *tp_format;
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys = NULL;
	char *name = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	if (IS_ERR(tp_format))
		return _PyLong_FromLong(-1);

	return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}
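
/*
 * perf.tracepoint(sys, name) resolves a tracepoint to its event id, which can
 * then be used as the 'config' of an evsel created with
 * type = perf.TYPE_TRACEPOINT; it returns -1 when the tracepoint cannot be
 * resolved, and simply fails when libtraceevent support is not built in.
 */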

static PyMethodDef perf__methods[] = {
	{
		.ml_name  = "tracepoint",
		.ml_meth  = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc   = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
	PyObject *obj;
	int i;
	PyObject *dict;
#if PY_MAJOR_VERSION < 3
	PyObject *module = Py_InitModule("perf", perf__methods);
#else
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"perf",			/* m_name */
		"",			/* m_doc */
		-1,			/* m_size */
		perf__methods,		/* m_methods */
		NULL,			/* m_reload */
		NULL,			/* m_traverse */
		NULL,			/* m_clear */
		NULL,			/* m_free */
	};
	PyObject *module = PyModule_Create(&moduledef);
#endif

	if (module == NULL ||
	    pyrf_event__setup_types() < 0 ||
	    pyrf_evlist__setup_types() < 0 ||
	    pyrf_evsel__setup_types() < 0 ||
	    pyrf_thread_map__setup_types() < 0 ||
	    pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
		return;
#else
		return module;
#endif

	/* The page_size is placed in the util object. */
	page_size = sysconf(_SC_PAGE_SIZE);

	Py_INCREF(&pyrf_evlist__type);
	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

	Py_INCREF(&pyrf_evsel__type);
	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

	Py_INCREF(&pyrf_mmap_event__type);
	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

	Py_INCREF(&pyrf_lost_event__type);
	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

	Py_INCREF(&pyrf_comm_event__type);
	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_throttle_event__type);
	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_read_event__type);
	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

	Py_INCREF(&pyrf_sample_event__type);
	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

	Py_INCREF(&pyrf_context_switch_event__type);
	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

	Py_INCREF(&pyrf_thread_map__type);
	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

	Py_INCREF(&pyrf_cpu_map__type);
	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

	dict = PyModule_GetDict(module);
	if (dict == NULL)
		goto error;

	for (i = 0; perf__constants[i].name != NULL; i++) {
		obj = _PyLong_FromLong(perf__constants[i].value);
		if (obj == NULL)
			goto error;
		PyDict_SetItemString(dict, perf__constants[i].name, obj);
		Py_DECREF(obj);
	}

error:
	if (PyErr_Occurred())
		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
	return module;
#endif
}

/*
 * Dummy, to avoid dragging all the test_attr infrastructure into the python
 * binding.
 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		     int fd, int group_fd, unsigned long flags)
{
}

void evlist__free_stats(struct evlist *evlist)
{
}