1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #include <traceevent/event-parse.h>
9 #include <perf/mmap.h>
10 #include "evlist.h"
11 #include "callchain.h"
12 #include "evsel.h"
13 #include "event.h"
14 #include "print_binary.h"
15 #include "thread_map.h"
16 #include "trace-event.h"
17 #include "mmap.h"
18 #include "stat.h"
19 #include "metricgroup.h"
20 #include "util/env.h"
21 #include <internal/lib.h>
22 #include "util.h"
23
#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
  PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
  PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
  PyUnicode_FromString(arg)
/*
 * Fix: _PyUnicode_AsString() is used below (see get_tracepoint_field()) but
 * was only defined for the Python 2 branch, breaking the build against
 * Python 3. PyUnicode_AsUTF8() is the Py3 equivalent: it returns a pointer
 * to a buffer owned by the unicode object (do not free it).
 */
#define _PyUnicode_AsString(arg) \
  PyUnicode_AsUTF8(arg)
#define _PyUnicode_FromFormat(...) \
  PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif
47
48 /*
49 * Provide these two so that we don't have to link against callchain.c and
50 * start dragging hist.c, etc.
51 */
52 struct callchain_param callchain_param;
53
parse_callchain_record(const char * arg __maybe_unused,struct callchain_param * param __maybe_unused)54 int parse_callchain_record(const char *arg __maybe_unused,
55 struct callchain_param *param __maybe_unused)
56 {
57 return 0;
58 }
59
60 /*
61 * Add these not to drag util/env.c
62 */
63 struct perf_env perf_env;
64
perf_env__cpuid(struct perf_env * env __maybe_unused)65 const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
66 {
67 return NULL;
68 }
69
70 // This one is a bit easier, wouldn't drag too much, but leave it as a stub we need it here
perf_env__arch(struct perf_env * env __maybe_unused)71 const char *perf_env__arch(struct perf_env *env __maybe_unused)
72 {
73 return NULL;
74 }
75
76 /*
77 * Add this one here not to drag util/stat-shadow.c
78 */
perf_stat__collect_metric_expr(struct evlist * evsel_list)79 void perf_stat__collect_metric_expr(struct evlist *evsel_list)
80 {
81 }
82
83 /*
84 * This one is needed not to drag the PMU bandwagon, jevents generated
85 * pmu_sys_event_tables, etc and evsel__find_pmu() is used so far just for
86 * doing per PMU perf_event_attr.exclude_guest handling, not really needed, so
87 * far, for the perf python binding known usecases, revisit if this become
88 * necessary.
89 */
evsel__find_pmu(struct evsel * evsel __maybe_unused)90 struct perf_pmu *evsel__find_pmu(struct evsel *evsel __maybe_unused)
91 {
92 return NULL;
93 }
94
95 /*
96 * Add this one here not to drag util/metricgroup.c
97 */
metricgroup__copy_metric_events(struct evlist * evlist,struct cgroup * cgrp,struct rblist * new_metric_events,struct rblist * old_metric_events)98 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
99 struct rblist *new_metric_events,
100 struct rblist *old_metric_events)
101 {
102 return 0;
103 }
104
105 /*
106 * XXX: All these evsel destructors need some better mechanism, like a linked
107 * list of destructors registered when the relevant code indeed is used instead
108 * of having more and more calls in perf_evsel__delete(). -- acme
109 *
110 * For now, add some more:
111 *
112 * Not to drag the BPF bandwagon...
113 */
114 void bpf_counter__destroy(struct evsel *evsel);
115 int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
116 int bpf_counter__disable(struct evsel *evsel);
117
bpf_counter__destroy(struct evsel * evsel __maybe_unused)118 void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
119 {
120 }
121
bpf_counter__install_pe(struct evsel * evsel __maybe_unused,int cpu __maybe_unused,int fd __maybe_unused)122 int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
123 {
124 return 0;
125 }
126
bpf_counter__disable(struct evsel * evsel __maybe_unused)127 int bpf_counter__disable(struct evsel *evsel __maybe_unused)
128 {
129 return 0;
130 }
131
132 /*
133 * Support debug printing even though util/debug.c is not linked. That means
134 * implementing 'verbose' and 'eprintf'.
135 */
136 int verbose;
137 int debug_peo_args;
138
139 int eprintf(int level, int var, const char *fmt, ...);
140
/*
 * Minimal stand-in for util/debug.c's eprintf(): print to stderr when the
 * verbosity variable 'var' is at least 'level'. Returns the number of
 * characters printed, or 0 when the message is filtered out.
 */
int eprintf(int level, int var, const char *fmt, ...)
{
	va_list ap;
	int printed;

	if (var < level)
		return 0;

	va_start(ap, fmt);
	printed = vfprintf(stderr, fmt, ap);
	va_end(ap);

	return printed;
}
154
/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

/*
 * PyMemberDef entry for a field of the raw perf event record embedded in
 * struct pyrf_event ('event' union member).
 */
#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

/* Same, but for a field of the parsed perf_sample ('sample' member). */
#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }

/* Python object wrapping one record consumed from the ring buffer. */
struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;		/* evsel the record was read from */
	struct perf_sample sample;	/* parsed sample fields */
	union perf_event event;		/* raw copy of the kernel record */
};

/* Attributes shared by every event type, sourced from the parsed sample. */
#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"),			 \
	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
193
static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

/* Attributes exposed on perf.mmap_event instances. */
static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};
208
pyrf_mmap_event__repr(struct pyrf_event * pevent)209 static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
210 {
211 PyObject *ret;
212 char *s;
213
214 if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
215 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
216 "filename: %s }",
217 pevent->event.mmap.pid, pevent->event.mmap.tid,
218 pevent->event.mmap.start, pevent->event.mmap.len,
219 pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
220 ret = PyErr_NoMemory();
221 } else {
222 ret = _PyUnicode_FromString(s);
223 free(s);
224 }
225 return ret;
226 }
227
/* Type object backing perf.mmap_event. */
static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.mmap_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_mmap_event__doc,
	.tp_members	= pyrf_mmap_event__members,
	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

/* Attributes exposed on perf.task_event (fork and exit) instances. */
static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};
250
pyrf_task_event__repr(struct pyrf_event * pevent)251 static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
252 {
253 return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
254 "ptid: %u, time: %" PRI_lu64 "}",
255 pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
256 pevent->event.fork.pid,
257 pevent->event.fork.ppid,
258 pevent->event.fork.tid,
259 pevent->event.fork.ptid,
260 pevent->event.fork.time);
261 }
262
/* Type object backing perf.task_event. */
static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.task_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_task_event__doc,
	.tp_members	= pyrf_task_event__members,
	.tp_repr	= (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

/* Attributes exposed on perf.comm_event instances. */
static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};
283
pyrf_comm_event__repr(struct pyrf_event * pevent)284 static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
285 {
286 return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
287 pevent->event.comm.pid,
288 pevent->event.comm.tid,
289 pevent->event.comm.comm);
290 }
291
/* Type object backing perf.comm_event. */
static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.comm_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_comm_event__doc,
	.tp_members	= pyrf_comm_event__members,
	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

/* Attributes exposed on perf.throttle_event instances. */
static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};
312
pyrf_throttle_event__repr(struct pyrf_event * pevent)313 static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
314 {
315 struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
316
317 return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
318 ", stream_id: %" PRI_lu64 " }",
319 pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
320 te->time, te->id, te->stream_id);
321 }
322
/* Type object backing perf.throttle_event. */
static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.throttle_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_throttle_event__doc,
	.tp_members	= pyrf_throttle_event__members,
	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

/* Attributes exposed on perf.lost_event instances. */
static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};
341
pyrf_lost_event__repr(struct pyrf_event * pevent)342 static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
343 {
344 PyObject *ret;
345 char *s;
346
347 if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
348 "lost: %#" PRI_lx64 " }",
349 pevent->event.lost.id, pevent->event.lost.lost) < 0) {
350 ret = PyErr_NoMemory();
351 } else {
352 ret = _PyUnicode_FromString(s);
353 free(s);
354 }
355 return ret;
356 }
357
/* Type object backing perf.lost_event. */
static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.lost_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_lost_event__doc,
	.tp_members	= pyrf_lost_event__members,
	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

/* Attributes exposed on perf.read_event instances. */
static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};
376
pyrf_read_event__repr(struct pyrf_event * pevent)377 static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
378 {
379 return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
380 pevent->event.read.pid,
381 pevent->event.read.tid);
382 /*
383 * FIXME: return the array of read values,
384 * making this method useful ;-)
385 */
386 }
387
/* Type object backing perf.read_event. */
static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.read_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_read_event__doc,
	.tp_members	= pyrf_read_event__members,
	.tp_repr	= (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

/* Static attributes; tracepoint fields are resolved dynamically in getattro. */
static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};
405
pyrf_sample_event__repr(struct pyrf_event * pevent)406 static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
407 {
408 PyObject *ret;
409 char *s;
410
411 if (asprintf(&s, "{ type: sample }") < 0) {
412 ret = PyErr_NoMemory();
413 } else {
414 ret = _PyUnicode_FromString(s);
415 free(s);
416 }
417 return ret;
418 }
419
/* True when the originating evsel is a tracepoint event. */
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}
424
/*
 * Convert one tracepoint format field from the raw sample payload into a
 * Python object: str/bytearray for arrays, int for scalars.
 */
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len    = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			/*
			 * Dynamic arrays store a 32-bit descriptor at the
			 * static offset: length in the high 16 bits, payload
			 * offset in the low 16 bits.
			 */
			val     = tep_read_number(pevent, data + offset, len);
			offset  = val;
			len     = offset >> 16;
			offset &= 0xffff;
			if (field->flags & TEP_FIELD_IS_RELATIVE)
				/* offset is relative to the end of this field */
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			/* not printable: stop treating it as a string from now on */
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}
465
466 static PyObject*
get_tracepoint_field(struct pyrf_event * pevent,PyObject * attr_name)467 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
468 {
469 const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
470 struct evsel *evsel = pevent->evsel;
471 struct tep_format_field *field;
472
473 if (!evsel->tp_format) {
474 struct tep_event *tp_format;
475
476 tp_format = trace_event__tp_format_id(evsel->core.attr.config);
477 if (IS_ERR_OR_NULL(tp_format))
478 return NULL;
479
480 evsel->tp_format = tp_format;
481 }
482
483 field = tep_find_any_field(evsel->tp_format, str);
484 if (!field)
485 return NULL;
486
487 return tracepoint_field(pevent, field);
488 }
489
/*
 * Attribute lookup hook: try the tracepoint format fields first, then fall
 * back to the generic (PyMemberDef based) attribute lookup.
 */
static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}
500
/* Type object backing perf.sample_event; note the custom getattro. */
static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.sample_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_sample_event__doc,
	.tp_members	= pyrf_sample_event__members,
	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
};

static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

/* Attributes exposed on perf.context_switch_event instances. */
static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};
521
pyrf_context_switch_event__repr(struct pyrf_event * pevent)522 static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
523 {
524 PyObject *ret;
525 char *s;
526
527 if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
528 pevent->event.context_switch.next_prev_pid,
529 pevent->event.context_switch.next_prev_tid,
530 !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
531 ret = PyErr_NoMemory();
532 } else {
533 ret = _PyUnicode_FromString(s);
534 free(s);
535 }
536 return ret;
537 }
538
/* Type object backing perf.context_switch_event. */
static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.context_switch_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_context_switch_event__doc,
	.tp_members	= pyrf_context_switch_event__members,
	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
};
548
pyrf_event__setup_types(void)549 static int pyrf_event__setup_types(void)
550 {
551 int err;
552 pyrf_mmap_event__type.tp_new =
553 pyrf_task_event__type.tp_new =
554 pyrf_comm_event__type.tp_new =
555 pyrf_lost_event__type.tp_new =
556 pyrf_read_event__type.tp_new =
557 pyrf_sample_event__type.tp_new =
558 pyrf_context_switch_event__type.tp_new =
559 pyrf_throttle_event__type.tp_new = PyType_GenericNew;
560 err = PyType_Ready(&pyrf_mmap_event__type);
561 if (err < 0)
562 goto out;
563 err = PyType_Ready(&pyrf_lost_event__type);
564 if (err < 0)
565 goto out;
566 err = PyType_Ready(&pyrf_task_event__type);
567 if (err < 0)
568 goto out;
569 err = PyType_Ready(&pyrf_comm_event__type);
570 if (err < 0)
571 goto out;
572 err = PyType_Ready(&pyrf_throttle_event__type);
573 if (err < 0)
574 goto out;
575 err = PyType_Ready(&pyrf_read_event__type);
576 if (err < 0)
577 goto out;
578 err = PyType_Ready(&pyrf_sample_event__type);
579 if (err < 0)
580 goto out;
581 err = PyType_Ready(&pyrf_context_switch_event__type);
582 if (err < 0)
583 goto out;
584 out:
585 return err;
586 }
587
/* Map PERF_RECORD_* type ids to the Python type wrapping them. */
static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
};
601
pyrf_event__new(union perf_event * event)602 static PyObject *pyrf_event__new(union perf_event *event)
603 {
604 struct pyrf_event *pevent;
605 PyTypeObject *ptype;
606
607 if ((event->header.type < PERF_RECORD_MMAP ||
608 event->header.type > PERF_RECORD_SAMPLE) &&
609 !(event->header.type == PERF_RECORD_SWITCH ||
610 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
611 return NULL;
612
613 ptype = pyrf_event__type[event->header.type];
614 pevent = PyObject_New(struct pyrf_event, ptype);
615 if (pevent != NULL)
616 memcpy(&pevent->event, event, event->header.size);
617 return (PyObject *)pevent;
618 }
619
/* Python wrapper around a libperf cpu map. */
struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;	/* owned reference, put on dealloc */
};
625
/*
 * perf.cpu_map([cpustr]) constructor: parse the optional cpu list string
 * (NULL means all online cpus) into a perf_cpu_map.
 */
static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL) {
		/*
		 * tp_init must set an exception when returning -1, otherwise
		 * the interpreter raises a misleading SystemError.
		 */
		PyErr_SetString(PyExc_OSError, "failed to create the cpu map");
		return -1;
	}
	return 0;
}
641
/* tp_dealloc: drop the cpu map reference and free the Python object. */
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}
647
/* sq_length: len(cpu_map) is the number of cpus in the map. */
static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}
654
/* sq_item: cpu_map[i] returns the i-th logical cpu number. */
static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus)) {
		/*
		 * A NULL return with no exception set makes the interpreter
		 * raise SystemError; sequence iteration relies on IndexError
		 * to terminate cleanly.
		 */
		PyErr_SetString(PyExc_IndexError, "Index out of range");
		return NULL;
	}

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}
664
/* Sequence protocol: makes perf.cpu_map indexable and iterable. */
static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item   = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

/* Type object backing perf.cpu_map. */
static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.cpu_map",
	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_cpu_map__doc,
	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
	.tp_init	= (initproc)pyrf_cpu_map__init,
};
682
/* Finalize the perf.cpu_map type; called once at module init. */
static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}
688
/* Python wrapper around a libperf thread map. */
struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;	/* owned reference, put on dealloc */
};
694
/*
 * perf.thread_map([pid][, tid][, uid]) constructor: build a thread map for
 * the given pid/tid/uid selectors (-1/UINT_MAX mean "any").
 */
static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL) {
		/*
		 * tp_init must set an exception when returning -1, otherwise
		 * the interpreter raises a misleading SystemError.
		 */
		PyErr_SetString(PyExc_OSError, "failed to create the thread map");
		return -1;
	}
	return 0;
}
710
/* tp_dealloc: drop the thread map reference and free the Python object. */
static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}
716
/* sq_length: len(thread_map) is the number of threads in the map. */
static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return pthreads->threads->nr;
}
723
/* sq_item: thread_map[i] returns the i-th tid. */
static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= pthreads->threads->nr) {
		/*
		 * A NULL return with no exception set makes the interpreter
		 * raise SystemError; sequence iteration relies on IndexError
		 * to terminate cleanly.
		 */
		PyErr_SetString(PyExc_IndexError, "Index out of range");
		return NULL;
	}

	return Py_BuildValue("i", pthreads->threads->map[i]);
}
733
/* Sequence protocol: makes perf.thread_map indexable and iterable. */
static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item   = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

/* Type object backing perf.thread_map. */
static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.thread_map",
	.tp_basicsize	= sizeof(struct pyrf_thread_map),
	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_thread_map__doc,
	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
	.tp_init	= (initproc)pyrf_thread_map__init,
};
751
/* Finalize the perf.thread_map type; called once at module init. */
static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}
757
/* Python wrapper embedding a full evsel (not a pointer to one). */
struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;	/* exited (not freed) on dealloc */
};
763
/*
 * perf.evsel(...) constructor: build a perf_event_attr from keyword
 * arguments and initialize the embedded evsel with it.
 *
 * NOTE: the kwlist order, the format string
 * "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK" and the PyArg_Parse argument list must
 * stay in exact one-to-one correspondence — change all three together.
 */
static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	/* Defaults: cpu-cycles hardware counter sampling period+tid. */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		NULL
	};
	u64 sample_period = 0;
	/*
	 * The attr bitfields can't have their address taken, so parse into
	 * plain u32 temporaries and copy them in afterwards.
	 */
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/* union... sample_period and sample_freq share storage in the attr */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled	    = disabled;
	attr.inherit	    = inherit;
	attr.pinned	    = pinned;
	attr.exclusive	    = exclusive;
	attr.exclude_user   = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv	    = exclude_hv;
	attr.exclude_idle   = exclude_idle;
	attr.mmap	    = mmap;
	attr.context_switch = context_switch;
	attr.comm	    = comm;
	attr.freq	    = freq;
	attr.inherit_stat   = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task	    = task;
	attr.watermark	    = watermark;
	attr.precise_ip	    = precise_ip;
	attr.mmap_data	    = mmap_data;
	attr.sample_id_all  = sample_id_all;
	attr.size	    = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}
872
/* tp_dealloc: tear down the embedded evsel, then free the Python object. */
static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}
878
/*
 * evsel.open(cpus=..., threads=..., group=..., inherit=...): open the
 * perf_event fds for this single evsel on the given cpu/thread maps.
 * Returns None on success, raises OSError on failure.
 */
static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	/*
	 * NOTE(review): 'group' is accepted but never used below — grouping
	 * is only done via evlist.open() (see comment further down). Kept,
	 * presumably, for API compatibility; confirm before removing.
	 */
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	/* No type check is done here: callers must pass the right wrappers. */
	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel, to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}
912
/* Methods exposed on perf.evsel instances. */
static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");

/* Type object backing perf.evsel. */
static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evsel",
	.tp_basicsize	= sizeof(struct pyrf_evsel),
	.tp_dealloc	= (destructor)pyrf_evsel__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_evsel__doc,
	.tp_methods	= pyrf_evsel__methods,
	.tp_init	= (initproc)pyrf_evsel__init,
};
935
/* Finalize the perf.evsel type; called once at module init. */
static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}
941
/* Python wrapper embedding a full evlist (not a pointer to one). */
struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;	/* exited (not freed) on dealloc */
};
947
/*
 * perf.evlist(cpus, threads) constructor.
 *
 * NOTE(review): the arguments are cast without any type check — passing
 * anything but a perf.cpu_map/perf.thread_map here is undefined behavior;
 * consider PyObject_TypeCheck() guards.
 */
static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}
963
/* tp_dealloc: tear down the embedded evlist, then free the Python object. */
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}
969
/*
 * evlist.mmap(pages=128, overwrite=False): mmap the ring buffers for all
 * events in the list. Returns None on success, raises OSError on failure.
 */
static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	/*
	 * NOTE(review): 'overwrite' is parsed but never passed on —
	 * evlist__mmap() only takes 'pages'. Presumably kept for keyword
	 * compatibility; confirm before removing.
	 */
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}
989
/*
 * evlist.poll(timeout=-1): poll the evlist file descriptors.
 * Returns the number of ready descriptors, or raises OSError.
 */
static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "timeout", NULL };
	struct evlist *evlist = &pevlist->evlist;
	int timeout = -1;
	int nr_ready;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	nr_ready = evlist__poll(evlist, timeout);
	if (nr_ready < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", nr_ready);
}
1008
/*
 * evlist.get_pollfd(): return a list of Python file objects wrapping the
 * evlist's poll file descriptors.
 *
 * Fixes: the error path returned without dropping the reference to the
 * partially built list, leaking it, and a NULL return from PyList_New()
 * was not checked before use.
 */
static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	if (list == NULL)
		return NULL;	/* exception already set by PyList_New() */

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	Py_DECREF(list);	/* drop the partially built list */
	return PyErr_NoMemory();
}
1045
1046
/*
 * evlist.add(evsel): append an event selector to the list and return the
 * new number of entries.  The reference taken here is kept by the evlist.
 *
 * Fixes: the argument was blindly cast to struct pyrf_evsel *, crashing
 * on objects of any other type; validate it first.
 */
static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	if (!PyObject_TypeCheck(pevsel, &pyrf_evsel__type)) {
		PyErr_SetString(PyExc_TypeError, "expected a perf.evsel object");
		return NULL;
	}

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}
1065
get_md(struct evlist * evlist,int cpu)1066 static struct mmap *get_md(struct evlist *evlist, int cpu)
1067 {
1068 int i;
1069
1070 for (i = 0; i < evlist->core.nr_mmaps; i++) {
1071 struct mmap *md = &evlist->mmap[i];
1072
1073 if (md->core.cpu.cpu == cpu)
1074 return md;
1075 }
1076
1077 return NULL;
1078 }
1079
/*
 * evlist.read_on_cpu(cpu, sample_id_all=1): read one event from the mmap
 * ring buffer of the given cpu.  Returns an event object, None when the
 * ring is empty, or raises OSError if the sample can't be parsed.
 *
 * Fixes: the freshly created pyevent was leaked on both the !evsel path
 * and the parse-error path; returning bare NULL when there is no mmap for
 * the cpu raised a confusing SystemError instead of a real exception.
 */
static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return PyErr_Format(PyExc_ValueError,
				    "no mmap'ed ring buffer for cpu %d", cpu);

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_DECREF(pyevent);	/* was leaked here */
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we parsed it out. */
		perf_mmap__consume(&md->core);

		if (err) {
			Py_DECREF(pyevent);	/* was leaked here */
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		}
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}
1132
/*
 * evlist.open(group=0): open the counters; if group is true, make the
 * first evsel the group leader first.  Returns None or raises OSError.
 *
 * Fixes: the format string was "|OOii" while only &group was passed as a
 * vararg, so extra positional arguments made PyArg_ParseTupleAndKeywords()
 * write through bogus pointers.  Only one optional int is expected.
 */
static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	int group = 0;
	static char *kwlist[] = { "group", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &group))
		return NULL;

	if (group)
		evlist__set_leader(evlist);

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}
1154
/* Method table for perf.evlist objects; NULL-name entry terminates it. */
static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name  = "mmap",
		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name  = "poll",
		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name  = "get_pollfd",
		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name  = "add",
		.ml_meth  = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name  = "read_on_cpu",
		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};
1194
pyrf_evlist__length(PyObject * obj)1195 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1196 {
1197 struct pyrf_evlist *pevlist = (void *)obj;
1198
1199 return pevlist->evlist.core.nr_entries;
1200 }
1201
/*
 * sq_item slot: evlist[i] -> the i-th perf.evsel in the list.
 *
 * Fixes: returning NULL without an exception set made the interpreter
 * raise SystemError; an out-of-range index must raise IndexError (which
 * is also how iteration over the sequence terminates).
 */
static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries) {
		PyErr_SetString(PyExc_IndexError, "Index out of range");
		return NULL;
	}

	/* walk the linked list to the i-th entry */
	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}
1217
/* Sequence protocol: enables len() and integer indexing on perf.evlist. */
static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item   = pyrf_evlist__item,
};
1222
static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

/*
 * Python type descriptor for perf.evlist.  Instances embed a
 * struct pyrf_evlist and support both the method table above and the
 * sequence protocol (len(), indexing).
 */
static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evlist",
	.tp_basicsize	= sizeof(struct pyrf_evlist),
	.tp_dealloc	= (destructor)pyrf_evlist__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
	.tp_doc		= pyrf_evlist__doc,
	.tp_methods	= pyrf_evlist__methods,
	.tp_init	= (initproc)pyrf_evlist__init,
};
1236
pyrf_evlist__setup_types(void)1237 static int pyrf_evlist__setup_types(void)
1238 {
1239 pyrf_evlist__type.tp_new = PyType_GenericNew;
1240 return PyType_Ready(&pyrf_evlist__type);
1241 }
1242
/* Expands to a { "name", value } pair for a PERF_* constant. */
#define PERF_CONST(name) { #name, PERF_##name }

/*
 * PERF_* constants exported as module-level integers (perf.TYPE_HARDWARE,
 * perf.COUNT_HW_CPU_CYCLES, ...) by the module init function below.
 * NULL-name entry terminates the table.
 */
static struct {
	const char *name;
	int	    value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};
1325
/*
 * perf.tracepoint(sys=None, name=None): look up a tracepoint's event id
 * for use as a perf_event_attr config value.  Returns the id, or -1 if
 * the tracepoint format can't be resolved.
 */
static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys = NULL, *name = NULL;
	struct tep_event *tp_format;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	return _PyLong_FromLong(IS_ERR(tp_format) ? -1 : tp_format->id);
}
1344
/* Module-level functions of the perf module; NULL-name entry terminates. */
static PyMethodDef perf__methods[] = {
	{
		.ml_name  = "tracepoint",
		.ml_meth  = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};
1354
1355 #if PY_MAJOR_VERSION < 3
initperf(void)1356 PyMODINIT_FUNC initperf(void)
1357 #else
1358 PyMODINIT_FUNC PyInit_perf(void)
1359 #endif
1360 {
1361 PyObject *obj;
1362 int i;
1363 PyObject *dict;
1364 #if PY_MAJOR_VERSION < 3
1365 PyObject *module = Py_InitModule("perf", perf__methods);
1366 #else
1367 static struct PyModuleDef moduledef = {
1368 PyModuleDef_HEAD_INIT,
1369 "perf", /* m_name */
1370 "", /* m_doc */
1371 -1, /* m_size */
1372 perf__methods, /* m_methods */
1373 NULL, /* m_reload */
1374 NULL, /* m_traverse */
1375 NULL, /* m_clear */
1376 NULL, /* m_free */
1377 };
1378 PyObject *module = PyModule_Create(&moduledef);
1379 #endif
1380
1381 if (module == NULL ||
1382 pyrf_event__setup_types() < 0 ||
1383 pyrf_evlist__setup_types() < 0 ||
1384 pyrf_evsel__setup_types() < 0 ||
1385 pyrf_thread_map__setup_types() < 0 ||
1386 pyrf_cpu_map__setup_types() < 0)
1387 #if PY_MAJOR_VERSION < 3
1388 return;
1389 #else
1390 return module;
1391 #endif
1392
1393 /* The page_size is placed in util object. */
1394 page_size = sysconf(_SC_PAGE_SIZE);
1395
1396 Py_INCREF(&pyrf_evlist__type);
1397 PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1398
1399 Py_INCREF(&pyrf_evsel__type);
1400 PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1401
1402 Py_INCREF(&pyrf_mmap_event__type);
1403 PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1404
1405 Py_INCREF(&pyrf_lost_event__type);
1406 PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1407
1408 Py_INCREF(&pyrf_comm_event__type);
1409 PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1410
1411 Py_INCREF(&pyrf_task_event__type);
1412 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1413
1414 Py_INCREF(&pyrf_throttle_event__type);
1415 PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1416
1417 Py_INCREF(&pyrf_task_event__type);
1418 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1419
1420 Py_INCREF(&pyrf_read_event__type);
1421 PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1422
1423 Py_INCREF(&pyrf_sample_event__type);
1424 PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1425
1426 Py_INCREF(&pyrf_context_switch_event__type);
1427 PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1428
1429 Py_INCREF(&pyrf_thread_map__type);
1430 PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1431
1432 Py_INCREF(&pyrf_cpu_map__type);
1433 PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1434
1435 dict = PyModule_GetDict(module);
1436 if (dict == NULL)
1437 goto error;
1438
1439 for (i = 0; perf__constants[i].name != NULL; i++) {
1440 obj = _PyLong_FromLong(perf__constants[i].value);
1441 if (obj == NULL)
1442 goto error;
1443 PyDict_SetItemString(dict, perf__constants[i].name, obj);
1444 Py_DECREF(obj);
1445 }
1446
1447 error:
1448 if (PyErr_Occurred())
1449 PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1450 #if PY_MAJOR_VERSION >= 3
1451 return module;
1452 #endif
1453 }
1454
1455 /*
1456 * Dummy, to avoid dragging all the test_attr infrastructure in the python
1457 * binding.
1458 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		     int fd, int group_fd, unsigned long flags)
{
	/* Intentionally empty stub; see the comment above. */
}
1463