1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CTF writing support via babeltrace.
4  *
5  * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
6  * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include <traceevent/event-parse.h>
23 #include "asm/bug.h"
24 #include "data-convert-bt.h"
25 #include "session.h"
26 #include "debug.h"
27 #include "tool.h"
28 #include "evlist.h"
29 #include "evsel.h"
30 #include "machine.h"
31 #include "config.h"
32 #include <linux/ctype.h>
33 #include <linux/err.h>
34 
35 #define pr_N(n, fmt, ...) \
36 	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
37 
38 #define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
39 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
40 
41 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
42 
43 struct evsel_priv {
44 	struct bt_ctf_event_class *event_class;
45 };
46 
47 #define MAX_CPUS	4096
48 
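/*
 * Per-CPU CTF output stream; 'count' tracks the number of appended
 * events so the stream can be flushed periodically.
 */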
49 struct ctf_stream {
50 	struct bt_ctf_stream *stream;
51 	int cpu;
52 	u32 count;
53 };
54 
55 struct ctf_writer {
56 	/* writer primitives */
57 	struct bt_ctf_writer		 *writer;
58 	struct ctf_stream		**stream;
59 	int				  stream_cnt;
60 	struct bt_ctf_stream_class	 *stream_class;
61 	struct bt_ctf_clock		 *clock;
62 
63 	/* data types */
64 	union {
65 		struct {
66 			struct bt_ctf_field_type	*s64;
67 			struct bt_ctf_field_type	*u64;
68 			struct bt_ctf_field_type	*s32;
69 			struct bt_ctf_field_type	*u32;
70 			struct bt_ctf_field_type	*string;
71 			struct bt_ctf_field_type	*u32_hex;
72 			struct bt_ctf_field_type	*u64_hex;
73 		};
		struct bt_ctf_field_type *array[7];
75 	} data;
76 	struct bt_ctf_event_class	*comm_class;
77 	struct bt_ctf_event_class	*exit_class;
78 	struct bt_ctf_event_class	*fork_class;
79 	struct bt_ctf_event_class	*mmap_class;
80 	struct bt_ctf_event_class	*mmap2_class;
81 };
82 
83 struct convert {
84 	struct perf_tool	tool;
85 	struct ctf_writer	writer;
86 
87 	u64			events_size;
88 	u64			events_count;
89 	u64			non_sample_count;
90 
91 	/* Ordered events configured queue size. */
92 	u64			queue_size;
93 };
94 
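/*
 * Create an integer field of the given type, set it to 'val' (as a
 * signed or unsigned integer depending on the type) and attach it to
 * the event payload under 'name'.
 */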
static int value_set(struct bt_ctf_field_type *type,
96 		     struct bt_ctf_event *event,
97 		     const char *name, u64 val)
98 {
99 	struct bt_ctf_field *field;
100 	bool sign = bt_ctf_field_type_integer_get_signed(type);
101 	int ret;
102 
103 	field = bt_ctf_field_create(type);
104 	if (!field) {
105 		pr_err("failed to create a field %s\n", name);
106 		return -1;
107 	}
108 
109 	if (sign) {
110 		ret = bt_ctf_field_signed_integer_set_value(field, val);
111 		if (ret) {
112 			pr_err("failed to set field value %s\n", name);
113 			goto err;
114 		}
115 	} else {
116 		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
117 		if (ret) {
118 			pr_err("failed to set field value %s\n", name);
119 			goto err;
120 		}
121 	}
122 
123 	ret = bt_ctf_event_set_payload(event, name, field);
124 	if (ret) {
125 		pr_err("failed to set payload %s\n", name);
126 		goto err;
127 	}
128 
129 	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
130 
131 err:
132 	bt_ctf_field_put(field);
133 	return ret;
134 }
135 
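/*
 * Generate value_set_<type>() wrappers around value_set() for each of
 * the integer field types held in struct ctf_writer (s32, u32, s64,
 * u64 and the hex variant).
 */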
136 #define __FUNC_VALUE_SET(_name, _val_type)				\
137 static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
138 			     struct bt_ctf_event *event,		\
139 			     const char *name,				\
140 			     _val_type val)				\
141 {									\
142 	struct bt_ctf_field_type *type = cw->data._name;		\
143 	return value_set(type, event, name, (u64) val);			\
144 }
145 
146 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
147 
148 FUNC_VALUE_SET(s32)
149 FUNC_VALUE_SET(u32)
150 FUNC_VALUE_SET(s64)
151 FUNC_VALUE_SET(u64)
152 __FUNC_VALUE_SET(u64_hex, u64)
153 
154 static int string_set_value(struct bt_ctf_field *field, const char *string);
155 static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
157 		 const char *name, const char *string)
158 {
159 	struct bt_ctf_field_type *type = cw->data.string;
160 	struct bt_ctf_field *field;
161 	int ret = 0;
162 
163 	field = bt_ctf_field_create(type);
164 	if (!field) {
165 		pr_err("failed to create a field %s\n", name);
166 		return -1;
167 	}
168 
169 	ret = string_set_value(field, string);
170 	if (ret) {
171 		pr_err("failed to set value %s\n", name);
172 		goto err_put_field;
173 	}
174 
175 	ret = bt_ctf_event_set_payload(event, name, field);
176 	if (ret)
177 		pr_err("failed to set payload %s\n", name);
178 
179 err_put_field:
180 	bt_ctf_field_put(field);
181 	return ret;
182 }
183 
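/*
 * Map a libtraceevent format field onto one of the pre-created CTF
 * field types: string, hex u64 for longs/pointers, otherwise a 32- or
 * 64-bit integer matching the field's size and signedness.
 */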
184 static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
186 {
187 	unsigned long flags = field->flags;
188 
189 	if (flags & TEP_FIELD_IS_STRING)
190 		return cw->data.string;
191 
192 	if (!(flags & TEP_FIELD_IS_SIGNED)) {
		/* unsigned longs are mostly pointers */
194 		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
195 			return cw->data.u64_hex;
196 	}
197 
198 	if (flags & TEP_FIELD_IS_SIGNED) {
199 		if (field->size == 8)
200 			return cw->data.s64;
201 		else
202 			return cw->data.s32;
203 	}
204 
205 	if (field->size == 8)
206 		return cw->data.u64;
207 	else
208 		return cw->data.u32;
209 }
210 
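/*
 * Sign-extend a 1, 2 or 4 byte value that was read as unsigned from
 * the raw tracepoint data, so that negative values are stored
 * correctly in a signed 64-bit CTF integer field.
 */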
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
212 {
213 	unsigned long long value_mask;
214 
215 	/*
216 	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Set it explicitly for each size so readers do not have to
	 * expand the expression.
218 	 */
219 	switch (size) {
220 	case 1:
221 		value_mask = 0x7fULL;
222 		break;
223 	case 2:
224 		value_mask = 0x7fffULL;
225 		break;
226 	case 4:
227 		value_mask = 0x7fffffffULL;
228 		break;
229 	case 8:
230 		/*
		 * For a 64-bit value, return it as is.  There is no
		 * need to fill the high bits.
233 		 */
234 		/* Fall through */
235 	default:
236 		/* BUG! */
237 		return value_int;
238 	}
239 
240 	/* If it is a positive value, don't adjust. */
241 	if ((value_int & (~0ULL - value_mask)) == 0)
242 		return value_int;
243 
	/* Fill the upper bits of value_int with 1s to make it a negative long long. */
245 	return (value_int & value_mask) | ~value_mask;
246 }
247 
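/* Set a CTF string field, escaping non-printable bytes as "\xNN". */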
static int string_set_value(struct bt_ctf_field *field, const char *string)
249 {
250 	char *buffer = NULL;
251 	size_t len = strlen(string), i, p;
252 	int err;
253 
254 	for (i = p = 0; i < len; i++, p++) {
255 		if (isprint(string[i])) {
256 			if (!buffer)
257 				continue;
258 			buffer[p] = string[i];
259 		} else {
260 			char numstr[5];
261 
262 			snprintf(numstr, sizeof(numstr), "\\x%02x",
263 				 (unsigned int)(string[i]) & 0xff);
264 
265 			if (!buffer) {
266 				buffer = zalloc(i + (len - i) * 4 + 2);
267 				if (!buffer) {
268 					pr_err("failed to set unprintable string '%s'\n", string);
269 					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
270 				}
271 				if (i > 0)
272 					strncpy(buffer, string, i);
273 			}
274 			memcpy(buffer + p, numstr, 4);
275 			p += 3;
276 		}
277 	}
278 
279 	if (!buffer)
280 		return bt_ctf_field_string_set_value(field, string);
281 	err = bt_ctf_field_string_set_value(field, buffer);
282 	free(buffer);
283 	return err;
284 }
285 
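/*
 * Convert a single tracepoint format field (scalar, string, fixed or
 * dynamic array) from the raw sample data into the CTF event payload.
 */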
static int add_tracepoint_field_value(struct ctf_writer *cw,
287 				      struct bt_ctf_event_class *event_class,
288 				      struct bt_ctf_event *event,
289 				      struct perf_sample *sample,
290 				      struct tep_format_field *fmtf)
291 {
292 	struct bt_ctf_field_type *type;
293 	struct bt_ctf_field *array_field;
294 	struct bt_ctf_field *field;
295 	const char *name = fmtf->name;
296 	void *data = sample->raw_data;
297 	unsigned long flags = fmtf->flags;
298 	unsigned int n_items;
299 	unsigned int i;
300 	unsigned int offset;
301 	unsigned int len;
302 	int ret;
303 
304 	name = fmtf->alias;
305 	offset = fmtf->offset;
306 	len = fmtf->size;
307 	if (flags & TEP_FIELD_IS_STRING)
308 		flags &= ~TEP_FIELD_IS_ARRAY;
309 
310 	if (flags & TEP_FIELD_IS_DYNAMIC) {
311 		unsigned long long tmp_val;
312 
313 		tmp_val = tep_read_number(fmtf->event->tep,
314 					  data + offset, len);
315 		offset = tmp_val;
316 		len = offset >> 16;
317 		offset &= 0xffff;
318 	}
319 
320 	if (flags & TEP_FIELD_IS_ARRAY) {
321 
322 		type = bt_ctf_event_class_get_field_by_name(
323 				event_class, name);
324 		array_field = bt_ctf_field_create(type);
325 		bt_ctf_field_type_put(type);
326 		if (!array_field) {
327 			pr_err("Failed to create array type %s\n", name);
328 			return -1;
329 		}
330 
331 		len = fmtf->size / fmtf->arraylen;
332 		n_items = fmtf->arraylen;
333 	} else {
334 		n_items = 1;
335 		array_field = NULL;
336 	}
337 
338 	type = get_tracepoint_field_type(cw, fmtf);
339 
340 	for (i = 0; i < n_items; i++) {
341 		if (flags & TEP_FIELD_IS_ARRAY)
342 			field = bt_ctf_field_array_get_field(array_field, i);
343 		else
344 			field = bt_ctf_field_create(type);
345 
346 		if (!field) {
347 			pr_err("failed to create a field %s\n", name);
348 			return -1;
349 		}
350 
351 		if (flags & TEP_FIELD_IS_STRING)
352 			ret = string_set_value(field, data + offset + i * len);
353 		else {
354 			unsigned long long value_int;
355 
356 			value_int = tep_read_number(
357 					fmtf->event->tep,
358 					data + offset + i * len, len);
359 
360 			if (!(flags & TEP_FIELD_IS_SIGNED))
361 				ret = bt_ctf_field_unsigned_integer_set_value(
362 						field, value_int);
363 			else
364 				ret = bt_ctf_field_signed_integer_set_value(
365 						field, adjust_signedness(value_int, len));
366 		}
367 
368 		if (ret) {
			pr_err("failed to set field value %s\n", name);
370 			goto err_put_field;
371 		}
372 		if (!(flags & TEP_FIELD_IS_ARRAY)) {
373 			ret = bt_ctf_event_set_payload(event, name, field);
374 			if (ret) {
375 				pr_err("failed to set payload %s\n", name);
376 				goto err_put_field;
377 			}
378 		}
379 		bt_ctf_field_put(field);
380 	}
381 	if (flags & TEP_FIELD_IS_ARRAY) {
382 		ret = bt_ctf_event_set_payload(event, name, array_field);
383 		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
385 			return -1;
386 		}
387 		bt_ctf_field_put(array_field);
388 	}
389 	return 0;
390 
391 err_put_field:
392 	bt_ctf_field_put(field);
393 	return -1;
394 }
395 
static int add_tracepoint_fields_values(struct ctf_writer *cw,
397 					struct bt_ctf_event_class *event_class,
398 					struct bt_ctf_event *event,
399 					struct tep_format_field *fields,
400 					struct perf_sample *sample)
401 {
402 	struct tep_format_field *field;
403 	int ret;
404 
405 	for (field = fields; field; field = field->next) {
406 		ret = add_tracepoint_field_value(cw, event_class, event, sample,
407 				field);
408 		if (ret)
409 			return -1;
410 	}
411 	return 0;
412 }
413 
static int add_tracepoint_values(struct ctf_writer *cw,
415 				 struct bt_ctf_event_class *event_class,
416 				 struct bt_ctf_event *event,
417 				 struct evsel *evsel,
418 				 struct perf_sample *sample)
419 {
420 	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
421 	struct tep_format_field *fields        = evsel->tp_format->format.fields;
422 	int ret;
423 
424 	ret = add_tracepoint_fields_values(cw, event_class, event,
425 					   common_fields, sample);
426 	if (!ret)
427 		ret = add_tracepoint_fields_values(cw, event_class, event,
428 						   fields, sample);
429 
430 	return ret;
431 }
432 
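/*
 * BPF output events carry opaque raw data: store it as a 'raw_len'
 * count of u32 words followed by a 'raw_data' sequence of u32 values.
 */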
433 static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
435 		      struct bt_ctf_event *event,
436 		      struct perf_sample *sample)
437 {
438 	struct bt_ctf_field_type *len_type, *seq_type;
439 	struct bt_ctf_field *len_field, *seq_field;
440 	unsigned int raw_size = sample->raw_size;
441 	unsigned int nr_elements = raw_size / sizeof(u32);
442 	unsigned int i;
443 	int ret;
444 
445 	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skipping %zu trailing bytes\n",
			   raw_size, raw_size - nr_elements * sizeof(u32));
448 
449 	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
450 	len_field = bt_ctf_field_create(len_type);
451 	if (!len_field) {
452 		pr_err("failed to create 'raw_len' for bpf output event\n");
453 		ret = -1;
454 		goto put_len_type;
455 	}
456 
457 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
458 	if (ret) {
459 		pr_err("failed to set field value for raw_len\n");
460 		goto put_len_field;
461 	}
462 	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
463 	if (ret) {
464 		pr_err("failed to set payload to raw_len\n");
465 		goto put_len_field;
466 	}
467 
468 	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
469 	seq_field = bt_ctf_field_create(seq_type);
470 	if (!seq_field) {
471 		pr_err("failed to create 'raw_data' for bpf output event\n");
472 		ret = -1;
473 		goto put_seq_type;
474 	}
475 
476 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
477 	if (ret) {
478 		pr_err("failed to set length of 'raw_data'\n");
479 		goto put_seq_field;
480 	}
481 
482 	for (i = 0; i < nr_elements; i++) {
483 		struct bt_ctf_field *elem_field =
484 			bt_ctf_field_sequence_get_field(seq_field, i);
485 
486 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
487 				((u32 *)(sample->raw_data))[i]);
488 
489 		bt_ctf_field_put(elem_field);
490 		if (ret) {
491 			pr_err("failed to set raw_data[%d]\n", i);
492 			goto put_seq_field;
493 		}
494 	}
495 
496 	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
497 	if (ret)
498 		pr_err("failed to set payload for raw_data\n");
499 
500 put_seq_field:
501 	bt_ctf_field_put(seq_field);
502 put_seq_type:
503 	bt_ctf_field_type_put(seq_type);
504 put_len_field:
505 	bt_ctf_field_put(len_field);
506 put_len_type:
507 	bt_ctf_field_type_put(len_type);
508 	return ret;
509 }
510 
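/*
 * Store the sample callchain as a 'perf_callchain_size' count followed
 * by a 'perf_callchain' sequence of u64 instruction pointers.
 */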
511 static int
add_callchain_output_values(struct bt_ctf_event_class *event_class,
513 		      struct bt_ctf_event *event,
514 		      struct ip_callchain *callchain)
515 {
516 	struct bt_ctf_field_type *len_type, *seq_type;
517 	struct bt_ctf_field *len_field, *seq_field;
518 	unsigned int nr_elements = callchain->nr;
519 	unsigned int i;
520 	int ret;
521 
522 	len_type = bt_ctf_event_class_get_field_by_name(
523 			event_class, "perf_callchain_size");
524 	len_field = bt_ctf_field_create(len_type);
525 	if (!len_field) {
526 		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
527 		ret = -1;
528 		goto put_len_type;
529 	}
530 
531 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
532 	if (ret) {
533 		pr_err("failed to set field value for perf_callchain_size\n");
534 		goto put_len_field;
535 	}
536 	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
537 	if (ret) {
538 		pr_err("failed to set payload to perf_callchain_size\n");
539 		goto put_len_field;
540 	}
541 
542 	seq_type = bt_ctf_event_class_get_field_by_name(
543 			event_class, "perf_callchain");
544 	seq_field = bt_ctf_field_create(seq_type);
545 	if (!seq_field) {
546 		pr_err("failed to create 'perf_callchain' for callchain output event\n");
547 		ret = -1;
548 		goto put_seq_type;
549 	}
550 
551 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
552 	if (ret) {
553 		pr_err("failed to set length of 'perf_callchain'\n");
554 		goto put_seq_field;
555 	}
556 
557 	for (i = 0; i < nr_elements; i++) {
558 		struct bt_ctf_field *elem_field =
559 			bt_ctf_field_sequence_get_field(seq_field, i);
560 
561 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
562 				((u64 *)(callchain->ips))[i]);
563 
564 		bt_ctf_field_put(elem_field);
565 		if (ret) {
566 			pr_err("failed to set callchain[%d]\n", i);
567 			goto put_seq_field;
568 		}
569 	}
570 
571 	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
572 	if (ret)
		pr_err("failed to set payload for perf_callchain\n");
574 
575 put_seq_field:
576 	bt_ctf_field_put(seq_field);
577 put_seq_type:
578 	bt_ctf_field_type_put(seq_type);
579 put_len_field:
580 	bt_ctf_field_put(len_field);
581 put_len_type:
582 	bt_ctf_field_type_put(len_type);
583 	return ret;
584 }
585 
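/*
 * Fill in the generic perf_* payload fields (ip, tid/pid, id, stream
 * id, period, weight, data_src, transaction) selected by the event's
 * sample_type mask.
 */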
static int add_generic_values(struct ctf_writer *cw,
587 			      struct bt_ctf_event *event,
588 			      struct evsel *evsel,
589 			      struct perf_sample *sample)
590 {
591 	u64 type = evsel->core.attr.sample_type;
592 	int ret;
593 
594 	/*
595 	 * missing:
596 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
597 	 *                              ctf event header
598 	 *   PERF_SAMPLE_READ         - TODO
599 	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
600 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
601 	 *   PERF_SAMPLE_REGS_USER    - TODO
602 	 *   PERF_SAMPLE_STACK_USER   - TODO
603 	 */
604 
605 	if (type & PERF_SAMPLE_IP) {
606 		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
607 		if (ret)
608 			return -1;
609 	}
610 
611 	if (type & PERF_SAMPLE_TID) {
612 		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
613 		if (ret)
614 			return -1;
615 
616 		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
617 		if (ret)
618 			return -1;
619 	}
620 
621 	if ((type & PERF_SAMPLE_ID) ||
622 	    (type & PERF_SAMPLE_IDENTIFIER)) {
623 		ret = value_set_u64(cw, event, "perf_id", sample->id);
624 		if (ret)
625 			return -1;
626 	}
627 
628 	if (type & PERF_SAMPLE_STREAM_ID) {
629 		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
630 		if (ret)
631 			return -1;
632 	}
633 
634 	if (type & PERF_SAMPLE_PERIOD) {
635 		ret = value_set_u64(cw, event, "perf_period", sample->period);
636 		if (ret)
637 			return -1;
638 	}
639 
640 	if (type & PERF_SAMPLE_WEIGHT) {
641 		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
642 		if (ret)
643 			return -1;
644 	}
645 
646 	if (type & PERF_SAMPLE_DATA_SRC) {
647 		ret = value_set_u64(cw, event, "perf_data_src",
648 				sample->data_src);
649 		if (ret)
650 			return -1;
651 	}
652 
653 	if (type & PERF_SAMPLE_TRANSACTION) {
654 		ret = value_set_u64(cw, event, "perf_transaction",
655 				sample->transaction);
656 		if (ret)
657 			return -1;
658 	}
659 
660 	return 0;
661 }
662 
static int ctf_stream__flush(struct ctf_stream *cs)
664 {
665 	int err = 0;
666 
667 	if (cs) {
668 		err = bt_ctf_stream_flush(cs->stream);
669 		if (err)
670 			pr_err("CTF stream %d flush failed\n", cs->cpu);
671 
672 		pr("Flush stream for cpu %d (%u samples)\n",
673 		   cs->cpu, cs->count);
674 
675 		cs->count = 0;
676 	}
677 
678 	return err;
679 }
680 
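/*
 * Create a per-CPU CTF stream and record the CPU number in its packet
 * context ("cpu_id").
 */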
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
682 {
683 	struct ctf_stream *cs;
684 	struct bt_ctf_field *pkt_ctx   = NULL;
685 	struct bt_ctf_field *cpu_field = NULL;
686 	struct bt_ctf_stream *stream   = NULL;
687 	int ret;
688 
689 	cs = zalloc(sizeof(*cs));
690 	if (!cs) {
691 		pr_err("Failed to allocate ctf stream\n");
692 		return NULL;
693 	}
694 
695 	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
696 	if (!stream) {
697 		pr_err("Failed to create CTF stream\n");
698 		goto out;
699 	}
700 
701 	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
702 	if (!pkt_ctx) {
703 		pr_err("Failed to obtain packet context\n");
704 		goto out;
705 	}
706 
707 	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
708 	bt_ctf_field_put(pkt_ctx);
709 	if (!cpu_field) {
710 		pr_err("Failed to obtain cpu field\n");
711 		goto out;
712 	}
713 
714 	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
715 	if (ret) {
716 		pr_err("Failed to update CPU number\n");
717 		goto out;
718 	}
719 
720 	bt_ctf_field_put(cpu_field);
721 
722 	cs->cpu    = cpu;
723 	cs->stream = stream;
724 	return cs;
725 
726 out:
727 	if (cpu_field)
728 		bt_ctf_field_put(cpu_field);
729 	if (stream)
730 		bt_ctf_stream_put(stream);
731 
732 	free(cs);
733 	return NULL;
734 }
735 
static void ctf_stream__delete(struct ctf_stream *cs)
737 {
738 	if (cs) {
739 		bt_ctf_stream_put(cs->stream);
740 		free(cs);
741 	}
742 }
743 
static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
745 {
746 	struct ctf_stream *cs = cw->stream[cpu];
747 
748 	if (!cs) {
749 		cs = ctf_stream__create(cw, cpu);
750 		cw->stream[cpu] = cs;
751 	}
752 
753 	return cs;
754 }
755 
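/*
 * Pick the stream index for a sample: the recording CPU if
 * PERF_SAMPLE_CPU is present, stream 0 otherwise or when the CPU is
 * out of range.
 */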
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
757 			  struct evsel *evsel)
758 {
759 	int cpu = 0;
760 
761 	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
762 		cpu = sample->cpu;
763 
	if (cpu >= cw->stream_cnt) {
765 		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
766 			cpu, cw->stream_cnt);
767 		cpu = 0;
768 	}
769 
770 	return cpu;
771 }
772 
773 #define STREAM_FLUSH_COUNT 100000
774 
775 /*
 * Currently we have no way to decide when to flush a stream
 * other than to keep track of the number of appended events
 * and check it against a threshold.
780  */
static bool is_flush_needed(struct ctf_stream *cs)
782 {
783 	return cs->count >= STREAM_FLUSH_COUNT;
784 }
785 
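/*
 * Convert one perf sample into a CTF event and append it to the
 * per-CPU stream, flushing the stream once it reaches
 * STREAM_FLUSH_COUNT events.
 */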
static int process_sample_event(struct perf_tool *tool,
787 				union perf_event *_event,
788 				struct perf_sample *sample,
789 				struct evsel *evsel,
790 				struct machine *machine __maybe_unused)
791 {
792 	struct convert *c = container_of(tool, struct convert, tool);
793 	struct evsel_priv *priv = evsel->priv;
794 	struct ctf_writer *cw = &c->writer;
795 	struct ctf_stream *cs;
796 	struct bt_ctf_event_class *event_class;
797 	struct bt_ctf_event *event;
798 	int ret;
799 	unsigned long type = evsel->core.attr.sample_type;
800 
801 	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
802 		return 0;
803 
804 	event_class = priv->event_class;
805 
806 	/* update stats */
807 	c->events_count++;
808 	c->events_size += _event->header.size;
809 
810 	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
811 
812 	event = bt_ctf_event_create(event_class);
813 	if (!event) {
		pr_err("Failed to create a CTF event\n");
815 		return -1;
816 	}
817 
818 	bt_ctf_clock_set_time(cw->clock, sample->time);
819 
820 	ret = add_generic_values(cw, event, evsel, sample);
821 	if (ret)
822 		return -1;
823 
824 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
825 		ret = add_tracepoint_values(cw, event_class, event,
826 					    evsel, sample);
827 		if (ret)
828 			return -1;
829 	}
830 
831 	if (type & PERF_SAMPLE_CALLCHAIN) {
832 		ret = add_callchain_output_values(event_class,
833 				event, sample->callchain);
834 		if (ret)
835 			return -1;
836 	}
837 
838 	if (perf_evsel__is_bpf_output(evsel)) {
839 		ret = add_bpf_output_values(event_class, event, sample);
840 		if (ret)
841 			return -1;
842 	}
843 
844 	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
845 	if (cs) {
846 		if (is_flush_needed(cs))
847 			ctf_stream__flush(cs);
848 
849 		cs->count++;
850 		bt_ctf_stream_append_event(cs->stream, event);
851 	}
852 
853 	bt_ctf_event_put(event);
854 	return cs ? 0 : -1;
855 }
856 
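/*
 * Helpers for converting non-sample (side-band) events:
 * __NON_SAMPLE_SET_FIELD() copies one member of the perf event into
 * the CTF payload, and __FUNC_PROCESS_NON_SAMPLE() generates a
 * process_<name>_event() handler that appends the event to stream 0
 * and then chains to the default perf_event__process_<name>() handler.
 */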
857 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
858 do {							\
859 	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
860 	if (ret)					\
861 		return -1;				\
862 } while(0)
863 
864 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
865 static int process_##_name##_event(struct perf_tool *tool,	\
866 				   union perf_event *_event,	\
867 				   struct perf_sample *sample,	\
868 				   struct machine *machine)	\
869 {								\
870 	struct convert *c = container_of(tool, struct convert, tool);\
871 	struct ctf_writer *cw = &c->writer;			\
872 	struct bt_ctf_event_class *event_class = cw->_name##_class;\
873 	struct bt_ctf_event *event;				\
874 	struct ctf_stream *cs;					\
875 	int ret;						\
876 								\
877 	c->non_sample_count++;					\
878 	c->events_size += _event->header.size;			\
879 	event = bt_ctf_event_create(event_class);		\
880 	if (!event) {						\
		pr_err("Failed to create a CTF event\n");	\
882 		return -1;					\
883 	}							\
884 								\
885 	bt_ctf_clock_set_time(cw->clock, sample->time);		\
886 	body							\
887 	cs = ctf_stream(cw, 0);					\
888 	if (cs) {						\
889 		if (is_flush_needed(cs))			\
890 			ctf_stream__flush(cs);			\
891 								\
892 		cs->count++;					\
893 		bt_ctf_stream_append_event(cs->stream, event);	\
894 	}							\
895 	bt_ctf_event_put(event);				\
896 								\
897 	return perf_event__process_##_name(tool, _event, sample, machine);\
898 }
899 
__FUNC_PROCESS_NON_SAMPLE(comm,
901 	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
902 	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
903 	__NON_SAMPLE_SET_FIELD(comm, string, comm);
904 )
905 __FUNC_PROCESS_NON_SAMPLE(fork,
906 	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
907 	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
908 	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
909 	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
910 	__NON_SAMPLE_SET_FIELD(fork, u64, time);
911 )
912 
913 __FUNC_PROCESS_NON_SAMPLE(exit,
914 	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
915 	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
916 	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
917 	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
918 	__NON_SAMPLE_SET_FIELD(fork, u64, time);
919 )
920 __FUNC_PROCESS_NON_SAMPLE(mmap,
921 	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
922 	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
923 	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
924 	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
925 )
926 __FUNC_PROCESS_NON_SAMPLE(mmap2,
927 	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
928 	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
929 	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
930 	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
931 )
932 #undef __NON_SAMPLE_SET_FIELD
933 #undef __FUNC_PROCESS_NON_SAMPLE
934 
935 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
936 static char *change_name(char *name, char *orig_name, int dup)
937 {
938 	char *new_name = NULL;
939 	size_t len;
940 
941 	if (!name)
942 		name = orig_name;
943 
944 	if (dup >= 10)
945 		goto out;
946 	/*
	 * Add a '_' prefix to a potential keyword.  According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updates may require us to use '$'.
950 	 */
951 	if (dup < 0)
952 		len = strlen(name) + sizeof("_");
953 	else
954 		len = strlen(orig_name) + sizeof("_dupl_X");
955 
956 	new_name = malloc(len);
957 	if (!new_name)
958 		goto out;
959 
960 	if (dup < 0)
961 		snprintf(new_name, len, "_%s", name);
962 	else
963 		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
964 
965 out:
966 	if (name != orig_name)
967 		free(name);
968 	return new_name;
969 }
970 
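/*
 * Add a field to the event class, renaming it when the name is a CTF
 * keyword or collides with an already-added field; the final name is
 * cached in field->alias.
 */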
static int event_class_add_field(struct bt_ctf_event_class *event_class,
972 		struct bt_ctf_field_type *type,
973 		struct tep_format_field *field)
974 {
975 	struct bt_ctf_field_type *t = NULL;
976 	char *name;
977 	int dup = 1;
978 	int ret;
979 
980 	/* alias was already assigned */
981 	if (field->alias != field->name)
982 		return bt_ctf_event_class_add_field(event_class, type,
983 				(char *)field->alias);
984 
985 	name = field->name;
986 
	/* If 'name' is a keyword, add a prefix. */
988 	if (bt_ctf_validate_identifier(name))
989 		name = change_name(name, field->name, -1);
990 
991 	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
993 		return -1;
994 	}
995 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
996 		bt_ctf_field_type_put(t);
997 		name = change_name(name, field->name, dup++);
998 		if (!name) {
999 			pr_err("Failed to create dup name for '%s'\n", field->name);
1000 			return -1;
1001 		}
1002 	}
1003 
1004 	ret = bt_ctf_event_class_add_field(event_class, type, name);
1005 	if (!ret)
1006 		field->alias = name;
1007 
1008 	return ret;
1009 }
1010 
static int add_tracepoint_fields_types(struct ctf_writer *cw,
1012 				       struct tep_format_field *fields,
1013 				       struct bt_ctf_event_class *event_class)
1014 {
1015 	struct tep_format_field *field;
1016 	int ret;
1017 
1018 	for (field = fields; field; field = field->next) {
1019 		struct bt_ctf_field_type *type;
1020 		unsigned long flags = field->flags;
1021 
1022 		pr2("  field '%s'\n", field->name);
1023 
1024 		type = get_tracepoint_field_type(cw, field);
1025 		if (!type)
1026 			return -1;
1027 
1028 		/*
1029 		 * A string is an array of chars. For this we use the string
1030 		 * type and don't care that it is an array. What we don't
1031 		 * support is an array of strings.
1032 		 */
1033 		if (flags & TEP_FIELD_IS_STRING)
1034 			flags &= ~TEP_FIELD_IS_ARRAY;
1035 
1036 		if (flags & TEP_FIELD_IS_ARRAY)
1037 			type = bt_ctf_field_type_array_create(type, field->arraylen);
1038 
1039 		ret = event_class_add_field(event_class, type, field);
1040 
1041 		if (flags & TEP_FIELD_IS_ARRAY)
1042 			bt_ctf_field_type_put(type);
1043 
1044 		if (ret) {
1045 			pr_err("Failed to add field '%s': %d\n",
1046 					field->name, ret);
1047 			return -1;
1048 		}
1049 	}
1050 
1051 	return 0;
1052 }
1053 
static int add_tracepoint_types(struct ctf_writer *cw,
1055 				struct evsel *evsel,
1056 				struct bt_ctf_event_class *class)
1057 {
1058 	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1059 	struct tep_format_field *fields        = evsel->tp_format->format.fields;
1060 	int ret;
1061 
1062 	ret = add_tracepoint_fields_types(cw, common_fields, class);
1063 	if (!ret)
1064 		ret = add_tracepoint_fields_types(cw, fields, class);
1065 
1066 	return ret;
1067 }
1068 
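/*
 * Declare the 'raw_len'/'raw_data' field pair filled in by
 * add_bpf_output_values().
 */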
static int add_bpf_output_types(struct ctf_writer *cw,
1070 				struct bt_ctf_event_class *class)
1071 {
1072 	struct bt_ctf_field_type *len_type = cw->data.u32;
1073 	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1074 	struct bt_ctf_field_type *seq_type;
1075 	int ret;
1076 
1077 	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1078 	if (ret)
1079 		return ret;
1080 
1081 	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1082 	if (!seq_type)
1083 		return -1;
1084 
1085 	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1086 }
1087 
static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1089 			     struct bt_ctf_event_class *event_class)
1090 {
1091 	u64 type = evsel->core.attr.sample_type;
1092 
1093 	/*
1094 	 * missing:
1095 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
1096 	 *                              ctf event header
1097 	 *   PERF_SAMPLE_READ         - TODO
1098 	 *   PERF_SAMPLE_CALLCHAIN    - TODO
1099 	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
1100 	 *                              are handled separately
1101 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
1102 	 *   PERF_SAMPLE_REGS_USER    - TODO
1103 	 *   PERF_SAMPLE_STACK_USER   - TODO
1104 	 */
1105 
1106 #define ADD_FIELD(cl, t, n)						\
1107 	do {								\
1108 		pr2("  field '%s'\n", n);				\
1109 		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
1111 			return -1;					\
1112 		}							\
1113 	} while (0)
1114 
1115 	if (type & PERF_SAMPLE_IP)
1116 		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1117 
1118 	if (type & PERF_SAMPLE_TID) {
1119 		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1120 		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1121 	}
1122 
1123 	if ((type & PERF_SAMPLE_ID) ||
1124 	    (type & PERF_SAMPLE_IDENTIFIER))
1125 		ADD_FIELD(event_class, cw->data.u64, "perf_id");
1126 
1127 	if (type & PERF_SAMPLE_STREAM_ID)
1128 		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1129 
1130 	if (type & PERF_SAMPLE_PERIOD)
1131 		ADD_FIELD(event_class, cw->data.u64, "perf_period");
1132 
1133 	if (type & PERF_SAMPLE_WEIGHT)
1134 		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1135 
1136 	if (type & PERF_SAMPLE_DATA_SRC)
1137 		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1138 
1139 	if (type & PERF_SAMPLE_TRANSACTION)
1140 		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1141 
1142 	if (type & PERF_SAMPLE_CALLCHAIN) {
1143 		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1144 		ADD_FIELD(event_class,
1145 			bt_ctf_field_type_sequence_create(
1146 				cw->data.u64_hex, "perf_callchain_size"),
1147 			"perf_callchain");
1148 	}
1149 
1150 #undef ADD_FIELD
1151 	return 0;
1152 }
1153 
static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1155 {
1156 	struct bt_ctf_event_class *event_class;
1157 	struct evsel_priv *priv;
1158 	const char *name = perf_evsel__name(evsel);
1159 	int ret;
1160 
1161 	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1162 
1163 	event_class = bt_ctf_event_class_create(name);
1164 	if (!event_class)
1165 		return -1;
1166 
1167 	ret = add_generic_types(cw, evsel, event_class);
1168 	if (ret)
1169 		goto err;
1170 
1171 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1172 		ret = add_tracepoint_types(cw, evsel, event_class);
1173 		if (ret)
1174 			goto err;
1175 	}
1176 
1177 	if (perf_evsel__is_bpf_output(evsel)) {
1178 		ret = add_bpf_output_types(cw, event_class);
1179 		if (ret)
1180 			goto err;
1181 	}
1182 
1183 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1184 	if (ret) {
1185 		pr("Failed to add event class into stream.\n");
1186 		goto err;
1187 	}
1188 
1189 	priv = malloc(sizeof(*priv));
1190 	if (!priv)
1191 		goto err;
1192 
1193 	priv->event_class = event_class;
1194 	evsel->priv       = priv;
1195 	return 0;
1196 
1197 err:
1198 	bt_ctf_event_class_put(event_class);
1199 	pr_err("Failed to add event '%s'.\n", name);
1200 	return -1;
1201 }
1202 
static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1204 {
1205 	struct evlist *evlist = session->evlist;
1206 	struct evsel *evsel;
1207 	int ret;
1208 
1209 	evlist__for_each_entry(evlist, evsel) {
1210 		ret = add_event(cw, evsel);
1211 		if (ret)
1212 			return ret;
1213 	}
1214 	return 0;
1215 }
1216 
1217 #define __NON_SAMPLE_ADD_FIELD(t, n)						\
1218 	do {							\
1219 		pr2("  field '%s'\n", #n);			\
1220 		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s'\n", #n);\
1222 			return -1;				\
1223 		}						\
1224 	} while(0)
1225 
1226 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1227 static int add_##_name##_event(struct ctf_writer *cw)		\
1228 {								\
1229 	struct bt_ctf_event_class *event_class;			\
1230 	int ret;						\
1231 								\
1232 	pr("Adding "#_name" event\n");				\
1233 	event_class = bt_ctf_event_class_create("perf_" #_name);\
1234 	if (!event_class)					\
1235 		return -1;					\
1236 	body							\
1237 								\
1238 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1239 	if (ret) {						\
1240 		pr("Failed to add event class '"#_name"' into stream.\n");\
1241 		return ret;					\
1242 	}							\
1243 								\
1244 	cw->_name##_class = event_class;			\
1245 	bt_ctf_event_class_put(event_class);			\
1246 	return 0;						\
1247 }
1248 
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1250 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1251 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1252 	__NON_SAMPLE_ADD_FIELD(string, comm);
1253 )
1254 
1255 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1256 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1257 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1258 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1259 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1260 	__NON_SAMPLE_ADD_FIELD(u64, time);
1261 )
1262 
1263 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1264 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1265 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1266 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1267 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1268 	__NON_SAMPLE_ADD_FIELD(u64, time);
1269 )
1270 
1271 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1272 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1273 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1274 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1275 	__NON_SAMPLE_ADD_FIELD(string, filename);
1276 )
1277 
1278 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1279 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1280 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1281 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1282 	__NON_SAMPLE_ADD_FIELD(string, filename);
1283 )
1284 #undef __NON_SAMPLE_ADD_FIELD
1285 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1286 
1287 static int setup_non_sample_events(struct ctf_writer *cw,
1288 				   struct perf_session *session __maybe_unused)
1289 {
1290 	int ret;
1291 
1292 	ret = add_comm_event(cw);
1293 	if (ret)
1294 		return ret;
1295 	ret = add_exit_event(cw);
1296 	if (ret)
1297 		return ret;
1298 	ret = add_fork_event(cw);
1299 	if (ret)
1300 		return ret;
1301 	ret = add_mmap_event(cw);
1302 	if (ret)
1303 		return ret;
1304 	ret = add_mmap2_event(cw);
1305 	if (ret)
1306 		return ret;
1307 	return 0;
1308 }
1309 
static void cleanup_events(struct perf_session *session)
1311 {
1312 	struct evlist *evlist = session->evlist;
1313 	struct evsel *evsel;
1314 
1315 	evlist__for_each_entry(evlist, evsel) {
1316 		struct evsel_priv *priv;
1317 
1318 		priv = evsel->priv;
1319 		bt_ctf_event_class_put(priv->event_class);
1320 		zfree(&evsel->priv);
1321 	}
1322 
1323 	evlist__delete(evlist);
1324 	session->evlist = NULL;
1325 }
1326 
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1328 {
1329 	struct ctf_stream **stream;
1330 	struct perf_header *ph = &session->header;
1331 	int ncpus;
1332 
1333 	/*
	 * Try to get the number of CPUs used in the data file;
	 * if it is not present, fall back to MAX_CPUS.
1336 	 */
1337 	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1338 
1339 	stream = zalloc(sizeof(*stream) * ncpus);
1340 	if (!stream) {
1341 		pr_err("Failed to allocate streams.\n");
1342 		return -ENOMEM;
1343 	}
1344 
1345 	cw->stream     = stream;
1346 	cw->stream_cnt = ncpus;
1347 	return 0;
1348 }
1349 
static void free_streams(struct ctf_writer *cw)
1351 {
1352 	int cpu;
1353 
1354 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1355 		ctf_stream__delete(cw->stream[cpu]);
1356 
1357 	zfree(&cw->stream);
1358 }
1359 
static int ctf_writer__setup_env(struct ctf_writer *cw,
1361 				 struct perf_session *session)
1362 {
1363 	struct perf_header *header = &session->header;
1364 	struct bt_ctf_writer *writer = cw->writer;
1365 
1366 #define ADD(__n, __v)							\
1367 do {									\
1368 	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1369 		return -1;						\
1370 } while (0)
1371 
1372 	ADD("host",    header->env.hostname);
1373 	ADD("sysname", "Linux");
1374 	ADD("release", header->env.os_release);
1375 	ADD("version", header->env.version);
1376 	ADD("machine", header->env.arch);
1377 	ADD("domain", "kernel");
1378 	ADD("tracer_name", "perf");
1379 
1380 #undef ADD
1381 	return 0;
1382 }
1383 
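/*
 * Describe the perf clock to CTF: 1 GHz frequency (perf timestamps are
 * in nanoseconds), zero offset and a non-absolute clock.
 */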
static int ctf_writer__setup_clock(struct ctf_writer *cw)
1385 {
1386 	struct bt_ctf_clock *clock = cw->clock;
1387 
1388 	bt_ctf_clock_set_description(clock, "perf clock");
1389 
1390 #define SET(__n, __v)				\
1391 do {						\
1392 	if (bt_ctf_clock_set_##__n(clock, __v))	\
1393 		return -1;			\
1394 } while (0)
1395 
1396 	SET(frequency,   1000000000);
1397 	SET(offset_s,    0);
1398 	SET(offset,      0);
1399 	SET(precision,   10);
1400 	SET(is_absolute, 0);
1401 
1402 #undef SET
1403 	return 0;
1404 }
1405 
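/*
 * Create a CTF integer field type with the given bit size, signedness
 * and display base (decimal or hex), using the host byte order.
 */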
static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1407 {
1408 	struct bt_ctf_field_type *type;
1409 
1410 	type = bt_ctf_field_type_integer_create(size);
1411 	if (!type)
1412 		return NULL;
1413 
1414 	if (sign &&
1415 	    bt_ctf_field_type_integer_set_signed(type, 1))
1416 		goto err;
1417 
1418 	if (hex &&
1419 	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1420 		goto err;
1421 
1422 #if __BYTE_ORDER == __BIG_ENDIAN
1423 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1424 #else
1425 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1426 #endif
1427 
1428 	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
1430 	return type;
1431 
1432 err:
1433 	bt_ctf_field_type_put(type);
1434 	return NULL;
1435 }
1436 
static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1438 {
1439 	unsigned int i;
1440 
1441 	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1442 		bt_ctf_field_type_put(cw->data.array[i]);
1443 }
1444 
static int ctf_writer__init_data(struct ctf_writer *cw)
1446 {
1447 #define CREATE_INT_TYPE(type, size, sign, hex)		\
1448 do {							\
1449 	(type) = create_int_type(size, sign, hex);	\
1450 	if (!(type))					\
1451 		goto err;				\
1452 } while (0)
1453 
1454 	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
1455 	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1456 	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
1457 	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1458 	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1459 	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1460 
1461 	cw->data.string  = bt_ctf_field_type_string_create();
1462 	if (cw->data.string)
1463 		return 0;
1464 
1465 err:
1466 	ctf_writer__cleanup_data(cw);
1467 	pr_err("Failed to create data types.\n");
1468 	return -1;
1469 }
1470 
static void ctf_writer__cleanup(struct ctf_writer *cw)
1472 {
1473 	ctf_writer__cleanup_data(cw);
1474 
1475 	bt_ctf_clock_put(cw->clock);
1476 	free_streams(cw);
1477 	bt_ctf_stream_class_put(cw->stream_class);
1478 	bt_ctf_writer_put(cw->writer);
1479 
1480 	/* and NULL all the pointers */
1481 	memset(cw, 0, sizeof(*cw));
1482 }
1483 
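/*
 * Create the CTF writer, clock, stream class and basic data types, and
 * add the per-packet "cpu_id" context field.
 */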
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
1485 {
1486 	struct bt_ctf_writer		*writer;
1487 	struct bt_ctf_stream_class	*stream_class;
1488 	struct bt_ctf_clock		*clock;
1489 	struct bt_ctf_field_type	*pkt_ctx_type;
1490 	int				ret;
1491 
1492 	/* CTF writer */
1493 	writer = bt_ctf_writer_create(path);
1494 	if (!writer)
1495 		goto err;
1496 
1497 	cw->writer = writer;
1498 
1499 	/* CTF clock */
1500 	clock = bt_ctf_clock_create("perf_clock");
1501 	if (!clock) {
1502 		pr("Failed to create CTF clock.\n");
1503 		goto err_cleanup;
1504 	}
1505 
1506 	cw->clock = clock;
1507 
1508 	if (ctf_writer__setup_clock(cw)) {
1509 		pr("Failed to setup CTF clock.\n");
1510 		goto err_cleanup;
1511 	}
1512 
1513 	/* CTF stream class */
1514 	stream_class = bt_ctf_stream_class_create("perf_stream");
1515 	if (!stream_class) {
1516 		pr("Failed to create CTF stream class.\n");
1517 		goto err_cleanup;
1518 	}
1519 
1520 	cw->stream_class = stream_class;
1521 
1522 	/* CTF clock stream setup */
1523 	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1524 		pr("Failed to assign CTF clock to stream class.\n");
1525 		goto err_cleanup;
1526 	}
1527 
1528 	if (ctf_writer__init_data(cw))
1529 		goto err_cleanup;
1530 
1531 	/* Add cpu_id for packet context */
1532 	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1533 	if (!pkt_ctx_type)
1534 		goto err_cleanup;
1535 
1536 	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1537 	bt_ctf_field_type_put(pkt_ctx_type);
1538 	if (ret)
1539 		goto err_cleanup;
1540 
1541 	/* CTF clock writer setup */
1542 	if (bt_ctf_writer_add_clock(writer, clock)) {
1543 		pr("Failed to assign CTF clock to writer.\n");
1544 		goto err_cleanup;
1545 	}
1546 
1547 	return 0;
1548 
1549 err_cleanup:
1550 	ctf_writer__cleanup(cw);
1551 err:
1552 	pr_err("Failed to setup CTF writer.\n");
1553 	return -1;
1554 }
1555 
static int ctf_writer__flush_streams(struct ctf_writer *cw)
1557 {
1558 	int cpu, ret = 0;
1559 
1560 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1561 		ret = ctf_stream__flush(cw->stream[cpu]);
1562 
1563 	return ret;
1564 }
1565 
static int convert__config(const char *var, const char *value, void *cb)
1567 {
1568 	struct convert *c = cb;
1569 
1570 	if (!strcmp(var, "convert.queue-size"))
1571 		return perf_config_u64(&c->queue_size, var, value);
1572 
1573 	return 0;
1574 }
1575 
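/*
 * Entry point for 'perf data convert --to-ctf': set up the CTF writer,
 * read the perf.data session, convert samples (and optionally
 * non-sample events) and flush all streams.
 */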
int bt_convert__perf2ctf(const char *input, const char *path,
1577 			 struct perf_data_convert_opts *opts)
1578 {
1579 	struct perf_session *session;
1580 	struct perf_data data = {
1581 		.path	   = input,
1582 		.mode      = PERF_DATA_MODE_READ,
1583 		.force     = opts->force,
1584 	};
1585 	struct convert c = {
1586 		.tool = {
1587 			.sample          = process_sample_event,
1588 			.mmap            = perf_event__process_mmap,
1589 			.mmap2           = perf_event__process_mmap2,
1590 			.comm            = perf_event__process_comm,
1591 			.exit            = perf_event__process_exit,
1592 			.fork            = perf_event__process_fork,
1593 			.lost            = perf_event__process_lost,
1594 			.tracing_data    = perf_event__process_tracing_data,
1595 			.build_id        = perf_event__process_build_id,
1596 			.namespaces      = perf_event__process_namespaces,
1597 			.ordered_events  = true,
1598 			.ordering_requires_timestamps = true,
1599 		},
1600 	};
1601 	struct ctf_writer *cw = &c.writer;
1602 	int err;
1603 
1604 	if (opts->all) {
1605 		c.tool.comm = process_comm_event;
1606 		c.tool.exit = process_exit_event;
1607 		c.tool.fork = process_fork_event;
1608 		c.tool.mmap = process_mmap_event;
1609 		c.tool.mmap2 = process_mmap2_event;
1610 	}
1611 
1612 	err = perf_config(convert__config, &c);
1613 	if (err)
1614 		return err;
1615 
1616 	/* CTF writer */
1617 	if (ctf_writer__init(cw, path))
1618 		return -1;
1619 
1620 	err = -1;
1621 	/* perf.data session */
1622 	session = perf_session__new(&data, 0, &c.tool);
1623 	if (IS_ERR(session)) {
1624 		err = PTR_ERR(session);
1625 		goto free_writer;
1626 	}
1627 
1628 	if (c.queue_size) {
1629 		ordered_events__set_alloc_size(&session->ordered_events,
1630 					       c.queue_size);
1631 	}
1632 
1633 	/* CTF writer env/clock setup  */
1634 	if (ctf_writer__setup_env(cw, session))
1635 		goto free_session;
1636 
1637 	/* CTF events setup */
1638 	if (setup_events(cw, session))
1639 		goto free_session;
1640 
1641 	if (opts->all && setup_non_sample_events(cw, session))
1642 		goto free_session;
1643 
1644 	if (setup_streams(cw, session))
1645 		goto free_session;
1646 
1647 	err = perf_session__process_events(session);
1648 	if (!err)
1649 		err = ctf_writer__flush_streams(cw);
1650 	else
1651 		pr_err("Error during conversion.\n");
1652 
1653 	fprintf(stderr,
1654 		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1655 		data.path, path);
1656 
1657 	fprintf(stderr,
1658 		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1659 		(double) c.events_size / 1024.0 / 1024.0,
1660 		c.events_count);
1661 
1662 	if (!c.non_sample_count)
1663 		fprintf(stderr, ") ]\n");
1664 	else
1665 		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1666 
1667 	cleanup_events(session);
1668 	perf_session__delete(session);
1669 	ctf_writer__cleanup(cw);
1670 
1671 	return err;
1672 
1673 free_session:
1674 	perf_session__delete(session);
1675 free_writer:
1676 	ctf_writer__cleanup(cw);
1677 	pr_err("Error during conversion setup.\n");
1678 	return err;
1679 }
1680