1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_synth - synthetic trace events
4 *
5 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20
21 #include "trace_synth.h"
22
23 #undef ERRORS
24 #define ERRORS \
25 C(BAD_NAME, "Illegal name"), \
26 C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\
27 C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
28 C(EVENT_EXISTS, "Event already exists"), \
29 C(TOO_MANY_FIELDS, "Too many fields"), \
30 C(INCOMPLETE_TYPE, "Incomplete type"), \
31 C(INVALID_TYPE, "Invalid type"), \
32 C(INVALID_FIELD, "Invalid field"), \
33 C(INVALID_ARRAY_SPEC, "Invalid array specification"),
34
35 #undef C
36 #define C(a, b) SYNTH_ERR_##a
37
38 enum { ERRORS };
39
40 #undef C
41 #define C(a, b) b
42
43 static const char *err_text[] = { ERRORS };
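
/*
 * ERRORS is an X-macro list: redefining C() before each expansion
 * generates both the SYNTH_ERR_* enum above and the matching err_text[]
 * strings from the same list, so the error codes and their messages
 * cannot drift apart. synth_err() below reports one of these errors
 * along with the position of the offending token within last_cmd.
 */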
44
45 static char last_cmd[MAX_FILTER_STR_VAL];
46
47 static int errpos(const char *str)
48 {
49 return err_pos(last_cmd, str);
50 }
51
52 static void last_cmd_set(const char *str)
53 {
54 if (!str)
55 return;
56
57 strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
58 }
59
60 static void synth_err(u8 err_type, u8 err_pos)
61 {
62 tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
63 err_type, err_pos);
64 }
65
66 static int create_synth_event(const char *raw_command);
67 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
68 static int synth_event_release(struct dyn_event *ev);
69 static bool synth_event_is_busy(struct dyn_event *ev);
70 static bool synth_event_match(const char *system, const char *event,
71 int argc, const char **argv, struct dyn_event *ev);
72
73 static struct dyn_event_operations synth_event_ops = {
74 .create = create_synth_event,
75 .show = synth_event_show,
76 .is_busy = synth_event_is_busy,
77 .free = synth_event_release,
78 .match = synth_event_match,
79 };
80
81 static bool is_synth_event(struct dyn_event *ev)
82 {
83 return ev->ops == &synth_event_ops;
84 }
85
86 static struct synth_event *to_synth_event(struct dyn_event *ev)
87 {
88 return container_of(ev, struct synth_event, devent);
89 }
90
91 static bool synth_event_is_busy(struct dyn_event *ev)
92 {
93 struct synth_event *event = to_synth_event(ev);
94
95 return event->ref != 0;
96 }
97
98 static bool synth_event_match(const char *system, const char *event,
99 int argc, const char **argv, struct dyn_event *ev)
100 {
101 struct synth_event *sev = to_synth_event(ev);
102
103 return strcmp(sev->name, event) == 0 &&
104 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
105 }
106
107 struct synth_trace_event {
108 struct trace_entry ent;
109 u64 fields[];
110 };
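
/*
 * Record layout: a synthetic event record is the common trace_entry
 * header followed by an array of u64 slots. Numeric fields take one
 * slot each, statically-sized strings take STR_VAR_LEN_MAX / sizeof(u64)
 * slots, and dynamic strings take a single slot holding a __data_loc
 * style (length << 16 | offset) word, with the string data itself
 * appended after the fixed slots.
 */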
111
112 static int synth_event_define_fields(struct trace_event_call *call)
113 {
114 struct synth_trace_event trace;
115 int offset = offsetof(typeof(trace), fields);
116 struct synth_event *event = call->data;
117 unsigned int i, size, n_u64;
118 char *name, *type;
119 bool is_signed;
120 int ret = 0;
121
122 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
123 size = event->fields[i]->size;
124 is_signed = event->fields[i]->is_signed;
125 type = event->fields[i]->type;
126 name = event->fields[i]->name;
127 ret = trace_define_field(call, type, name, offset, size,
128 is_signed, FILTER_OTHER);
129 if (ret)
130 break;
131
132 event->fields[i]->offset = n_u64;
133
134 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
135 offset += STR_VAR_LEN_MAX;
136 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
137 } else {
138 offset += sizeof(u64);
139 n_u64++;
140 }
141 }
142
143 event->n_u64 = n_u64;
144
145 return ret;
146 }
147
148 static bool synth_field_signed(char *type)
149 {
150 if (str_has_prefix(type, "u"))
151 return false;
152 if (strcmp(type, "gfp_t") == 0)
153 return false;
154
155 return true;
156 }
157
158 static int synth_field_is_string(char *type)
159 {
160 if (strstr(type, "char[") != NULL)
161 return true;
162
163 return false;
164 }
165
166 static int synth_field_string_size(char *type)
167 {
168 char buf[4], *end, *start;
169 unsigned int len;
170 int size, err;
171
172 start = strstr(type, "char[");
173 if (start == NULL)
174 return -EINVAL;
175 start += sizeof("char[") - 1;
176
177 end = strchr(type, ']');
178 if (!end || end < start || type + strlen(type) > end + 1)
179 return -EINVAL;
180
181 len = end - start;
182 if (len > 3)
183 return -EINVAL;
184
185 if (len == 0)
186 return 0; /* variable-length string */
187
188 strncpy(buf, start, len);
189 buf[len] = '\0';
190
191 err = kstrtouint(buf, 0, &size);
192 if (err)
193 return err;
194
195 if (size > STR_VAR_LEN_MAX)
196 return -EINVAL;
197
198 return size;
199 }
200
201 static int synth_field_size(char *type)
202 {
203 int size = 0;
204
205 if (strcmp(type, "s64") == 0)
206 size = sizeof(s64);
207 else if (strcmp(type, "u64") == 0)
208 size = sizeof(u64);
209 else if (strcmp(type, "s32") == 0)
210 size = sizeof(s32);
211 else if (strcmp(type, "u32") == 0)
212 size = sizeof(u32);
213 else if (strcmp(type, "s16") == 0)
214 size = sizeof(s16);
215 else if (strcmp(type, "u16") == 0)
216 size = sizeof(u16);
217 else if (strcmp(type, "s8") == 0)
218 size = sizeof(s8);
219 else if (strcmp(type, "u8") == 0)
220 size = sizeof(u8);
221 else if (strcmp(type, "char") == 0)
222 size = sizeof(char);
223 else if (strcmp(type, "unsigned char") == 0)
224 size = sizeof(unsigned char);
225 else if (strcmp(type, "int") == 0)
226 size = sizeof(int);
227 else if (strcmp(type, "unsigned int") == 0)
228 size = sizeof(unsigned int);
229 else if (strcmp(type, "long") == 0)
230 size = sizeof(long);
231 else if (strcmp(type, "unsigned long") == 0)
232 size = sizeof(unsigned long);
233 else if (strcmp(type, "bool") == 0)
234 size = sizeof(bool);
235 else if (strcmp(type, "pid_t") == 0)
236 size = sizeof(pid_t);
237 else if (strcmp(type, "gfp_t") == 0)
238 size = sizeof(gfp_t);
239 else if (synth_field_is_string(type))
240 size = synth_field_string_size(type);
241
242 return size;
243 }
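
/*
 * For example, synth_field_size() maps:
 *
 *	"u64"      -> 8
 *	"pid_t"    -> sizeof(pid_t)
 *	"char[16]" -> 16	(statically-sized string)
 *	"char[]"   -> 0		(variable length, becomes a dynamic string)
 *	"foo_t"    -> 0		(unknown type, rejected by the caller)
 */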
244
245 static const char *synth_field_fmt(char *type)
246 {
247 const char *fmt = "%llu";
248
249 if (strcmp(type, "s64") == 0)
250 fmt = "%lld";
251 else if (strcmp(type, "u64") == 0)
252 fmt = "%llu";
253 else if (strcmp(type, "s32") == 0)
254 fmt = "%d";
255 else if (strcmp(type, "u32") == 0)
256 fmt = "%u";
257 else if (strcmp(type, "s16") == 0)
258 fmt = "%d";
259 else if (strcmp(type, "u16") == 0)
260 fmt = "%u";
261 else if (strcmp(type, "s8") == 0)
262 fmt = "%d";
263 else if (strcmp(type, "u8") == 0)
264 fmt = "%u";
265 else if (strcmp(type, "char") == 0)
266 fmt = "%d";
267 else if (strcmp(type, "unsigned char") == 0)
268 fmt = "%u";
269 else if (strcmp(type, "int") == 0)
270 fmt = "%d";
271 else if (strcmp(type, "unsigned int") == 0)
272 fmt = "%u";
273 else if (strcmp(type, "long") == 0)
274 fmt = "%ld";
275 else if (strcmp(type, "unsigned long") == 0)
276 fmt = "%lu";
277 else if (strcmp(type, "bool") == 0)
278 fmt = "%d";
279 else if (strcmp(type, "pid_t") == 0)
280 fmt = "%d";
281 else if (strcmp(type, "gfp_t") == 0)
282 fmt = "%x";
283 else if (synth_field_is_string(type))
284 fmt = "%.*s";
285
286 return fmt;
287 }
288
289 static void print_synth_event_num_val(struct trace_seq *s,
290 char *print_fmt, char *name,
291 int size, u64 val, char *space)
292 {
293 switch (size) {
294 case 1:
295 trace_seq_printf(s, print_fmt, name, (u8)val, space);
296 break;
297
298 case 2:
299 trace_seq_printf(s, print_fmt, name, (u16)val, space);
300 break;
301
302 case 4:
303 trace_seq_printf(s, print_fmt, name, (u32)val, space);
304 break;
305
306 default:
307 trace_seq_printf(s, print_fmt, name, val, space);
308 break;
309 }
310 }
311
312 static enum print_line_t print_synth_event(struct trace_iterator *iter,
313 int flags,
314 struct trace_event *event)
315 {
316 struct trace_array *tr = iter->tr;
317 struct trace_seq *s = &iter->seq;
318 struct synth_trace_event *entry;
319 struct synth_event *se;
320 unsigned int i, n_u64;
321 char print_fmt[32];
322 const char *fmt;
323
324 entry = (struct synth_trace_event *)iter->ent;
325 se = container_of(event, struct synth_event, call.event);
326
327 trace_seq_printf(s, "%s: ", se->name);
328
329 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
330 if (trace_seq_has_overflowed(s))
331 goto end;
332
333 fmt = synth_field_fmt(se->fields[i]->type);
334
335 /* parameter types */
336 if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
337 trace_seq_printf(s, "%s ", fmt);
338
339 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
340
341 /* parameter values */
342 if (se->fields[i]->is_string) {
343 if (se->fields[i]->is_dynamic) {
344 u32 offset, data_offset;
345 char *str_field;
346
347 offset = (u32)entry->fields[n_u64];
348 data_offset = offset & 0xffff;
349
350 str_field = (char *)entry + data_offset;
351
352 trace_seq_printf(s, print_fmt, se->fields[i]->name,
353 STR_VAR_LEN_MAX,
354 str_field,
355 i == se->n_fields - 1 ? "" : " ");
356 n_u64++;
357 } else {
358 trace_seq_printf(s, print_fmt, se->fields[i]->name,
359 STR_VAR_LEN_MAX,
360 (char *)&entry->fields[n_u64],
361 i == se->n_fields - 1 ? "" : " ");
362 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
363 }
364 } else {
365 struct trace_print_flags __flags[] = {
366 __def_gfpflag_names, {-1, NULL} };
367 char *space = (i == se->n_fields - 1 ? "" : " ");
368
369 print_synth_event_num_val(s, print_fmt,
370 se->fields[i]->name,
371 se->fields[i]->size,
372 entry->fields[n_u64],
373 space);
374
375 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
376 trace_seq_puts(s, " (");
377 trace_print_flags_seq(s, "|",
378 entry->fields[n_u64],
379 __flags);
380 trace_seq_putc(s, ')');
381 }
382 n_u64++;
383 }
384 }
385 end:
386 trace_seq_putc(s, '\n');
387
388 return trace_handle_return(s);
389 }
390
391 static struct trace_event_functions synth_event_funcs = {
392 .trace = print_synth_event
393 };
394
395 static unsigned int trace_string(struct synth_trace_event *entry,
396 struct synth_event *event,
397 char *str_val,
398 bool is_dynamic,
399 unsigned int data_size,
400 unsigned int *n_u64)
401 {
402 unsigned int len = 0;
403 char *str_field;
404
405 if (is_dynamic) {
406 u32 data_offset;
407
408 data_offset = offsetof(typeof(*entry), fields);
409 data_offset += event->n_u64 * sizeof(u64);
410 data_offset += data_size;
411
412 str_field = (char *)entry + data_offset;
413
414 len = strlen(str_val) + 1;
415 strscpy(str_field, str_val, len);
416
417 data_offset |= len << 16;
418 *(u32 *)&entry->fields[*n_u64] = data_offset;
419
420 (*n_u64)++;
421 } else {
422 str_field = (char *)&entry->fields[*n_u64];
423
424 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
425 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
426 }
427
428 return len;
429 }
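
/*
 * Example of the dynamic-string encoding used above: for an entry whose
 * fixed u64 area is 24 bytes (n_u64 == 3) and a value "bar" written at
 * data_size 0, the field's slot receives
 * (4 << 16) | (offsetof(struct synth_trace_event, fields) + 24), i.e.
 * the string length including the NUL in the upper 16 bits and the
 * offset of the string data from the start of the entry in the lower
 * 16 bits.
 */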
430
431 static notrace void trace_event_raw_event_synth(void *__data,
432 u64 *var_ref_vals,
433 unsigned int *var_ref_idx)
434 {
435 unsigned int i, n_u64, val_idx, len, data_size = 0;
436 struct trace_event_file *trace_file = __data;
437 struct synth_trace_event *entry;
438 struct trace_event_buffer fbuffer;
439 struct trace_buffer *buffer;
440 struct synth_event *event;
441 int fields_size = 0;
442
443 event = trace_file->event_call->data;
444
445 if (trace_trigger_soft_disabled(trace_file))
446 return;
447
448 fields_size = event->n_u64 * sizeof(u64);
449
450 for (i = 0; i < event->n_dynamic_fields; i++) {
451 unsigned int field_pos = event->dynamic_fields[i]->field_pos;
452 char *str_val;
453
454 val_idx = var_ref_idx[field_pos];
455 str_val = (char *)(long)var_ref_vals[val_idx];
456
457 len = strlen(str_val) + 1;
458
459 fields_size += len;
460 }
461
462 /*
463 * Avoid ring buffer recursion detection, as this event
464 * is being performed within another event.
465 */
466 buffer = trace_file->tr->array_buffer.buffer;
467 ring_buffer_nest_start(buffer);
468
469 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
470 sizeof(*entry) + fields_size);
471 if (!entry)
472 goto out;
473
474 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
475 val_idx = var_ref_idx[i];
476 if (event->fields[i]->is_string) {
477 char *str_val = (char *)(long)var_ref_vals[val_idx];
478
479 len = trace_string(entry, event, str_val,
480 event->fields[i]->is_dynamic,
481 data_size, &n_u64);
482 data_size += len; /* only dynamic string increments */
483 } else {
484 struct synth_field *field = event->fields[i];
485 u64 val = var_ref_vals[val_idx];
486
487 switch (field->size) {
488 case 1:
489 *(u8 *)&entry->fields[n_u64] = (u8)val;
490 break;
491
492 case 2:
493 *(u16 *)&entry->fields[n_u64] = (u16)val;
494 break;
495
496 case 4:
497 *(u32 *)&entry->fields[n_u64] = (u32)val;
498 break;
499
500 default:
501 entry->fields[n_u64] = val;
502 break;
503 }
504 n_u64++;
505 }
506 }
507
508 trace_event_buffer_commit(&fbuffer);
509 out:
510 ring_buffer_nest_end(buffer);
511 }
512
513 static void free_synth_event_print_fmt(struct trace_event_call *call)
514 {
515 if (call) {
516 kfree(call->print_fmt);
517 call->print_fmt = NULL;
518 }
519 }
520
521 static int __set_synth_event_print_fmt(struct synth_event *event,
522 char *buf, int len)
523 {
524 const char *fmt;
525 int pos = 0;
526 int i;
527
528 /* When len=0, we just calculate the needed length */
529 #define LEN_OR_ZERO (len ? len - pos : 0)
530
531 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
532 for (i = 0; i < event->n_fields; i++) {
533 fmt = synth_field_fmt(event->fields[i]->type);
534 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
535 event->fields[i]->name, fmt,
536 i == event->n_fields - 1 ? "" : ", ");
537 }
538 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
539
540 for (i = 0; i < event->n_fields; i++) {
541 if (event->fields[i]->is_string &&
542 event->fields[i]->is_dynamic)
543 pos += snprintf(buf + pos, LEN_OR_ZERO,
544 ", __get_str(%s)", event->fields[i]->name);
545 else
546 pos += snprintf(buf + pos, LEN_OR_ZERO,
547 ", REC->%s", event->fields[i]->name);
548 }
549
550 #undef LEN_OR_ZERO
551
552 /* return the length of print_fmt */
553 return pos;
554 }
555
556 static int set_synth_event_print_fmt(struct trace_event_call *call)
557 {
558 struct synth_event *event = call->data;
559 char *print_fmt;
560 int len;
561
562 /* First: called with 0 length to calculate the needed length */
563 len = __set_synth_event_print_fmt(event, NULL, 0);
564
565 print_fmt = kmalloc(len + 1, GFP_KERNEL);
566 if (!print_fmt)
567 return -ENOMEM;
568
569 /* Second: actually write the @print_fmt */
570 __set_synth_event_print_fmt(event, print_fmt, len + 1);
571 call->print_fmt = print_fmt;
572
573 return 0;
574 }
575
576 static void free_synth_field(struct synth_field *field)
577 {
578 kfree(field->type);
579 kfree(field->name);
580 kfree(field);
581 }
582
583 static int check_field_version(const char *prefix, const char *field_type,
584 const char *field_name)
585 {
586 /*
587 * For backward compatibility, the old synthetic event command
588 * format did not require semicolons, and in order to not
589 * break user space, that old format must still work. If a new
590 * feature is added, then the format that uses the new feature
591 * will be required to have semicolons, as nothing that uses
592 * the old format would be using the new, yet to be created,
593 * feature. When a new feature is added, this will detect it,
594 * and return a number greater than 1, and require the format
595 * to use semicolons.
596 */
597 return 1;
598 }
599
600 static struct synth_field *parse_synth_field(int argc, char **argv,
601 int *consumed, int *field_version)
602 {
603 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
604 struct synth_field *field;
605 int len, ret = -ENOMEM;
606 struct seq_buf s;
607 ssize_t size;
608
609 if (!strcmp(field_type, "unsigned")) {
610 if (argc < 3) {
611 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
612 return ERR_PTR(-EINVAL);
613 }
614 prefix = "unsigned ";
615 field_type = argv[1];
616 field_name = argv[2];
617 *consumed += 3;
618 } else {
619 field_name = argv[1];
620 *consumed += 2;
621 }
622
623 if (!field_name) {
624 synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
625 return ERR_PTR(-EINVAL);
626 }
627
628 *field_version = check_field_version(prefix, field_type, field_name);
629
630 field = kzalloc(sizeof(*field), GFP_KERNEL);
631 if (!field)
632 return ERR_PTR(-ENOMEM);
633
634 len = strlen(field_name);
635 array = strchr(field_name, '[');
636 if (array)
637 len -= strlen(array);
638
639 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
640 if (!field->name)
641 goto free;
642
643 if (!is_good_name(field->name)) {
644 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
645 ret = -EINVAL;
646 goto free;
647 }
648
649 len = strlen(field_type) + 1;
650
651 if (array)
652 len += strlen(array);
653
654 if (prefix)
655 len += strlen(prefix);
656
657 field->type = kzalloc(len, GFP_KERNEL);
658 if (!field->type)
659 goto free;
660
661 seq_buf_init(&s, field->type, len);
662 if (prefix)
663 seq_buf_puts(&s, prefix);
664 seq_buf_puts(&s, field_type);
665 if (array)
666 seq_buf_puts(&s, array);
667 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
668 goto free;
669
670 s.buffer[s.len] = '\0';
671
672 size = synth_field_size(field->type);
673 if (size < 0) {
674 if (array)
675 synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
676 else
677 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
678 ret = -EINVAL;
679 goto free;
680 } else if (size == 0) {
681 if (synth_field_is_string(field->type)) {
682 char *type;
683
684 len = sizeof("__data_loc ") + strlen(field->type) + 1;
685 type = kzalloc(len, GFP_KERNEL);
686 if (!type)
687 goto free;
688
689 seq_buf_init(&s, type, len);
690 seq_buf_puts(&s, "__data_loc ");
691 seq_buf_puts(&s, field->type);
692
693 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
694 goto free;
695 s.buffer[s.len] = '\0';
696
697 kfree(field->type);
698 field->type = type;
699
700 field->is_dynamic = true;
701 size = sizeof(u64);
702 } else {
703 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
704 ret = -EINVAL;
705 goto free;
706 }
707 }
708 field->size = size;
709
710 if (synth_field_is_string(field->type))
711 field->is_string = true;
712
713 field->is_signed = synth_field_signed(field->type);
714 out:
715 return field;
716 free:
717 free_synth_field(field);
718 field = ERR_PTR(ret);
719 goto out;
720 }
721
722 static void free_synth_tracepoint(struct tracepoint *tp)
723 {
724 if (!tp)
725 return;
726
727 kfree(tp->name);
728 kfree(tp);
729 }
730
731 static struct tracepoint *alloc_synth_tracepoint(char *name)
732 {
733 struct tracepoint *tp;
734
735 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
736 if (!tp)
737 return ERR_PTR(-ENOMEM);
738
739 tp->name = kstrdup(name, GFP_KERNEL);
740 if (!tp->name) {
741 kfree(tp);
742 return ERR_PTR(-ENOMEM);
743 }
744
745 return tp;
746 }
747
748 struct synth_event *find_synth_event(const char *name)
749 {
750 struct dyn_event *pos;
751 struct synth_event *event;
752
753 for_each_dyn_event(pos) {
754 if (!is_synth_event(pos))
755 continue;
756 event = to_synth_event(pos);
757 if (strcmp(event->name, name) == 0)
758 return event;
759 }
760
761 return NULL;
762 }
763
764 static struct trace_event_fields synth_event_fields_array[] = {
765 { .type = TRACE_FUNCTION_TYPE,
766 .define_fields = synth_event_define_fields },
767 {}
768 };
769
770 static int register_synth_event(struct synth_event *event)
771 {
772 struct trace_event_call *call = &event->call;
773 int ret = 0;
774
775 event->call.class = &event->class;
776 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
777 if (!event->class.system) {
778 ret = -ENOMEM;
779 goto out;
780 }
781
782 event->tp = alloc_synth_tracepoint(event->name);
783 if (IS_ERR(event->tp)) {
784 ret = PTR_ERR(event->tp);
785 event->tp = NULL;
786 goto out;
787 }
788
789 INIT_LIST_HEAD(&call->class->fields);
790 call->event.funcs = &synth_event_funcs;
791 call->class->fields_array = synth_event_fields_array;
792
793 ret = register_trace_event(&call->event);
794 if (!ret) {
795 ret = -ENODEV;
796 goto out;
797 }
798 call->flags = TRACE_EVENT_FL_TRACEPOINT;
799 call->class->reg = trace_event_reg;
800 call->class->probe = trace_event_raw_event_synth;
801 call->data = event;
802 call->tp = event->tp;
803
804 ret = trace_add_event_call(call);
805 if (ret) {
806 pr_warn("Failed to register synthetic event: %s\n",
807 trace_event_name(call));
808 goto err;
809 }
810
811 ret = set_synth_event_print_fmt(call);
812 if (ret < 0) {
813 trace_remove_event_call(call);
814 goto err;
815 }
816 out:
817 return ret;
818 err:
819 unregister_trace_event(&call->event);
820 goto out;
821 }
822
823 static int unregister_synth_event(struct synth_event *event)
824 {
825 struct trace_event_call *call = &event->call;
826 int ret;
827
828 ret = trace_remove_event_call(call);
829
830 return ret;
831 }
832
833 static void free_synth_event(struct synth_event *event)
834 {
835 unsigned int i;
836
837 if (!event)
838 return;
839
840 for (i = 0; i < event->n_fields; i++)
841 free_synth_field(event->fields[i]);
842
843 kfree(event->fields);
844 kfree(event->dynamic_fields);
845 kfree(event->name);
846 kfree(event->class.system);
847 free_synth_tracepoint(event->tp);
848 free_synth_event_print_fmt(&event->call);
849 kfree(event);
850 }
851
852 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
853 struct synth_field **fields)
854 {
855 unsigned int i, j, n_dynamic_fields = 0;
856 struct synth_event *event;
857
858 event = kzalloc(sizeof(*event), GFP_KERNEL);
859 if (!event) {
860 event = ERR_PTR(-ENOMEM);
861 goto out;
862 }
863
864 event->name = kstrdup(name, GFP_KERNEL);
865 if (!event->name) {
866 kfree(event);
867 event = ERR_PTR(-ENOMEM);
868 goto out;
869 }
870
871 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
872 if (!event->fields) {
873 free_synth_event(event);
874 event = ERR_PTR(-ENOMEM);
875 goto out;
876 }
877
878 for (i = 0; i < n_fields; i++)
879 if (fields[i]->is_dynamic)
880 n_dynamic_fields++;
881
882 if (n_dynamic_fields) {
883 event->dynamic_fields = kcalloc(n_dynamic_fields,
884 sizeof(*event->dynamic_fields),
885 GFP_KERNEL);
886 if (!event->dynamic_fields) {
887 free_synth_event(event);
888 event = ERR_PTR(-ENOMEM);
889 goto out;
890 }
891 }
892
893 dyn_event_init(&event->devent, &synth_event_ops);
894
895 for (i = 0, j = 0; i < n_fields; i++) {
896 fields[i]->field_pos = i;
897 event->fields[i] = fields[i];
898
899 if (fields[i]->is_dynamic)
900 event->dynamic_fields[j++] = fields[i];
901 }
902 event->n_dynamic_fields = j;
903 event->n_fields = n_fields;
904 out:
905 return event;
906 }
907
908 static int synth_event_check_arg_fn(void *data)
909 {
910 struct dynevent_arg_pair *arg_pair = data;
911 int size;
912
913 size = synth_field_size((char *)arg_pair->lhs);
914 if (size == 0) {
915 if (strstr((char *)arg_pair->lhs, "["))
916 return 0;
917 }
918
919 return size ? 0 : -EINVAL;
920 }
921
922 /**
923 * synth_event_add_field - Add a new field to a synthetic event cmd
924 * @cmd: A pointer to the dynevent_cmd struct representing the new event
925 * @type: The type of the new field to add
926 * @name: The name of the new field to add
927 *
928 * Add a new field to a synthetic event cmd object. Field ordering is in
929 * the same order the fields are added.
930 *
931 * See synth_field_size() for available types. If field_name contains
932 * [n] the field is considered to be an array.
933 *
934 * Return: 0 if successful, error otherwise.
935 */
936 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
937 const char *name)
938 {
939 struct dynevent_arg_pair arg_pair;
940 int ret;
941
942 if (cmd->type != DYNEVENT_TYPE_SYNTH)
943 return -EINVAL;
944
945 if (!type || !name)
946 return -EINVAL;
947
948 dynevent_arg_pair_init(&arg_pair, 0, ';');
949
950 arg_pair.lhs = type;
951 arg_pair.rhs = name;
952
953 ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
954 if (ret)
955 return ret;
956
957 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
958 ret = -EINVAL;
959
960 return ret;
961 }
962 EXPORT_SYMBOL_GPL(synth_event_add_field);
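
/*
 * Minimal usage sketch (hypothetical field names; assumes "cmd" was set
 * up with synth_event_cmd_init() and the event was started with
 * synth_event_gen_cmd_start()):
 *
 *	ret = synth_event_add_field(&cmd, "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
 */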
963
964 /**
965 * synth_event_add_field_str - Add a new field to a synthetic event cmd
966 * @cmd: A pointer to the dynevent_cmd struct representing the new event
967 * @type_name: The type and name of the new field to add, as a single string
968 *
969 * Add a new field to a synthetic event cmd object, as a single
970 * string. The @type_name string is expected to be of the form 'type
971 * name', to which a ';' separator will be appended. No sanity checking is done -
972 * what's passed in is assumed to already be well-formed. Field
973 * ordering is in the same order the fields are added.
974 *
975 * See synth_field_size() for available types. If field_name contains
976 * [n] the field is considered to be an array.
977 *
978 * Return: 0 if successful, error otherwise.
979 */
980 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
981 {
982 struct dynevent_arg arg;
983 int ret;
984
985 if (cmd->type != DYNEVENT_TYPE_SYNTH)
986 return -EINVAL;
987
988 if (!type_name)
989 return -EINVAL;
990
991 dynevent_arg_init(&arg, ';');
992
993 arg.str = type_name;
994
995 ret = dynevent_arg_add(cmd, &arg, NULL);
996 if (ret)
997 return ret;
998
999 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1000 ret = -EINVAL;
1001
1002 return ret;
1003 }
1004 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
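
/*
 * Equivalent single-string form of the sketch above (hypothetical name):
 *
 *	ret = synth_event_add_field_str(&cmd, "u64 lat_ns");
 */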
1005
1006 /**
1007 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1008 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1009 * @fields: An array of type/name field descriptions
1010 * @n_fields: The number of field descriptions contained in the fields array
1011 *
1012 * Add a new set of fields to a synthetic event cmd object. The event
1013 * fields that will be defined for the event should be passed in as an
1014 * array of struct synth_field_desc, and the number of elements in the
1015 * array passed in as n_fields. Field ordering will retain the
1016 * ordering given in the fields array.
1017 *
1018 * See synth_field_size() for available types. If field_name contains
1019 * [n] the field is considered to be an array.
1020 *
1021 * Return: 0 if successful, error otherwise.
1022 */
1023 int synth_event_add_fields(struct dynevent_cmd *cmd,
1024 struct synth_field_desc *fields,
1025 unsigned int n_fields)
1026 {
1027 unsigned int i;
1028 int ret = 0;
1029
1030 for (i = 0; i < n_fields; i++) {
1031 if (fields[i].type == NULL || fields[i].name == NULL) {
1032 ret = -EINVAL;
1033 break;
1034 }
1035
1036 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1037 if (ret)
1038 break;
1039 }
1040
1041 return ret;
1042 }
1043 EXPORT_SYMBOL_GPL(synth_event_add_fields);
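
/*
 * Array form, with a hypothetical field description table:
 *
 *	static struct synth_field_desc sched_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *	};
 *
 *	ret = synth_event_add_fields(&cmd, sched_fields,
 *				     ARRAY_SIZE(sched_fields));
 */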
1044
1045 /**
1046 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1047 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1048 * @name: The name of the synthetic event
1049 * @mod: The module creating the event, NULL if not created from a module
1050 * @args: Variable number of arg (pairs), one pair for each field
1051 *
1052 * NOTE: Users normally won't want to call this function directly, but
1053 * rather use the synth_event_gen_cmd_start() wrapper, which
1054 * automatically adds a NULL to the end of the arg list. If this
1055 * function is used directly, make sure the last arg in the variable
1056 * arg list is NULL.
1057 *
1058 * Generate a synthetic event command to be executed by
1059 * synth_event_gen_cmd_end(). This function can be used to generate
1060 * the complete command or only the first part of it; in the latter
1061 * case, synth_event_add_field(), synth_event_add_field_str(), or
1062 * synth_event_add_fields() can be used to add more fields following
1063 * this.
1064 *
1065 * There should be an even number of variable args, each pair consisting
1066 * of a type followed by a field name.
1067 *
1068 * See synth_field_size() for available types. If field_name contains
1069 * [n] the field is considered to be an array.
1070 *
1071 * Return: 0 if successful, error otherwise.
1072 */
1073 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1074 struct module *mod, ...)
1075 {
1076 struct dynevent_arg arg;
1077 va_list args;
1078 int ret;
1079
1080 cmd->event_name = name;
1081 cmd->private_data = mod;
1082
1083 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1084 return -EINVAL;
1085
1086 dynevent_arg_init(&arg, 0);
1087 arg.str = name;
1088 ret = dynevent_arg_add(cmd, &arg, NULL);
1089 if (ret)
1090 return ret;
1091
1092 va_start(args, mod);
1093 for (;;) {
1094 const char *type, *name;
1095
1096 type = va_arg(args, const char *);
1097 if (!type)
1098 break;
1099 name = va_arg(args, const char *);
1100 if (!name)
1101 break;
1102
1103 if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1104 ret = -EINVAL;
1105 break;
1106 }
1107
1108 ret = synth_event_add_field(cmd, type, name);
1109 if (ret)
1110 break;
1111 }
1112 va_end(args);
1113
1114 return ret;
1115 }
1116 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
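
/*
 * Typical use is via the synth_event_gen_cmd_start() wrapper, which
 * NULL-terminates the vararg list. A sketch with hypothetical names
 * ("buf" must be MAX_DYNEVENT_CMD_LEN bytes):
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = synth_event_gen_cmd_start(&cmd, "wakeup_lat", THIS_MODULE,
 *					"pid_t", "pid",
 *					"u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */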
1117
1118 /**
1119 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1120 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1121 * @name: The name of the synthetic event
1122 * @fields: An array of type/name field descriptions
1123 * @n_fields: The number of field descriptions contained in the fields array
1124 *
1125 * Generate a synthetic event command to be executed by
1126 * synth_event_gen_cmd_end(). This function can be used to generate
1127 * the complete command or only the first part of it; in the latter
1128 * case, synth_event_add_field(), synth_event_add_field_str(), or
1129 * synth_event_add_fields() can be used to add more fields following
1130 * this.
1131 *
1132 * The event fields that will be defined for the event should be
1133 * passed in as an array of struct synth_field_desc, and the number of
1134 * elements in the array passed in as n_fields. Field ordering will
1135 * retain the ordering given in the fields array.
1136 *
1137 * See synth_field_size() for available types. If field_name contains
1138 * [n] the field is considered to be an array.
1139 *
1140 * Return: 0 if successful, error otherwise.
1141 */
1142 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1143 struct module *mod,
1144 struct synth_field_desc *fields,
1145 unsigned int n_fields)
1146 {
1147 struct dynevent_arg arg;
1148 unsigned int i;
1149 int ret = 0;
1150
1151 cmd->event_name = name;
1152 cmd->private_data = mod;
1153
1154 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1155 return -EINVAL;
1156
1157 if (n_fields > SYNTH_FIELDS_MAX)
1158 return -EINVAL;
1159
1160 dynevent_arg_init(&arg, 0);
1161 arg.str = name;
1162 ret = dynevent_arg_add(cmd, &arg, NULL);
1163 if (ret)
1164 return ret;
1165
1166 for (i = 0; i < n_fields; i++) {
1167 if (fields[i].type == NULL || fields[i].name == NULL)
1168 return -EINVAL;
1169
1170 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1171 if (ret)
1172 break;
1173 }
1174
1175 return ret;
1176 }
1177 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
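
/*
 * Array-based sketch (hypothetical names; sched_fields[] as in the
 * synth_event_add_fields() example above):
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = synth_event_gen_cmd_array_start(&cmd, "wakeup_lat", THIS_MODULE,
 *					      sched_fields,
 *					      ARRAY_SIZE(sched_fields));
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */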
1178
1179 static int __create_synth_event(const char *name, const char *raw_fields)
1180 {
1181 char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1182 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1183 int consumed, cmd_version = 1, n_fields_this_loop;
1184 int i, argc, n_fields = 0, ret = 0;
1185 struct synth_event *event = NULL;
1186
1187 /*
1188 * Argument syntax:
1189 * - Add synthetic event: <event_name> field[;field] ...
1190 * - Remove synthetic event: !<event_name> field[;field] ...
1191 * where 'field' = type field_name
1192 */
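/*
 * For example, "wakeup_lat pid_t pid; u64 lat_ns" defines a two-field
 * event, and "!wakeup_lat" removes it again (the leading '!' form is
 * handled by create_or_delete_synth_event()).
 */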
1193
1194 if (name[0] == '\0') {
1195 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1196 return -EINVAL;
1197 }
1198
1199 if (!is_good_name(name)) {
1200 synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1201 return -EINVAL;
1202 }
1203
1204 mutex_lock(&event_mutex);
1205
1206 event = find_synth_event(name);
1207 if (event) {
1208 synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1209 ret = -EEXIST;
1210 goto err;
1211 }
1212
1213 tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1214 if (!tmp_fields) {
1215 ret = -ENOMEM;
1216 goto err;
1217 }
1218
1219 while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1220 argv = argv_split(GFP_KERNEL, field_str, &argc);
1221 if (!argv) {
1222 ret = -ENOMEM;
1223 goto err;
1224 }
1225
1226 if (!argc) {
1227 argv_free(argv);
1228 continue;
1229 }
1230
1231 n_fields_this_loop = 0;
1232 consumed = 0;
1233 while (argc > consumed) {
1234 int field_version;
1235
1236 field = parse_synth_field(argc - consumed,
1237 argv + consumed, &consumed,
1238 &field_version);
1239 if (IS_ERR(field)) {
1240 argv_free(argv);
1241 ret = PTR_ERR(field);
1242 goto err;
1243 }
1244
1245 /*
1246 * Track the highest version of any field we
1247 * found in the command.
1248 */
1249 if (field_version > cmd_version)
1250 cmd_version = field_version;
1251
1252 /*
1253 * Now sort out what is and isn't valid for
1254 * each supported version.
1255 *
1256 * If we see more than 1 field per loop, it
1257 * means we have multiple fields between
1258 * semicolons, and that's something we no
1259 * longer support in a version 2 or greater
1260 * command.
1261 */
1262 if (cmd_version > 1 && n_fields_this_loop >= 1) {
1263 synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1264 ret = -EINVAL;
1265 goto err;
1266 }
1267
1268 fields[n_fields++] = field;
1269 if (n_fields == SYNTH_FIELDS_MAX) {
1270 synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1271 ret = -EINVAL;
1272 goto err;
1273 }
1274
1275 n_fields_this_loop++;
1276 }
1277
1278 if (consumed < argc) {
1279 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1280 ret = -EINVAL;
1281 goto err;
1282 }
1283
1284 argv_free(argv);
1285 }
1286
1287 if (n_fields == 0) {
1288 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1289 ret = -EINVAL;
1290 goto err;
1291 }
1292
1293 event = alloc_synth_event(name, n_fields, fields);
1294 if (IS_ERR(event)) {
1295 ret = PTR_ERR(event);
1296 event = NULL;
1297 goto err;
1298 }
1299 ret = register_synth_event(event);
1300 if (!ret)
1301 dyn_event_add(&event->devent, &event->call);
1302 else
1303 free_synth_event(event);
1304 out:
1305 mutex_unlock(&event_mutex);
1306
1307 kfree(saved_fields);
1308
1309 return ret;
1310 err:
1311 for (i = 0; i < n_fields; i++)
1312 free_synth_field(fields[i]);
1313
1314 goto out;
1315 }
1316
1317 /**
1318 * synth_event_create - Create a new synthetic event
1319 * @name: The name of the new synthetic event
1320 * @fields: An array of type/name field descriptions
1321 * @n_fields: The number of field descriptions contained in the fields array
1322 * @mod: The module creating the event, NULL if not created from a module
1323 *
1324 * Create a new synthetic event with the given name under the
1325 * trace/events/synthetic/ directory. The event fields that will be
1326 * defined for the event should be passed in as an array of struct
1327 * synth_field_desc, and the number of elements in the array passed in as
1328 * n_fields. Field ordering will retain the ordering given in the
1329 * fields array.
1330 *
1331 * If the new synthetic event is being created from a module, the mod
1332 * param must be non-NULL. This will ensure that the trace buffer
1333 * won't contain unreadable events.
1334 *
1335 * The new synth event should be deleted using the synth_event_delete()
1336 * function. The new synthetic event can be generated from modules or
1337 * other kernel code using synth_event_trace() and related functions.
1338 *
1339 * Return: 0 if successful, error otherwise.
1340 */
1341 int synth_event_create(const char *name, struct synth_field_desc *fields,
1342 unsigned int n_fields, struct module *mod)
1343 {
1344 struct dynevent_cmd cmd;
1345 char *buf;
1346 int ret;
1347
1348 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1349 if (!buf)
1350 return -ENOMEM;
1351
1352 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1353
1354 ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1355 fields, n_fields);
1356 if (ret)
1357 goto out;
1358
1359 ret = synth_event_gen_cmd_end(&cmd);
1360 out:
1361 kfree(buf);
1362
1363 return ret;
1364 }
1365 EXPORT_SYMBOL_GPL(synth_event_create);
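
/*
 * End-to-end sketch of the simple API (hypothetical module code, error
 * handling trimmed):
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *	};
 *
 *	ret = synth_event_create("wakeup_lat", wakeup_fields,
 *				 ARRAY_SIZE(wakeup_fields), THIS_MODULE);
 */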
1366
1367 static int destroy_synth_event(struct synth_event *se)
1368 {
1369 int ret;
1370
1371 if (se->ref)
1372 return -EBUSY;
1373
1374 if (trace_event_dyn_busy(&se->call))
1375 return -EBUSY;
1376
1377 ret = unregister_synth_event(se);
1378 if (!ret) {
1379 dyn_event_remove(&se->devent);
1380 free_synth_event(se);
1381 }
1382
1383 return ret;
1384 }
1385
1386 /**
1387 * synth_event_delete - Delete a synthetic event
1388 * @event_name: The name of the synthetic event to delete
1389 *
1390 * Delete a synthetic event that was created with synth_event_create().
1391 *
1392 * Return: 0 if successful, error otherwise.
1393 */
1394 int synth_event_delete(const char *event_name)
1395 {
1396 struct synth_event *se = NULL;
1397 struct module *mod = NULL;
1398 int ret = -ENOENT;
1399
1400 mutex_lock(&event_mutex);
1401 se = find_synth_event(event_name);
1402 if (se) {
1403 mod = se->mod;
1404 ret = destroy_synth_event(se);
1405 }
1406 mutex_unlock(&event_mutex);
1407
1408 if (mod) {
1409 mutex_lock(&trace_types_lock);
1410 /*
1411 * It is safest to reset the ring buffer if the module
1412 * being unloaded registered any events that were
1413 * used. The only worry is if a new module gets
1414 * loaded, and takes on the same id as the events of
1415 * this module. When printing out the buffer, traced
1416 * events left over from this module may be passed to
1417 * the new module events and unexpected results may
1418 * occur.
1419 */
1420 tracing_reset_all_online_cpus();
1421 mutex_unlock(&trace_types_lock);
1422 }
1423
1424 return ret;
1425 }
1426 EXPORT_SYMBOL_GPL(synth_event_delete);
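
/*
 * Continuing the sketch above, the event would be torn down on module
 * exit with:
 *
 *	ret = synth_event_delete("wakeup_lat");
 */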
1427
1428 static int check_command(const char *raw_command)
1429 {
1430 char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1431 int argc, ret = 0;
1432
1433 cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1434 if (!cmd)
1435 return -ENOMEM;
1436
1437 name_and_field = strsep(&cmd, ";");
1438 if (!name_and_field) {
1439 ret = -EINVAL;
1440 goto free;
1441 }
1442
1443 if (name_and_field[0] == '!')
1444 goto free;
1445
1446 argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1447 if (!argv) {
1448 ret = -ENOMEM;
1449 goto free;
1450 }
1451 argv_free(argv);
1452
1453 if (argc < 3)
1454 ret = -EINVAL;
1455 free:
1456 kfree(saved_cmd);
1457
1458 return ret;
1459 }
1460
1461 static int create_or_delete_synth_event(const char *raw_command)
1462 {
1463 char *name = NULL, *fields, *p;
1464 int ret = 0;
1465
1466 raw_command = skip_spaces(raw_command);
1467 if (raw_command[0] == '\0')
1468 return ret;
1469
1470 last_cmd_set(raw_command);
1471
1472 ret = check_command(raw_command);
1473 if (ret) {
1474 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1475 return ret;
1476 }
1477
1478 p = strpbrk(raw_command, " \t");
1479 if (!p && raw_command[0] != '!') {
1480 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1481 ret = -EINVAL;
1482 goto free;
1483 }
1484
1485 name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1486 if (!name)
1487 return -ENOMEM;
1488
1489 if (name[0] == '!') {
1490 ret = synth_event_delete(name + 1);
1491 goto free;
1492 }
1493
1494 fields = skip_spaces(p);
1495
1496 ret = __create_synth_event(name, fields);
1497 free:
1498 kfree(name);
1499
1500 return ret;
1501 }
1502
1503 static int synth_event_run_command(struct dynevent_cmd *cmd)
1504 {
1505 struct synth_event *se;
1506 int ret;
1507
1508 ret = create_or_delete_synth_event(cmd->seq.buffer);
1509 if (ret)
1510 return ret;
1511
1512 se = find_synth_event(cmd->event_name);
1513 if (WARN_ON(!se))
1514 return -ENOENT;
1515
1516 se->mod = cmd->private_data;
1517
1518 return ret;
1519 }
1520
1521 /**
1522 * synth_event_cmd_init - Initialize a synthetic event command object
1523 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1524 * @buf: A pointer to the buffer used to build the command
1525 * @maxlen: The length of the buffer passed in @buf
1526 *
1527 * Initialize a synthetic event command object. Use this before
1528 * calling any of the other dynevent_cmd functions.
1529 */
1530 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1531 {
1532 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1533 synth_event_run_command);
1534 }
1535 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1536
1537 static inline int
1538 __synth_event_trace_init(struct trace_event_file *file,
1539 struct synth_event_trace_state *trace_state)
1540 {
1541 int ret = 0;
1542
1543 memset(trace_state, '\0', sizeof(*trace_state));
1544
1545 /*
1546 * Normal event tracing doesn't get called at all unless the
1547 * ENABLED bit is set (which attaches the probe thus allowing
1548 * this code to be called, etc). Because this is called
1549 * directly by the user, we don't have that but we still need
1550 * to honor not logging when disabled. For the iterated
1551 * trace case, we save the enabled state upon start and just
1552 * ignore the following data calls.
1553 */
1554 if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1555 trace_trigger_soft_disabled(file)) {
1556 trace_state->disabled = true;
1557 ret = -ENOENT;
1558 goto out;
1559 }
1560
1561 trace_state->event = file->event_call->data;
1562 out:
1563 return ret;
1564 }
1565
1566 static inline int
1567 __synth_event_trace_start(struct trace_event_file *file,
1568 struct synth_event_trace_state *trace_state,
1569 int dynamic_fields_size)
1570 {
1571 int entry_size, fields_size = 0;
1572 int ret = 0;
1573
1574 fields_size = trace_state->event->n_u64 * sizeof(u64);
1575 fields_size += dynamic_fields_size;
1576
1577 /*
1578 * Avoid ring buffer recursion detection, as this event
1579 * is being performed within another event.
1580 */
1581 trace_state->buffer = file->tr->array_buffer.buffer;
1582 ring_buffer_nest_start(trace_state->buffer);
1583
1584 entry_size = sizeof(*trace_state->entry) + fields_size;
1585 trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1586 file,
1587 entry_size);
1588 if (!trace_state->entry) {
1589 ring_buffer_nest_end(trace_state->buffer);
1590 ret = -EINVAL;
1591 }
1592
1593 return ret;
1594 }
1595
1596 static inline void
1597 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1598 {
1599 trace_event_buffer_commit(&trace_state->fbuffer);
1600
1601 ring_buffer_nest_end(trace_state->buffer);
1602 }
1603
1604 /**
1605 * synth_event_trace - Trace a synthetic event
1606 * @file: The trace_event_file representing the synthetic event
1607 * @n_vals: The number of values in vals
1608 * @args: Variable number of args containing the event values
1609 *
1610 * Trace a synthetic event using the values passed in the variable
1611 * argument list.
1612 *
1613 * The argument list should be a list of 'n_vals' u64 values. The number
1614 * of vals must match the number of fields in the synthetic event, and
1615 * must be in the same order as the synthetic event fields.
1616 *
1617 * All vals should be cast to u64, and string vals are just pointers
1618 * to strings, cast to u64. Strings will be copied into space
1619 * reserved in the event for the string, using these pointers.
1620 *
1621 * Return: 0 on success, err otherwise.
1622 */
1623 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1624 {
1625 unsigned int i, n_u64, len, data_size = 0;
1626 struct synth_event_trace_state state;
1627 va_list args;
1628 int ret;
1629
1630 ret = __synth_event_trace_init(file, &state);
1631 if (ret) {
1632 if (ret == -ENOENT)
1633 ret = 0; /* just disabled, not really an error */
1634 return ret;
1635 }
1636
1637 if (state.event->n_dynamic_fields) {
1638 va_start(args, n_vals);
1639
1640 for (i = 0; i < state.event->n_fields; i++) {
1641 u64 val = va_arg(args, u64);
1642
1643 if (state.event->fields[i]->is_string &&
1644 state.event->fields[i]->is_dynamic) {
1645 char *str_val = (char *)(long)val;
1646
1647 data_size += strlen(str_val) + 1;
1648 }
1649 }
1650
1651 va_end(args);
1652 }
1653
1654 ret = __synth_event_trace_start(file, &state, data_size);
1655 if (ret)
1656 return ret;
1657
1658 if (n_vals != state.event->n_fields) {
1659 ret = -EINVAL;
1660 goto out;
1661 }
1662
1663 data_size = 0;
1664
1665 va_start(args, n_vals);
1666 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1667 u64 val;
1668
1669 val = va_arg(args, u64);
1670
1671 if (state.event->fields[i]->is_string) {
1672 char *str_val = (char *)(long)val;
1673
1674 len = trace_string(state.entry, state.event, str_val,
1675 state.event->fields[i]->is_dynamic,
1676 data_size, &n_u64);
1677 data_size += len; /* only dynamic string increments */
1678 } else {
1679 struct synth_field *field = state.event->fields[i];
1680
1681 switch (field->size) {
1682 case 1:
1683 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1684 break;
1685
1686 case 2:
1687 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1688 break;
1689
1690 case 4:
1691 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1692 break;
1693
1694 default:
1695 state.entry->fields[n_u64] = val;
1696 break;
1697 }
1698 n_u64++;
1699 }
1700 }
1701 va_end(args);
1702 out:
1703 __synth_event_trace_end(&state);
1704
1705 return ret;
1706 }
1707 EXPORT_SYMBOL_GPL(synth_event_trace);
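
/*
 * Sketch (hypothetical names; assumes the "wakeup_lat" event above was
 * created and its trace_event_file looked up, e.g. with
 * trace_get_event_file()):
 *
 *	ret = synth_event_trace(wakeup_lat_file, 2,
 *				(u64)current->pid,
 *				(u64)lat_ns);
 */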
1708
1709 /**
1710 * synth_event_trace_array - Trace a synthetic event from an array
1711 * @file: The trace_event_file representing the synthetic event
1712 * @vals: Array of values
1713 * @n_vals: The number of values in vals
1714 *
1715 * Trace a synthetic event using the values passed in as 'vals'.
1716 *
1717 * The 'vals' array is just an array of 'n_vals' u64. The number of
1718 * vals must match the number of fields in the synthetic event, and
1719 * must be in the same order as the synthetic event fields.
1720 *
1721 * All vals should be cast to u64, and string vals are just pointers
1722 * to strings, cast to u64. Strings will be copied into space
1723 * reserved in the event for the string, using these pointers.
1724 *
1725 * Return: 0 on success, err otherwise.
1726 */
1727 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1728 unsigned int n_vals)
1729 {
1730 unsigned int i, n_u64, field_pos, len, data_size = 0;
1731 struct synth_event_trace_state state;
1732 char *str_val;
1733 int ret;
1734
1735 ret = __synth_event_trace_init(file, &state);
1736 if (ret) {
1737 if (ret == -ENOENT)
1738 ret = 0; /* just disabled, not really an error */
1739 return ret;
1740 }
1741
1742 if (state.event->n_dynamic_fields) {
1743 for (i = 0; i < state.event->n_dynamic_fields; i++) {
1744 field_pos = state.event->dynamic_fields[i]->field_pos;
1745 str_val = (char *)(long)vals[field_pos];
1746 len = strlen(str_val) + 1;
1747 data_size += len;
1748 }
1749 }
1750
1751 ret = __synth_event_trace_start(file, &state, data_size);
1752 if (ret)
1753 return ret;
1754
1755 if (n_vals != state.event->n_fields) {
1756 ret = -EINVAL;
1757 goto out;
1758 }
1759
1760 data_size = 0;
1761
1762 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1763 if (state.event->fields[i]->is_string) {
1764 char *str_val = (char *)(long)vals[i];
1765
1766 len = trace_string(state.entry, state.event, str_val,
1767 state.event->fields[i]->is_dynamic,
1768 data_size, &n_u64);
1769 data_size += len; /* only dynamic string increments */
1770 } else {
1771 struct synth_field *field = state.event->fields[i];
1772 u64 val = vals[i];
1773
1774 switch (field->size) {
1775 case 1:
1776 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1777 break;
1778
1779 case 2:
1780 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1781 break;
1782
1783 case 4:
1784 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1785 break;
1786
1787 default:
1788 state.entry->fields[n_u64] = val;
1789 break;
1790 }
1791 n_u64++;
1792 }
1793 }
1794 out:
1795 __synth_event_trace_end(&state);
1796
1797 return ret;
1798 }
1799 EXPORT_SYMBOL_GPL(synth_event_trace_array);
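
/*
 * Array form of the same sketch:
 *
 *	u64 vals[2] = { (u64)current->pid, (u64)lat_ns };
 *
 *	ret = synth_event_trace_array(wakeup_lat_file, vals, ARRAY_SIZE(vals));
 */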
1800
1801 /**
1802 * synth_event_trace_start - Start piecewise synthetic event trace
1803 * @file: The trace_event_file representing the synthetic event
1804 * @trace_state: A pointer to object tracking the piecewise trace state
1805 *
1806 * Start the trace of a synthetic event field-by-field rather than all
1807 * at once.
1808 *
1809 * This function 'opens' an event trace, which means space is reserved
1810 * for the event in the trace buffer, after which the event's
1811 * individual field values can be set through either
1812 * synth_event_add_next_val() or synth_event_add_val().
1813 *
1814 * A pointer to a trace_state object is passed in, which will keep
1815 * track of the current event trace state until the event trace is
1816 * closed (and the event finally traced) using
1817 * synth_event_trace_end().
1818 *
1819 * Note that synth_event_trace_end() must be called after all values
1820 * have been added for each event trace, regardless of whether adding
1821 * all field values succeeded or not.
1822 *
1823 * Note also that for a given event trace, all fields must be added
1824 * using either synth_event_add_next_val() or synth_event_add_val()
1825 * but not both together or interleaved.
1826 *
1827 * Return: 0 on success, err otherwise.
1828 */
1829 int synth_event_trace_start(struct trace_event_file *file,
1830 struct synth_event_trace_state *trace_state)
1831 {
1832 int ret;
1833
1834 if (!trace_state)
1835 return -EINVAL;
1836
1837 ret = __synth_event_trace_init(file, trace_state);
1838 if (ret) {
1839 if (ret == -ENOENT)
1840 ret = 0; /* just disabled, not really an error */
1841 return ret;
1842 }
1843
1844 if (trace_state->event->n_dynamic_fields)
1845 return -ENOTSUPP;
1846
1847 ret = __synth_event_trace_start(file, trace_state, 0);
1848
1849 return ret;
1850 }
1851 EXPORT_SYMBOL_GPL(synth_event_trace_start);
1852
1853 static int __synth_event_add_val(const char *field_name, u64 val,
1854 struct synth_event_trace_state *trace_state)
1855 {
1856 struct synth_field *field = NULL;
1857 struct synth_trace_event *entry;
1858 struct synth_event *event;
1859 int i, ret = 0;
1860
1861 if (!trace_state) {
1862 ret = -EINVAL;
1863 goto out;
1864 }
1865
1866 /* can't mix add_next_synth_val() with add_synth_val() */
1867 if (field_name) {
1868 if (trace_state->add_next) {
1869 ret = -EINVAL;
1870 goto out;
1871 }
1872 trace_state->add_name = true;
1873 } else {
1874 if (trace_state->add_name) {
1875 ret = -EINVAL;
1876 goto out;
1877 }
1878 trace_state->add_next = true;
1879 }
1880
1881 if (trace_state->disabled)
1882 goto out;
1883
1884 event = trace_state->event;
1885 if (trace_state->add_name) {
1886 for (i = 0; i < event->n_fields; i++) {
1887 field = event->fields[i];
1888 if (strcmp(field->name, field_name) == 0)
1889 break;
1890 }
1891 if (!field) {
1892 ret = -EINVAL;
1893 goto out;
1894 }
1895 } else {
1896 if (trace_state->cur_field >= event->n_fields) {
1897 ret = -EINVAL;
1898 goto out;
1899 }
1900 field = event->fields[trace_state->cur_field++];
1901 }
1902
1903 entry = trace_state->entry;
1904 if (field->is_string) {
1905 char *str_val = (char *)(long)val;
1906 char *str_field;
1907
1908 if (field->is_dynamic) { /* add_val can't do dynamic strings */
1909 ret = -EINVAL;
1910 goto out;
1911 }
1912
1913 if (!str_val) {
1914 ret = -EINVAL;
1915 goto out;
1916 }
1917
1918 str_field = (char *)&entry->fields[field->offset];
1919 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
1920 } else {
1921 switch (field->size) {
1922 case 1:
1923 *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
1924 break;
1925
1926 case 2:
1927 *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
1928 break;
1929
1930 case 4:
1931 *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
1932 break;
1933
1934 default:
1935 trace_state->entry->fields[field->offset] = val;
1936 break;
1937 }
1938 }
1939 out:
1940 return ret;
1941 }
1942
/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set. If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);
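
/*
 * A minimal usage sketch (the event, field, and variable names here are
 * hypothetical): fill a two-field event defined as "pid_t pid; u64 delta"
 * in field order with the piecewise API. @gen_file is assumed to have been
 * looked up by the caller, e.g. with trace_get_event_file().
 */
static inline int example_add_next_vals(struct trace_event_file *gen_file,
					pid_t pid, u64 delta)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(gen_file, &state);
	if (ret)
		return ret;

	/* values must be added in the order the fields were defined */
	ret = synth_event_add_next_val((u64)pid, &state);
	if (!ret)
		ret = synth_event_add_next_val(delta, &state);

	/* trace_end() is required even if adding a value failed */
	synth_event_trace_end(&state);

	return ret;
}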

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value. This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);
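
/*
 * A minimal usage sketch (again with hypothetical names): set the same
 * two fields by name with synth_event_add_val(). Field order doesn't
 * matter, but the per-call name lookup makes this slower than
 * synth_event_add_next_val().
 */
static inline int example_add_named_vals(struct trace_event_file *gen_file,
					 pid_t pid, u64 delta)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(gen_file, &state);
	if (ret)
		return ret;

	ret = synth_event_add_val("delta", delta, &state);
	if (!ret)
		ret = synth_event_add_val("pid", (u64)pid, &state);

	/* trace_end() is required even if adding a value failed */
	synth_event_trace_end(&state);

	return ret;
}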

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace: it commits the reserved event
 * and cleans up any remaining per-trace state.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);

static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	last_cmd_set(raw_command);

	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	name = raw_command;

	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}
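
/*
 * For reference, the command form parsed above (the event and field names
 * are made up):
 *
 *	s:synthetic/block_lat pid_t pid; u64 delta
 *
 * written to the dynamic_events file creates the event (the "synthetic/"
 * group prefix is optional), and "-:block_lat" removes it again through
 * the dyn_event framework.
 */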

static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&event->call))
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}

static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}
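
/*
 * With the hypothetical "block_lat" event above, __synth_event_show()
 * produces
 *
 *	block_lat	pid_t pid; u64 delta
 *
 * in the synthetic_events file, while synth_event_show() prefixes the
 * system for the dynamic_events file:
 *
 *	s:synthetic/block_lat	pid_t pid; u64 delta
 */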

static const struct seq_operations synth_events_seq_op = {
	.start = dyn_event_seq_start,
	.next = dyn_event_seq_next,
	.stop = dyn_event_seq_stop,
	.show = synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open = synth_events_open,
	.write = synth_events_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
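
/*
 * Usage sketch, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 'block_lat pid_t pid; u64 delta' >> /sys/kernel/tracing/synthetic_events
 *	# echo '!block_lat' >> /sys/kernel/tracing/synthetic_events
 *
 * The first command creates a synthetic event through synth_events_write();
 * the second removes it (the '!' prefix is handled by
 * create_or_delete_synth_event()). Opening the file for writing with
 * O_TRUNC ('>' rather than '>>') first releases all existing synthetic
 * events, as synth_events_open() shows.
 */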

/*
 * Register the dynevent ops at core_initcall. This allows the kernel to
 * set up kprobe events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;

	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", 0644, NULL,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);