1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_trigger - trace event triggers
4 *
5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8 #include <linux/security.h>
9 #include <linux/module.h>
10 #include <linux/ctype.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/rculist.h>
14
15 #include "trace.h"
16
17 static LIST_HEAD(trigger_commands);
18 static DEFINE_MUTEX(trigger_cmd_mutex);
19
20 void trigger_data_free(struct event_trigger_data *data)
21 {
22 if (data->cmd_ops->set_filter)
23 data->cmd_ops->set_filter(NULL, data, NULL);
24
25 /* make sure current triggers exit before free */
26 tracepoint_synchronize_unregister();
27
28 kfree(data);
29 }
30
31 /**
32 * event_triggers_call - Call triggers associated with a trace event
33 * @file: The trace_event_file associated with the event
34 * @rec: The trace entry for the event, NULL for unconditional invocation
35 *
36 * For each trigger associated with an event, invoke the trigger
37 * function registered with the associated trigger command. If rec is
38 * non-NULL, it means that the trigger requires further processing and
39 * shouldn't be unconditionally invoked. If rec is non-NULL and the
40 * trigger has a filter associated with it, rec will be checked against
41 * the filter, and if the record matches, the trigger will be invoked.
42 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43 * in any case until the current event is written, the trigger
44 * function isn't invoked but the bit associated with the deferred
45 * trigger is set in the return value.
46 *
47 * Returns an enum event_trigger_type value containing a set bit for
48 * any trigger that should be deferred, ETT_NONE if nothing to defer.
49 *
50 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51 *
52 * Return: an enum event_trigger_type value containing a set bit for
53 * any trigger that should be deferred, ETT_NONE if nothing to defer.
54 */
55 enum event_trigger_type
56 event_triggers_call(struct trace_event_file *file,
57 struct trace_buffer *buffer, void *rec,
58 struct ring_buffer_event *event)
59 {
60 struct event_trigger_data *data;
61 enum event_trigger_type tt = ETT_NONE;
62 struct event_filter *filter;
63
64 if (list_empty(&file->triggers))
65 return tt;
66
67 list_for_each_entry_rcu(data, &file->triggers, list) {
68 if (data->paused)
69 continue;
70 if (!rec) {
71 data->ops->func(data, buffer, rec, event);
72 continue;
73 }
74 filter = rcu_dereference_sched(data->filter);
75 if (filter && !filter_match_preds(filter, rec))
76 continue;
77 if (event_command_post_trigger(data->cmd_ops)) {
78 tt |= data->cmd_ops->trigger_type;
79 continue;
80 }
81 data->ops->func(data, buffer, rec, event);
82 }
83 return tt;
84 }
85 EXPORT_SYMBOL_GPL(event_triggers_call);
86
87 /**
88 * event_triggers_post_call - Call 'post_triggers' for a trace event
89 * @file: The trace_event_file associated with the event
90 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
91 *
92 * For each trigger associated with an event, invoke the trigger
93 * function registered with the associated trigger command, if the
94 * corresponding bit is set in the tt enum passed into this function.
95 * See @event_triggers_call for details on how those bits are set.
96 *
97 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
98 */
99 void
100 event_triggers_post_call(struct trace_event_file *file,
101 enum event_trigger_type tt)
102 {
103 struct event_trigger_data *data;
104
105 list_for_each_entry_rcu(data, &file->triggers, list) {
106 if (data->paused)
107 continue;
108 if (data->cmd_ops->trigger_type & tt)
109 data->ops->func(data, NULL, NULL, NULL);
110 }
111 }
112 EXPORT_SYMBOL_GPL(event_triggers_post_call);
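/*
 * Illustrative sketch (not part of this file): a tracepoint handler that
 * logs an event brackets its ring-buffer commit with the two calls above,
 * roughly like this; my_buffer_commit() is a hypothetical stand-in for
 * whatever actually commits the event:
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, buffer, entry, event);
 *	my_buffer_commit(buffer, event);
 *	if (tt != ETT_NONE)
 *		event_triggers_post_call(file, tt);
 */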
113
114 #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
115
116 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
117 {
118 struct trace_event_file *event_file = event_file_data(m->private);
119
120 if (t == SHOW_AVAILABLE_TRIGGERS) {
121 (*pos)++;
122 return NULL;
123 }
124 return seq_list_next(t, &event_file->triggers, pos);
125 }
126
127 static bool check_user_trigger(struct trace_event_file *file)
128 {
129 struct event_trigger_data *data;
130
131 list_for_each_entry_rcu(data, &file->triggers, list) {
132 if (data->flags & EVENT_TRIGGER_FL_PROBE)
133 continue;
134 return true;
135 }
136 return false;
137 }
138
139 static void *trigger_start(struct seq_file *m, loff_t *pos)
140 {
141 struct trace_event_file *event_file;
142
143 /* ->stop() is called even if ->start() fails */
144 mutex_lock(&event_mutex);
145 event_file = event_file_data(m->private);
146 if (unlikely(!event_file))
147 return ERR_PTR(-ENODEV);
148
149 if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
150 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
151
152 return seq_list_start(&event_file->triggers, *pos);
153 }
154
155 static void trigger_stop(struct seq_file *m, void *t)
156 {
157 mutex_unlock(&event_mutex);
158 }
159
160 static int trigger_show(struct seq_file *m, void *v)
161 {
162 struct event_trigger_data *data;
163 struct event_command *p;
164
165 if (v == SHOW_AVAILABLE_TRIGGERS) {
166 seq_puts(m, "# Available triggers:\n");
167 seq_putc(m, '#');
168 mutex_lock(&trigger_cmd_mutex);
169 list_for_each_entry_reverse(p, &trigger_commands, list)
170 seq_printf(m, " %s", p->name);
171 seq_putc(m, '\n');
172 mutex_unlock(&trigger_cmd_mutex);
173 return 0;
174 }
175
176 data = list_entry(v, struct event_trigger_data, list);
177 data->ops->print(m, data->ops, data);
178
179 return 0;
180 }
181
182 static const struct seq_operations event_triggers_seq_ops = {
183 .start = trigger_start,
184 .next = trigger_next,
185 .stop = trigger_stop,
186 .show = trigger_show,
187 };
188
189 static int event_trigger_regex_open(struct inode *inode, struct file *file)
190 {
191 int ret;
192
193 ret = security_locked_down(LOCKDOWN_TRACEFS);
194 if (ret)
195 return ret;
196
197 mutex_lock(&event_mutex);
198
199 if (unlikely(!event_file_data(file))) {
200 mutex_unlock(&event_mutex);
201 return -ENODEV;
202 }
203
204 if ((file->f_mode & FMODE_WRITE) &&
205 (file->f_flags & O_TRUNC)) {
206 struct trace_event_file *event_file;
207 struct event_command *p;
208
209 event_file = event_file_data(file);
210
211 list_for_each_entry(p, &trigger_commands, list) {
212 if (p->unreg_all)
213 p->unreg_all(event_file);
214 }
215 }
216
217 if (file->f_mode & FMODE_READ) {
218 ret = seq_open(file, &event_triggers_seq_ops);
219 if (!ret) {
220 struct seq_file *m = file->private_data;
221 m->private = file;
222 }
223 }
224
225 mutex_unlock(&event_mutex);
226
227 return ret;
228 }
229
230 int trigger_process_regex(struct trace_event_file *file, char *buff)
231 {
232 char *command, *next;
233 struct event_command *p;
234 int ret = -EINVAL;
235
236 next = buff = skip_spaces(buff);
237 command = strsep(&next, ": \t");
238 if (next) {
239 next = skip_spaces(next);
240 if (!*next)
241 next = NULL;
242 }
243 command = (command[0] != '!') ? command : command + 1;
244
245 mutex_lock(&trigger_cmd_mutex);
246 list_for_each_entry(p, &trigger_commands, list) {
247 if (strcmp(p->name, command) == 0) {
248 ret = p->func(p, file, buff, command, next);
249 goto out_unlock;
250 }
251 }
252 out_unlock:
253 mutex_unlock(&trigger_cmd_mutex);
254
255 return ret;
256 }
257
258 static ssize_t event_trigger_regex_write(struct file *file,
259 const char __user *ubuf,
260 size_t cnt, loff_t *ppos)
261 {
262 struct trace_event_file *event_file;
263 ssize_t ret;
264 char *buf;
265
266 if (!cnt)
267 return 0;
268
269 if (cnt >= PAGE_SIZE)
270 return -EINVAL;
271
272 buf = memdup_user_nul(ubuf, cnt);
273 if (IS_ERR(buf))
274 return PTR_ERR(buf);
275
276 strim(buf);
277
278 mutex_lock(&event_mutex);
279 event_file = event_file_data(file);
280 if (unlikely(!event_file)) {
281 mutex_unlock(&event_mutex);
282 kfree(buf);
283 return -ENODEV;
284 }
285 ret = trigger_process_regex(event_file, buf);
286 mutex_unlock(&event_mutex);
287
288 kfree(buf);
289 if (ret < 0)
290 goto out;
291
292 *ppos += cnt;
293 ret = cnt;
294 out:
295 return ret;
296 }
297
298 static int event_trigger_regex_release(struct inode *inode, struct file *file)
299 {
300 mutex_lock(&event_mutex);
301
302 if (file->f_mode & FMODE_READ)
303 seq_release(inode, file);
304
305 mutex_unlock(&event_mutex);
306
307 return 0;
308 }
309
310 static ssize_t
311 event_trigger_write(struct file *filp, const char __user *ubuf,
312 size_t cnt, loff_t *ppos)
313 {
314 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
315 }
316
317 static int
318 event_trigger_open(struct inode *inode, struct file *filp)
319 {
320 /* Checks for tracefs lockdown */
321 return event_trigger_regex_open(inode, filp);
322 }
323
324 static int
325 event_trigger_release(struct inode *inode, struct file *file)
326 {
327 return event_trigger_regex_release(inode, file);
328 }
329
330 const struct file_operations event_trigger_fops = {
331 .open = event_trigger_open,
332 .read = seq_read,
333 .write = event_trigger_write,
334 .llseek = tracing_lseek,
335 .release = event_trigger_release,
336 };
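/*
 * These fops back the per-event "trigger" file in tracefs. A hedged
 * example of the resulting interface (assuming the usual
 * /sys/kernel/tracing mount point; the exact command list depends on
 * what has been registered):
 *
 *	# cat /sys/kernel/tracing/events/sched/sched_switch/trigger
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event ...
 *
 * Writing a command string to the same file registers a trigger, and a
 * leading '!' removes it (see trigger_process_regex() above).
 */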
337
338 /*
339 * Currently we only register event commands from __init, so mark this
340 * __init too.
341 */
342 __init int register_event_command(struct event_command *cmd)
343 {
344 struct event_command *p;
345 int ret = 0;
346
347 mutex_lock(&trigger_cmd_mutex);
348 list_for_each_entry(p, &trigger_commands, list) {
349 if (strcmp(cmd->name, p->name) == 0) {
350 ret = -EBUSY;
351 goto out_unlock;
352 }
353 }
354 list_add(&cmd->list, &trigger_commands);
355 out_unlock:
356 mutex_unlock(&trigger_cmd_mutex);
357
358 return ret;
359 }
360
361 /*
362 * Currently we only unregister event commands from __init, so mark
363 * this __init too.
364 */
365 __init int unregister_event_command(struct event_command *cmd)
366 {
367 struct event_command *p, *n;
368 int ret = -ENODEV;
369
370 mutex_lock(&trigger_cmd_mutex);
371 list_for_each_entry_safe(p, n, &trigger_commands, list) {
372 if (strcmp(cmd->name, p->name) == 0) {
373 ret = 0;
374 list_del_init(&p->list);
375 goto out_unlock;
376 }
377 }
378 out_unlock:
379 mutex_unlock(&trigger_cmd_mutex);
380
381 return ret;
382 }
383
384 /**
385 * event_trigger_print - Generic event_trigger_ops @print implementation
386 * @name: The name of the event trigger
387 * @m: The seq_file being printed to
388 * @data: Trigger-specific data
389 * @filter_str: filter_str to print, if present
390 *
391 * Common implementation for event triggers to print themselves.
392 *
393 * Usually wrapped by a function that simply sets the @name of the
394 * trigger command and then invokes this.
395 *
396 * Return: 0 on success, errno otherwise
397 */
398 static int
399 event_trigger_print(const char *name, struct seq_file *m,
400 void *data, char *filter_str)
401 {
402 long count = (long)data;
403
404 seq_puts(m, name);
405
406 if (count == -1)
407 seq_puts(m, ":unlimited");
408 else
409 seq_printf(m, ":count=%ld", count);
410
411 if (filter_str)
412 seq_printf(m, " if %s\n", filter_str);
413 else
414 seq_putc(m, '\n');
415
416 return 0;
417 }
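/*
 * For reference, a hedged example of the lines this helper emits when a
 * "trigger" file is read (the exact text depends on how each trigger was
 * registered):
 *
 *	traceoff:unlimited
 *	traceon:count=5 if common_pid == 1
 */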
418
419 /**
420 * event_trigger_init - Generic event_trigger_ops @init implementation
421 * @ops: The trigger ops associated with the trigger
422 * @data: Trigger-specific data
423 *
424 * Common implementation of event trigger initialization.
425 *
426 * Usually used directly as the @init method in event trigger
427 * implementations.
428 *
429 * Return: 0 on success, errno otherwise
430 */
431 int event_trigger_init(struct event_trigger_ops *ops,
432 struct event_trigger_data *data)
433 {
434 data->ref++;
435 return 0;
436 }
437
438 /**
439 * event_trigger_free - Generic event_trigger_ops @free implementation
440 * @ops: The trigger ops associated with the trigger
441 * @data: Trigger-specific data
442 *
443 * Common implementation of event trigger de-initialization.
444 *
445 * Usually used directly as the @free method in event trigger
446 * implementations.
447 */
448 static void
449 event_trigger_free(struct event_trigger_ops *ops,
450 struct event_trigger_data *data)
451 {
452 if (WARN_ON_ONCE(data->ref <= 0))
453 return;
454
455 data->ref--;
456 if (!data->ref)
457 trigger_data_free(data);
458 }
459
460 int trace_event_trigger_enable_disable(struct trace_event_file *file,
461 int trigger_enable)
462 {
463 int ret = 0;
464
465 if (trigger_enable) {
466 if (atomic_inc_return(&file->tm_ref) > 1)
467 return ret;
468 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
469 ret = trace_event_enable_disable(file, 1, 1);
470 } else {
471 if (atomic_dec_return(&file->tm_ref) > 0)
472 return ret;
473 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
474 ret = trace_event_enable_disable(file, 0, 1);
475 }
476
477 return ret;
478 }
479
480 /**
481 * clear_event_triggers - Clear all triggers associated with a trace array
482 * @tr: The trace array to clear
483 *
484 * For each trigger, the triggering event has its tm_ref decremented
485 * via trace_event_trigger_enable_disable(), and any associated event
486 * (in the case of enable/disable_event triggers) will have its sm_ref
487 * decremented via free()->trace_event_enable_disable(). That
488 * combination effectively reverses the soft-mode/trigger state added
489 * by trigger registration.
490 *
491 * Must be called with event_mutex held.
492 */
493 void
494 clear_event_triggers(struct trace_array *tr)
495 {
496 struct trace_event_file *file;
497
498 list_for_each_entry(file, &tr->events, list) {
499 struct event_trigger_data *data, *n;
500 list_for_each_entry_safe(data, n, &file->triggers, list) {
501 trace_event_trigger_enable_disable(file, 0);
502 list_del_rcu(&data->list);
503 if (data->ops->free)
504 data->ops->free(data->ops, data);
505 }
506 }
507 }
508
509 /**
510 * update_cond_flag - Set or reset the TRIGGER_COND bit
511 * @file: The trace_event_file associated with the event
512 *
513 * If an event has triggers and any of those triggers has a filter or
514 * a post_trigger, trigger invocation needs to be deferred until after
515 * the current event has logged its data, and the event should have
516 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
517 * cleared.
518 */
519 void update_cond_flag(struct trace_event_file *file)
520 {
521 struct event_trigger_data *data;
522 bool set_cond = false;
523
524 lockdep_assert_held(&event_mutex);
525
526 list_for_each_entry(data, &file->triggers, list) {
527 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
528 event_command_needs_rec(data->cmd_ops)) {
529 set_cond = true;
530 break;
531 }
532 }
533
534 if (set_cond)
535 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
536 else
537 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
538 }
539
540 /**
541 * register_trigger - Generic event_command @reg implementation
542 * @glob: The raw string used to register the trigger
543 * @ops: The trigger ops associated with the trigger
544 * @data: Trigger-specific data to associate with the trigger
545 * @file: The trace_event_file associated with the event
546 *
547 * Common implementation for event trigger registration.
548 *
549 * Usually used directly as the @reg method in event command
550 * implementations.
551 *
552 * Return: 0 on success, errno otherwise
553 */
554 static int register_trigger(char *glob, struct event_trigger_ops *ops,
555 struct event_trigger_data *data,
556 struct trace_event_file *file)
557 {
558 struct event_trigger_data *test;
559 int ret = 0;
560
561 lockdep_assert_held(&event_mutex);
562
563 list_for_each_entry(test, &file->triggers, list) {
564 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
565 ret = -EEXIST;
566 goto out;
567 }
568 }
569
570 if (data->ops->init) {
571 ret = data->ops->init(data->ops, data);
572 if (ret < 0)
573 goto out;
574 }
575
576 list_add_rcu(&data->list, &file->triggers);
577 ret++;
578
579 update_cond_flag(file);
580 if (trace_event_trigger_enable_disable(file, 1) < 0) {
581 list_del_rcu(&data->list);
582 update_cond_flag(file);
583 ret--;
584 }
585 out:
586 return ret;
587 }
588
589 /**
590 * unregister_trigger - Generic event_command @unreg implementation
591 * @glob: The raw string used to register the trigger
592 * @ops: The trigger ops associated with the trigger
593 * @test: Trigger-specific data used to find the trigger to remove
594 * @file: The trace_event_file associated with the event
595 *
596 * Common implementation for event trigger unregistration.
597 *
598 * Usually used directly as the @unreg method in event command
599 * implementations.
600 */
601 static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
602 struct event_trigger_data *test,
603 struct trace_event_file *file)
604 {
605 struct event_trigger_data *data;
606 bool unregistered = false;
607
608 lockdep_assert_held(&event_mutex);
609
610 list_for_each_entry(data, &file->triggers, list) {
611 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
612 unregistered = true;
613 list_del_rcu(&data->list);
614 trace_event_trigger_enable_disable(file, 0);
615 update_cond_flag(file);
616 break;
617 }
618 }
619
620 if (unregistered && data->ops->free)
621 data->ops->free(data->ops, data);
622 }
623
624 /**
625 * event_trigger_callback - Generic event_command @func implementation
626 * @cmd_ops: The command ops, used for trigger registration
627 * @file: The trace_event_file associated with the event
628 * @glob: The raw string used to register the trigger
629 * @cmd: The cmd portion of the string used to register the trigger
630 * @param: The params portion of the string used to register the trigger
631 *
632 * Common implementation for event command parsing and trigger
633 * instantiation.
634 *
635 * Usually used directly as the @func method in event command
636 * implementations.
637 *
638 * Return: 0 on success, errno otherwise
639 */
640 static int
641 event_trigger_callback(struct event_command *cmd_ops,
642 struct trace_event_file *file,
643 char *glob, char *cmd, char *param)
644 {
645 struct event_trigger_data *trigger_data;
646 struct event_trigger_ops *trigger_ops;
647 char *trigger = NULL;
648 char *number;
649 int ret;
650
651 /* separate the trigger from the filter (t:n [if filter]) */
652 if (param && isdigit(param[0])) {
653 trigger = strsep(&param, " \t");
654 if (param) {
655 param = skip_spaces(param);
656 if (!*param)
657 param = NULL;
658 }
659 }
660
661 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
662
663 ret = -ENOMEM;
664 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
665 if (!trigger_data)
666 goto out;
667
668 trigger_data->count = -1;
669 trigger_data->ops = trigger_ops;
670 trigger_data->cmd_ops = cmd_ops;
671 trigger_data->private_data = file;
672 INIT_LIST_HEAD(&trigger_data->list);
673 INIT_LIST_HEAD(&trigger_data->named_list);
674
675 if (glob[0] == '!') {
676 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
677 kfree(trigger_data);
678 ret = 0;
679 goto out;
680 }
681
682 if (trigger) {
683 number = strsep(&trigger, ":");
684
685 ret = -EINVAL;
686 if (!strlen(number))
687 goto out_free;
688
689 /*
690 * We use the callback data field (which is a pointer)
691 * as our counter.
692 */
693 ret = kstrtoul(number, 0, &trigger_data->count);
694 if (ret)
695 goto out_free;
696 }
697
698 if (!param) /* if param is non-empty, it's supposed to be a filter */
699 goto out_reg;
700
701 if (!cmd_ops->set_filter)
702 goto out_reg;
703
704 ret = cmd_ops->set_filter(param, trigger_data, file);
705 if (ret < 0)
706 goto out_free;
707
708 out_reg:
709 /* Up the trigger_data count to make sure reg doesn't free it on failure */
710 event_trigger_init(trigger_ops, trigger_data);
711 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
712 /*
713 * The above returns on success the # of functions enabled,
714 * but if it didn't find any functions it returns zero.
715 * Consider no functions a failure too.
716 */
717 if (!ret) {
718 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
719 ret = -ENOENT;
720 } else if (ret > 0)
721 ret = 0;
722
723 /* Down the counter of trigger_data or free it if not used anymore */
724 event_trigger_free(trigger_ops, trigger_data);
725 out:
726 return ret;
727
728 out_free:
729 if (cmd_ops->set_filter)
730 cmd_ops->set_filter(NULL, trigger_data, NULL);
731 kfree(trigger_data);
732 goto out;
733 }
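/*
 * Hedged example of the command syntax this callback parses, written to a
 * per-event "trigger" file (event path and filter are illustrative only):
 *
 *	# echo 'traceoff:5 if prev_prio < 100' > \
 *		events/sched/sched_switch/trigger
 *	# echo '!traceoff' > events/sched/sched_switch/trigger
 *
 * In the first write, @cmd is "traceoff" and @param is
 * "5 if prev_prio < 100"; the leading '!' in the second write takes the
 * unregistration path above.
 */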
734
735 /**
736 * set_trigger_filter - Generic event_command @set_filter implementation
737 * @filter_str: The filter string for the trigger, NULL to remove filter
738 * @trigger_data: Trigger-specific data
739 * @file: The trace_event_file associated with the event
740 *
741 * Common implementation for event command filter parsing and filter
742 * instantiation.
743 *
744 * Usually used directly as the @set_filter method in event command
745 * implementations.
746 *
747 * Also used to remove a filter (if filter_str = NULL).
748 *
749 * Return: 0 on success, errno otherwise
750 */
751 int set_trigger_filter(char *filter_str,
752 struct event_trigger_data *trigger_data,
753 struct trace_event_file *file)
754 {
755 struct event_trigger_data *data = trigger_data;
756 struct event_filter *filter = NULL, *tmp;
757 int ret = -EINVAL;
758 char *s;
759
760 if (!filter_str) /* clear the current filter */
761 goto assign;
762
763 s = strsep(&filter_str, " \t");
764
765 if (!strlen(s) || strcmp(s, "if") != 0)
766 goto out;
767
768 if (!filter_str)
769 goto out;
770
771 /* The filter is for the 'trigger' event, not the triggered event */
772 ret = create_event_filter(file->tr, file->event_call,
773 filter_str, false, &filter);
774 /*
775 * If create_event_filter() fails, filter still needs to be freed.
776 * Which the calling code will do with data->filter.
777 */
778 assign:
779 tmp = rcu_access_pointer(data->filter);
780
781 rcu_assign_pointer(data->filter, filter);
782
783 if (tmp) {
784 /* Make sure the call is done with the filter */
785 tracepoint_synchronize_unregister();
786 free_event_filter(tmp);
787 }
788
789 kfree(data->filter_str);
790 data->filter_str = NULL;
791
792 if (filter_str) {
793 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
794 if (!data->filter_str) {
795 free_event_filter(rcu_access_pointer(data->filter));
796 data->filter = NULL;
797 ret = -ENOMEM;
798 }
799 }
800 out:
801 return ret;
802 }
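/*
 * A small, hedged illustration: for a command such as
 * 'snapshot:1 if common_pid != 0', @filter_str arrives here as
 * "if common_pid != 0"; the leading "if" is stripped and the remainder is
 * handed to create_event_filter(). A NULL @filter_str (as passed from
 * trigger_data_free()) simply drops any existing filter.
 */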
803
804 static LIST_HEAD(named_triggers);
805
806 /**
807 * find_named_trigger - Find the common named trigger associated with @name
808 * @name: The name of the set of named triggers to find the common data for
809 *
810 * Named triggers are sets of triggers that share a common set of
811 * trigger data. The first named trigger registered with a given name
812 * owns the common trigger data that the others subsequently
813 * registered with the same name will reference. This function
814 * returns the common trigger data associated with that first
815 * registered instance.
816 *
817 * Return: the common trigger data for the given named trigger on
818 * success, NULL otherwise.
819 */
820 struct event_trigger_data *find_named_trigger(const char *name)
821 {
822 struct event_trigger_data *data;
823
824 if (!name)
825 return NULL;
826
827 list_for_each_entry(data, &named_triggers, named_list) {
828 if (data->named_data)
829 continue;
830 if (strcmp(data->name, name) == 0)
831 return data;
832 }
833
834 return NULL;
835 }
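/*
 * Hedged example: hist triggers are the main user of named triggers. Two
 * events can share one histogram by registering the same name (paths
 * relative to /sys/kernel/tracing):
 *
 *	# echo 'hist:name=foo:keys=common_pid' > events/sched/sched_waking/trigger
 *	# echo 'hist:name=foo:keys=common_pid' > events/sched/sched_switch/trigger
 *
 * The first registration owns the common trigger data; the second finds
 * it via find_named_trigger("foo") and references it.
 */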
836
837 /**
838 * is_named_trigger - determine if a given trigger is a named trigger
839 * @test: The trigger data to test
840 *
841 * Return: true if 'test' is a named trigger, false otherwise.
842 */
843 bool is_named_trigger(struct event_trigger_data *test)
844 {
845 struct event_trigger_data *data;
846
847 list_for_each_entry(data, &named_triggers, named_list) {
848 if (test == data)
849 return true;
850 }
851
852 return false;
853 }
854
855 /**
856 * save_named_trigger - save the trigger in the named trigger list
857 * @name: The name of the named trigger set
858 * @data: The trigger data to save
859 *
860 * Return: 0 if successful, negative error otherwise.
861 */
862 int save_named_trigger(const char *name, struct event_trigger_data *data)
863 {
864 data->name = kstrdup(name, GFP_KERNEL);
865 if (!data->name)
866 return -ENOMEM;
867
868 list_add(&data->named_list, &named_triggers);
869
870 return 0;
871 }
872
873 /**
874 * del_named_trigger - delete a trigger from the named trigger list
875 * @data: The trigger data to delete
876 */
877 void del_named_trigger(struct event_trigger_data *data)
878 {
879 kfree(data->name);
880 data->name = NULL;
881
882 list_del(&data->named_list);
883 }
884
885 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
886 {
887 struct event_trigger_data *test;
888
889 list_for_each_entry(test, &named_triggers, named_list) {
890 if (strcmp(test->name, data->name) == 0) {
891 if (pause) {
892 test->paused_tmp = test->paused;
893 test->paused = true;
894 } else {
895 test->paused = test->paused_tmp;
896 }
897 }
898 }
899 }
900
901 /**
902 * pause_named_trigger - Pause all named triggers with the same name
903 * @data: The trigger data of a named trigger to pause
904 *
905 * Pauses a named trigger along with all other triggers having the
906 * same name. Because named triggers share a common set of data,
907 * pausing only one is meaningless, so pausing one named trigger needs
908 * to pause all triggers with the same name.
909 */
910 void pause_named_trigger(struct event_trigger_data *data)
911 {
912 __pause_named_trigger(data, true);
913 }
914
915 /**
916 * unpause_named_trigger - Un-pause all named triggers with the same name
917 * @data: The trigger data of a named trigger to unpause
918 *
919 * Un-pauses a named trigger along with all other triggers having the
920 * same name. Because named triggers share a common set of data,
921 * unpausing only one is meaningless, so unpausing one named trigger
922 * needs to unpause all triggers with the same name.
923 */
924 void unpause_named_trigger(struct event_trigger_data *data)
925 {
926 __pause_named_trigger(data, false);
927 }
928
929 /**
930 * set_named_trigger_data - Associate common named trigger data
931 * @data: The trigger data to associate
932 * @named_data: The common named trigger to be associated
933 *
934 * Named triggers are sets of triggers that share a common set of
935 * trigger data. The first named trigger registered with a given name
936 * owns the common trigger data that the others subsequently
937 * registered with the same name will reference. This function
938 * associates the common trigger data from the first trigger with the
939 * given trigger.
940 */
941 void set_named_trigger_data(struct event_trigger_data *data,
942 struct event_trigger_data *named_data)
943 {
944 data->named_data = named_data;
945 }
946
947 struct event_trigger_data *
948 get_named_trigger_data(struct event_trigger_data *data)
949 {
950 return data->named_data;
951 }
952
953 static void
954 traceon_trigger(struct event_trigger_data *data,
955 struct trace_buffer *buffer, void *rec,
956 struct ring_buffer_event *event)
957 {
958 if (tracing_is_on())
959 return;
960
961 tracing_on();
962 }
963
964 static void
965 traceon_count_trigger(struct event_trigger_data *data,
966 struct trace_buffer *buffer, void *rec,
967 struct ring_buffer_event *event)
968 {
969 if (tracing_is_on())
970 return;
971
972 if (!data->count)
973 return;
974
975 if (data->count != -1)
976 (data->count)--;
977
978 tracing_on();
979 }
980
981 static void
982 traceoff_trigger(struct event_trigger_data *data,
983 struct trace_buffer *buffer, void *rec,
984 struct ring_buffer_event *event)
985 {
986 if (!tracing_is_on())
987 return;
988
989 tracing_off();
990 }
991
992 static void
993 traceoff_count_trigger(struct event_trigger_data *data,
994 struct trace_buffer *buffer, void *rec,
995 struct ring_buffer_event *event)
996 {
997 if (!tracing_is_on())
998 return;
999
1000 if (!data->count)
1001 return;
1002
1003 if (data->count != -1)
1004 (data->count)--;
1005
1006 tracing_off();
1007 }
1008
1009 static int
1010 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1011 struct event_trigger_data *data)
1012 {
1013 return event_trigger_print("traceon", m, (void *)data->count,
1014 data->filter_str);
1015 }
1016
1017 static int
1018 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1019 struct event_trigger_data *data)
1020 {
1021 return event_trigger_print("traceoff", m, (void *)data->count,
1022 data->filter_str);
1023 }
1024
1025 static struct event_trigger_ops traceon_trigger_ops = {
1026 .func = traceon_trigger,
1027 .print = traceon_trigger_print,
1028 .init = event_trigger_init,
1029 .free = event_trigger_free,
1030 };
1031
1032 static struct event_trigger_ops traceon_count_trigger_ops = {
1033 .func = traceon_count_trigger,
1034 .print = traceon_trigger_print,
1035 .init = event_trigger_init,
1036 .free = event_trigger_free,
1037 };
1038
1039 static struct event_trigger_ops traceoff_trigger_ops = {
1040 .func = traceoff_trigger,
1041 .print = traceoff_trigger_print,
1042 .init = event_trigger_init,
1043 .free = event_trigger_free,
1044 };
1045
1046 static struct event_trigger_ops traceoff_count_trigger_ops = {
1047 .func = traceoff_count_trigger,
1048 .print = traceoff_trigger_print,
1049 .init = event_trigger_init,
1050 .free = event_trigger_free,
1051 };
1052
1053 static struct event_trigger_ops *
1054 onoff_get_trigger_ops(char *cmd, char *param)
1055 {
1056 struct event_trigger_ops *ops;
1057
1058 /* we register both traceon and traceoff to this callback */
1059 if (strcmp(cmd, "traceon") == 0)
1060 ops = param ? &traceon_count_trigger_ops :
1061 &traceon_trigger_ops;
1062 else
1063 ops = param ? &traceoff_count_trigger_ops :
1064 &traceoff_trigger_ops;
1065
1066 return ops;
1067 }
1068
1069 static struct event_command trigger_traceon_cmd = {
1070 .name = "traceon",
1071 .trigger_type = ETT_TRACE_ONOFF,
1072 .func = event_trigger_callback,
1073 .reg = register_trigger,
1074 .unreg = unregister_trigger,
1075 .get_trigger_ops = onoff_get_trigger_ops,
1076 .set_filter = set_trigger_filter,
1077 };
1078
1079 static struct event_command trigger_traceoff_cmd = {
1080 .name = "traceoff",
1081 .trigger_type = ETT_TRACE_ONOFF,
1082 .flags = EVENT_CMD_FL_POST_TRIGGER,
1083 .func = event_trigger_callback,
1084 .reg = register_trigger,
1085 .unreg = unregister_trigger,
1086 .get_trigger_ops = onoff_get_trigger_ops,
1087 .set_filter = set_trigger_filter,
1088 };
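/*
 * Hedged usage sketch for the two commands above (paths relative to
 * /sys/kernel/tracing): stop tracing once the event fires, keeping the
 * triggering event itself in the buffer (traceoff is a post-trigger, see
 * its flags), then remove the trigger:
 *
 *	# echo 'traceoff:1' > events/sched/sched_process_exit/trigger
 *	# echo '!traceoff' > events/sched/sched_process_exit/trigger
 */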
1089
1090 #ifdef CONFIG_TRACER_SNAPSHOT
1091 static void
1092 snapshot_trigger(struct event_trigger_data *data,
1093 struct trace_buffer *buffer, void *rec,
1094 struct ring_buffer_event *event)
1095 {
1096 struct trace_event_file *file = data->private_data;
1097
1098 if (file)
1099 tracing_snapshot_instance(file->tr);
1100 else
1101 tracing_snapshot();
1102 }
1103
1104 static void
1105 snapshot_count_trigger(struct event_trigger_data *data,
1106 struct trace_buffer *buffer, void *rec,
1107 struct ring_buffer_event *event)
1108 {
1109 if (!data->count)
1110 return;
1111
1112 if (data->count != -1)
1113 (data->count)--;
1114
1115 snapshot_trigger(data, buffer, rec, event);
1116 }
1117
1118 static int
1119 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1120 struct event_trigger_data *data,
1121 struct trace_event_file *file)
1122 {
1123 if (tracing_alloc_snapshot_instance(file->tr) != 0)
1124 return 0;
1125
1126 return register_trigger(glob, ops, data, file);
1127 }
1128
1129 static int
1130 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1131 struct event_trigger_data *data)
1132 {
1133 return event_trigger_print("snapshot", m, (void *)data->count,
1134 data->filter_str);
1135 }
1136
1137 static struct event_trigger_ops snapshot_trigger_ops = {
1138 .func = snapshot_trigger,
1139 .print = snapshot_trigger_print,
1140 .init = event_trigger_init,
1141 .free = event_trigger_free,
1142 };
1143
1144 static struct event_trigger_ops snapshot_count_trigger_ops = {
1145 .func = snapshot_count_trigger,
1146 .print = snapshot_trigger_print,
1147 .init = event_trigger_init,
1148 .free = event_trigger_free,
1149 };
1150
1151 static struct event_trigger_ops *
1152 snapshot_get_trigger_ops(char *cmd, char *param)
1153 {
1154 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1155 }
1156
1157 static struct event_command trigger_snapshot_cmd = {
1158 .name = "snapshot",
1159 .trigger_type = ETT_SNAPSHOT,
1160 .func = event_trigger_callback,
1161 .reg = register_snapshot_trigger,
1162 .unreg = unregister_trigger,
1163 .get_trigger_ops = snapshot_get_trigger_ops,
1164 .set_filter = set_trigger_filter,
1165 };
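/*
 * Hedged usage sketch: snapshot the trace buffer the first time the event
 * fires (register_snapshot_trigger() above allocates the snapshot buffer
 * when the trigger is set up):
 *
 *	# echo 'snapshot:1' > events/sched/sched_process_fork/trigger
 *	# cat snapshot
 */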
1166
1167 static __init int register_trigger_snapshot_cmd(void)
1168 {
1169 int ret;
1170
1171 ret = register_event_command(&trigger_snapshot_cmd);
1172 WARN_ON(ret < 0);
1173
1174 return ret;
1175 }
1176 #else
1177 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1178 #endif /* CONFIG_TRACER_SNAPSHOT */
1179
1180 #ifdef CONFIG_STACKTRACE
1181 #ifdef CONFIG_UNWINDER_ORC
1182 /* Skip 2:
1183 * event_triggers_post_call()
1184 * trace_event_raw_event_xxx()
1185 */
1186 # define STACK_SKIP 2
1187 #else
1188 /*
1189 * Skip 4:
1190 * stacktrace_trigger()
1191 * event_triggers_post_call()
1192 * trace_event_buffer_commit()
1193 * trace_event_raw_event_xxx()
1194 */
1195 #define STACK_SKIP 4
1196 #endif
1197
1198 static void
1199 stacktrace_trigger(struct event_trigger_data *data,
1200 struct trace_buffer *buffer, void *rec,
1201 struct ring_buffer_event *event)
1202 {
1203 trace_dump_stack(STACK_SKIP);
1204 }
1205
1206 static void
1207 stacktrace_count_trigger(struct event_trigger_data *data,
1208 struct trace_buffer *buffer, void *rec,
1209 struct ring_buffer_event *event)
1210 {
1211 if (!data->count)
1212 return;
1213
1214 if (data->count != -1)
1215 (data->count)--;
1216
1217 stacktrace_trigger(data, buffer, rec, event);
1218 }
1219
1220 static int
1221 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1222 struct event_trigger_data *data)
1223 {
1224 return event_trigger_print("stacktrace", m, (void *)data->count,
1225 data->filter_str);
1226 }
1227
1228 static struct event_trigger_ops stacktrace_trigger_ops = {
1229 .func = stacktrace_trigger,
1230 .print = stacktrace_trigger_print,
1231 .init = event_trigger_init,
1232 .free = event_trigger_free,
1233 };
1234
1235 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1236 .func = stacktrace_count_trigger,
1237 .print = stacktrace_trigger_print,
1238 .init = event_trigger_init,
1239 .free = event_trigger_free,
1240 };
1241
1242 static struct event_trigger_ops *
1243 stacktrace_get_trigger_ops(char *cmd, char *param)
1244 {
1245 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1246 }
1247
1248 static struct event_command trigger_stacktrace_cmd = {
1249 .name = "stacktrace",
1250 .trigger_type = ETT_STACKTRACE,
1251 .flags = EVENT_CMD_FL_POST_TRIGGER,
1252 .func = event_trigger_callback,
1253 .reg = register_trigger,
1254 .unreg = unregister_trigger,
1255 .get_trigger_ops = stacktrace_get_trigger_ops,
1256 .set_filter = set_trigger_filter,
1257 };
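/*
 * Hedged usage sketch: record a kernel stack trace for the first five
 * matching events (the filter is illustrative only):
 *
 *	# echo 'stacktrace:5 if bytes_req >= 65536' > events/kmem/kmalloc/trigger
 */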
1258
1259 static __init int register_trigger_stacktrace_cmd(void)
1260 {
1261 int ret;
1262
1263 ret = register_event_command(&trigger_stacktrace_cmd);
1264 WARN_ON(ret < 0);
1265
1266 return ret;
1267 }
1268 #else
1269 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1270 #endif /* CONFIG_STACKTRACE */
1271
1272 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1273 {
1274 unregister_event_command(&trigger_traceon_cmd);
1275 unregister_event_command(&trigger_traceoff_cmd);
1276 }
1277
1278 static void
1279 event_enable_trigger(struct event_trigger_data *data,
1280 struct trace_buffer *buffer, void *rec,
1281 struct ring_buffer_event *event)
1282 {
1283 struct enable_trigger_data *enable_data = data->private_data;
1284
1285 if (enable_data->enable)
1286 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1287 else
1288 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1289 }
1290
1291 static void
1292 event_enable_count_trigger(struct event_trigger_data *data,
1293 struct trace_buffer *buffer, void *rec,
1294 struct ring_buffer_event *event)
1295 {
1296 struct enable_trigger_data *enable_data = data->private_data;
1297
1298 if (!data->count)
1299 return;
1300
1301 /* Skip if the event is in a state we want to switch to */
1302 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1303 return;
1304
1305 if (data->count != -1)
1306 (data->count)--;
1307
1308 event_enable_trigger(data, buffer, rec, event);
1309 }
1310
1311 int event_enable_trigger_print(struct seq_file *m,
1312 struct event_trigger_ops *ops,
1313 struct event_trigger_data *data)
1314 {
1315 struct enable_trigger_data *enable_data = data->private_data;
1316
1317 seq_printf(m, "%s:%s:%s",
1318 enable_data->hist ?
1319 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1320 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1321 enable_data->file->event_call->class->system,
1322 trace_event_name(enable_data->file->event_call));
1323
1324 if (data->count == -1)
1325 seq_puts(m, ":unlimited");
1326 else
1327 seq_printf(m, ":count=%ld", data->count);
1328
1329 if (data->filter_str)
1330 seq_printf(m, " if %s\n", data->filter_str);
1331 else
1332 seq_putc(m, '\n');
1333
1334 return 0;
1335 }
1336
1337 void event_enable_trigger_free(struct event_trigger_ops *ops,
1338 struct event_trigger_data *data)
1339 {
1340 struct enable_trigger_data *enable_data = data->private_data;
1341
1342 if (WARN_ON_ONCE(data->ref <= 0))
1343 return;
1344
1345 data->ref--;
1346 if (!data->ref) {
1347 /* Remove the SOFT_MODE flag */
1348 trace_event_enable_disable(enable_data->file, 0, 1);
1349 trace_event_put_ref(enable_data->file->event_call);
1350 trigger_data_free(data);
1351 kfree(enable_data);
1352 }
1353 }
1354
1355 static struct event_trigger_ops event_enable_trigger_ops = {
1356 .func = event_enable_trigger,
1357 .print = event_enable_trigger_print,
1358 .init = event_trigger_init,
1359 .free = event_enable_trigger_free,
1360 };
1361
1362 static struct event_trigger_ops event_enable_count_trigger_ops = {
1363 .func = event_enable_count_trigger,
1364 .print = event_enable_trigger_print,
1365 .init = event_trigger_init,
1366 .free = event_enable_trigger_free,
1367 };
1368
1369 static struct event_trigger_ops event_disable_trigger_ops = {
1370 .func = event_enable_trigger,
1371 .print = event_enable_trigger_print,
1372 .init = event_trigger_init,
1373 .free = event_enable_trigger_free,
1374 };
1375
1376 static struct event_trigger_ops event_disable_count_trigger_ops = {
1377 .func = event_enable_count_trigger,
1378 .print = event_enable_trigger_print,
1379 .init = event_trigger_init,
1380 .free = event_enable_trigger_free,
1381 };
1382
1383 int event_enable_trigger_func(struct event_command *cmd_ops,
1384 struct trace_event_file *file,
1385 char *glob, char *cmd, char *param)
1386 {
1387 struct trace_event_file *event_enable_file;
1388 struct enable_trigger_data *enable_data;
1389 struct event_trigger_data *trigger_data;
1390 struct event_trigger_ops *trigger_ops;
1391 struct trace_array *tr = file->tr;
1392 const char *system;
1393 const char *event;
1394 bool hist = false;
1395 char *trigger;
1396 char *number;
1397 bool enable;
1398 int ret;
1399
1400 if (!param)
1401 return -EINVAL;
1402
1403 /* separate the trigger from the filter (s:e:n [if filter]) */
1404 trigger = strsep(&param, " \t");
1405 if (!trigger)
1406 return -EINVAL;
1407 if (param) {
1408 param = skip_spaces(param);
1409 if (!*param)
1410 param = NULL;
1411 }
1412
1413 system = strsep(&trigger, ":");
1414 if (!trigger)
1415 return -EINVAL;
1416
1417 event = strsep(&trigger, ":");
1418
1419 ret = -EINVAL;
1420 event_enable_file = find_event_file(tr, system, event);
1421 if (!event_enable_file)
1422 goto out;
1423
1424 #ifdef CONFIG_HIST_TRIGGERS
1425 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1426 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1427
1428 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1429 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1430 #else
1431 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1432 #endif
1433 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1434
1435 ret = -ENOMEM;
1436 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1437 if (!trigger_data)
1438 goto out;
1439
1440 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1441 if (!enable_data) {
1442 kfree(trigger_data);
1443 goto out;
1444 }
1445
1446 trigger_data->count = -1;
1447 trigger_data->ops = trigger_ops;
1448 trigger_data->cmd_ops = cmd_ops;
1449 INIT_LIST_HEAD(&trigger_data->list);
1450 RCU_INIT_POINTER(trigger_data->filter, NULL);
1451
1452 enable_data->hist = hist;
1453 enable_data->enable = enable;
1454 enable_data->file = event_enable_file;
1455 trigger_data->private_data = enable_data;
1456
1457 if (glob[0] == '!') {
1458 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1459 kfree(trigger_data);
1460 kfree(enable_data);
1461 ret = 0;
1462 goto out;
1463 }
1464
1465 /* Up the trigger_data count to make sure nothing frees it on failure */
1466 event_trigger_init(trigger_ops, trigger_data);
1467
1468 if (trigger) {
1469 number = strsep(&trigger, ":");
1470
1471 ret = -EINVAL;
1472 if (!strlen(number))
1473 goto out_free;
1474
1475 /*
1476 * We use the callback data field (which is a pointer)
1477 * as our counter.
1478 */
1479 ret = kstrtoul(number, 0, &trigger_data->count);
1480 if (ret)
1481 goto out_free;
1482 }
1483
1484 if (!param) /* if param is non-empty, it's supposed to be a filter */
1485 goto out_reg;
1486
1487 if (!cmd_ops->set_filter)
1488 goto out_reg;
1489
1490 ret = cmd_ops->set_filter(param, trigger_data, file);
1491 if (ret < 0)
1492 goto out_free;
1493
1494 out_reg:
1495 /* Don't let event modules unload while probe registered */
1496 ret = trace_event_try_get_ref(event_enable_file->event_call);
1497 if (!ret) {
1498 ret = -EBUSY;
1499 goto out_free;
1500 }
1501
1502 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1503 if (ret < 0)
1504 goto out_put;
1505 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1506 /*
1507 * The above returns on success the # of functions enabled,
1508 * but if it didn't find any functions it returns zero.
1509 * Consider no functions a failure too.
1510 */
1511 if (!ret) {
1512 ret = -ENOENT;
1513 goto out_disable;
1514 } else if (ret < 0)
1515 goto out_disable;
1516 /* Just return zero, not the number of enabled functions */
1517 ret = 0;
1518 event_trigger_free(trigger_ops, trigger_data);
1519 out:
1520 return ret;
1521
1522 out_disable:
1523 trace_event_enable_disable(event_enable_file, 0, 1);
1524 out_put:
1525 trace_event_put_ref(event_enable_file->event_call);
1526 out_free:
1527 if (cmd_ops->set_filter)
1528 cmd_ops->set_filter(NULL, trigger_data, NULL);
1529 event_trigger_free(trigger_ops, trigger_data);
1530 kfree(enable_data);
1531 goto out;
1532 }
1533
1534 int event_enable_register_trigger(char *glob,
1535 struct event_trigger_ops *ops,
1536 struct event_trigger_data *data,
1537 struct trace_event_file *file)
1538 {
1539 struct enable_trigger_data *enable_data = data->private_data;
1540 struct enable_trigger_data *test_enable_data;
1541 struct event_trigger_data *test;
1542 int ret = 0;
1543
1544 lockdep_assert_held(&event_mutex);
1545
1546 list_for_each_entry(test, &file->triggers, list) {
1547 test_enable_data = test->private_data;
1548 if (test_enable_data &&
1549 (test->cmd_ops->trigger_type ==
1550 data->cmd_ops->trigger_type) &&
1551 (test_enable_data->file == enable_data->file)) {
1552 ret = -EEXIST;
1553 goto out;
1554 }
1555 }
1556
1557 if (data->ops->init) {
1558 ret = data->ops->init(data->ops, data);
1559 if (ret < 0)
1560 goto out;
1561 }
1562
1563 list_add_rcu(&data->list, &file->triggers);
1564 ret++;
1565
1566 update_cond_flag(file);
1567 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1568 list_del_rcu(&data->list);
1569 update_cond_flag(file);
1570 ret--;
1571 }
1572 out:
1573 return ret;
1574 }
1575
1576 void event_enable_unregister_trigger(char *glob,
1577 struct event_trigger_ops *ops,
1578 struct event_trigger_data *test,
1579 struct trace_event_file *file)
1580 {
1581 struct enable_trigger_data *test_enable_data = test->private_data;
1582 struct enable_trigger_data *enable_data;
1583 struct event_trigger_data *data;
1584 bool unregistered = false;
1585
1586 lockdep_assert_held(&event_mutex);
1587
1588 list_for_each_entry(data, &file->triggers, list) {
1589 enable_data = data->private_data;
1590 if (enable_data &&
1591 (data->cmd_ops->trigger_type ==
1592 test->cmd_ops->trigger_type) &&
1593 (enable_data->file == test_enable_data->file)) {
1594 unregistered = true;
1595 list_del_rcu(&data->list);
1596 trace_event_trigger_enable_disable(file, 0);
1597 update_cond_flag(file);
1598 break;
1599 }
1600 }
1601
1602 if (unregistered && data->ops->free)
1603 data->ops->free(data->ops, data);
1604 }
1605
1606 static struct event_trigger_ops *
1607 event_enable_get_trigger_ops(char *cmd, char *param)
1608 {
1609 struct event_trigger_ops *ops;
1610 bool enable;
1611
1612 #ifdef CONFIG_HIST_TRIGGERS
1613 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1614 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1615 #else
1616 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1617 #endif
1618 if (enable)
1619 ops = param ? &event_enable_count_trigger_ops :
1620 &event_enable_trigger_ops;
1621 else
1622 ops = param ? &event_disable_count_trigger_ops :
1623 &event_disable_trigger_ops;
1624
1625 return ops;
1626 }
1627
1628 static struct event_command trigger_enable_cmd = {
1629 .name = ENABLE_EVENT_STR,
1630 .trigger_type = ETT_EVENT_ENABLE,
1631 .func = event_enable_trigger_func,
1632 .reg = event_enable_register_trigger,
1633 .unreg = event_enable_unregister_trigger,
1634 .get_trigger_ops = event_enable_get_trigger_ops,
1635 .set_filter = set_trigger_filter,
1636 };
1637
1638 static struct event_command trigger_disable_cmd = {
1639 .name = DISABLE_EVENT_STR,
1640 .trigger_type = ETT_EVENT_ENABLE,
1641 .func = event_enable_trigger_func,
1642 .reg = event_enable_register_trigger,
1643 .unreg = event_enable_unregister_trigger,
1644 .get_trigger_ops = event_enable_get_trigger_ops,
1645 .set_filter = set_trigger_filter,
1646 };
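/*
 * Hedged usage sketch for enable_event/disable_event: when the triggering
 * event fires, soft-enable (or soft-disable) another event, here for a
 * single occurrence (event names are illustrative):
 *
 *	# echo 'enable_event:kmem:kmalloc:1' > events/syscalls/sys_enter_read/trigger
 *	# echo 'disable_event:kmem:kmalloc' > events/syscalls/sys_exit_read/trigger
 */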
1647
1648 static __init void unregister_trigger_enable_disable_cmds(void)
1649 {
1650 unregister_event_command(&trigger_enable_cmd);
1651 unregister_event_command(&trigger_disable_cmd);
1652 }
1653
1654 static __init int register_trigger_enable_disable_cmds(void)
1655 {
1656 int ret;
1657
1658 ret = register_event_command(&trigger_enable_cmd);
1659 if (WARN_ON(ret < 0))
1660 return ret;
1661 ret = register_event_command(&trigger_disable_cmd);
1662 if (WARN_ON(ret < 0))
1663 unregister_trigger_enable_disable_cmds();
1664
1665 return ret;
1666 }
1667
1668 static __init int register_trigger_traceon_traceoff_cmds(void)
1669 {
1670 int ret;
1671
1672 ret = register_event_command(&trigger_traceon_cmd);
1673 if (WARN_ON(ret < 0))
1674 return ret;
1675 ret = register_event_command(&trigger_traceoff_cmd);
1676 if (WARN_ON(ret < 0))
1677 unregister_trigger_traceon_traceoff_cmds();
1678
1679 return ret;
1680 }
1681
1682 __init int register_trigger_cmds(void)
1683 {
1684 register_trigger_traceon_traceoff_cmds();
1685 register_trigger_snapshot_cmd();
1686 register_trigger_stacktrace_cmd();
1687 register_trigger_enable_disable_cmds();
1688 register_trigger_hist_enable_disable_cmds();
1689 register_trigger_hist_cmd();
1690
1691 return 0;
1692 }
1693