// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/security.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and, if the record matches, the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
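
/*
 * Illustrative sketch (not part of the original file): how a tracepoint
 * handler is expected to pair the two helpers above.  The variable names
 * used here are hypothetical.
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry, event);
 *	... write and commit the event to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */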

#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS) {
		(*pos)++;
		return NULL;
	}
	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next;
	struct event_command *p;
	int ret = -EINVAL;

	next = buff = skip_spaces(buff);
	command = strsep(&next, ": \t");
	if (next) {
		next = skip_spaces(next);
		if (!*next)
			next = NULL;
	}
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
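
/*
 * For reference, a few example strings that trigger_process_regex() is
 * meant to accept (a sketch, assuming the usual trigger syntax; the field
 * name is just an example):
 *
 *	"traceoff"
 *	"snapshot:5"
 *	"stacktrace:3 if bytes_req > 1024"
 *	"!traceoff"		(a leading '!' removes an existing trigger)
 *
 * The text before the first ':' (with a leading '!' stripped) is matched
 * against the registered command names; the remainder is handed to the
 * matching command's parsing callback.
 */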

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
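
/*
 * These fops back the per-event "trigger" file in tracefs.  A typical
 * interaction from user space looks roughly like this (the paths shown
 * assume the usual tracefs mount point and may differ on a given system):
 *
 *	# echo 'traceoff:1 if prev_pid == 0' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *	# cat /sys/kernel/tracing/events/sched/sched_switch/trigger
 *	# echo '!traceoff' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 */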

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
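
/*
 * Example of the output produced by event_trigger_print() when the
 * trigger file is read (shown for illustration only):
 *
 *	traceoff:unlimited
 *	snapshot:count=3 if bytes_req > 1024
 */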

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
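
/*
 * Note on the return value of the generic implementation above: despite
 * the "0 on success" wording in the kerneldoc, it follows the convention
 * expected by event_trigger_callback() and returns the number of triggers
 * registered (1) on success, 0 or a negative errno on failure; the caller
 * then normalizes a positive return to 0.
 */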

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0])) {
		trigger = strsep(&param, " \t");
		if (param) {
			param = skip_spaces(param);
			if (!*param)
				param = NULL;
		}
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
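
/*
 * Putting the parsing together with an example (field name chosen only
 * for illustration): a write of
 *
 *	snapshot:3 if bytes_req > 1024
 *
 * reaches event_trigger_callback() with roughly glob = cmd = "snapshot"
 * and param = "3 if bytes_req > 1024".  The leading "3" becomes
 * trigger_data->count and the "if ..." remainder is handed to
 * ->set_filter() before the trigger is registered.
 */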

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, the filter still needs to be
	 * freed, which the calling code will do via data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
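
/*
 * For example, a filter string such as "if bytes_req > 1024" would be
 * parsed here against the fields of the *triggering* event (bytes_req is
 * just an example field).  A string that does not start with "if" is
 * rejected with -EINVAL.
 */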

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate with the common data
 * @named_data: The common trigger data that @data should reference
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
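
/*
 * Named triggers are currently used by hist triggers carrying a
 * ":name=..." parameter, e.g. (a sketch of the usual syntax, keys and
 * values chosen only as an example):
 *
 *	hist:keys=call_site:vals=bytes_req:name=foo
 *
 * Every trigger registered with the same name shares the trigger data of
 * the first one registered, which is what find_named_trigger() and
 * set_named_trigger_data() above arrange.
 */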

static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
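
/*
 * Note that only the traceoff command sets EVENT_CMD_FL_POST_TRIGGER:
 * deferring traceoff until after the current event has been written lets
 * the triggering event itself reach the ring buffer before tracing is
 * turned off.  traceon has no such need, since the buffer is still off at
 * the moment it fires.
 */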

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	if (tracing_alloc_snapshot_instance(file->tr) != 0)
		return 0;

	return register_trigger(glob, ops, data, file);
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec, event);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;
	if (param) {
		param = skip_spaces(param);
		if (!*param)
			param = NULL;
	}

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
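
/*
 * Example of what the function above parses (a sketch using the usual
 * syntax; the events and field are chosen only for illustration):
 *
 *	echo 'enable_event:kmem:kmalloc:5 if prev_pid == 0' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Here system = "kmem", event = "kmalloc", the trailing "5" becomes
 * trigger_data->count, and the "if ..." filter applies to the triggering
 * sched:sched_switch event, not to kmem:kmalloc.
 */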

int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}