1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
44 #include <linux/fs.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
51 
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
53 
54 #include "trace.h"
55 #include "trace_output.h"
56 
57 /*
58  * On boot up, the ring buffer is set to the minimum size, so that
59  * we do not waste memory on systems that are not using tracing.
60  */
61 bool ring_buffer_expanded;
62 
63 #ifdef CONFIG_FTRACE_STARTUP_TEST
64 /*
65  * We need to change this state when a selftest is running.
66  * A selftest will look into the ring-buffer to count the
67  * entries inserted during the selftest, although concurrent
68  * insertions into the ring-buffer, such as trace_printk(), could occur
69  * at the same time, giving false positive or negative results.
70  */
71 static bool __read_mostly tracing_selftest_running;
72 
73 /*
74  * If boot-time tracing including tracers/events via kernel cmdline
75  * is running, we do not want to run SELFTEST.
76  */
77 bool __read_mostly tracing_selftest_disabled;
78 
79 void __init disable_tracing_selftest(const char *reason)
80 {
81 	if (!tracing_selftest_disabled) {
82 		tracing_selftest_disabled = true;
83 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
84 	}
85 }
86 #else
87 #define tracing_selftest_running	0
88 #define tracing_selftest_disabled	0
89 #endif
90 
91 /* Pipe tracepoints to printk */
92 static struct trace_iterator *tracepoint_print_iter;
93 int tracepoint_printk;
94 static bool tracepoint_printk_stop_on_boot __initdata;
95 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
96 
97 /* For tracers that don't implement custom flags */
98 static struct tracer_opt dummy_tracer_opt[] = {
99 	{ }
100 };
101 
102 static int
103 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
104 {
105 	return 0;
106 }
107 
108 /*
109  * To prevent the comm cache from being overwritten when no
110  * tracing is active, only save the comm when a trace event
111  * occurred.
112  */
113 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
114 
115 /*
116  * Kill all tracing for good (never come back).
117  * It is initialized to 1 but will turn to zero if the initialization
118  * of the tracer is successful. But that is the only place that sets
119  * this back to zero.
120  */
121 static int tracing_disabled = 1;
122 
123 cpumask_var_t __read_mostly	tracing_buffer_mask;
124 
125 /*
126  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
127  *
128  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
129  * is set, then ftrace_dump is called. This will output the contents
130  * of the ftrace buffers to the console.  This is very useful for
131  * capturing traces that lead to crashes and outputting them to a
132  * serial console.
133  *
134  * It is off by default, but you can enable it either by specifying
135  * "ftrace_dump_on_oops" on the kernel command line, or by setting
136  * /proc/sys/kernel/ftrace_dump_on_oops.
137  * Set it to 1 to dump the buffers of all CPUs.
138  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
139  */
140 
141 enum ftrace_dump_mode ftrace_dump_on_oops;
142 
143 /* When set, tracing will stop when a WARN*() is hit */
144 int __disable_trace_on_warning;
145 
146 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
147 /* Map of enums to their values, for "eval_map" file */
148 struct trace_eval_map_head {
149 	struct module			*mod;
150 	unsigned long			length;
151 };
152 
153 union trace_eval_map_item;
154 
155 struct trace_eval_map_tail {
156 	/*
157 	 * "end" is first and points to NULL as it must be different
158 	 * than "mod" or "eval_string"
159 	 */
160 	union trace_eval_map_item	*next;
161 	const char			*end;	/* points to NULL */
162 };
163 
164 static DEFINE_MUTEX(trace_eval_mutex);
165 
166 /*
167  * The trace_eval_maps are saved in an array with two extra elements,
168  * one at the beginning, and one at the end. The beginning item contains
169  * the count of the saved maps (head.length), and the module they
170  * belong to if not built in (head.mod). The ending item contains a
171  * pointer to the next array of saved eval_map items.
172  */
173 union trace_eval_map_item {
174 	struct trace_eval_map		map;
175 	struct trace_eval_map_head	head;
176 	struct trace_eval_map_tail	tail;
177 };
178 
179 static union trace_eval_map_item *trace_eval_maps;
180 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
181 
182 int tracing_set_tracer(struct trace_array *tr, const char *buf);
183 static void ftrace_trace_userstack(struct trace_array *tr,
184 				   struct trace_buffer *buffer,
185 				   unsigned int trace_ctx);
186 
187 #define MAX_TRACER_SIZE		100
188 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
189 static char *default_bootup_tracer;
190 
191 static bool allocate_snapshot;
192 static bool snapshot_at_boot;
193 
194 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_instance_index;
196 
197 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
198 static int boot_snapshot_index;
199 
200 static int __init set_cmdline_ftrace(char *str)
201 {
202 	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
203 	default_bootup_tracer = bootup_tracer_buf;
204 	/* We are using ftrace early, expand it */
205 	ring_buffer_expanded = true;
206 	return 1;
207 }
208 __setup("ftrace=", set_cmdline_ftrace);
209 
210 static int __init set_ftrace_dump_on_oops(char *str)
211 {
212 	if (*str++ != '=' || !*str || !strcmp("1", str)) {
213 		ftrace_dump_on_oops = DUMP_ALL;
214 		return 1;
215 	}
216 
217 	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
218 		ftrace_dump_on_oops = DUMP_ORIG;
219 		return 1;
220 	}
221 
222 	return 0;
223 }
224 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
225 
226 static int __init stop_trace_on_warning(char *str)
227 {
228 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
229 		__disable_trace_on_warning = 1;
230 	return 1;
231 }
232 __setup("traceoff_on_warning", stop_trace_on_warning);
233 
234 static int __init boot_alloc_snapshot(char *str)
235 {
236 	char *slot = boot_snapshot_info + boot_snapshot_index;
237 	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
238 	int ret;
239 
240 	if (str[0] == '=') {
241 		str++;
242 		if (strlen(str) >= left)
243 			return -1;
244 
245 		ret = snprintf(slot, left, "%s\t", str);
246 		boot_snapshot_index += ret;
247 	} else {
248 		allocate_snapshot = true;
249 		/* We also need the main ring buffer expanded */
250 		ring_buffer_expanded = true;
251 	}
252 	return 1;
253 }
254 __setup("alloc_snapshot", boot_alloc_snapshot);
255 
256 
257 static int __init boot_snapshot(char *str)
258 {
259 	snapshot_at_boot = true;
260 	boot_alloc_snapshot(str);
261 	return 1;
262 }
263 __setup("ftrace_boot_snapshot", boot_snapshot);
264 
265 
266 static int __init boot_instance(char *str)
267 {
268 	char *slot = boot_instance_info + boot_instance_index;
269 	int left = sizeof(boot_instance_info) - boot_instance_index;
270 	int ret;
271 
272 	if (strlen(str) >= left)
273 		return -1;
274 
275 	ret = snprintf(slot, left, "%s\t", str);
276 	boot_instance_index += ret;
277 
278 	return 1;
279 }
280 __setup("trace_instance=", boot_instance);
281 
282 
283 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
284 
285 static int __init set_trace_boot_options(char *str)
286 {
287 	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
288 	return 1;
289 }
290 __setup("trace_options=", set_trace_boot_options);
291 
292 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
293 static char *trace_boot_clock __initdata;
294 
295 static int __init set_trace_boot_clock(char *str)
296 {
297 	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
298 	trace_boot_clock = trace_boot_clock_buf;
299 	return 1;
300 }
301 __setup("trace_clock=", set_trace_boot_clock);
302 
303 static int __init set_tracepoint_printk(char *str)
304 {
305 	/* Ignore the "tp_printk_stop_on_boot" param */
306 	if (*str == '_')
307 		return 0;
308 
309 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
310 		tracepoint_printk = 1;
311 	return 1;
312 }
313 __setup("tp_printk", set_tracepoint_printk);
314 
315 static int __init set_tracepoint_printk_stop(char *str)
316 {
317 	tracepoint_printk_stop_on_boot = true;
318 	return 1;
319 }
320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
321 
322 unsigned long long ns2usecs(u64 nsec)
323 {
324 	nsec += 500;
325 	do_div(nsec, 1000);
326 	return nsec;
327 }
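/*
 * Worked example of the rounding above: ns2usecs(1499) == 1 but
 * ns2usecs(1500) == 2, because 500 is added before dividing by 1000,
 * rounding to the nearest microsecond.
 */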
328 
329 static void
330 trace_process_export(struct trace_export *export,
331 	       struct ring_buffer_event *event, int flag)
332 {
333 	struct trace_entry *entry;
334 	unsigned int size = 0;
335 
336 	if (export->flags & flag) {
337 		entry = ring_buffer_event_data(event);
338 		size = ring_buffer_event_length(event);
339 		export->write(export, entry, size);
340 	}
341 }
342 
343 static DEFINE_MUTEX(ftrace_export_lock);
344 
345 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
346 
347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
350 
351 static inline void ftrace_exports_enable(struct trace_export *export)
352 {
353 	if (export->flags & TRACE_EXPORT_FUNCTION)
354 		static_branch_inc(&trace_function_exports_enabled);
355 
356 	if (export->flags & TRACE_EXPORT_EVENT)
357 		static_branch_inc(&trace_event_exports_enabled);
358 
359 	if (export->flags & TRACE_EXPORT_MARKER)
360 		static_branch_inc(&trace_marker_exports_enabled);
361 }
362 
363 static inline void ftrace_exports_disable(struct trace_export *export)
364 {
365 	if (export->flags & TRACE_EXPORT_FUNCTION)
366 		static_branch_dec(&trace_function_exports_enabled);
367 
368 	if (export->flags & TRACE_EXPORT_EVENT)
369 		static_branch_dec(&trace_event_exports_enabled);
370 
371 	if (export->flags & TRACE_EXPORT_MARKER)
372 		static_branch_dec(&trace_marker_exports_enabled);
373 }
374 
375 static void ftrace_exports(struct ring_buffer_event *event, int flag)
376 {
377 	struct trace_export *export;
378 
379 	preempt_disable_notrace();
380 
381 	export = rcu_dereference_raw_check(ftrace_exports_list);
382 	while (export) {
383 		trace_process_export(export, event, flag);
384 		export = rcu_dereference_raw_check(export->next);
385 	}
386 
387 	preempt_enable_notrace();
388 }
389 
390 static inline void
391 add_trace_export(struct trace_export **list, struct trace_export *export)
392 {
393 	rcu_assign_pointer(export->next, *list);
394 	/*
395 	 * We are adding the export to the list, but another
396 	 * CPU might be walking that list. We need to make sure
397 	 * the export->next pointer is valid before another CPU sees
398 	 * the export pointer included in the list.
399 	 */
400 	rcu_assign_pointer(*list, export);
401 }
402 
403 static inline int
404 rm_trace_export(struct trace_export **list, struct trace_export *export)
405 {
406 	struct trace_export **p;
407 
408 	for (p = list; *p != NULL; p = &(*p)->next)
409 		if (*p == export)
410 			break;
411 
412 	if (*p != export)
413 		return -1;
414 
415 	rcu_assign_pointer(*p, (*p)->next);
416 
417 	return 0;
418 }
419 
420 static inline void
421 add_ftrace_export(struct trace_export **list, struct trace_export *export)
422 {
423 	ftrace_exports_enable(export);
424 
425 	add_trace_export(list, export);
426 }
427 
428 static inline int
429 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
430 {
431 	int ret;
432 
433 	ret = rm_trace_export(list, export);
434 	ftrace_exports_disable(export);
435 
436 	return ret;
437 }
438 
439 int register_ftrace_export(struct trace_export *export)
440 {
441 	if (WARN_ON_ONCE(!export->write))
442 		return -1;
443 
444 	mutex_lock(&ftrace_export_lock);
445 
446 	add_ftrace_export(&ftrace_exports_list, export);
447 
448 	mutex_unlock(&ftrace_export_lock);
449 
450 	return 0;
451 }
452 EXPORT_SYMBOL_GPL(register_ftrace_export);
453 
454 int unregister_ftrace_export(struct trace_export *export)
455 {
456 	int ret;
457 
458 	mutex_lock(&ftrace_export_lock);
459 
460 	ret = rm_ftrace_export(&ftrace_exports_list, export);
461 
462 	mutex_unlock(&ftrace_export_lock);
463 
464 	return ret;
465 }
466 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
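/*
 * Sketch of how a user of the export API above would hook in (the names
 * here are hypothetical, not part of this file). The write() callback
 * receives the raw trace entry and its size and forwards it wherever the
 * exporter wants:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		... forward entry/size to the export target ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */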
467 
468 /* trace_flags holds trace_options default values */
469 #define TRACE_DEFAULT_FLAGS						\
470 	(FUNCTION_DEFAULT_FLAGS |					\
471 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
472 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
473 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
474 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
475 	 TRACE_ITER_HASH_PTR)
476 
477 /* trace_options that are only supported by global_trace */
478 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
479 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
480 
481 /* trace_flags that are default zero for instances */
482 #define ZEROED_TRACE_FLAGS \
483 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
484 
485 /*
486  * The global_trace is the descriptor that holds the top-level tracing
487  * buffers for the live tracing.
488  */
489 static struct trace_array global_trace = {
490 	.trace_flags = TRACE_DEFAULT_FLAGS,
491 };
492 
493 LIST_HEAD(ftrace_trace_arrays);
494 
495 int trace_array_get(struct trace_array *this_tr)
496 {
497 	struct trace_array *tr;
498 	int ret = -ENODEV;
499 
500 	mutex_lock(&trace_types_lock);
501 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
502 		if (tr == this_tr) {
503 			tr->ref++;
504 			ret = 0;
505 			break;
506 		}
507 	}
508 	mutex_unlock(&trace_types_lock);
509 
510 	return ret;
511 }
512 
513 static void __trace_array_put(struct trace_array *this_tr)
514 {
515 	WARN_ON(!this_tr->ref);
516 	this_tr->ref--;
517 }
518 
519 /**
520  * trace_array_put - Decrement the reference counter for this trace array.
521  * @this_tr : pointer to the trace array
522  *
523  * NOTE: Use this when we no longer need the trace array returned by
524  * trace_array_get_by_name(). This ensures the trace array can be later
525  * destroyed.
526  *
527  */
528 void trace_array_put(struct trace_array *this_tr)
529 {
530 	if (!this_tr)
531 		return;
532 
533 	mutex_lock(&trace_types_lock);
534 	__trace_array_put(this_tr);
535 	mutex_unlock(&trace_types_lock);
536 }
537 EXPORT_SYMBOL_GPL(trace_array_put);
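/*
 * Typical pairing of the get/put above as seen from a module (a sketch;
 * assumes the single-argument form of trace_array_get_by_name() declared
 * in <linux/trace.h>):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr) {
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *		trace_array_put(tr);
 *	}
 */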
538 
539 int tracing_check_open_get_tr(struct trace_array *tr)
540 {
541 	int ret;
542 
543 	ret = security_locked_down(LOCKDOWN_TRACEFS);
544 	if (ret)
545 		return ret;
546 
547 	if (tracing_disabled)
548 		return -ENODEV;
549 
550 	if (tr && trace_array_get(tr) < 0)
551 		return -ENODEV;
552 
553 	return 0;
554 }
555 
556 int call_filter_check_discard(struct trace_event_call *call, void *rec,
557 			      struct trace_buffer *buffer,
558 			      struct ring_buffer_event *event)
559 {
560 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
561 	    !filter_match_preds(call->filter, rec)) {
562 		__trace_event_discard_commit(buffer, event);
563 		return 1;
564 	}
565 
566 	return 0;
567 }
568 
569 /**
570  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
571  * @filtered_pids: The list of pids to check
572  * @search_pid: The PID to find in @filtered_pids
573  *
574  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
575  */
576 bool
577 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
578 {
579 	return trace_pid_list_is_set(filtered_pids, search_pid);
580 }
581 
582 /**
583  * trace_ignore_this_task - should a task be ignored for tracing
584  * @filtered_pids: The list of pids to check
585  * @filtered_no_pids: The list of pids not to be traced
586  * @task: The task that should be ignored if not filtered
587  *
588  * Checks if @task should be traced or not from @filtered_pids.
589  * Returns true if @task should *NOT* be traced.
590  * Returns false if @task should be traced.
591  */
592 bool
593 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
594 		       struct trace_pid_list *filtered_no_pids,
595 		       struct task_struct *task)
596 {
597 	/*
598 	 * If filtered_no_pids is not empty, and the task's pid is listed
599 	 * in filtered_no_pids, then return true.
600 	 * Otherwise, if filtered_pids is empty, that means we can
601 	 * trace all tasks. If it has content, then only trace pids
602 	 * within filtered_pids.
603 	 */
604 
605 	return (filtered_pids &&
606 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
607 		(filtered_no_pids &&
608 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
609 }
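/*
 * Summary of the logic above, with the pid taken from @task:
 *
 *	filtered_no_pids set and pid found in it     -> ignore (true)
 *	filtered_pids set and pid NOT found in it    -> ignore (true)
 *	otherwise                                    -> trace  (false)
 */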
610 
611 /**
612  * trace_filter_add_remove_task - Add or remove a task from a pid_list
613  * @pid_list: The list to modify
614  * @self: The current task for fork or NULL for exit
615  * @task: The task to add or remove
616  *
617  * If adding a task, if @self is defined, the task is only added if @self
618  * is also included in @pid_list. This happens on fork and tasks should
619  * only be added when the parent is listed. If @self is NULL, then the
620  * @task pid will be removed from the list, which would happen on exit
621  * of a task.
622  */
623 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
624 				  struct task_struct *self,
625 				  struct task_struct *task)
626 {
627 	if (!pid_list)
628 		return;
629 
630 	/* For forks, we only add if the forking task is listed */
631 	if (self) {
632 		if (!trace_find_filtered_pid(pid_list, self->pid))
633 			return;
634 	}
635 
636 	/* "self" is set for forks, and NULL for exits */
637 	if (self)
638 		trace_pid_list_set(pid_list, task->pid);
639 	else
640 		trace_pid_list_clear(pid_list, task->pid);
641 }
642 
643 /**
644  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
645  * @pid_list: The pid list to show
646  * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
647  * @pos: The position of the file
648  *
649  * This is used by the seq_file "next" operation to iterate the pids
650  * listed in a trace_pid_list structure.
651  *
652  * Returns the pid+1 as we want to display pid of zero, but NULL would
653  * stop the iteration.
654  */
655 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
656 {
657 	long pid = (unsigned long)v;
658 	unsigned int next;
659 
660 	(*pos)++;
661 
662 	/* pid already is +1 of the actual previous bit */
663 	if (trace_pid_list_next(pid_list, pid, &next) < 0)
664 		return NULL;
665 
666 	pid = next;
667 
668 	/* Return pid + 1 to allow zero to be represented */
669 	return (void *)(pid + 1);
670 }
671 
672 /**
673  * trace_pid_start - Used for seq_file to start reading pid lists
674  * @pid_list: The pid list to show
675  * @pos: The position of the file
676  *
677  * This is used by seq_file "start" operation to start the iteration
678  * of listing pids.
679  *
680  * Returns the pid+1 as we want to display pid of zero, but NULL would
681  * stop the iteration.
682  */
683 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
684 {
685 	unsigned long pid;
686 	unsigned int first;
687 	loff_t l = 0;
688 
689 	if (trace_pid_list_first(pid_list, &first) < 0)
690 		return NULL;
691 
692 	pid = first;
693 
694 	/* Return pid + 1 so that zero can be the exit value */
695 	for (pid++; pid && l < *pos;
696 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
697 		;
698 	return (void *)pid;
699 }
700 
701 /**
702  * trace_pid_show - show the current pid in seq_file processing
703  * @m: The seq_file structure to write into
704  * @v: A void pointer of the pid (+1) value to display
705  *
706  * Can be directly used by seq_file operations to display the current
707  * pid value.
708  */
709 int trace_pid_show(struct seq_file *m, void *v)
710 {
711 	unsigned long pid = (unsigned long)v - 1;
712 
713 	seq_printf(m, "%lu\n", pid);
714 	return 0;
715 }
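/*
 * The three helpers above are meant to back a seq_file: the .start and
 * .next callbacks call trace_pid_start()/trace_pid_next() on the relevant
 * trace_pid_list, and .show can be trace_pid_show() directly. A sketch
 * (the wrapper names are illustrative, not defined in this file):
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_seq_start,
 *		.next	= my_pid_seq_next,
 *		.stop	= my_pid_seq_stop,
 *		.show	= trace_pid_show,
 *	};
 */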
716 
717 /* 128 should be much more than enough */
718 #define PID_BUF_SIZE		127
719 
720 int trace_pid_write(struct trace_pid_list *filtered_pids,
721 		    struct trace_pid_list **new_pid_list,
722 		    const char __user *ubuf, size_t cnt)
723 {
724 	struct trace_pid_list *pid_list;
725 	struct trace_parser parser;
726 	unsigned long val;
727 	int nr_pids = 0;
728 	ssize_t read = 0;
729 	ssize_t ret;
730 	loff_t pos;
731 	pid_t pid;
732 
733 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
734 		return -ENOMEM;
735 
736 	/*
737 	 * Always create a new array. The write is an all-or-nothing
738 	 * operation: when the user adds new pids, build a fresh list,
739 	 * and if the operation fails, the current list is left
740 	 * unmodified.
741 	 */
742 	pid_list = trace_pid_list_alloc();
743 	if (!pid_list) {
744 		trace_parser_put(&parser);
745 		return -ENOMEM;
746 	}
747 
748 	if (filtered_pids) {
749 		/* copy the current bits to the new max */
750 		ret = trace_pid_list_first(filtered_pids, &pid);
751 		while (!ret) {
752 			trace_pid_list_set(pid_list, pid);
753 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
754 			nr_pids++;
755 		}
756 	}
757 
758 	ret = 0;
759 	while (cnt > 0) {
760 
761 		pos = 0;
762 
763 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
764 		if (ret < 0)
765 			break;
766 
767 		read += ret;
768 		ubuf += ret;
769 		cnt -= ret;
770 
771 		if (!trace_parser_loaded(&parser))
772 			break;
773 
774 		ret = -EINVAL;
775 		if (kstrtoul(parser.buffer, 0, &val))
776 			break;
777 
778 		pid = (pid_t)val;
779 
780 		if (trace_pid_list_set(pid_list, pid) < 0) {
781 			ret = -1;
782 			break;
783 		}
784 		nr_pids++;
785 
786 		trace_parser_clear(&parser);
787 		ret = 0;
788 	}
789 	trace_parser_put(&parser);
790 
791 	if (ret < 0) {
792 		trace_pid_list_free(pid_list);
793 		return ret;
794 	}
795 
796 	if (!nr_pids) {
797 		/* Cleared the list of pids */
798 		trace_pid_list_free(pid_list);
799 		pid_list = NULL;
800 	}
801 
802 	*new_pid_list = pid_list;
803 
804 	return read;
805 }
806 
807 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
808 {
809 	u64 ts;
810 
811 	/* Early boot up does not have a buffer yet */
812 	if (!buf->buffer)
813 		return trace_clock_local();
814 
815 	ts = ring_buffer_time_stamp(buf->buffer);
816 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
817 
818 	return ts;
819 }
820 
821 u64 ftrace_now(int cpu)
822 {
823 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
824 }
825 
826 /**
827  * tracing_is_enabled - Show if global_trace has been enabled
828  *
829  * Shows if the global trace has been enabled or not. It uses the
830  * mirror flag "buffer_disabled" to be used in fast paths such as for
831  * the irqsoff tracer. But it may be inaccurate due to races. If you
832  * need to know the accurate state, use tracing_is_on() which is a little
833  * slower, but accurate.
834  */
835 int tracing_is_enabled(void)
836 {
837 	/*
838 	 * For quick access (irqsoff uses this in fast path), just
839 	 * return the mirror variable of the state of the ring buffer.
840 	 * It's a little racy, but we don't really care.
841 	 */
842 	smp_rmb();
843 	return !global_trace.buffer_disabled;
844 }
845 
846 /*
847  * trace_buf_size is the size in bytes that is allocated
848  * for a buffer. Note, the number of bytes is always rounded
849  * to page size.
850  *
851  * This number is purposely set to the low value of 16384.
852  * If a dump on oops happens, it is much appreciated not to have
853  * to wait for all that output. Anyway, this is configurable at
854  * both boot time and run time.
855  */
856 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
857 
858 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
859 
860 /* trace_types holds a link list of available tracers. */
861 static struct tracer		*trace_types __read_mostly;
862 
863 /*
864  * trace_types_lock is used to protect the trace_types list.
865  */
866 DEFINE_MUTEX(trace_types_lock);
867 
868 /*
869  * serialize the access of the ring buffer
870  *
871  * The ring buffer serializes readers, but that is only low-level protection.
872  * The validity of the events (returned by ring_buffer_peek() etc.)
873  * is not protected by the ring buffer.
874  *
875  * The content of events may become garbage if we allow another process to
876  * consume these events concurrently:
877  *   A) the page of the consumed events may become a normal page
878  *      (not a reader page) in the ring buffer, and this page will be
879  *      rewritten by the events producer.
880  *   B) the page of the consumed events may become a page for splice_read,
881  *      and this page will be returned to the system.
882  *
883  * These primitives allow multiple processes to access different per-cpu
884  * ring buffers concurrently.
885  *
886  * These primitives don't distinguish read-only and read-consume access.
887  * Multiple read-only accesses are also serialized.
888  */
889 
890 #ifdef CONFIG_SMP
891 static DECLARE_RWSEM(all_cpu_access_lock);
892 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
893 
894 static inline void trace_access_lock(int cpu)
895 {
896 	if (cpu == RING_BUFFER_ALL_CPUS) {
897 		/* gain it for accessing the whole ring buffer. */
898 		down_write(&all_cpu_access_lock);
899 	} else {
900 		/* gain it for accessing a cpu ring buffer. */
901 
902 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
903 		down_read(&all_cpu_access_lock);
904 
905 		/* Secondly block other access to this @cpu ring buffer. */
906 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
907 	}
908 }
909 
910 static inline void trace_access_unlock(int cpu)
911 {
912 	if (cpu == RING_BUFFER_ALL_CPUS) {
913 		up_write(&all_cpu_access_lock);
914 	} else {
915 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
916 		up_read(&all_cpu_access_lock);
917 	}
918 }
919 
920 static inline void trace_access_lock_init(void)
921 {
922 	int cpu;
923 
924 	for_each_possible_cpu(cpu)
925 		mutex_init(&per_cpu(cpu_access_lock, cpu));
926 }
927 
928 #else
929 
930 static DEFINE_MUTEX(access_lock);
931 
932 static inline void trace_access_lock(int cpu)
933 {
934 	(void)cpu;
935 	mutex_lock(&access_lock);
936 }
937 
938 static inline void trace_access_unlock(int cpu)
939 {
940 	(void)cpu;
941 	mutex_unlock(&access_lock);
942 }
943 
944 static inline void trace_access_lock_init(void)
945 {
946 }
947 
948 #endif
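/*
 * Readers are expected to bracket their buffer access with the helpers
 * above, for example (a sketch):
 *
 *	trace_access_lock(cpu);
 *	... consume events from that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * and to pass RING_BUFFER_ALL_CPUS when they need to touch every
 * per-cpu buffer at once.
 */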
949 
950 #ifdef CONFIG_STACKTRACE
951 static void __ftrace_trace_stack(struct trace_buffer *buffer,
952 				 unsigned int trace_ctx,
953 				 int skip, struct pt_regs *regs);
954 static inline void ftrace_trace_stack(struct trace_array *tr,
955 				      struct trace_buffer *buffer,
956 				      unsigned int trace_ctx,
957 				      int skip, struct pt_regs *regs);
958 
959 #else
960 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
961 					unsigned int trace_ctx,
962 					int skip, struct pt_regs *regs)
963 {
964 }
965 static inline void ftrace_trace_stack(struct trace_array *tr,
966 				      struct trace_buffer *buffer,
967 				      unsigned long trace_ctx,
968 				      int skip, struct pt_regs *regs)
969 {
970 }
971 
972 #endif
973 
974 static __always_inline void
975 trace_event_setup(struct ring_buffer_event *event,
976 		  int type, unsigned int trace_ctx)
977 {
978 	struct trace_entry *ent = ring_buffer_event_data(event);
979 
980 	tracing_generic_entry_update(ent, type, trace_ctx);
981 }
982 
983 static __always_inline struct ring_buffer_event *
984 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
985 			  int type,
986 			  unsigned long len,
987 			  unsigned int trace_ctx)
988 {
989 	struct ring_buffer_event *event;
990 
991 	event = ring_buffer_lock_reserve(buffer, len);
992 	if (event != NULL)
993 		trace_event_setup(event, type, trace_ctx);
994 
995 	return event;
996 }
997 
998 void tracer_tracing_on(struct trace_array *tr)
999 {
1000 	if (tr->array_buffer.buffer)
1001 		ring_buffer_record_on(tr->array_buffer.buffer);
1002 	/*
1003 	 * This flag is looked at when buffers haven't been allocated
1004 	 * yet, or by some tracers (like irqsoff), that just want to
1005 	 * know if the ring buffer has been disabled, but it can handle
1006 	 * races where it gets disabled while we still do a record.
1007 	 * As the check is in the fast path of the tracers, it is more
1008 	 * important to be fast than accurate.
1009 	 */
1010 	tr->buffer_disabled = 0;
1011 	/* Make the flag seen by readers */
1012 	smp_wmb();
1013 }
1014 
1015 /**
1016  * tracing_on - enable tracing buffers
1017  *
1018  * This function enables tracing buffers that may have been
1019  * disabled with tracing_off.
1020  */
1021 void tracing_on(void)
1022 {
1023 	tracer_tracing_on(&global_trace);
1024 }
1025 EXPORT_SYMBOL_GPL(tracing_on);
1026 
1027 
1028 static __always_inline void
1029 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1030 {
1031 	__this_cpu_write(trace_taskinfo_save, true);
1032 
1033 	/* If this is the temp buffer, we need to commit fully */
1034 	if (this_cpu_read(trace_buffered_event) == event) {
1035 		/* Length is in event->array[0] */
1036 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
1037 		/* Release the temp buffer */
1038 		this_cpu_dec(trace_buffered_event_cnt);
1039 		/* ring_buffer_unlock_commit() enables preemption */
1040 		preempt_enable_notrace();
1041 	} else
1042 		ring_buffer_unlock_commit(buffer);
1043 }
1044 
1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1046 		       const char *str, int size)
1047 {
1048 	struct ring_buffer_event *event;
1049 	struct trace_buffer *buffer;
1050 	struct print_entry *entry;
1051 	unsigned int trace_ctx;
1052 	int alloc;
1053 
1054 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1055 		return 0;
1056 
1057 	if (unlikely(tracing_selftest_running && tr == &global_trace))
1058 		return 0;
1059 
1060 	if (unlikely(tracing_disabled))
1061 		return 0;
1062 
1063 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1064 
1065 	trace_ctx = tracing_gen_ctx();
1066 	buffer = tr->array_buffer.buffer;
1067 	ring_buffer_nest_start(buffer);
1068 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1069 					    trace_ctx);
1070 	if (!event) {
1071 		size = 0;
1072 		goto out;
1073 	}
1074 
1075 	entry = ring_buffer_event_data(event);
1076 	entry->ip = ip;
1077 
1078 	memcpy(&entry->buf, str, size);
1079 
1080 	/* Add a newline if necessary */
1081 	if (entry->buf[size - 1] != '\n') {
1082 		entry->buf[size] = '\n';
1083 		entry->buf[size + 1] = '\0';
1084 	} else
1085 		entry->buf[size] = '\0';
1086 
1087 	__buffer_unlock_commit(buffer, event);
1088 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1089  out:
1090 	ring_buffer_nest_end(buffer);
1091 	return size;
1092 }
1093 EXPORT_SYMBOL_GPL(__trace_array_puts);
1094 
1095 /**
1096  * __trace_puts - write a constant string into the trace buffer.
1097  * @ip:	   The address of the caller
1098  * @str:   The constant string to write
1099  * @size:  The size of the string.
1100  */
1101 int __trace_puts(unsigned long ip, const char *str, int size)
1102 {
1103 	return __trace_array_puts(&global_trace, ip, str, size);
1104 }
1105 EXPORT_SYMBOL_GPL(__trace_puts);
1106 
1107 /**
1108  * __trace_bputs - write the pointer to a constant string into trace buffer
1109  * @ip:	   The address of the caller
1110  * @str:   The constant string to write to the buffer to
1111  */
1112 int __trace_bputs(unsigned long ip, const char *str)
1113 {
1114 	struct ring_buffer_event *event;
1115 	struct trace_buffer *buffer;
1116 	struct bputs_entry *entry;
1117 	unsigned int trace_ctx;
1118 	int size = sizeof(struct bputs_entry);
1119 	int ret = 0;
1120 
1121 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1122 		return 0;
1123 
1124 	if (unlikely(tracing_selftest_running || tracing_disabled))
1125 		return 0;
1126 
1127 	trace_ctx = tracing_gen_ctx();
1128 	buffer = global_trace.array_buffer.buffer;
1129 
1130 	ring_buffer_nest_start(buffer);
1131 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1132 					    trace_ctx);
1133 	if (!event)
1134 		goto out;
1135 
1136 	entry = ring_buffer_event_data(event);
1137 	entry->ip			= ip;
1138 	entry->str			= str;
1139 
1140 	__buffer_unlock_commit(buffer, event);
1141 	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1142 
1143 	ret = 1;
1144  out:
1145 	ring_buffer_nest_end(buffer);
1146 	return ret;
1147 }
1148 EXPORT_SYMBOL_GPL(__trace_bputs);
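/*
 * Callers normally reach the two helpers above through the trace_puts()
 * macro, which picks __trace_bputs() for string literals and falls back
 * to __trace_puts() otherwise, e.g.:
 *
 *	trace_puts("hit the slow path\n");
 */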
1149 
1150 #ifdef CONFIG_TRACER_SNAPSHOT
1151 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1152 					   void *cond_data)
1153 {
1154 	struct tracer *tracer = tr->current_trace;
1155 	unsigned long flags;
1156 
1157 	if (in_nmi()) {
1158 		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1159 		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
1160 		return;
1161 	}
1162 
1163 	if (!tr->allocated_snapshot) {
1164 		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1165 		trace_array_puts(tr, "*** stopping trace here!   ***\n");
1166 		tracer_tracing_off(tr);
1167 		return;
1168 	}
1169 
1170 	/* Note, snapshot can not be used when the tracer uses it */
1171 	if (tracer->use_max_tr) {
1172 		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1173 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1174 		return;
1175 	}
1176 
1177 	local_irq_save(flags);
1178 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1179 	local_irq_restore(flags);
1180 }
1181 
1182 void tracing_snapshot_instance(struct trace_array *tr)
1183 {
1184 	tracing_snapshot_instance_cond(tr, NULL);
1185 }
1186 
1187 /**
1188  * tracing_snapshot - take a snapshot of the current buffer.
1189  *
1190  * This causes a swap between the snapshot buffer and the current live
1191  * tracing buffer. You can use this to take snapshots of the live
1192  * trace when some condition is triggered, but continue to trace.
1193  *
1194  * Note, make sure to allocate the snapshot with either
1195  * a tracing_snapshot_alloc(), or by doing it manually
1196  * with: echo 1 > /sys/kernel/tracing/snapshot
1197  *
1198  * If the snapshot buffer is not allocated, it will stop tracing.
1199  * Basically making a permanent snapshot.
1200  */
1201 void tracing_snapshot(void)
1202 {
1203 	struct trace_array *tr = &global_trace;
1204 
1205 	tracing_snapshot_instance(tr);
1206 }
1207 EXPORT_SYMBOL_GPL(tracing_snapshot);
1208 
1209 /**
1210  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1211  * @tr:		The tracing instance to snapshot
1212  * @cond_data:	The data to be tested conditionally, and possibly saved
1213  *
1214  * This is the same as tracing_snapshot() except that the snapshot is
1215  * conditional - the snapshot will only happen if the
1216  * cond_snapshot.update() implementation receiving the cond_data
1217  * returns true, which means that the trace array's cond_snapshot
1218  * update() operation used the cond_data to determine whether the
1219  * snapshot should be taken, and if it was, presumably saved it along
1220  * with the snapshot.
1221  */
1222 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1223 {
1224 	tracing_snapshot_instance_cond(tr, cond_data);
1225 }
1226 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1227 
1228 /**
1229  * tracing_cond_snapshot_data - get the user data associated with a snapshot
1230  * @tr:		The tracing instance
1231  *
1232  * When the user enables a conditional snapshot using
1233  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1234  * with the snapshot.  This accessor is used to retrieve it.
1235  *
1236  * Should not be called from cond_snapshot.update(), since it takes
1237  * the tr->max_lock lock, which the code calling
1238  * cond_snapshot.update() has already done.
1239  *
1240  * Returns the cond_data associated with the trace array's snapshot.
1241  */
1242 void *tracing_cond_snapshot_data(struct trace_array *tr)
1243 {
1244 	void *cond_data = NULL;
1245 
1246 	local_irq_disable();
1247 	arch_spin_lock(&tr->max_lock);
1248 
1249 	if (tr->cond_snapshot)
1250 		cond_data = tr->cond_snapshot->cond_data;
1251 
1252 	arch_spin_unlock(&tr->max_lock);
1253 	local_irq_enable();
1254 
1255 	return cond_data;
1256 }
1257 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1258 
1259 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1260 					struct array_buffer *size_buf, int cpu_id);
1261 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1262 
1263 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1264 {
1265 	int ret;
1266 
1267 	if (!tr->allocated_snapshot) {
1268 
1269 		/* allocate spare buffer */
1270 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1271 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1272 		if (ret < 0)
1273 			return ret;
1274 
1275 		tr->allocated_snapshot = true;
1276 	}
1277 
1278 	return 0;
1279 }
1280 
1281 static void free_snapshot(struct trace_array *tr)
1282 {
1283 	/*
1284 	 * We don't free the ring buffer; instead, we resize it because
1285 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1286 	 * we want to preserve it.
1287 	 */
1288 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1289 	set_buffer_entries(&tr->max_buffer, 1);
1290 	tracing_reset_online_cpus(&tr->max_buffer);
1291 	tr->allocated_snapshot = false;
1292 }
1293 
1294 /**
1295  * tracing_alloc_snapshot - allocate snapshot buffer.
1296  *
1297  * This only allocates the snapshot buffer if it isn't already
1298  * allocated - it doesn't also take a snapshot.
1299  *
1300  * This is meant to be used in cases where the snapshot buffer needs
1301  * to be set up for events that can't sleep but need to be able to
1302  * trigger a snapshot.
1303  */
1304 int tracing_alloc_snapshot(void)
1305 {
1306 	struct trace_array *tr = &global_trace;
1307 	int ret;
1308 
1309 	ret = tracing_alloc_snapshot_instance(tr);
1310 	WARN_ON(ret < 0);
1311 
1312 	return ret;
1313 }
1314 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1315 
1316 /**
1317  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1318  *
1319  * This is similar to tracing_snapshot(), but it will allocate the
1320  * snapshot buffer if it isn't already allocated. Use this only
1321  * where it is safe to sleep, as the allocation may sleep.
1322  *
1323  * This causes a swap between the snapshot buffer and the current live
1324  * tracing buffer. You can use this to take snapshots of the live
1325  * trace when some condition is triggered, but continue to trace.
1326  */
1327 void tracing_snapshot_alloc(void)
1328 {
1329 	int ret;
1330 
1331 	ret = tracing_alloc_snapshot();
1332 	if (ret < 0)
1333 		return;
1334 
1335 	tracing_snapshot();
1336 }
1337 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
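/*
 * Typical use from kernel code (a sketch): allocate the spare buffer once
 * from a context that may sleep, then trigger snapshots wherever the
 * interesting condition fires ("condition_hit" is just a placeholder):
 *
 *	tracing_alloc_snapshot();
 *	...
 *	if (condition_hit)
 *		tracing_snapshot();
 */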
1338 
1339 /**
1340  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1341  * @tr:		The tracing instance
1342  * @cond_data:	User data to associate with the snapshot
1343  * @update:	Implementation of the cond_snapshot update function
1344  *
1345  * Check whether the conditional snapshot for the given instance has
1346  * already been enabled, or if the current tracer is already using a
1347  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1348  * save the cond_data and update function inside.
1349  *
1350  * Returns 0 if successful, error otherwise.
1351  */
1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1353 				 cond_update_fn_t update)
1354 {
1355 	struct cond_snapshot *cond_snapshot;
1356 	int ret = 0;
1357 
1358 	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1359 	if (!cond_snapshot)
1360 		return -ENOMEM;
1361 
1362 	cond_snapshot->cond_data = cond_data;
1363 	cond_snapshot->update = update;
1364 
1365 	mutex_lock(&trace_types_lock);
1366 
1367 	ret = tracing_alloc_snapshot_instance(tr);
1368 	if (ret)
1369 		goto fail_unlock;
1370 
1371 	if (tr->current_trace->use_max_tr) {
1372 		ret = -EBUSY;
1373 		goto fail_unlock;
1374 	}
1375 
1376 	/*
1377 	 * The cond_snapshot can only change to NULL without the
1378 	 * trace_types_lock. We don't care if we race with it going
1379 	 * to NULL, but we want to make sure that it's not set to
1380 	 * something other than NULL when we get here, which we can
1381 	 * do safely with only holding the trace_types_lock and not
1382 	 * having to take the max_lock.
1383 	 */
1384 	if (tr->cond_snapshot) {
1385 		ret = -EBUSY;
1386 		goto fail_unlock;
1387 	}
1388 
1389 	local_irq_disable();
1390 	arch_spin_lock(&tr->max_lock);
1391 	tr->cond_snapshot = cond_snapshot;
1392 	arch_spin_unlock(&tr->max_lock);
1393 	local_irq_enable();
1394 
1395 	mutex_unlock(&trace_types_lock);
1396 
1397 	return ret;
1398 
1399  fail_unlock:
1400 	mutex_unlock(&trace_types_lock);
1401 	kfree(cond_snapshot);
1402 	return ret;
1403 }
1404 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
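/*
 * Sketch of the conditional snapshot API above (the callback, variables
 * and threshold are hypothetical). The cond_data handed to
 * tracing_snapshot_cond() is what the update() callback sees:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = cond_data;
 *
 *		return my_measured_value > *threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_threshold, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_threshold);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */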
1405 
1406 /**
1407  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1408  * @tr:		The tracing instance
1409  *
1410  * Check whether the conditional snapshot for the given instance is
1411  * enabled; if so, free the cond_snapshot associated with it,
1412  * otherwise return -EINVAL.
1413  *
1414  * Returns 0 if successful, error otherwise.
1415  */
1416 int tracing_snapshot_cond_disable(struct trace_array *tr)
1417 {
1418 	int ret = 0;
1419 
1420 	local_irq_disable();
1421 	arch_spin_lock(&tr->max_lock);
1422 
1423 	if (!tr->cond_snapshot)
1424 		ret = -EINVAL;
1425 	else {
1426 		kfree(tr->cond_snapshot);
1427 		tr->cond_snapshot = NULL;
1428 	}
1429 
1430 	arch_spin_unlock(&tr->max_lock);
1431 	local_irq_enable();
1432 
1433 	return ret;
1434 }
1435 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1436 #else
1437 void tracing_snapshot(void)
1438 {
1439 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1440 }
1441 EXPORT_SYMBOL_GPL(tracing_snapshot);
1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1443 {
1444 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1445 }
1446 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1447 int tracing_alloc_snapshot(void)
1448 {
1449 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1450 	return -ENODEV;
1451 }
1452 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1453 void tracing_snapshot_alloc(void)
1454 {
1455 	/* Give warning */
1456 	tracing_snapshot();
1457 }
1458 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1459 void *tracing_cond_snapshot_data(struct trace_array *tr)
1460 {
1461 	return NULL;
1462 }
1463 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1465 {
1466 	return -ENODEV;
1467 }
1468 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1469 int tracing_snapshot_cond_disable(struct trace_array *tr)
1470 {
1471 	return false;
1472 }
1473 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1474 #define free_snapshot(tr)	do { } while (0)
1475 #endif /* CONFIG_TRACER_SNAPSHOT */
1476 
1477 void tracer_tracing_off(struct trace_array *tr)
1478 {
1479 	if (tr->array_buffer.buffer)
1480 		ring_buffer_record_off(tr->array_buffer.buffer);
1481 	/*
1482 	 * This flag is looked at when buffers haven't been allocated
1483 	 * yet, or by some tracers (like irqsoff), that just want to
1484 	 * know if the ring buffer has been disabled, but it can handle
1485 	 * races where it gets disabled while we still do a record.
1486 	 * As the check is in the fast path of the tracers, it is more
1487 	 * important to be fast than accurate.
1488 	 */
1489 	tr->buffer_disabled = 1;
1490 	/* Make the flag seen by readers */
1491 	smp_wmb();
1492 }
1493 
1494 /**
1495  * tracing_off - turn off tracing buffers
1496  *
1497  * This function stops the tracing buffers from recording data.
1498  * It does not disable any overhead the tracers themselves may
1499  * be causing. This function simply causes all recording to
1500  * the ring buffers to fail.
1501  */
1502 void tracing_off(void)
1503 {
1504 	tracer_tracing_off(&global_trace);
1505 }
1506 EXPORT_SYMBOL_GPL(tracing_off);
1507 
1508 void disable_trace_on_warning(void)
1509 {
1510 	if (__disable_trace_on_warning) {
1511 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1512 			"Disabling tracing due to warning\n");
1513 		tracing_off();
1514 	}
1515 }
1516 
1517 /**
1518  * tracer_tracing_is_on - show real state of ring buffer enabled
1519  * @tr : the trace array to know if ring buffer is enabled
1520  *
1521  * Shows real state of the ring buffer if it is enabled or not.
1522  */
1523 bool tracer_tracing_is_on(struct trace_array *tr)
1524 {
1525 	if (tr->array_buffer.buffer)
1526 		return ring_buffer_record_is_on(tr->array_buffer.buffer);
1527 	return !tr->buffer_disabled;
1528 }
1529 
1530 /**
1531  * tracing_is_on - show state of ring buffers enabled
1532  */
1533 int tracing_is_on(void)
1534 {
1535 	return tracer_tracing_is_on(&global_trace);
1536 }
1537 EXPORT_SYMBOL_GPL(tracing_is_on);
1538 
1539 static int __init set_buf_size(char *str)
1540 {
1541 	unsigned long buf_size;
1542 
1543 	if (!str)
1544 		return 0;
1545 	buf_size = memparse(str, &str);
1546 	/*
1547 	 * nr_entries can not be zero and the startup
1548 	 * tests require some buffer space. Therefore
1549 	 * ensure we have at least 4096 bytes of buffer.
1550 	 */
1551 	trace_buf_size = max(4096UL, buf_size);
1552 	return 1;
1553 }
1554 __setup("trace_buf_size=", set_buf_size);
1555 
1556 static int __init set_tracing_thresh(char *str)
1557 {
1558 	unsigned long threshold;
1559 	int ret;
1560 
1561 	if (!str)
1562 		return 0;
1563 	ret = kstrtoul(str, 0, &threshold);
1564 	if (ret < 0)
1565 		return 0;
1566 	tracing_thresh = threshold * 1000;
1567 	return 1;
1568 }
1569 __setup("tracing_thresh=", set_tracing_thresh);
1570 
1571 unsigned long nsecs_to_usecs(unsigned long nsecs)
1572 {
1573 	return nsecs / 1000;
1574 }
1575 
1576 /*
1577  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1578  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1579  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1580  * of strings in the order that the evals (enum) were defined.
1581  */
1582 #undef C
1583 #define C(a, b) b
1584 
1585 /* These must match the bit positions in trace_iterator_flags */
1586 static const char *trace_options[] = {
1587 	TRACE_FLAGS
1588 	NULL
1589 };
1590 
1591 static struct {
1592 	u64 (*func)(void);
1593 	const char *name;
1594 	int in_ns;		/* is this clock in nanoseconds? */
1595 } trace_clocks[] = {
1596 	{ trace_clock_local,		"local",	1 },
1597 	{ trace_clock_global,		"global",	1 },
1598 	{ trace_clock_counter,		"counter",	0 },
1599 	{ trace_clock_jiffies,		"uptime",	0 },
1600 	{ trace_clock,			"perf",		1 },
1601 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1602 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1603 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1604 	{ ktime_get_tai_fast_ns,	"tai",		1 },
1605 	ARCH_TRACE_CLOCKS
1606 };
1607 
1608 bool trace_clock_in_ns(struct trace_array *tr)
1609 {
1610 	if (trace_clocks[tr->clock_id].in_ns)
1611 		return true;
1612 
1613 	return false;
1614 }
1615 
1616 /*
1617  * trace_parser_get_init - gets the buffer for trace parser
1618  */
1619 int trace_parser_get_init(struct trace_parser *parser, int size)
1620 {
1621 	memset(parser, 0, sizeof(*parser));
1622 
1623 	parser->buffer = kmalloc(size, GFP_KERNEL);
1624 	if (!parser->buffer)
1625 		return 1;
1626 
1627 	parser->size = size;
1628 	return 0;
1629 }
1630 
1631 /*
1632  * trace_parser_put - frees the buffer for trace parser
1633  */
1634 void trace_parser_put(struct trace_parser *parser)
1635 {
1636 	kfree(parser->buffer);
1637 	parser->buffer = NULL;
1638 }
1639 
1640 /*
1641  * trace_get_user - reads the user input string separated by space
1642  * (matched by isspace(ch))
1643  *
1644  * For each string found the 'struct trace_parser' is updated,
1645  * and the function returns.
1646  *
1647  * Returns number of bytes read.
1648  *
1649  * See kernel/trace/trace.h for 'struct trace_parser' details.
1650  */
1651 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1652 	size_t cnt, loff_t *ppos)
1653 {
1654 	char ch;
1655 	size_t read = 0;
1656 	ssize_t ret;
1657 
1658 	if (!*ppos)
1659 		trace_parser_clear(parser);
1660 
1661 	ret = get_user(ch, ubuf++);
1662 	if (ret)
1663 		goto out;
1664 
1665 	read++;
1666 	cnt--;
1667 
1668 	/*
1669 	 * The parser is not finished with the last write,
1670 	 * continue reading the user input without skipping spaces.
1671 	 */
1672 	if (!parser->cont) {
1673 		/* skip white space */
1674 		while (cnt && isspace(ch)) {
1675 			ret = get_user(ch, ubuf++);
1676 			if (ret)
1677 				goto out;
1678 			read++;
1679 			cnt--;
1680 		}
1681 
1682 		parser->idx = 0;
1683 
1684 		/* only spaces were written */
1685 		if (isspace(ch) || !ch) {
1686 			*ppos += read;
1687 			ret = read;
1688 			goto out;
1689 		}
1690 	}
1691 
1692 	/* read the non-space input */
1693 	while (cnt && !isspace(ch) && ch) {
1694 		if (parser->idx < parser->size - 1)
1695 			parser->buffer[parser->idx++] = ch;
1696 		else {
1697 			ret = -EINVAL;
1698 			goto out;
1699 		}
1700 		ret = get_user(ch, ubuf++);
1701 		if (ret)
1702 			goto out;
1703 		read++;
1704 		cnt--;
1705 	}
1706 
1707 	/* We either got finished input or we have to wait for another call. */
1708 	if (isspace(ch) || !ch) {
1709 		parser->buffer[parser->idx] = 0;
1710 		parser->cont = false;
1711 	} else if (parser->idx < parser->size - 1) {
1712 		parser->cont = true;
1713 		parser->buffer[parser->idx++] = ch;
1714 		/* Make sure the parsed string always terminates with '\0'. */
1715 		parser->buffer[parser->idx] = 0;
1716 	} else {
1717 		ret = -EINVAL;
1718 		goto out;
1719 	}
1720 
1721 	*ppos += read;
1722 	ret = read;
1723 
1724 out:
1725 	return ret;
1726 }
1727 
1728 /* TODO add a seq_buf_to_buffer() */
1729 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1730 {
1731 	int len;
1732 
1733 	if (trace_seq_used(s) <= s->seq.readpos)
1734 		return -EBUSY;
1735 
1736 	len = trace_seq_used(s) - s->seq.readpos;
1737 	if (cnt > len)
1738 		cnt = len;
1739 	memcpy(buf, s->buffer + s->seq.readpos, cnt);
1740 
1741 	s->seq.readpos += cnt;
1742 	return cnt;
1743 }
1744 
1745 unsigned long __read_mostly	tracing_thresh;
1746 
1747 #ifdef CONFIG_TRACER_MAX_TRACE
1748 static const struct file_operations tracing_max_lat_fops;
1749 
1750 #ifdef LATENCY_FS_NOTIFY
1751 
1752 static struct workqueue_struct *fsnotify_wq;
1753 
1754 static void latency_fsnotify_workfn(struct work_struct *work)
1755 {
1756 	struct trace_array *tr = container_of(work, struct trace_array,
1757 					      fsnotify_work);
1758 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1759 }
1760 
1761 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1762 {
1763 	struct trace_array *tr = container_of(iwork, struct trace_array,
1764 					      fsnotify_irqwork);
1765 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1766 }
1767 
1768 static void trace_create_maxlat_file(struct trace_array *tr,
1769 				     struct dentry *d_tracer)
1770 {
1771 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1772 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1773 	tr->d_max_latency = trace_create_file("tracing_max_latency",
1774 					      TRACE_MODE_WRITE,
1775 					      d_tracer, tr,
1776 					      &tracing_max_lat_fops);
1777 }
1778 
1779 __init static int latency_fsnotify_init(void)
1780 {
1781 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1782 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1783 	if (!fsnotify_wq) {
1784 		pr_err("Unable to allocate tr_max_lat_wq\n");
1785 		return -ENOMEM;
1786 	}
1787 	return 0;
1788 }
1789 
1790 late_initcall_sync(latency_fsnotify_init);
1791 
1792 void latency_fsnotify(struct trace_array *tr)
1793 {
1794 	if (!fsnotify_wq)
1795 		return;
1796 	/*
1797 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1798 	 * possible that we are called from __schedule() or do_idle(), which
1799 	 * could cause a deadlock.
1800 	 */
1801 	irq_work_queue(&tr->fsnotify_irqwork);
1802 }
1803 
1804 #else /* !LATENCY_FS_NOTIFY */
1805 
1806 #define trace_create_maxlat_file(tr, d_tracer)				\
1807 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
1808 			  d_tracer, tr, &tracing_max_lat_fops)
1809 
1810 #endif
1811 
1812 /*
1813  * Copy the new maximum trace into the separate maximum-trace
1814  * structure. (this way the maximum trace is permanently saved,
1815  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1816  */
1817 static void
1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1819 {
1820 	struct array_buffer *trace_buf = &tr->array_buffer;
1821 	struct array_buffer *max_buf = &tr->max_buffer;
1822 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1823 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1824 
1825 	max_buf->cpu = cpu;
1826 	max_buf->time_start = data->preempt_timestamp;
1827 
1828 	max_data->saved_latency = tr->max_latency;
1829 	max_data->critical_start = data->critical_start;
1830 	max_data->critical_end = data->critical_end;
1831 
1832 	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1833 	max_data->pid = tsk->pid;
1834 	/*
1835 	 * If tsk == current, then use current_uid(), as that does not use
1836 	 * RCU. The irq tracer can be called out of RCU scope.
1837 	 */
1838 	if (tsk == current)
1839 		max_data->uid = current_uid();
1840 	else
1841 		max_data->uid = task_uid(tsk);
1842 
1843 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1844 	max_data->policy = tsk->policy;
1845 	max_data->rt_priority = tsk->rt_priority;
1846 
1847 	/* record this task's comm */
1848 	tracing_record_cmdline(tsk);
1849 	latency_fsnotify(tr);
1850 }
1851 
1852 /**
1853  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1854  * @tr: tracer
1855  * @tsk: the task with the latency
1856  * @cpu: The cpu that initiated the trace.
1857  * @cond_data: User data associated with a conditional snapshot
1858  *
1859  * Flip the buffers between the @tr and the max_tr and record information
1860  * about which task was the cause of this latency.
1861  */
1862 void
1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1864 	      void *cond_data)
1865 {
1866 	if (tr->stop_count)
1867 		return;
1868 
1869 	WARN_ON_ONCE(!irqs_disabled());
1870 
1871 	if (!tr->allocated_snapshot) {
1872 		/* Only the nop tracer should hit this when disabling */
1873 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1874 		return;
1875 	}
1876 
1877 	arch_spin_lock(&tr->max_lock);
1878 
1879 	/* Inherit the recordable setting from array_buffer */
1880 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1881 		ring_buffer_record_on(tr->max_buffer.buffer);
1882 	else
1883 		ring_buffer_record_off(tr->max_buffer.buffer);
1884 
1885 #ifdef CONFIG_TRACER_SNAPSHOT
1886 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1887 		arch_spin_unlock(&tr->max_lock);
1888 		return;
1889 	}
1890 #endif
1891 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1892 
1893 	__update_max_tr(tr, tsk, cpu);
1894 
1895 	arch_spin_unlock(&tr->max_lock);
1896 }
1897 
1898 /**
1899  * update_max_tr_single - only copy one trace over, and reset the rest
1900  * @tr: tracer
1901  * @tsk: task with the latency
1902  * @cpu: the cpu of the buffer to copy.
1903  *
1904  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1905  */
1906 void
1907 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1908 {
1909 	int ret;
1910 
1911 	if (tr->stop_count)
1912 		return;
1913 
1914 	WARN_ON_ONCE(!irqs_disabled());
1915 	if (!tr->allocated_snapshot) {
1916 		/* Only the nop tracer should hit this when disabling */
1917 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1918 		return;
1919 	}
1920 
1921 	arch_spin_lock(&tr->max_lock);
1922 
1923 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1924 
1925 	if (ret == -EBUSY) {
1926 		/*
1927 		 * We failed to swap the buffer due to a commit taking
1928 		 * place on this CPU. We fail to record, but we reset
1929 		 * the max trace buffer (no one writes directly to it)
1930 		 * and flag that it failed.
1931 		 * Another possible reason is that a resize is in progress.
1932 		 */
1933 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1934 			"Failed to swap buffers due to commit or resize in progress\n");
1935 	}
1936 
1937 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1938 
1939 	__update_max_tr(tr, tsk, cpu);
1940 	arch_spin_unlock(&tr->max_lock);
1941 }
1942 
1943 #endif /* CONFIG_TRACER_MAX_TRACE */
1944 
1945 static int wait_on_pipe(struct trace_iterator *iter, int full)
1946 {
1947 	/* Iterators are static, they should be filled or empty */
1948 	if (trace_buffer_iter(iter, iter->cpu_file))
1949 		return 0;
1950 
1951 	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1952 				full);
1953 }
1954 
1955 #ifdef CONFIG_FTRACE_STARTUP_TEST
1956 static bool selftests_can_run;
1957 
1958 struct trace_selftests {
1959 	struct list_head		list;
1960 	struct tracer			*type;
1961 };
1962 
1963 static LIST_HEAD(postponed_selftests);
1964 
1965 static int save_selftest(struct tracer *type)
1966 {
1967 	struct trace_selftests *selftest;
1968 
1969 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1970 	if (!selftest)
1971 		return -ENOMEM;
1972 
1973 	selftest->type = type;
1974 	list_add(&selftest->list, &postponed_selftests);
1975 	return 0;
1976 }
1977 
1978 static int run_tracer_selftest(struct tracer *type)
1979 {
1980 	struct trace_array *tr = &global_trace;
1981 	struct tracer *saved_tracer = tr->current_trace;
1982 	int ret;
1983 
1984 	if (!type->selftest || tracing_selftest_disabled)
1985 		return 0;
1986 
1987 	/*
1988 	 * If a tracer registers early in boot up (before scheduling is
1989 	 * initialized and such), then do not run its selftests yet.
1990 	 * Instead, run it a little later in the boot process.
1991 	 */
1992 	if (!selftests_can_run)
1993 		return save_selftest(type);
1994 
1995 	if (!tracing_is_on()) {
1996 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1997 			type->name);
1998 		return 0;
1999 	}
2000 
2001 	/*
2002 	 * Run a selftest on this tracer.
2003 	 * Here we reset the trace buffer, and set the current
2004 	 * tracer to be this tracer. The tracer can then run some
2005 	 * internal tracing to verify that everything is in order.
2006 	 * If we fail, we do not register this tracer.
2007 	 */
2008 	tracing_reset_online_cpus(&tr->array_buffer);
2009 
2010 	tr->current_trace = type;
2011 
2012 #ifdef CONFIG_TRACER_MAX_TRACE
2013 	if (type->use_max_tr) {
2014 		/* If we expanded the buffers, make sure the max is expanded too */
2015 		if (ring_buffer_expanded)
2016 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2017 					   RING_BUFFER_ALL_CPUS);
2018 		tr->allocated_snapshot = true;
2019 	}
2020 #endif
2021 
2022 	/* the test is responsible for initializing and enabling */
2023 	pr_info("Testing tracer %s: ", type->name);
2024 	ret = type->selftest(type, tr);
2025 	/* the test is responsible for resetting too */
2026 	tr->current_trace = saved_tracer;
2027 	if (ret) {
2028 		printk(KERN_CONT "FAILED!\n");
2029 		/* Add the warning after printing 'FAILED' */
2030 		WARN_ON(1);
2031 		return -1;
2032 	}
2033 	/* Only reset on passing, to avoid touching corrupted buffers */
2034 	tracing_reset_online_cpus(&tr->array_buffer);
2035 
2036 #ifdef CONFIG_TRACER_MAX_TRACE
2037 	if (type->use_max_tr) {
2038 		tr->allocated_snapshot = false;
2039 
2040 		/* Shrink the max buffer again */
2041 		if (ring_buffer_expanded)
2042 			ring_buffer_resize(tr->max_buffer.buffer, 1,
2043 					   RING_BUFFER_ALL_CPUS);
2044 	}
2045 #endif
2046 
2047 	printk(KERN_CONT "PASSED\n");
2048 	return 0;
2049 }
2050 
2051 static int do_run_tracer_selftest(struct tracer *type)
2052 {
2053 	int ret;
2054 
2055 	/*
2056 	 * Tests can take a long time, especially if they are run one after the
2057 	 * other, as does happen during bootup when all the tracers are
2058 	 * registered. This could cause the soft lockup watchdog to trigger.
2059 	 */
2060 	cond_resched();
2061 
2062 	tracing_selftest_running = true;
2063 	ret = run_tracer_selftest(type);
2064 	tracing_selftest_running = false;
2065 
2066 	return ret;
2067 }
2068 
2069 static __init int init_trace_selftests(void)
2070 {
2071 	struct trace_selftests *p, *n;
2072 	struct tracer *t, **last;
2073 	int ret;
2074 
2075 	selftests_can_run = true;
2076 
2077 	mutex_lock(&trace_types_lock);
2078 
2079 	if (list_empty(&postponed_selftests))
2080 		goto out;
2081 
2082 	pr_info("Running postponed tracer tests:\n");
2083 
2084 	tracing_selftest_running = true;
2085 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2086 		/* This loop can take minutes when sanitizers are enabled, so
2087 		 * let's make sure we allow RCU processing.
2088 		 */
2089 		cond_resched();
2090 		ret = run_tracer_selftest(p->type);
2091 		/* If the test fails, then warn and remove from available_tracers */
2092 		if (ret < 0) {
2093 			WARN(1, "tracer: %s failed selftest, disabling\n",
2094 			     p->type->name);
2095 			last = &trace_types;
2096 			for (t = trace_types; t; t = t->next) {
2097 				if (t == p->type) {
2098 					*last = t->next;
2099 					break;
2100 				}
2101 				last = &t->next;
2102 			}
2103 		}
2104 		list_del(&p->list);
2105 		kfree(p);
2106 	}
2107 	tracing_selftest_running = false;
2108 
2109  out:
2110 	mutex_unlock(&trace_types_lock);
2111 
2112 	return 0;
2113 }
2114 core_initcall(init_trace_selftests);
2115 #else
2116 static inline int run_tracer_selftest(struct tracer *type)
2117 {
2118 	return 0;
2119 }
2120 static inline int do_run_tracer_selftest(struct tracer *type)
2121 {
2122 	return 0;
2123 }
2124 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2125 
2126 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2127 
2128 static void __init apply_trace_boot_options(void);
2129 
2130 /**
2131  * register_tracer - register a tracer with the ftrace system.
2132  * @type: the plugin for the tracer
2133  *
2134  * Register a new plugin tracer.
2135  */
2136 int __init register_tracer(struct tracer *type)
2137 {
2138 	struct tracer *t;
2139 	int ret = 0;
2140 
2141 	if (!type->name) {
2142 		pr_info("Tracer must have a name\n");
2143 		return -1;
2144 	}
2145 
2146 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2147 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2148 		return -1;
2149 	}
2150 
2151 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2152 		pr_warn("Can not register tracer %s due to lockdown\n",
2153 			   type->name);
2154 		return -EPERM;
2155 	}
2156 
2157 	mutex_lock(&trace_types_lock);
2158 
2159 	for (t = trace_types; t; t = t->next) {
2160 		if (strcmp(type->name, t->name) == 0) {
2161 			/* already found */
2162 			pr_info("Tracer %s already registered\n",
2163 				type->name);
2164 			ret = -1;
2165 			goto out;
2166 		}
2167 	}
2168 
2169 	if (!type->set_flag)
2170 		type->set_flag = &dummy_set_flag;
2171 	if (!type->flags) {
2172 		/* allocate a dummy tracer_flags */
2173 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2174 		if (!type->flags) {
2175 			ret = -ENOMEM;
2176 			goto out;
2177 		}
2178 		type->flags->val = 0;
2179 		type->flags->opts = dummy_tracer_opt;
2180 	} else
2181 		if (!type->flags->opts)
2182 			type->flags->opts = dummy_tracer_opt;
2183 
2184 	/* store the tracer for __set_tracer_option */
2185 	type->flags->trace = type;
2186 
2187 	ret = do_run_tracer_selftest(type);
2188 	if (ret < 0)
2189 		goto out;
2190 
2191 	type->next = trace_types;
2192 	trace_types = type;
2193 	add_tracer_options(&global_trace, type);
2194 
2195  out:
2196 	mutex_unlock(&trace_types_lock);
2197 
2198 	if (ret || !default_bootup_tracer)
2199 		goto out_unlock;
2200 
2201 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2202 		goto out_unlock;
2203 
2204 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2205 	/* Do we want this tracer to start on bootup? */
2206 	tracing_set_tracer(&global_trace, type->name);
2207 	default_bootup_tracer = NULL;
2208 
2209 	apply_trace_boot_options();
2210 
2211 	/* disable other selftests, since this will break it. */
2212 	disable_tracing_selftest("running a tracer");
2213 
2214  out_unlock:
2215 	return ret;
2216 }
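
/*
 * A registration sketch, assuming a hypothetical tracer plugin: only the
 * name, init and reset callbacks are filled in; real tracers usually
 * provide more hooks. The example_* identifiers are illustrative only.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	/* register_tracer() runs the selftest (if any) before listing it */
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);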
2217 
2218 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2219 {
2220 	struct trace_buffer *buffer = buf->buffer;
2221 
2222 	if (!buffer)
2223 		return;
2224 
2225 	ring_buffer_record_disable(buffer);
2226 
2227 	/* Make sure all commits have finished */
2228 	synchronize_rcu();
2229 	ring_buffer_reset_cpu(buffer, cpu);
2230 
2231 	ring_buffer_record_enable(buffer);
2232 }
2233 
2234 void tracing_reset_online_cpus(struct array_buffer *buf)
2235 {
2236 	struct trace_buffer *buffer = buf->buffer;
2237 
2238 	if (!buffer)
2239 		return;
2240 
2241 	ring_buffer_record_disable(buffer);
2242 
2243 	/* Make sure all commits have finished */
2244 	synchronize_rcu();
2245 
2246 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2247 
2248 	ring_buffer_reset_online_cpus(buffer);
2249 
2250 	ring_buffer_record_enable(buffer);
2251 }
2252 
2253 /* Must have trace_types_lock held */
2254 void tracing_reset_all_online_cpus_unlocked(void)
2255 {
2256 	struct trace_array *tr;
2257 
2258 	lockdep_assert_held(&trace_types_lock);
2259 
2260 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2261 		if (!tr->clear_trace)
2262 			continue;
2263 		tr->clear_trace = false;
2264 		tracing_reset_online_cpus(&tr->array_buffer);
2265 #ifdef CONFIG_TRACER_MAX_TRACE
2266 		tracing_reset_online_cpus(&tr->max_buffer);
2267 #endif
2268 	}
2269 }
2270 
2271 void tracing_reset_all_online_cpus(void)
2272 {
2273 	mutex_lock(&trace_types_lock);
2274 	tracing_reset_all_online_cpus_unlocked();
2275 	mutex_unlock(&trace_types_lock);
2276 }
2277 
2278 /*
2279  * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2280  * is the tgid last observed corresponding to pid=i.
2281  */
2282 static int *tgid_map;
2283 
2284 /* The maximum valid index into tgid_map. */
2285 static size_t tgid_map_max;
2286 
2287 #define SAVED_CMDLINES_DEFAULT 128
2288 #define NO_CMDLINE_MAP UINT_MAX
2289 /*
2290  * Preemption must be disabled before acquiring trace_cmdline_lock.
2291  * The various trace_arrays' max_lock must be acquired in a context
2292  * where interrupt is disabled.
2293  */
2294 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2295 struct saved_cmdlines_buffer {
2296 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2297 	unsigned *map_cmdline_to_pid;
2298 	unsigned cmdline_num;
2299 	int cmdline_idx;
2300 	char *saved_cmdlines;
2301 };
2302 static struct saved_cmdlines_buffer *savedcmd;
2303 
2304 static inline char *get_saved_cmdlines(int idx)
2305 {
2306 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2307 }
2308 
2309 static inline void set_cmdline(int idx, const char *cmdline)
2310 {
2311 	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2312 }
2313 
2314 static int allocate_cmdlines_buffer(unsigned int val,
2315 				    struct saved_cmdlines_buffer *s)
2316 {
2317 	s->map_cmdline_to_pid = kmalloc_array(val,
2318 					      sizeof(*s->map_cmdline_to_pid),
2319 					      GFP_KERNEL);
2320 	if (!s->map_cmdline_to_pid)
2321 		return -ENOMEM;
2322 
2323 	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2324 	if (!s->saved_cmdlines) {
2325 		kfree(s->map_cmdline_to_pid);
2326 		return -ENOMEM;
2327 	}
2328 
2329 	s->cmdline_idx = 0;
2330 	s->cmdline_num = val;
2331 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2332 	       sizeof(s->map_pid_to_cmdline));
2333 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2334 	       val * sizeof(*s->map_cmdline_to_pid));
2335 
2336 	return 0;
2337 }
2338 
2339 static int trace_create_savedcmd(void)
2340 {
2341 	int ret;
2342 
2343 	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2344 	if (!savedcmd)
2345 		return -ENOMEM;
2346 
2347 	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2348 	if (ret < 0) {
2349 		kfree(savedcmd);
2350 		savedcmd = NULL;
2351 		return -ENOMEM;
2352 	}
2353 
2354 	return 0;
2355 }
2356 
2357 int is_tracing_stopped(void)
2358 {
2359 	return global_trace.stop_count;
2360 }
2361 
2362 /**
2363  * tracing_start - quick start of the tracer
2364  *
2365  * If tracing is enabled but was stopped by tracing_stop,
2366  * this will start the tracer back up.
2367  */
2368 void tracing_start(void)
2369 {
2370 	struct trace_buffer *buffer;
2371 	unsigned long flags;
2372 
2373 	if (tracing_disabled)
2374 		return;
2375 
2376 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2377 	if (--global_trace.stop_count) {
2378 		if (global_trace.stop_count < 0) {
2379 			/* Someone screwed up their debugging */
2380 			WARN_ON_ONCE(1);
2381 			global_trace.stop_count = 0;
2382 		}
2383 		goto out;
2384 	}
2385 
2386 	/* Prevent the buffers from switching */
2387 	arch_spin_lock(&global_trace.max_lock);
2388 
2389 	buffer = global_trace.array_buffer.buffer;
2390 	if (buffer)
2391 		ring_buffer_record_enable(buffer);
2392 
2393 #ifdef CONFIG_TRACER_MAX_TRACE
2394 	buffer = global_trace.max_buffer.buffer;
2395 	if (buffer)
2396 		ring_buffer_record_enable(buffer);
2397 #endif
2398 
2399 	arch_spin_unlock(&global_trace.max_lock);
2400 
2401  out:
2402 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2403 }
2404 
2405 static void tracing_start_tr(struct trace_array *tr)
2406 {
2407 	struct trace_buffer *buffer;
2408 	unsigned long flags;
2409 
2410 	if (tracing_disabled)
2411 		return;
2412 
2413 	/* If global, we need to also start the max tracer */
2414 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2415 		return tracing_start();
2416 
2417 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2418 
2419 	if (--tr->stop_count) {
2420 		if (tr->stop_count < 0) {
2421 			/* Someone screwed up their debugging */
2422 			WARN_ON_ONCE(1);
2423 			tr->stop_count = 0;
2424 		}
2425 		goto out;
2426 	}
2427 
2428 	buffer = tr->array_buffer.buffer;
2429 	if (buffer)
2430 		ring_buffer_record_enable(buffer);
2431 
2432  out:
2433 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2434 }
2435 
2436 /**
2437  * tracing_stop - quick stop of the tracer
2438  *
2439  * Light weight way to stop tracing. Use in conjunction with
2440  * tracing_start.
2441  */
2442 void tracing_stop(void)
2443 {
2444 	struct trace_buffer *buffer;
2445 	unsigned long flags;
2446 
2447 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2448 	if (global_trace.stop_count++)
2449 		goto out;
2450 
2451 	/* Prevent the buffers from switching */
2452 	arch_spin_lock(&global_trace.max_lock);
2453 
2454 	buffer = global_trace.array_buffer.buffer;
2455 	if (buffer)
2456 		ring_buffer_record_disable(buffer);
2457 
2458 #ifdef CONFIG_TRACER_MAX_TRACE
2459 	buffer = global_trace.max_buffer.buffer;
2460 	if (buffer)
2461 		ring_buffer_record_disable(buffer);
2462 #endif
2463 
2464 	arch_spin_unlock(&global_trace.max_lock);
2465 
2466  out:
2467 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2468 }
2469 
2470 static void tracing_stop_tr(struct trace_array *tr)
2471 {
2472 	struct trace_buffer *buffer;
2473 	unsigned long flags;
2474 
2475 	/* If global, we need to also stop the max tracer */
2476 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2477 		return tracing_stop();
2478 
2479 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2480 	if (tr->stop_count++)
2481 		goto out;
2482 
2483 	buffer = tr->array_buffer.buffer;
2484 	if (buffer)
2485 		ring_buffer_record_disable(buffer);
2486 
2487  out:
2488 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2489 }
2490 
2491 static int trace_save_cmdline(struct task_struct *tsk)
2492 {
2493 	unsigned tpid, idx;
2494 
2495 	/* treat recording of idle task as a success */
2496 	if (!tsk->pid)
2497 		return 1;
2498 
2499 	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2500 
2501 	/*
2502 	 * It's not the end of the world if we don't get
2503 	 * the lock, but we also don't want to spin
2504 	 * nor do we want to disable interrupts,
2505 	 * so if we miss here, then better luck next time.
2506 	 *
2507 	 * This is called within the scheduler and wake up, so interrupts
2508 	 * had better be disabled and the run queue lock had better be held.
2509 	 */
2510 	lockdep_assert_preemption_disabled();
2511 	if (!arch_spin_trylock(&trace_cmdline_lock))
2512 		return 0;
2513 
2514 	idx = savedcmd->map_pid_to_cmdline[tpid];
2515 	if (idx == NO_CMDLINE_MAP) {
2516 		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2517 
2518 		savedcmd->map_pid_to_cmdline[tpid] = idx;
2519 		savedcmd->cmdline_idx = idx;
2520 	}
2521 
2522 	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2523 	set_cmdline(idx, tsk->comm);
2524 
2525 	arch_spin_unlock(&trace_cmdline_lock);
2526 
2527 	return 1;
2528 }
2529 
2530 static void __trace_find_cmdline(int pid, char comm[])
2531 {
2532 	unsigned map;
2533 	int tpid;
2534 
2535 	if (!pid) {
2536 		strcpy(comm, "<idle>");
2537 		return;
2538 	}
2539 
2540 	if (WARN_ON_ONCE(pid < 0)) {
2541 		strcpy(comm, "<XXX>");
2542 		return;
2543 	}
2544 
2545 	tpid = pid & (PID_MAX_DEFAULT - 1);
2546 	map = savedcmd->map_pid_to_cmdline[tpid];
2547 	if (map != NO_CMDLINE_MAP) {
2548 		tpid = savedcmd->map_cmdline_to_pid[map];
2549 		if (tpid == pid) {
2550 			strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2551 			return;
2552 		}
2553 	}
2554 	strcpy(comm, "<...>");
2555 }
2556 
2557 void trace_find_cmdline(int pid, char comm[])
2558 {
2559 	preempt_disable();
2560 	arch_spin_lock(&trace_cmdline_lock);
2561 
2562 	__trace_find_cmdline(pid, comm);
2563 
2564 	arch_spin_unlock(&trace_cmdline_lock);
2565 	preempt_enable();
2566 }
2567 
2568 static int *trace_find_tgid_ptr(int pid)
2569 {
2570 	/*
2571 	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2572 	 * if we observe a non-NULL tgid_map then we also observe the correct
2573 	 * tgid_map_max.
2574 	 */
2575 	int *map = smp_load_acquire(&tgid_map);
2576 
2577 	if (unlikely(!map || pid > tgid_map_max))
2578 		return NULL;
2579 
2580 	return &map[pid];
2581 }
2582 
2583 int trace_find_tgid(int pid)
2584 {
2585 	int *ptr = trace_find_tgid_ptr(pid);
2586 
2587 	return ptr ? *ptr : 0;
2588 }
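
/*
 * A lookup sketch, assuming a hypothetical output helper: when tgid
 * recording is enabled, the print path can map a saved pid back to its
 * thread group id; zero means the pid was never observed (or the map is
 * not allocated).
 */
static void example_print_tgid(struct trace_seq *s, int pid)
{
	int tgid = trace_find_tgid(pid);

	if (tgid)
		trace_seq_printf(s, "(%7d) ", tgid);
	else
		trace_seq_puts(s, "(-------) ");
}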
2589 
2590 static int trace_save_tgid(struct task_struct *tsk)
2591 {
2592 	int *ptr;
2593 
2594 	/* treat recording of idle task as a success */
2595 	if (!tsk->pid)
2596 		return 1;
2597 
2598 	ptr = trace_find_tgid_ptr(tsk->pid);
2599 	if (!ptr)
2600 		return 0;
2601 
2602 	*ptr = tsk->tgid;
2603 	return 1;
2604 }
2605 
2606 static bool tracing_record_taskinfo_skip(int flags)
2607 {
2608 	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2609 		return true;
2610 	if (!__this_cpu_read(trace_taskinfo_save))
2611 		return true;
2612 	return false;
2613 }
2614 
2615 /**
2616  * tracing_record_taskinfo - record the task info of a task
2617  *
2618  * @task:  task to record
2619  * @flags: TRACE_RECORD_CMDLINE for recording comm
2620  *         TRACE_RECORD_TGID for recording tgid
2621  */
2622 void tracing_record_taskinfo(struct task_struct *task, int flags)
2623 {
2624 	bool done;
2625 
2626 	if (tracing_record_taskinfo_skip(flags))
2627 		return;
2628 
2629 	/*
2630 	 * Record as much task information as possible. If some fail, continue
2631 	 * to try to record the others.
2632 	 */
2633 	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2634 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2635 
2636 	/* If recording any information failed, retry again soon. */
2637 	if (!done)
2638 		return;
2639 
2640 	__this_cpu_write(trace_taskinfo_save, false);
2641 }
2642 
2643 /**
2644  * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2645  *
2646  * @prev: previous task during sched_switch
2647  * @next: next task during sched_switch
2648  * @flags: TRACE_RECORD_CMDLINE for recording comm
2649  *         TRACE_RECORD_TGID for recording tgid
2650  */
2651 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2652 					  struct task_struct *next, int flags)
2653 {
2654 	bool done;
2655 
2656 	if (tracing_record_taskinfo_skip(flags))
2657 		return;
2658 
2659 	/*
2660 	 * Record as much task information as possible. If some fail, continue
2661 	 * to try to record the others.
2662 	 */
2663 	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2664 	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2665 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2666 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2667 
2668 	/* If recording any information failed, retry again soon. */
2669 	if (!done)
2670 		return;
2671 
2672 	__this_cpu_write(trace_taskinfo_save, false);
2673 }
2674 
2675 /* Helpers to record a specific task information */
2676 void tracing_record_cmdline(struct task_struct *task)
2677 {
2678 	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2679 }
2680 
2681 void tracing_record_tgid(struct task_struct *task)
2682 {
2683 	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2684 }
2685 
2686 /*
2687  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2688  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2689  * simplifies those functions and keeps them in sync.
2690  */
2691 enum print_line_t trace_handle_return(struct trace_seq *s)
2692 {
2693 	return trace_seq_has_overflowed(s) ?
2694 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2695 }
2696 EXPORT_SYMBOL_GPL(trace_handle_return);
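
/*
 * A usage sketch, assuming a hypothetical event output callback: the
 * handler writes into iter->seq and lets trace_handle_return() translate
 * a possible overflow instead of open-coding trace_seq_has_overflowed().
 */
static enum print_line_t example_trace_output(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example event %ld\n", iter->idx);
	return trace_handle_return(s);
}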
2697 
2698 static unsigned short migration_disable_value(void)
2699 {
2700 #if defined(CONFIG_SMP)
2701 	return current->migration_disabled;
2702 #else
2703 	return 0;
2704 #endif
2705 }
2706 
2707 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2708 {
2709 	unsigned int trace_flags = irqs_status;
2710 	unsigned int pc;
2711 
2712 	pc = preempt_count();
2713 
2714 	if (pc & NMI_MASK)
2715 		trace_flags |= TRACE_FLAG_NMI;
2716 	if (pc & HARDIRQ_MASK)
2717 		trace_flags |= TRACE_FLAG_HARDIRQ;
2718 	if (in_serving_softirq())
2719 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2720 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2721 		trace_flags |= TRACE_FLAG_BH_OFF;
2722 
2723 	if (tif_need_resched())
2724 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2725 	if (test_preempt_need_resched())
2726 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2727 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2728 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2729 }
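
/*
 * A decoding sketch for the value packed above (illustrative only; the
 * record path unpacks these fields into the trace entry itself): the low
 * nibble is the clamped preemption count, the next nibble the
 * migration-disable depth, and the upper 16 bits the TRACE_FLAG_* bits.
 */
static inline void example_decode_trace_ctx(unsigned int trace_ctx,
					    unsigned int *preempt_cnt,
					    unsigned int *migrate_disable,
					    unsigned int *flags)
{
	*preempt_cnt	 = trace_ctx & 0xf;
	*migrate_disable = (trace_ctx >> 4) & 0xf;
	*flags		 = trace_ctx >> 16;
}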
2730 
2731 struct ring_buffer_event *
2732 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2733 			  int type,
2734 			  unsigned long len,
2735 			  unsigned int trace_ctx)
2736 {
2737 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2738 }
2739 
2740 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2741 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2742 static int trace_buffered_event_ref;
2743 
2744 /**
2745  * trace_buffered_event_enable - enable buffering events
2746  *
2747  * When events are being filtered, it is quicker to use a temporary
2748  * buffer to write the event data into if there's a likely chance
2749  * that it will not be committed. Discarding an event from the ring
2750  * buffer is not as fast as committing one, and is much slower than
2751  * copying the data on a commit.
2752  *
2753  * When an event is to be filtered, allocate per cpu buffers to
2754  * write the event data into, and if the event is filtered and discarded
2755  * it is simply dropped, otherwise, the entire data is to be committed
2756  * in one shot.
2757  */
2758 void trace_buffered_event_enable(void)
2759 {
2760 	struct ring_buffer_event *event;
2761 	struct page *page;
2762 	int cpu;
2763 
2764 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2765 
2766 	if (trace_buffered_event_ref++)
2767 		return;
2768 
2769 	for_each_tracing_cpu(cpu) {
2770 		page = alloc_pages_node(cpu_to_node(cpu),
2771 					GFP_KERNEL | __GFP_NORETRY, 0);
2772 		if (!page)
2773 			goto failed;
2774 
2775 		event = page_address(page);
2776 		memset(event, 0, sizeof(*event));
2777 
2778 		per_cpu(trace_buffered_event, cpu) = event;
2779 
2780 		preempt_disable();
2781 		if (cpu == smp_processor_id() &&
2782 		    __this_cpu_read(trace_buffered_event) !=
2783 		    per_cpu(trace_buffered_event, cpu))
2784 			WARN_ON_ONCE(1);
2785 		preempt_enable();
2786 	}
2787 
2788 	return;
2789  failed:
2790 	trace_buffered_event_disable();
2791 }
2792 
2793 static void enable_trace_buffered_event(void *data)
2794 {
2795 	/* Probably not needed, but do it anyway */
2796 	smp_rmb();
2797 	this_cpu_dec(trace_buffered_event_cnt);
2798 }
2799 
2800 static void disable_trace_buffered_event(void *data)
2801 {
2802 	this_cpu_inc(trace_buffered_event_cnt);
2803 }
2804 
2805 /**
2806  * trace_buffered_event_disable - disable buffering events
2807  *
2808  * When a filter is removed, it is faster to not use the buffered
2809  * events, and to commit directly into the ring buffer. Free up
2810  * the temp buffers when there are no more users. This requires
2811  * special synchronization with current events.
2812  */
2813 void trace_buffered_event_disable(void)
2814 {
2815 	int cpu;
2816 
2817 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2818 
2819 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2820 		return;
2821 
2822 	if (--trace_buffered_event_ref)
2823 		return;
2824 
2825 	preempt_disable();
2826 	/* For each CPU, set the buffer as used. */
2827 	smp_call_function_many(tracing_buffer_mask,
2828 			       disable_trace_buffered_event, NULL, 1);
2829 	preempt_enable();
2830 
2831 	/* Wait for all current users to finish */
2832 	synchronize_rcu();
2833 
2834 	for_each_tracing_cpu(cpu) {
2835 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2836 		per_cpu(trace_buffered_event, cpu) = NULL;
2837 	}
2838 	/*
2839 	 * Make sure trace_buffered_event is NULL before clearing
2840 	 * trace_buffered_event_cnt.
2841 	 */
2842 	smp_wmb();
2843 
2844 	preempt_disable();
2845 	/* Do the work on each cpu */
2846 	smp_call_function_many(tracing_buffer_mask,
2847 			       enable_trace_buffered_event, NULL, 1);
2848 	preempt_enable();
2849 }
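
/*
 * A pairing sketch, assuming a hypothetical caller: both calls are
 * reference counted and expect event_mutex to be held, typically
 * bracketing the lifetime of an event filter.
 */
static void example_set_event_filtering(bool enable)
{
	mutex_lock(&event_mutex);
	if (enable)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}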
2850 
2851 static struct trace_buffer *temp_buffer;
2852 
2853 struct ring_buffer_event *
2854 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2855 			  struct trace_event_file *trace_file,
2856 			  int type, unsigned long len,
2857 			  unsigned int trace_ctx)
2858 {
2859 	struct ring_buffer_event *entry;
2860 	struct trace_array *tr = trace_file->tr;
2861 	int val;
2862 
2863 	*current_rb = tr->array_buffer.buffer;
2864 
2865 	if (!tr->no_filter_buffering_ref &&
2866 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2867 		preempt_disable_notrace();
2868 		/*
2869 		 * Filtering is on, so try to use the per cpu buffer first.
2870 		 * This buffer will simulate a ring_buffer_event,
2871 		 * where the type_len is zero and the array[0] will
2872 		 * hold the full length.
2873 		 * (see include/linux/ring_buffer.h for details on
2874 		 *  how the ring_buffer_event is structured).
2875 		 *
2876 		 * Using a temp buffer during filtering and copying it
2877 		 * on a matched filter is quicker than writing directly
2878 		 * into the ring buffer and then discarding it when
2879 		 * it doesn't match. That is because the discard
2880 		 * requires several atomic operations to get right.
2881 		 * Copying on match and doing nothing on a failed match
2882 		 * is still quicker than no copy on match, but having
2883 		 * to discard out of the ring buffer on a failed match.
2884 		 */
2885 		if ((entry = __this_cpu_read(trace_buffered_event))) {
2886 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2887 
2888 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2889 
2890 			/*
2891 			 * Preemption is disabled, but interrupts and NMIs
2892 			 * can still come in now. If that happens after
2893 			 * the above increment, then it will have to go
2894 			 * back to the old method of allocating the event
2895 			 * on the ring buffer, and if the filter fails, it
2896 			 * will have to call ring_buffer_discard_commit()
2897 			 * to remove it.
2898 			 *
2899 			 * Need to also check the unlikely case that the
2900 			 * length is bigger than the temp buffer size.
2901 			 * If that happens, then the reserve is pretty much
2902 			 * guaranteed to fail, as the ring buffer currently
2903 			 * only allows events less than a page. But that may
2904 			 * change in the future, so let the ring buffer reserve
2905 			 * handle the failure in that case.
2906 			 */
2907 			if (val == 1 && likely(len <= max_len)) {
2908 				trace_event_setup(entry, type, trace_ctx);
2909 				entry->array[0] = len;
2910 				/* Return with preemption disabled */
2911 				return entry;
2912 			}
2913 			this_cpu_dec(trace_buffered_event_cnt);
2914 		}
2915 		/* __trace_buffer_lock_reserve() disables preemption */
2916 		preempt_enable_notrace();
2917 	}
2918 
2919 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2920 					    trace_ctx);
2921 	/*
2922 	 * If tracing is off, but we have triggers enabled
2923 	 * we still need to look at the event data. Use the temp_buffer
2924 	 * to store the trace event for the trigger to use. It's recursion
2925 	 * safe and will not be recorded anywhere.
2926 	 */
2927 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2928 		*current_rb = temp_buffer;
2929 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2930 						    trace_ctx);
2931 	}
2932 	return entry;
2933 }
2934 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2935 
2936 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2937 static DEFINE_MUTEX(tracepoint_printk_mutex);
2938 
2939 static void output_printk(struct trace_event_buffer *fbuffer)
2940 {
2941 	struct trace_event_call *event_call;
2942 	struct trace_event_file *file;
2943 	struct trace_event *event;
2944 	unsigned long flags;
2945 	struct trace_iterator *iter = tracepoint_print_iter;
2946 
2947 	/* We should never get here if iter is NULL */
2948 	if (WARN_ON_ONCE(!iter))
2949 		return;
2950 
2951 	event_call = fbuffer->trace_file->event_call;
2952 	if (!event_call || !event_call->event.funcs ||
2953 	    !event_call->event.funcs->trace)
2954 		return;
2955 
2956 	file = fbuffer->trace_file;
2957 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2958 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2959 	     !filter_match_preds(file->filter, fbuffer->entry)))
2960 		return;
2961 
2962 	event = &fbuffer->trace_file->event_call->event;
2963 
2964 	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2965 	trace_seq_init(&iter->seq);
2966 	iter->ent = fbuffer->entry;
2967 	event_call->event.funcs->trace(iter, 0, event);
2968 	trace_seq_putc(&iter->seq, 0);
2969 	printk("%s", iter->seq.buffer);
2970 
2971 	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2972 }
2973 
2974 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2975 			     void *buffer, size_t *lenp,
2976 			     loff_t *ppos)
2977 {
2978 	int save_tracepoint_printk;
2979 	int ret;
2980 
2981 	mutex_lock(&tracepoint_printk_mutex);
2982 	save_tracepoint_printk = tracepoint_printk;
2983 
2984 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2985 
2986 	/*
2987 	 * This will force exiting early, as tracepoint_printk
2988 	 * is always zero when tracepoint_print_iter is not allocated
2989 	 */
2990 	if (!tracepoint_print_iter)
2991 		tracepoint_printk = 0;
2992 
2993 	if (save_tracepoint_printk == tracepoint_printk)
2994 		goto out;
2995 
2996 	if (tracepoint_printk)
2997 		static_key_enable(&tracepoint_printk_key.key);
2998 	else
2999 		static_key_disable(&tracepoint_printk_key.key);
3000 
3001  out:
3002 	mutex_unlock(&tracepoint_printk_mutex);
3003 
3004 	return ret;
3005 }
3006 
3007 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
3008 {
3009 	enum event_trigger_type tt = ETT_NONE;
3010 	struct trace_event_file *file = fbuffer->trace_file;
3011 
3012 	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
3013 			fbuffer->entry, &tt))
3014 		goto discard;
3015 
3016 	if (static_key_false(&tracepoint_printk_key.key))
3017 		output_printk(fbuffer);
3018 
3019 	if (static_branch_unlikely(&trace_event_exports_enabled))
3020 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
3021 
3022 	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
3023 			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
3024 
3025 discard:
3026 	if (tt)
3027 		event_triggers_post_call(file, tt);
3028 
3029 }
3030 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
3031 
3032 /*
3033  * Skip 3:
3034  *
3035  *   trace_buffer_unlock_commit_regs()
3036  *   trace_event_buffer_commit()
3037  *   trace_event_raw_event_xxx()
3038  */
3039 # define STACK_SKIP 3
3040 
3041 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3042 				     struct trace_buffer *buffer,
3043 				     struct ring_buffer_event *event,
3044 				     unsigned int trace_ctx,
3045 				     struct pt_regs *regs)
3046 {
3047 	__buffer_unlock_commit(buffer, event);
3048 
3049 	/*
3050 	 * If regs is not set, then skip the necessary functions.
3051 	 * Note, we can still get here via blktrace, wakeup tracer
3052 	 * and mmiotrace, but that's ok if they lose a function or
3053 	 * two. They are not that meaningful.
3054 	 */
3055 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3056 	ftrace_trace_userstack(tr, buffer, trace_ctx);
3057 }
3058 
3059 /*
3060  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3061  */
3062 void
3063 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3064 				   struct ring_buffer_event *event)
3065 {
3066 	__buffer_unlock_commit(buffer, event);
3067 }
3068 
3069 void
3070 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3071 	       parent_ip, unsigned int trace_ctx)
3072 {
3073 	struct trace_event_call *call = &event_function;
3074 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3075 	struct ring_buffer_event *event;
3076 	struct ftrace_entry *entry;
3077 
3078 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3079 					    trace_ctx);
3080 	if (!event)
3081 		return;
3082 	entry	= ring_buffer_event_data(event);
3083 	entry->ip			= ip;
3084 	entry->parent_ip		= parent_ip;
3085 
3086 	if (!call_filter_check_discard(call, entry, buffer, event)) {
3087 		if (static_branch_unlikely(&trace_function_exports_enabled))
3088 			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3089 		__buffer_unlock_commit(buffer, event);
3090 	}
3091 }
3092 
3093 #ifdef CONFIG_STACKTRACE
3094 
3095 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3096 #define FTRACE_KSTACK_NESTING	4
3097 
3098 #define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
3099 
3100 struct ftrace_stack {
3101 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
3102 };
3103 
3104 
3105 struct ftrace_stacks {
3106 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
3107 };
3108 
3109 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3110 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3111 
3112 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3113 				 unsigned int trace_ctx,
3114 				 int skip, struct pt_regs *regs)
3115 {
3116 	struct trace_event_call *call = &event_kernel_stack;
3117 	struct ring_buffer_event *event;
3118 	unsigned int size, nr_entries;
3119 	struct ftrace_stack *fstack;
3120 	struct stack_entry *entry;
3121 	int stackidx;
3122 
3123 	/*
3124 	 * Add one, for this function and the call to save_stack_trace().
3125 	 * If regs is set, then these functions will not be in the way.
3126 	 */
3127 #ifndef CONFIG_UNWINDER_ORC
3128 	if (!regs)
3129 		skip++;
3130 #endif
3131 
3132 	preempt_disable_notrace();
3133 
3134 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3135 
3136 	/* This should never happen. If it does, yell once and skip */
3137 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3138 		goto out;
3139 
3140 	/*
3141 	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3142 	 * interrupt will either see the value pre increment or post
3143 	 * increment. If the interrupt happens pre increment it will have
3144 	 * restored the counter when it returns.  We just need a barrier to
3145 	 * keep gcc from moving things around.
3146 	 */
3147 	barrier();
3148 
3149 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3150 	size = ARRAY_SIZE(fstack->calls);
3151 
3152 	if (regs) {
3153 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
3154 						   size, skip);
3155 	} else {
3156 		nr_entries = stack_trace_save(fstack->calls, size, skip);
3157 	}
3158 
3159 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3160 				    struct_size(entry, caller, nr_entries),
3161 				    trace_ctx);
3162 	if (!event)
3163 		goto out;
3164 	entry = ring_buffer_event_data(event);
3165 
3166 	entry->size = nr_entries;
3167 	memcpy(&entry->caller, fstack->calls,
3168 	       flex_array_size(entry, caller, nr_entries));
3169 
3170 	if (!call_filter_check_discard(call, entry, buffer, event))
3171 		__buffer_unlock_commit(buffer, event);
3172 
3173  out:
3174 	/* Again, don't let gcc optimize things here */
3175 	barrier();
3176 	__this_cpu_dec(ftrace_stack_reserve);
3177 	preempt_enable_notrace();
3178 
3179 }
3180 
3181 static inline void ftrace_trace_stack(struct trace_array *tr,
3182 				      struct trace_buffer *buffer,
3183 				      unsigned int trace_ctx,
3184 				      int skip, struct pt_regs *regs)
3185 {
3186 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3187 		return;
3188 
3189 	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3190 }
3191 
3192 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3193 		   int skip)
3194 {
3195 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3196 
3197 	if (rcu_is_watching()) {
3198 		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3199 		return;
3200 	}
3201 
3202 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3203 		return;
3204 
3205 	/*
3206 	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3207 	 * but if the above rcu_is_watching() failed, then the NMI
3208 	 * triggered someplace critical, and ct_irq_enter() should
3209 	 * not be called from NMI.
3210 	 */
3211 	if (unlikely(in_nmi()))
3212 		return;
3213 
3214 	ct_irq_enter_irqson();
3215 	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3216 	ct_irq_exit_irqson();
3217 }
3218 
3219 /**
3220  * trace_dump_stack - record a stack back trace in the trace buffer
3221  * @skip: Number of functions to skip (helper handlers)
3222  */
3223 void trace_dump_stack(int skip)
3224 {
3225 	if (tracing_disabled || tracing_selftest_running)
3226 		return;
3227 
3228 #ifndef CONFIG_UNWINDER_ORC
3229 	/* Skip 1 to skip this function. */
3230 	skip++;
3231 #endif
3232 	__ftrace_trace_stack(global_trace.array_buffer.buffer,
3233 			     tracing_gen_ctx(), skip, NULL);
3234 }
3235 EXPORT_SYMBOL_GPL(trace_dump_stack);
3236 
3237 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3238 static DEFINE_PER_CPU(int, user_stack_count);
3239 
3240 static void
3241 ftrace_trace_userstack(struct trace_array *tr,
3242 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3243 {
3244 	struct trace_event_call *call = &event_user_stack;
3245 	struct ring_buffer_event *event;
3246 	struct userstack_entry *entry;
3247 
3248 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3249 		return;
3250 
3251 	/*
3252 	 * NMIs can not handle page faults, even with fixups.
3253 	 * Saving the user stack can (and often does) fault.
3254 	 */
3255 	if (unlikely(in_nmi()))
3256 		return;
3257 
3258 	/*
3259 	 * prevent recursion, since the user stack tracing may
3260 	 * trigger other kernel events.
3261 	 */
3262 	preempt_disable();
3263 	if (__this_cpu_read(user_stack_count))
3264 		goto out;
3265 
3266 	__this_cpu_inc(user_stack_count);
3267 
3268 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3269 					    sizeof(*entry), trace_ctx);
3270 	if (!event)
3271 		goto out_drop_count;
3272 	entry	= ring_buffer_event_data(event);
3273 
3274 	entry->tgid		= current->tgid;
3275 	memset(&entry->caller, 0, sizeof(entry->caller));
3276 
3277 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3278 	if (!call_filter_check_discard(call, entry, buffer, event))
3279 		__buffer_unlock_commit(buffer, event);
3280 
3281  out_drop_count:
3282 	__this_cpu_dec(user_stack_count);
3283  out:
3284 	preempt_enable();
3285 }
3286 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3287 static void ftrace_trace_userstack(struct trace_array *tr,
3288 				   struct trace_buffer *buffer,
3289 				   unsigned int trace_ctx)
3290 {
3291 }
3292 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3293 
3294 #endif /* CONFIG_STACKTRACE */
3295 
3296 static inline void
3297 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3298 			  unsigned long long delta)
3299 {
3300 	entry->bottom_delta_ts = delta & U32_MAX;
3301 	entry->top_delta_ts = (delta >> 32);
3302 }
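
/*
 * A reconstruction sketch (mirroring how the output side reassembles the
 * value): the 64-bit delta split above is rebuilt from the two halves
 * when a func_repeats entry is printed.
 */
static inline u64 example_func_repeats_delta(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}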
3303 
3304 void trace_last_func_repeats(struct trace_array *tr,
3305 			     struct trace_func_repeats *last_info,
3306 			     unsigned int trace_ctx)
3307 {
3308 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3309 	struct func_repeats_entry *entry;
3310 	struct ring_buffer_event *event;
3311 	u64 delta;
3312 
3313 	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3314 					    sizeof(*entry), trace_ctx);
3315 	if (!event)
3316 		return;
3317 
3318 	delta = ring_buffer_event_time_stamp(buffer, event) -
3319 		last_info->ts_last_call;
3320 
3321 	entry = ring_buffer_event_data(event);
3322 	entry->ip = last_info->ip;
3323 	entry->parent_ip = last_info->parent_ip;
3324 	entry->count = last_info->count;
3325 	func_repeats_set_delta_ts(entry, delta);
3326 
3327 	__buffer_unlock_commit(buffer, event);
3328 }
3329 
3330 /* created for use with alloc_percpu */
3331 struct trace_buffer_struct {
3332 	int nesting;
3333 	char buffer[4][TRACE_BUF_SIZE];
3334 };
3335 
3336 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3337 
3338 /*
3339  * This allows for lockless recording.  If we're nested too deeply, then
3340  * this returns NULL.
3341  */
3342 static char *get_trace_buf(void)
3343 {
3344 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3345 
3346 	if (!trace_percpu_buffer || buffer->nesting >= 4)
3347 		return NULL;
3348 
3349 	buffer->nesting++;
3350 
3351 	/* Interrupts must see nesting incremented before we use the buffer */
3352 	barrier();
3353 	return &buffer->buffer[buffer->nesting - 1][0];
3354 }
3355 
3356 static void put_trace_buf(void)
3357 {
3358 	/* Don't let the decrement of nesting leak before this */
3359 	barrier();
3360 	this_cpu_dec(trace_percpu_buffer->nesting);
3361 }
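
/*
 * A usage sketch mirroring trace_vbprintk() below (the helper name is
 * hypothetical): disable preemption, grab a nesting slot, format into it,
 * then release the slot before re-enabling preemption.
 */
static __printf(1, 2) int example_format_message(const char *fmt, ...)
{
	va_list ap;
	char *tbuffer;
	int len = 0;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		va_start(ap, fmt);
		len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, ap);
		va_end(ap);
		put_trace_buf();
	}
	preempt_enable_notrace();

	return len;
}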
3362 
3363 static int alloc_percpu_trace_buffer(void)
3364 {
3365 	struct trace_buffer_struct __percpu *buffers;
3366 
3367 	if (trace_percpu_buffer)
3368 		return 0;
3369 
3370 	buffers = alloc_percpu(struct trace_buffer_struct);
3371 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3372 		return -ENOMEM;
3373 
3374 	trace_percpu_buffer = buffers;
3375 	return 0;
3376 }
3377 
3378 static int buffers_allocated;
3379 
3380 void trace_printk_init_buffers(void)
3381 {
3382 	if (buffers_allocated)
3383 		return;
3384 
3385 	if (alloc_percpu_trace_buffer())
3386 		return;
3387 
3388 	/* trace_printk() is for debug use only. Don't use it in production. */
3389 
3390 	pr_warn("\n");
3391 	pr_warn("**********************************************************\n");
3392 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3393 	pr_warn("**                                                      **\n");
3394 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3395 	pr_warn("**                                                      **\n");
3396 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3397 	pr_warn("** unsafe for production use.                           **\n");
3398 	pr_warn("**                                                      **\n");
3399 	pr_warn("** If you see this message and you are not debugging    **\n");
3400 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3401 	pr_warn("**                                                      **\n");
3402 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3403 	pr_warn("**********************************************************\n");
3404 
3405 	/* Expand the buffers to set size */
3406 	tracing_update_buffers();
3407 
3408 	buffers_allocated = 1;
3409 
3410 	/*
3411 	 * trace_printk_init_buffers() can be called by modules.
3412 	 * If that happens, then we need to start cmdline recording
3413 	 * directly here. If the global_trace.buffer is already
3414 	 * allocated here, then this was called by module code.
3415 	 */
3416 	if (global_trace.array_buffer.buffer)
3417 		tracing_start_cmdline_record();
3418 }
3419 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3420 
3421 void trace_printk_start_comm(void)
3422 {
3423 	/* Start tracing comms if trace printk is set */
3424 	if (!buffers_allocated)
3425 		return;
3426 	tracing_start_cmdline_record();
3427 }
3428 
3429 static void trace_printk_start_stop_comm(int enabled)
3430 {
3431 	if (!buffers_allocated)
3432 		return;
3433 
3434 	if (enabled)
3435 		tracing_start_cmdline_record();
3436 	else
3437 		tracing_stop_cmdline_record();
3438 }
3439 
3440 /**
3441  * trace_vbprintk - write binary msg to tracing buffer
3442  * @ip:    The address of the caller
3443  * @fmt:   The string format to write to the buffer
3444  * @args:  Arguments for @fmt
3445  */
3446 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3447 {
3448 	struct trace_event_call *call = &event_bprint;
3449 	struct ring_buffer_event *event;
3450 	struct trace_buffer *buffer;
3451 	struct trace_array *tr = &global_trace;
3452 	struct bprint_entry *entry;
3453 	unsigned int trace_ctx;
3454 	char *tbuffer;
3455 	int len = 0, size;
3456 
3457 	if (unlikely(tracing_selftest_running || tracing_disabled))
3458 		return 0;
3459 
3460 	/* Don't pollute graph traces with trace_vprintk internals */
3461 	pause_graph_tracing();
3462 
3463 	trace_ctx = tracing_gen_ctx();
3464 	preempt_disable_notrace();
3465 
3466 	tbuffer = get_trace_buf();
3467 	if (!tbuffer) {
3468 		len = 0;
3469 		goto out_nobuffer;
3470 	}
3471 
3472 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3473 
3474 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3475 		goto out_put;
3476 
3477 	size = sizeof(*entry) + sizeof(u32) * len;
3478 	buffer = tr->array_buffer.buffer;
3479 	ring_buffer_nest_start(buffer);
3480 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3481 					    trace_ctx);
3482 	if (!event)
3483 		goto out;
3484 	entry = ring_buffer_event_data(event);
3485 	entry->ip			= ip;
3486 	entry->fmt			= fmt;
3487 
3488 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3489 	if (!call_filter_check_discard(call, entry, buffer, event)) {
3490 		__buffer_unlock_commit(buffer, event);
3491 		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3492 	}
3493 
3494 out:
3495 	ring_buffer_nest_end(buffer);
3496 out_put:
3497 	put_trace_buf();
3498 
3499 out_nobuffer:
3500 	preempt_enable_notrace();
3501 	unpause_graph_tracing();
3502 
3503 	return len;
3504 }
3505 EXPORT_SYMBOL_GPL(trace_vbprintk);
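/*
 * Illustrative note (the trace_printk() machinery lives outside this
 * file, so treat the exact routing as an assumption): a debugging call
 * such as
 *
 *	trace_printk("page=%p count=%d\n", page, count);
 *
 * with a compile-time constant format is expected to land here via
 * __trace_bprintk(). Only the format pointer and the binary arguments
 * are recorded (sizeof(u32) * len bytes), which is what keeps this
 * path cheap compared to formatting the full string at trace time.
 */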
3506 
3507 __printf(3, 0)
3508 static int
3509 __trace_array_vprintk(struct trace_buffer *buffer,
3510 		      unsigned long ip, const char *fmt, va_list args)
3511 {
3512 	struct trace_event_call *call = &event_print;
3513 	struct ring_buffer_event *event;
3514 	int len = 0, size;
3515 	struct print_entry *entry;
3516 	unsigned int trace_ctx;
3517 	char *tbuffer;
3518 
3519 	if (tracing_disabled)
3520 		return 0;
3521 
3522 	/* Don't pollute graph traces with trace_vprintk internals */
3523 	pause_graph_tracing();
3524 
3525 	trace_ctx = tracing_gen_ctx();
3526 	preempt_disable_notrace();
3527 
3528 
3529 	tbuffer = get_trace_buf();
3530 	if (!tbuffer) {
3531 		len = 0;
3532 		goto out_nobuffer;
3533 	}
3534 
3535 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3536 
3537 	size = sizeof(*entry) + len + 1;
3538 	ring_buffer_nest_start(buffer);
3539 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3540 					    trace_ctx);
3541 	if (!event)
3542 		goto out;
3543 	entry = ring_buffer_event_data(event);
3544 	entry->ip = ip;
3545 
3546 	memcpy(&entry->buf, tbuffer, len + 1);
3547 	if (!call_filter_check_discard(call, entry, buffer, event)) {
3548 		__buffer_unlock_commit(buffer, event);
3549 		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3550 	}
3551 
3552 out:
3553 	ring_buffer_nest_end(buffer);
3554 	put_trace_buf();
3555 
3556 out_nobuffer:
3557 	preempt_enable_notrace();
3558 	unpause_graph_tracing();
3559 
3560 	return len;
3561 }
3562 
3563 __printf(3, 0)
3564 int trace_array_vprintk(struct trace_array *tr,
3565 			unsigned long ip, const char *fmt, va_list args)
3566 {
3567 	if (tracing_selftest_running && tr == &global_trace)
3568 		return 0;
3569 
3570 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3571 }
3572 
3573 /**
3574  * trace_array_printk - Print a message to a specific instance
3575  * @tr: The instance trace_array descriptor
3576  * @ip: The instruction pointer that this is called from.
3577  * @fmt: The format to print (printf format)
3578  *
3579  * If a subsystem sets up its own instance, it may printk strings
3580  * into its tracing instance buffer using this function. Note, this
3581  * function will not write into the top level buffer (use
3582  * trace_printk() for that), as the top level buffer should only
3583  * contain events that can be individually disabled.
3584  * trace_printk() is only for debugging a kernel, and should never
3585  * remain in code that is in normal use.
3586  *
3587  * trace_array_printk() can be used instead, as it will not add noise
3588  * to the top level tracing buffer.
3589  *
3590  * Note, trace_array_init_printk() must be called on @tr before this
3591  * can be used.
3592  */
3593 __printf(3, 0)
3594 int trace_array_printk(struct trace_array *tr,
3595 		       unsigned long ip, const char *fmt, ...)
3596 {
3597 	int ret;
3598 	va_list ap;
3599 
3600 	if (!tr)
3601 		return -ENOENT;
3602 
3603 	/* This is only allowed for created instances */
3604 	if (tr == &global_trace)
3605 		return 0;
3606 
3607 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3608 		return 0;
3609 
3610 	va_start(ap, fmt);
3611 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3612 	va_end(ap);
3613 	return ret;
3614 }
3615 EXPORT_SYMBOL_GPL(trace_array_printk);
3616 
3617 /**
3618  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3619  * @tr: The trace array to initialize the buffers for
3620  *
3621  * As trace_array_printk() only writes into instances, they are OK to
3622  * have in the kernel (unlike trace_printk()). This needs to be called
3623  * before trace_array_printk() can be used on a trace_array.
3624  */
3625 int trace_array_init_printk(struct trace_array *tr)
3626 {
3627 	if (!tr)
3628 		return -ENOENT;
3629 
3630 	/* This is only allowed for created instances */
3631 	if (tr == &global_trace)
3632 		return -EINVAL;
3633 
3634 	return alloc_percpu_trace_buffer();
3635 }
3636 EXPORT_SYMBOL_GPL(trace_array_init_printk);
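/*
 * Illustrative sketch of the intended calling sequence for the two
 * helpers above. The instance name "my_subsys" is hypothetical, and
 * the single-argument trace_array_get_by_name() is an assumption
 * about the instance API defined elsewhere:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_subsys");
 *	if (!tr)
 *		return -ENOMEM;
 *	if (!trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_,
 *				   "queued %d requests\n", nr);
 *	trace_array_put(tr);
 */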
3637 
3638 __printf(3, 4)
3639 int trace_array_printk_buf(struct trace_buffer *buffer,
3640 			   unsigned long ip, const char *fmt, ...)
3641 {
3642 	int ret;
3643 	va_list ap;
3644 
3645 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3646 		return 0;
3647 
3648 	va_start(ap, fmt);
3649 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3650 	va_end(ap);
3651 	return ret;
3652 }
3653 
3654 __printf(2, 0)
3655 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3656 {
3657 	return trace_array_vprintk(&global_trace, ip, fmt, args);
3658 }
3659 EXPORT_SYMBOL_GPL(trace_vprintk);
3660 
3661 static void trace_iterator_increment(struct trace_iterator *iter)
3662 {
3663 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3664 
3665 	iter->idx++;
3666 	if (buf_iter)
3667 		ring_buffer_iter_advance(buf_iter);
3668 }
3669 
3670 static struct trace_entry *
3671 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3672 		unsigned long *lost_events)
3673 {
3674 	struct ring_buffer_event *event;
3675 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3676 
3677 	if (buf_iter) {
3678 		event = ring_buffer_iter_peek(buf_iter, ts);
3679 		if (lost_events)
3680 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3681 				(unsigned long)-1 : 0;
3682 	} else {
3683 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3684 					 lost_events);
3685 	}
3686 
3687 	if (event) {
3688 		iter->ent_size = ring_buffer_event_length(event);
3689 		return ring_buffer_event_data(event);
3690 	}
3691 	iter->ent_size = 0;
3692 	return NULL;
3693 }
3694 
3695 static struct trace_entry *
3696 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3697 		  unsigned long *missing_events, u64 *ent_ts)
3698 {
3699 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3700 	struct trace_entry *ent, *next = NULL;
3701 	unsigned long lost_events = 0, next_lost = 0;
3702 	int cpu_file = iter->cpu_file;
3703 	u64 next_ts = 0, ts;
3704 	int next_cpu = -1;
3705 	int next_size = 0;
3706 	int cpu;
3707 
3708 	/*
3709 	 * If we are in a per_cpu trace file, don't bother iterating over
3710 	 * all CPUs; peek at that one CPU directly.
3711 	 */
3712 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3713 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3714 			return NULL;
3715 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3716 		if (ent_cpu)
3717 			*ent_cpu = cpu_file;
3718 
3719 		return ent;
3720 	}
3721 
3722 	for_each_tracing_cpu(cpu) {
3723 
3724 		if (ring_buffer_empty_cpu(buffer, cpu))
3725 			continue;
3726 
3727 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3728 
3729 		/*
3730 		 * Pick the entry with the smallest timestamp:
3731 		 */
3732 		if (ent && (!next || ts < next_ts)) {
3733 			next = ent;
3734 			next_cpu = cpu;
3735 			next_ts = ts;
3736 			next_lost = lost_events;
3737 			next_size = iter->ent_size;
3738 		}
3739 	}
3740 
3741 	iter->ent_size = next_size;
3742 
3743 	if (ent_cpu)
3744 		*ent_cpu = next_cpu;
3745 
3746 	if (ent_ts)
3747 		*ent_ts = next_ts;
3748 
3749 	if (missing_events)
3750 		*missing_events = next_lost;
3751 
3752 	return next;
3753 }
3754 
3755 #define STATIC_FMT_BUF_SIZE	128
3756 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3757 
3758 char *trace_iter_expand_format(struct trace_iterator *iter)
3759 {
3760 	char *tmp;
3761 
3762 	/*
3763 	 * iter->tr is NULL when used with tp_printk, which means this
3764 	 * can be called from a context where calling krealloc() is not safe.
3765 	 */
3766 	if (!iter->tr || iter->fmt == static_fmt_buf)
3767 		return NULL;
3768 
3769 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3770 		       GFP_KERNEL);
3771 	if (tmp) {
3772 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3773 		iter->fmt = tmp;
3774 	}
3775 
3776 	return tmp;
3777 }
3778 
3779 /* Returns true if the string is safe to dereference from an event */
3780 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3781 			   bool star, int len)
3782 {
3783 	unsigned long addr = (unsigned long)str;
3784 	struct trace_event *trace_event;
3785 	struct trace_event_call *event;
3786 
3787 	/* Ignore strings with no length */
3788 	if (star && !len)
3789 		return true;
3790 
3791 	/* OK if part of the event data */
3792 	if ((addr >= (unsigned long)iter->ent) &&
3793 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3794 		return true;
3795 
3796 	/* OK if part of the temp seq buffer */
3797 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3798 	    (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3799 		return true;
3800 
3801 	/* Core rodata can not be freed */
3802 	if (is_kernel_rodata(addr))
3803 		return true;
3804 
3805 	if (trace_is_tracepoint_string(str))
3806 		return true;
3807 
3808 	/*
3809 	 * Now this could be a module event, referencing core module
3810 	 * data, which is OK.
3811 	 */
3812 	if (!iter->ent)
3813 		return false;
3814 
3815 	trace_event = ftrace_find_event(iter->ent->type);
3816 	if (!trace_event)
3817 		return false;
3818 
3819 	event = container_of(trace_event, struct trace_event_call, event);
3820 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3821 		return false;
3822 
3823 	/* Would rather have rodata, but this will suffice */
3824 	if (within_module_core(addr, event->module))
3825 		return true;
3826 
3827 	return false;
3828 }
3829 
3830 static const char *show_buffer(struct trace_seq *s)
3831 {
3832 	struct seq_buf *seq = &s->seq;
3833 
3834 	seq_buf_terminate(seq);
3835 
3836 	return seq->buffer;
3837 }
3838 
3839 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3840 
3841 static int test_can_verify_check(const char *fmt, ...)
3842 {
3843 	char buf[16];
3844 	va_list ap;
3845 	int ret;
3846 
3847 	/*
3848 	 * The verifier depends on vsnprintf() modifying the va_list
3849 	 * passed to it, i.e. on the va_list being passed by reference.
3850 	 * Some architectures (like x86_32) pass it by value, which means
3851 	 * that vsnprintf() does not modify the caller's va_list, and the
3852 	 * verifier would then need to understand every conversion that
3853 	 * vsnprintf() can consume. If the va_list is passed by value,
3854 	 * the verifier is disabled.
3855 	 */
3856 	va_start(ap, fmt);
3857 	vsnprintf(buf, 16, "%d", ap);
3858 	ret = va_arg(ap, int);
3859 	va_end(ap);
3860 
3861 	return ret;
3862 }
3863 
3864 static void test_can_verify(void)
3865 {
3866 	if (!test_can_verify_check("%d %d", 0, 1)) {
3867 		pr_info("trace event string verifier disabled\n");
3868 		static_branch_inc(&trace_no_verify);
3869 	}
3870 }
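/*
 * Worked example of the probe above: test_can_verify_check("%d %d", 0, 1)
 * lets vsnprintf() consume the first integer (0). If the architecture
 * passes va_list by reference, the following va_arg() returns the second
 * integer (1) and the verifier stays enabled. If the va_list was copied
 * by value, va_arg() re-reads the first integer (0), the check returns 0,
 * and test_can_verify() disables the verifier.
 */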
3871 
3872 /**
3873  * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3874  * @iter: The iterator that holds the seq buffer and the event being printed
3875  * @fmt: The format used to print the event
3876  * @ap: The va_list holding the data to print from @fmt.
3877  *
3878  * This writes the data into the @iter->seq buffer using the data from
3879  * @fmt and @ap. If the format has a %s, then the source of the string
3880  * is examined to make sure it is safe to print, otherwise it will
3881  * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
3882  * pointer.
3883  */
3884 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3885 			 va_list ap)
3886 {
3887 	const char *p = fmt;
3888 	const char *str;
3889 	int i, j;
3890 
3891 	if (WARN_ON_ONCE(!fmt))
3892 		return;
3893 
3894 	if (static_branch_unlikely(&trace_no_verify))
3895 		goto print;
3896 
3897 	/* Don't bother checking when doing a ftrace_dump() */
3898 	if (iter->fmt == static_fmt_buf)
3899 		goto print;
3900 
3901 	while (*p) {
3902 		bool star = false;
3903 		int len = 0;
3904 
3905 		j = 0;
3906 
3907 		/* We only care about %s and variants */
3908 		for (i = 0; p[i]; i++) {
3909 			if (i + 1 >= iter->fmt_size) {
3910 				/*
3911 				 * If we can't expand the copy buffer,
3912 				 * just print it.
3913 				 */
3914 				if (!trace_iter_expand_format(iter))
3915 					goto print;
3916 			}
3917 
3918 			if (p[i] == '\\' && p[i+1]) {
3919 				i++;
3920 				continue;
3921 			}
3922 			if (p[i] == '%') {
3923 				/* Need to test cases like %08.*s */
3924 				for (j = 1; p[i+j]; j++) {
3925 					if (isdigit(p[i+j]) ||
3926 					    p[i+j] == '.')
3927 						continue;
3928 					if (p[i+j] == '*') {
3929 						star = true;
3930 						continue;
3931 					}
3932 					break;
3933 				}
3934 				if (p[i+j] == 's')
3935 					break;
3936 				star = false;
3937 			}
3938 			j = 0;
3939 		}
3940 		/* If no %s found then just print normally */
3941 		if (!p[i])
3942 			break;
3943 
3944 		/* Copy up to the %s, and print that */
3945 		strncpy(iter->fmt, p, i);
3946 		iter->fmt[i] = '\0';
3947 		trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3948 
3949 		/*
3950 		 * If iter->seq is full, the above call no longer guarantees
3951 		 * that ap is in sync with fmt processing, and further calls
3952 		 * to va_arg() can return wrong positional arguments.
3953 		 *
3954 		 * Ensure that ap is no longer used in this case.
3955 		 */
3956 		if (iter->seq.full) {
3957 			p = "";
3958 			break;
3959 		}
3960 
3961 		if (star)
3962 			len = va_arg(ap, int);
3963 
3964 		/* The ap now points to the string data of the %s */
3965 		str = va_arg(ap, const char *);
3966 
3967 		/*
3968 		 * If you hit this warning, it is likely that the
3969 		 * trace event in question used %s on a string that
3970 		 * was saved at the time of the event, but may not be
3971 		 * around when the trace is read. Use __string(),
3972 		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3973 		 * instead. See samples/trace_events/trace-events-sample.h
3974 		 * for reference.
3975 		 */
3976 		if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3977 			      "fmt: '%s' current_buffer: '%s'",
3978 			      fmt, show_buffer(&iter->seq))) {
3979 			int ret;
3980 
3981 			/* Try to safely read the string */
3982 			if (star) {
3983 				if (len + 1 > iter->fmt_size)
3984 					len = iter->fmt_size - 1;
3985 				if (len < 0)
3986 					len = 0;
3987 				ret = copy_from_kernel_nofault(iter->fmt, str, len);
3988 				iter->fmt[len] = 0;
3989 				star = false;
3990 			} else {
3991 				ret = strncpy_from_kernel_nofault(iter->fmt, str,
3992 								  iter->fmt_size);
3993 			}
3994 			if (ret < 0)
3995 				trace_seq_printf(&iter->seq, "(0x%px)", str);
3996 			else
3997 				trace_seq_printf(&iter->seq, "(0x%px:%s)",
3998 						 str, iter->fmt);
3999 			str = "[UNSAFE-MEMORY]";
4000 			strcpy(iter->fmt, "%s");
4001 		} else {
4002 			strncpy(iter->fmt, p + i, j + 1);
4003 			iter->fmt[j+1] = '\0';
4004 		}
4005 		if (star)
4006 			trace_seq_printf(&iter->seq, iter->fmt, len, str);
4007 		else
4008 			trace_seq_printf(&iter->seq, iter->fmt, str);
4009 
4010 		p += i + j + 1;
4011 	}
4012  print:
4013 	if (*p)
4014 		trace_seq_vprintf(&iter->seq, p, ap);
4015 }
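/*
 * Illustrative sketch of what the check above guards against. The event
 * fields are hypothetical and the two-argument __assign_str() form is an
 * assumption about the TRACE_EVENT() helpers defined outside this file.
 * Saving only a pointer at event time:
 *
 *	__field(const char *, name)
 *	TP_printk("name=%s", __entry->name)
 *
 * may point at memory that was freed by the time the trace is read.
 * Copying the string into the event itself:
 *
 *	__string(name, name)
 *	__assign_str(name, name)
 *	TP_printk("name=%s", __get_str(name))
 *
 * keeps the data inside the event record, which trace_safe_str()
 * accepts as safe to dereference.
 */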
4016 
4017 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
4018 {
4019 	const char *p, *new_fmt;
4020 	char *q;
4021 
4022 	if (WARN_ON_ONCE(!fmt))
4023 		return fmt;
4024 
4025 	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
4026 		return fmt;
4027 
4028 	p = fmt;
4029 	new_fmt = q = iter->fmt;
4030 	while (*p) {
4031 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
4032 			if (!trace_iter_expand_format(iter))
4033 				return fmt;
4034 
4035 			q += iter->fmt - new_fmt;
4036 			new_fmt = iter->fmt;
4037 		}
4038 
4039 		*q++ = *p++;
4040 
4041 		/* Replace %p with %px */
4042 		if (p[-1] == '%') {
4043 			if (p[0] == '%') {
4044 				*q++ = *p++;
4045 			} else if (p[0] == 'p' && !isalnum(p[1])) {
4046 				*q++ = *p++;
4047 				*q++ = 'x';
4048 			}
4049 		}
4050 	}
4051 	*q = '\0';
4052 
4053 	return new_fmt;
4054 }
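/*
 * Example of the rewrite above (illustrative format string only): with
 * the "hash-ptr" option cleared, a format such as
 *
 *	"comm=%s ptr=%p flags=%pGp"
 *
 * is copied into iter->fmt as
 *
 *	"comm=%s ptr=%px flags=%pGp"
 *
 * Only a bare %p (not followed by an alphanumeric extension) is widened
 * to %px, so extended pointer formats are left untouched.
 */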
4055 
4056 #define STATIC_TEMP_BUF_SIZE	128
4057 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4058 
4059 /* Find the next real entry, without updating the iterator itself */
4060 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4061 					  int *ent_cpu, u64 *ent_ts)
4062 {
4063 	/* __find_next_entry will reset ent_size */
4064 	int ent_size = iter->ent_size;
4065 	struct trace_entry *entry;
4066 
4067 	/*
4068 	 * If called from ftrace_dump(), then the iter->temp buffer
4069 	 * will be the static_temp_buf and not created from kmalloc.
4070 	 * If the entry size is greater than the buffer, we cannot
4071 	 * save it. Just return NULL in that case. This is only
4072 	 * used to add markers when two consecutive events' time
4073 	 * stamps have a large delta. See trace_print_lat_context().
4074 	 */
4075 	if (iter->temp == static_temp_buf &&
4076 	    STATIC_TEMP_BUF_SIZE < ent_size)
4077 		return NULL;
4078 
4079 	/*
4080 	 * The __find_next_entry() may call peek_next_entry(), which may
4081 	 * call ring_buffer_peek() that may make the contents of iter->ent
4082 	 * undefined. Need to copy iter->ent now.
4083 	 */
4084 	if (iter->ent && iter->ent != iter->temp) {
4085 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4086 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4087 			void *temp;
4088 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
4089 			if (!temp)
4090 				return NULL;
4091 			kfree(iter->temp);
4092 			iter->temp = temp;
4093 			iter->temp_size = iter->ent_size;
4094 		}
4095 		memcpy(iter->temp, iter->ent, iter->ent_size);
4096 		iter->ent = iter->temp;
4097 	}
4098 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4099 	/* Put back the original ent_size */
4100 	iter->ent_size = ent_size;
4101 
4102 	return entry;
4103 }
4104 
4105 /* Find the next real entry, and increment the iterator to the next entry */
4106 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4107 {
4108 	iter->ent = __find_next_entry(iter, &iter->cpu,
4109 				      &iter->lost_events, &iter->ts);
4110 
4111 	if (iter->ent)
4112 		trace_iterator_increment(iter);
4113 
4114 	return iter->ent ? iter : NULL;
4115 }
4116 
4117 static void trace_consume(struct trace_iterator *iter)
4118 {
4119 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4120 			    &iter->lost_events);
4121 }
4122 
4123 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4124 {
4125 	struct trace_iterator *iter = m->private;
4126 	int i = (int)*pos;
4127 	void *ent;
4128 
4129 	WARN_ON_ONCE(iter->leftover);
4130 
4131 	(*pos)++;
4132 
4133 	/* can't go backwards */
4134 	if (iter->idx > i)
4135 		return NULL;
4136 
4137 	if (iter->idx < 0)
4138 		ent = trace_find_next_entry_inc(iter);
4139 	else
4140 		ent = iter;
4141 
4142 	while (ent && iter->idx < i)
4143 		ent = trace_find_next_entry_inc(iter);
4144 
4145 	iter->pos = *pos;
4146 
4147 	return ent;
4148 }
4149 
4150 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4151 {
4152 	struct ring_buffer_iter *buf_iter;
4153 	unsigned long entries = 0;
4154 	u64 ts;
4155 
4156 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4157 
4158 	buf_iter = trace_buffer_iter(iter, cpu);
4159 	if (!buf_iter)
4160 		return;
4161 
4162 	ring_buffer_iter_reset(buf_iter);
4163 
4164 	/*
4165 	 * With the max latency tracers, it is possible that a reset
4166 	 * never took place on a CPU. This shows up as entries with a
4167 	 * timestamp before the start of the buffer.
4168 	 */
4169 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
4170 		if (ts >= iter->array_buffer->time_start)
4171 			break;
4172 		entries++;
4173 		ring_buffer_iter_advance(buf_iter);
4174 	}
4175 
4176 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4177 }
4178 
4179 /*
4180  * The current tracer is copied to avoid holding a global lock
4181  * all around.
4182  */
4183 static void *s_start(struct seq_file *m, loff_t *pos)
4184 {
4185 	struct trace_iterator *iter = m->private;
4186 	struct trace_array *tr = iter->tr;
4187 	int cpu_file = iter->cpu_file;
4188 	void *p = NULL;
4189 	loff_t l = 0;
4190 	int cpu;
4191 
4192 	mutex_lock(&trace_types_lock);
4193 	if (unlikely(tr->current_trace != iter->trace)) {
4194 		/* Close iter->trace before switching to the new current tracer */
4195 		if (iter->trace->close)
4196 			iter->trace->close(iter);
4197 		iter->trace = tr->current_trace;
4198 		/* Reopen the new current tracer */
4199 		if (iter->trace->open)
4200 			iter->trace->open(iter);
4201 	}
4202 	mutex_unlock(&trace_types_lock);
4203 
4204 #ifdef CONFIG_TRACER_MAX_TRACE
4205 	if (iter->snapshot && iter->trace->use_max_tr)
4206 		return ERR_PTR(-EBUSY);
4207 #endif
4208 
4209 	if (*pos != iter->pos) {
4210 		iter->ent = NULL;
4211 		iter->cpu = 0;
4212 		iter->idx = -1;
4213 
4214 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
4215 			for_each_tracing_cpu(cpu)
4216 				tracing_iter_reset(iter, cpu);
4217 		} else
4218 			tracing_iter_reset(iter, cpu_file);
4219 
4220 		iter->leftover = 0;
4221 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4222 			;
4223 
4224 	} else {
4225 		/*
4226 		 * If we overflowed the seq_file before, then we want
4227 		 * to just reuse the trace_seq buffer again.
4228 		 */
4229 		if (iter->leftover)
4230 			p = iter;
4231 		else {
4232 			l = *pos - 1;
4233 			p = s_next(m, p, &l);
4234 		}
4235 	}
4236 
4237 	trace_event_read_lock();
4238 	trace_access_lock(cpu_file);
4239 	return p;
4240 }
4241 
4242 static void s_stop(struct seq_file *m, void *p)
4243 {
4244 	struct trace_iterator *iter = m->private;
4245 
4246 #ifdef CONFIG_TRACER_MAX_TRACE
4247 	if (iter->snapshot && iter->trace->use_max_tr)
4248 		return;
4249 #endif
4250 
4251 	trace_access_unlock(iter->cpu_file);
4252 	trace_event_read_unlock();
4253 }
4254 
4255 static void
4256 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4257 		      unsigned long *entries, int cpu)
4258 {
4259 	unsigned long count;
4260 
4261 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
4262 	/*
4263 	 * If this buffer has skipped entries, then we hold all
4264 	 * entries for the trace and we need to ignore the
4265 	 * ones before the time stamp.
4266 	 */
4267 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4268 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4269 		/* total is the same as the entries */
4270 		*total = count;
4271 	} else
4272 		*total = count +
4273 			ring_buffer_overrun_cpu(buf->buffer, cpu);
4274 	*entries = count;
4275 }
4276 
4277 static void
4278 get_total_entries(struct array_buffer *buf,
4279 		  unsigned long *total, unsigned long *entries)
4280 {
4281 	unsigned long t, e;
4282 	int cpu;
4283 
4284 	*total = 0;
4285 	*entries = 0;
4286 
4287 	for_each_tracing_cpu(cpu) {
4288 		get_total_entries_cpu(buf, &t, &e, cpu);
4289 		*total += t;
4290 		*entries += e;
4291 	}
4292 }
4293 
4294 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4295 {
4296 	unsigned long total, entries;
4297 
4298 	if (!tr)
4299 		tr = &global_trace;
4300 
4301 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4302 
4303 	return entries;
4304 }
4305 
4306 unsigned long trace_total_entries(struct trace_array *tr)
4307 {
4308 	unsigned long total, entries;
4309 
4310 	if (!tr)
4311 		tr = &global_trace;
4312 
4313 	get_total_entries(&tr->array_buffer, &total, &entries);
4314 
4315 	return entries;
4316 }
4317 
4318 static void print_lat_help_header(struct seq_file *m)
4319 {
4320 	seq_puts(m, "#                    _------=> CPU#            \n"
4321 		    "#                   / _-----=> irqs-off/BH-disabled\n"
4322 		    "#                  | / _----=> need-resched    \n"
4323 		    "#                  || / _---=> hardirq/softirq \n"
4324 		    "#                  ||| / _--=> preempt-depth   \n"
4325 		    "#                  |||| / _-=> migrate-disable \n"
4326 		    "#                  ||||| /     delay           \n"
4327 		    "#  cmd     pid     |||||| time  |   caller     \n"
4328 		    "#     \\   /        ||||||  \\    |    /       \n");
4329 }
4330 
4331 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4332 {
4333 	unsigned long total;
4334 	unsigned long entries;
4335 
4336 	get_total_entries(buf, &total, &entries);
4337 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4338 		   entries, total, num_online_cpus());
4339 	seq_puts(m, "#\n");
4340 }
4341 
4342 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4343 				   unsigned int flags)
4344 {
4345 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4346 
4347 	print_event_info(buf, m);
4348 
4349 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4350 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4351 }
4352 
4353 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4354 				       unsigned int flags)
4355 {
4356 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4357 	static const char space[] = "            ";
4358 	int prec = tgid ? 12 : 2;
4359 
4360 	print_event_info(buf, m);
4361 
4362 	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4363 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4364 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4365 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4366 	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4367 	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4368 	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4369 	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4370 }
4371 
4372 void
4373 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4374 {
4375 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4376 	struct array_buffer *buf = iter->array_buffer;
4377 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4378 	struct tracer *type = iter->trace;
4379 	unsigned long entries;
4380 	unsigned long total;
4381 	const char *name = type->name;
4382 
4383 	get_total_entries(buf, &total, &entries);
4384 
4385 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4386 		   name, UTS_RELEASE);
4387 	seq_puts(m, "# -----------------------------------"
4388 		 "---------------------------------\n");
4389 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4390 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4391 		   nsecs_to_usecs(data->saved_latency),
4392 		   entries,
4393 		   total,
4394 		   buf->cpu,
4395 		   preempt_model_none()      ? "server" :
4396 		   preempt_model_voluntary() ? "desktop" :
4397 		   preempt_model_full()      ? "preempt" :
4398 		   preempt_model_rt()        ? "preempt_rt" :
4399 		   "unknown",
4400 		   /* These are reserved for later use */
4401 		   0, 0, 0, 0);
4402 #ifdef CONFIG_SMP
4403 	seq_printf(m, " #P:%d)\n", num_online_cpus());
4404 #else
4405 	seq_puts(m, ")\n");
4406 #endif
4407 	seq_puts(m, "#    -----------------\n");
4408 	seq_printf(m, "#    | task: %.16s-%d "
4409 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4410 		   data->comm, data->pid,
4411 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4412 		   data->policy, data->rt_priority);
4413 	seq_puts(m, "#    -----------------\n");
4414 
4415 	if (data->critical_start) {
4416 		seq_puts(m, "#  => started at: ");
4417 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4418 		trace_print_seq(m, &iter->seq);
4419 		seq_puts(m, "\n#  => ended at:   ");
4420 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4421 		trace_print_seq(m, &iter->seq);
4422 		seq_puts(m, "\n#\n");
4423 	}
4424 
4425 	seq_puts(m, "#\n");
4426 }
4427 
4428 static void test_cpu_buff_start(struct trace_iterator *iter)
4429 {
4430 	struct trace_seq *s = &iter->seq;
4431 	struct trace_array *tr = iter->tr;
4432 
4433 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4434 		return;
4435 
4436 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4437 		return;
4438 
4439 	if (cpumask_available(iter->started) &&
4440 	    cpumask_test_cpu(iter->cpu, iter->started))
4441 		return;
4442 
4443 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4444 		return;
4445 
4446 	if (cpumask_available(iter->started))
4447 		cpumask_set_cpu(iter->cpu, iter->started);
4448 
4449 	/* Don't print started cpu buffer for the first entry of the trace */
4450 	if (iter->idx > 1)
4451 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4452 				iter->cpu);
4453 }
4454 
4455 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4456 {
4457 	struct trace_array *tr = iter->tr;
4458 	struct trace_seq *s = &iter->seq;
4459 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4460 	struct trace_entry *entry;
4461 	struct trace_event *event;
4462 
4463 	entry = iter->ent;
4464 
4465 	test_cpu_buff_start(iter);
4466 
4467 	event = ftrace_find_event(entry->type);
4468 
4469 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4470 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4471 			trace_print_lat_context(iter);
4472 		else
4473 			trace_print_context(iter);
4474 	}
4475 
4476 	if (trace_seq_has_overflowed(s))
4477 		return TRACE_TYPE_PARTIAL_LINE;
4478 
4479 	if (event) {
4480 		if (tr->trace_flags & TRACE_ITER_FIELDS)
4481 			return print_event_fields(iter, event);
4482 		return event->funcs->trace(iter, sym_flags, event);
4483 	}
4484 
4485 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4486 
4487 	return trace_handle_return(s);
4488 }
4489 
4490 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4491 {
4492 	struct trace_array *tr = iter->tr;
4493 	struct trace_seq *s = &iter->seq;
4494 	struct trace_entry *entry;
4495 	struct trace_event *event;
4496 
4497 	entry = iter->ent;
4498 
4499 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4500 		trace_seq_printf(s, "%d %d %llu ",
4501 				 entry->pid, iter->cpu, iter->ts);
4502 
4503 	if (trace_seq_has_overflowed(s))
4504 		return TRACE_TYPE_PARTIAL_LINE;
4505 
4506 	event = ftrace_find_event(entry->type);
4507 	if (event)
4508 		return event->funcs->raw(iter, 0, event);
4509 
4510 	trace_seq_printf(s, "%d ?\n", entry->type);
4511 
4512 	return trace_handle_return(s);
4513 }
4514 
4515 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4516 {
4517 	struct trace_array *tr = iter->tr;
4518 	struct trace_seq *s = &iter->seq;
4519 	unsigned char newline = '\n';
4520 	struct trace_entry *entry;
4521 	struct trace_event *event;
4522 
4523 	entry = iter->ent;
4524 
4525 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4526 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4527 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4528 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4529 		if (trace_seq_has_overflowed(s))
4530 			return TRACE_TYPE_PARTIAL_LINE;
4531 	}
4532 
4533 	event = ftrace_find_event(entry->type);
4534 	if (event) {
4535 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4536 		if (ret != TRACE_TYPE_HANDLED)
4537 			return ret;
4538 	}
4539 
4540 	SEQ_PUT_FIELD(s, newline);
4541 
4542 	return trace_handle_return(s);
4543 }
4544 
4545 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4546 {
4547 	struct trace_array *tr = iter->tr;
4548 	struct trace_seq *s = &iter->seq;
4549 	struct trace_entry *entry;
4550 	struct trace_event *event;
4551 
4552 	entry = iter->ent;
4553 
4554 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4555 		SEQ_PUT_FIELD(s, entry->pid);
4556 		SEQ_PUT_FIELD(s, iter->cpu);
4557 		SEQ_PUT_FIELD(s, iter->ts);
4558 		if (trace_seq_has_overflowed(s))
4559 			return TRACE_TYPE_PARTIAL_LINE;
4560 	}
4561 
4562 	event = ftrace_find_event(entry->type);
4563 	return event ? event->funcs->binary(iter, 0, event) :
4564 		TRACE_TYPE_HANDLED;
4565 }
4566 
4567 int trace_empty(struct trace_iterator *iter)
4568 {
4569 	struct ring_buffer_iter *buf_iter;
4570 	int cpu;
4571 
4572 	/* If we are looking at one CPU buffer, only check that one */
4573 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4574 		cpu = iter->cpu_file;
4575 		buf_iter = trace_buffer_iter(iter, cpu);
4576 		if (buf_iter) {
4577 			if (!ring_buffer_iter_empty(buf_iter))
4578 				return 0;
4579 		} else {
4580 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4581 				return 0;
4582 		}
4583 		return 1;
4584 	}
4585 
4586 	for_each_tracing_cpu(cpu) {
4587 		buf_iter = trace_buffer_iter(iter, cpu);
4588 		if (buf_iter) {
4589 			if (!ring_buffer_iter_empty(buf_iter))
4590 				return 0;
4591 		} else {
4592 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4593 				return 0;
4594 		}
4595 	}
4596 
4597 	return 1;
4598 }
4599 
4600 /*  Called with trace_event_read_lock() held. */
4601 enum print_line_t print_trace_line(struct trace_iterator *iter)
4602 {
4603 	struct trace_array *tr = iter->tr;
4604 	unsigned long trace_flags = tr->trace_flags;
4605 	enum print_line_t ret;
4606 
4607 	if (iter->lost_events) {
4608 		if (iter->lost_events == (unsigned long)-1)
4609 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4610 					 iter->cpu);
4611 		else
4612 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4613 					 iter->cpu, iter->lost_events);
4614 		if (trace_seq_has_overflowed(&iter->seq))
4615 			return TRACE_TYPE_PARTIAL_LINE;
4616 	}
4617 
4618 	if (iter->trace && iter->trace->print_line) {
4619 		ret = iter->trace->print_line(iter);
4620 		if (ret != TRACE_TYPE_UNHANDLED)
4621 			return ret;
4622 	}
4623 
4624 	if (iter->ent->type == TRACE_BPUTS &&
4625 			trace_flags & TRACE_ITER_PRINTK &&
4626 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4627 		return trace_print_bputs_msg_only(iter);
4628 
4629 	if (iter->ent->type == TRACE_BPRINT &&
4630 			trace_flags & TRACE_ITER_PRINTK &&
4631 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4632 		return trace_print_bprintk_msg_only(iter);
4633 
4634 	if (iter->ent->type == TRACE_PRINT &&
4635 			trace_flags & TRACE_ITER_PRINTK &&
4636 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4637 		return trace_print_printk_msg_only(iter);
4638 
4639 	if (trace_flags & TRACE_ITER_BIN)
4640 		return print_bin_fmt(iter);
4641 
4642 	if (trace_flags & TRACE_ITER_HEX)
4643 		return print_hex_fmt(iter);
4644 
4645 	if (trace_flags & TRACE_ITER_RAW)
4646 		return print_raw_fmt(iter);
4647 
4648 	return print_trace_fmt(iter);
4649 }
4650 
4651 void trace_latency_header(struct seq_file *m)
4652 {
4653 	struct trace_iterator *iter = m->private;
4654 	struct trace_array *tr = iter->tr;
4655 
4656 	/* print nothing if the buffers are empty */
4657 	if (trace_empty(iter))
4658 		return;
4659 
4660 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4661 		print_trace_header(m, iter);
4662 
4663 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4664 		print_lat_help_header(m);
4665 }
4666 
4667 void trace_default_header(struct seq_file *m)
4668 {
4669 	struct trace_iterator *iter = m->private;
4670 	struct trace_array *tr = iter->tr;
4671 	unsigned long trace_flags = tr->trace_flags;
4672 
4673 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4674 		return;
4675 
4676 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4677 		/* print nothing if the buffers are empty */
4678 		if (trace_empty(iter))
4679 			return;
4680 		print_trace_header(m, iter);
4681 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4682 			print_lat_help_header(m);
4683 	} else {
4684 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4685 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4686 				print_func_help_header_irq(iter->array_buffer,
4687 							   m, trace_flags);
4688 			else
4689 				print_func_help_header(iter->array_buffer, m,
4690 						       trace_flags);
4691 		}
4692 	}
4693 }
4694 
4695 static void test_ftrace_alive(struct seq_file *m)
4696 {
4697 	if (!ftrace_is_dead())
4698 		return;
4699 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4700 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4701 }
4702 
4703 #ifdef CONFIG_TRACER_MAX_TRACE
4704 static void show_snapshot_main_help(struct seq_file *m)
4705 {
4706 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4707 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4708 		    "#                      Takes a snapshot of the main buffer.\n"
4709 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4710 		    "#                      (Doesn't have to be '2' works with any number that\n"
4711 		    "#                       is not a '0' or '1')\n");
4712 }
4713 
4714 static void show_snapshot_percpu_help(struct seq_file *m)
4715 {
4716 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4717 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4718 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4719 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4720 #else
4721 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4722 		    "#                     Must use main snapshot file to allocate.\n");
4723 #endif
4724 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4725 		    "#                      (Doesn't have to be '2' works with any number that\n"
4726 		    "#                       is not a '0' or '1')\n");
4727 }
4728 
4729 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4730 {
4731 	if (iter->tr->allocated_snapshot)
4732 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4733 	else
4734 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4735 
4736 	seq_puts(m, "# Snapshot commands:\n");
4737 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4738 		show_snapshot_main_help(m);
4739 	else
4740 		show_snapshot_percpu_help(m);
4741 }
4742 #else
4743 /* Should never be called */
4744 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4745 #endif
4746 
4747 static int s_show(struct seq_file *m, void *v)
4748 {
4749 	struct trace_iterator *iter = v;
4750 	int ret;
4751 
4752 	if (iter->ent == NULL) {
4753 		if (iter->tr) {
4754 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4755 			seq_puts(m, "#\n");
4756 			test_ftrace_alive(m);
4757 		}
4758 		if (iter->snapshot && trace_empty(iter))
4759 			print_snapshot_help(m, iter);
4760 		else if (iter->trace && iter->trace->print_header)
4761 			iter->trace->print_header(m);
4762 		else
4763 			trace_default_header(m);
4764 
4765 	} else if (iter->leftover) {
4766 		/*
4767 		 * If we filled the seq_file buffer earlier, we
4768 		 * want to just show it now.
4769 		 */
4770 		ret = trace_print_seq(m, &iter->seq);
4771 
4772 		/* ret should this time be zero, but you never know */
4773 		iter->leftover = ret;
4774 
4775 	} else {
4776 		print_trace_line(iter);
4777 		ret = trace_print_seq(m, &iter->seq);
4778 		/*
4779 		 * If we overflow the seq_file buffer, then it will
4780 		 * ask us for this data again at start up.
4781 		 * Use that instead.
4782 		 *  ret is 0 if seq_file write succeeded.
4783 		 *        -1 otherwise.
4784 		 */
4785 		iter->leftover = ret;
4786 	}
4787 
4788 	return 0;
4789 }
4790 
4791 /*
4792  * Should be used after trace_array_get(); trace_types_lock
4793  * ensures that i_cdev was already initialized.
4794  */
4795 static inline int tracing_get_cpu(struct inode *inode)
4796 {
4797 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4798 		return (long)inode->i_cdev - 1;
4799 	return RING_BUFFER_ALL_CPUS;
4800 }
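/*
 * Encoding note (inferred from the -1 above and trace_create_cpu_file()):
 * per-CPU files store cpu + 1 in i_cdev so that a NULL i_cdev can stand
 * for RING_BUFFER_ALL_CPUS; e.g. the per_cpu/cpu2 file stores 3 and this
 * helper returns 2.
 */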
4801 
4802 static const struct seq_operations tracer_seq_ops = {
4803 	.start		= s_start,
4804 	.next		= s_next,
4805 	.stop		= s_stop,
4806 	.show		= s_show,
4807 };
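/*
 * Illustrative read flow (driven by the generic seq_file core, which
 * lives outside this file): a read() of the "trace" file proceeds as
 *
 *	s_start() -> s_show() -> s_next() -> s_show() -> ... -> s_stop()
 *
 * with s_start()/s_stop() taking and releasing the trace event read
 * lock and the per-cpu access lock around each chunk of output.
 */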
4808 
4809 /*
4810  * Note, as iter itself can be allocated and freed in different
4811  * ways, this function is only used to free its content, and not
4812  * the iterator itself. The only requirement on all the allocations
4813  * is that they must zero all fields (kzalloc), as freeing works with
4814  * either allocated content or NULL.
4815  */
4816 static void free_trace_iter_content(struct trace_iterator *iter)
4817 {
4818 	/* The fmt is either NULL, allocated or points to static_fmt_buf */
4819 	if (iter->fmt != static_fmt_buf)
4820 		kfree(iter->fmt);
4821 
4822 	kfree(iter->temp);
4823 	kfree(iter->buffer_iter);
4824 	mutex_destroy(&iter->mutex);
4825 	free_cpumask_var(iter->started);
4826 }
4827 
4828 static struct trace_iterator *
4829 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4830 {
4831 	struct trace_array *tr = inode->i_private;
4832 	struct trace_iterator *iter;
4833 	int cpu;
4834 
4835 	if (tracing_disabled)
4836 		return ERR_PTR(-ENODEV);
4837 
4838 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4839 	if (!iter)
4840 		return ERR_PTR(-ENOMEM);
4841 
4842 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4843 				    GFP_KERNEL);
4844 	if (!iter->buffer_iter)
4845 		goto release;
4846 
4847 	/*
4848 	 * trace_find_next_entry() may need to save off iter->ent.
4849 	 * It will place it into the iter->temp buffer. As most
4850 	 * events are less than 128 bytes, allocate a buffer of that size.
4851 	 * If one is greater, then trace_find_next_entry() will
4852 	 * allocate a new buffer to adjust for the bigger iter->ent.
4853 	 * It's not critical if it fails to get allocated here.
4854 	 */
4855 	iter->temp = kmalloc(128, GFP_KERNEL);
4856 	if (iter->temp)
4857 		iter->temp_size = 128;
4858 
4859 	/*
4860 	 * trace_event_printf() may need to modify the given format
4861 	 * string to replace %p with %px so that it shows the real address
4862 	 * instead of a hashed value. However, that is only needed for
4863 	 * event tracing; other tracers may not need it. Defer the
4864 	 * allocation until it is needed.
4865 	 */
4866 	iter->fmt = NULL;
4867 	iter->fmt_size = 0;
4868 
4869 	mutex_lock(&trace_types_lock);
4870 	iter->trace = tr->current_trace;
4871 
4872 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4873 		goto fail;
4874 
4875 	iter->tr = tr;
4876 
4877 #ifdef CONFIG_TRACER_MAX_TRACE
4878 	/* Currently only the top directory has a snapshot */
4879 	if (tr->current_trace->print_max || snapshot)
4880 		iter->array_buffer = &tr->max_buffer;
4881 	else
4882 #endif
4883 		iter->array_buffer = &tr->array_buffer;
4884 	iter->snapshot = snapshot;
4885 	iter->pos = -1;
4886 	iter->cpu_file = tracing_get_cpu(inode);
4887 	mutex_init(&iter->mutex);
4888 
4889 	/* Notify the tracer early; before we stop tracing. */
4890 	if (iter->trace->open)
4891 		iter->trace->open(iter);
4892 
4893 	/* Annotate start of buffers if we had overruns */
4894 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4895 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4896 
4897 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4898 	if (trace_clocks[tr->clock_id].in_ns)
4899 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4900 
4901 	/*
4902 	 * If pause-on-trace is enabled, then stop the trace while
4903 	 * dumping, unless this is the "snapshot" file
4904 	 */
4905 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4906 		tracing_stop_tr(tr);
4907 
4908 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4909 		for_each_tracing_cpu(cpu) {
4910 			iter->buffer_iter[cpu] =
4911 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4912 							 cpu, GFP_KERNEL);
4913 		}
4914 		ring_buffer_read_prepare_sync();
4915 		for_each_tracing_cpu(cpu) {
4916 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4917 			tracing_iter_reset(iter, cpu);
4918 		}
4919 	} else {
4920 		cpu = iter->cpu_file;
4921 		iter->buffer_iter[cpu] =
4922 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4923 						 cpu, GFP_KERNEL);
4924 		ring_buffer_read_prepare_sync();
4925 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4926 		tracing_iter_reset(iter, cpu);
4927 	}
4928 
4929 	mutex_unlock(&trace_types_lock);
4930 
4931 	return iter;
4932 
4933  fail:
4934 	mutex_unlock(&trace_types_lock);
4935 	free_trace_iter_content(iter);
4936 release:
4937 	seq_release_private(inode, file);
4938 	return ERR_PTR(-ENOMEM);
4939 }
4940 
4941 int tracing_open_generic(struct inode *inode, struct file *filp)
4942 {
4943 	int ret;
4944 
4945 	ret = tracing_check_open_get_tr(NULL);
4946 	if (ret)
4947 		return ret;
4948 
4949 	filp->private_data = inode->i_private;
4950 	return 0;
4951 }
4952 
4953 bool tracing_is_disabled(void)
4954 {
4955 	return tracing_disabled;
4956 }
4957 
4958 /*
4959  * Open and update trace_array ref count.
4960  * Must have the current trace_array passed to it.
4961  */
4962 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4963 {
4964 	struct trace_array *tr = inode->i_private;
4965 	int ret;
4966 
4967 	ret = tracing_check_open_get_tr(tr);
4968 	if (ret)
4969 		return ret;
4970 
4971 	filp->private_data = inode->i_private;
4972 
4973 	return 0;
4974 }
4975 
4976 /*
4977  * The private pointer of the inode is the trace_event_file.
4978  * Update the tr ref count associated to it.
4979  */
4980 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4981 {
4982 	struct trace_event_file *file = inode->i_private;
4983 	int ret;
4984 
4985 	ret = tracing_check_open_get_tr(file->tr);
4986 	if (ret)
4987 		return ret;
4988 
4989 	filp->private_data = inode->i_private;
4990 
4991 	return 0;
4992 }
4993 
4994 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4995 {
4996 	struct trace_event_file *file = inode->i_private;
4997 
4998 	trace_array_put(file->tr);
4999 
5000 	return 0;
5001 }
5002 
5003 static int tracing_mark_open(struct inode *inode, struct file *filp)
5004 {
5005 	stream_open(inode, filp);
5006 	return tracing_open_generic_tr(inode, filp);
5007 }
5008 
5009 static int tracing_release(struct inode *inode, struct file *file)
5010 {
5011 	struct trace_array *tr = inode->i_private;
5012 	struct seq_file *m = file->private_data;
5013 	struct trace_iterator *iter;
5014 	int cpu;
5015 
5016 	if (!(file->f_mode & FMODE_READ)) {
5017 		trace_array_put(tr);
5018 		return 0;
5019 	}
5020 
5021 	/* Writes do not use seq_file */
5022 	iter = m->private;
5023 	mutex_lock(&trace_types_lock);
5024 
5025 	for_each_tracing_cpu(cpu) {
5026 		if (iter->buffer_iter[cpu])
5027 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
5028 	}
5029 
5030 	if (iter->trace && iter->trace->close)
5031 		iter->trace->close(iter);
5032 
5033 	if (!iter->snapshot && tr->stop_count)
5034 		/* reenable tracing if it was previously enabled */
5035 		tracing_start_tr(tr);
5036 
5037 	__trace_array_put(tr);
5038 
5039 	mutex_unlock(&trace_types_lock);
5040 
5041 	free_trace_iter_content(iter);
5042 	seq_release_private(inode, file);
5043 
5044 	return 0;
5045 }
5046 
5047 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5048 {
5049 	struct trace_array *tr = inode->i_private;
5050 
5051 	trace_array_put(tr);
5052 	return 0;
5053 }
5054 
5055 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5056 {
5057 	struct trace_array *tr = inode->i_private;
5058 
5059 	trace_array_put(tr);
5060 
5061 	return single_release(inode, file);
5062 }
5063 
5064 static int tracing_open(struct inode *inode, struct file *file)
5065 {
5066 	struct trace_array *tr = inode->i_private;
5067 	struct trace_iterator *iter;
5068 	int ret;
5069 
5070 	ret = tracing_check_open_get_tr(tr);
5071 	if (ret)
5072 		return ret;
5073 
5074 	/* If this file was open for write, then erase contents */
5075 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5076 		int cpu = tracing_get_cpu(inode);
5077 		struct array_buffer *trace_buf = &tr->array_buffer;
5078 
5079 #ifdef CONFIG_TRACER_MAX_TRACE
5080 		if (tr->current_trace->print_max)
5081 			trace_buf = &tr->max_buffer;
5082 #endif
5083 
5084 		if (cpu == RING_BUFFER_ALL_CPUS)
5085 			tracing_reset_online_cpus(trace_buf);
5086 		else
5087 			tracing_reset_cpu(trace_buf, cpu);
5088 	}
5089 
5090 	if (file->f_mode & FMODE_READ) {
5091 		iter = __tracing_open(inode, file, false);
5092 		if (IS_ERR(iter))
5093 			ret = PTR_ERR(iter);
5094 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5095 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
5096 	}
5097 
5098 	if (ret < 0)
5099 		trace_array_put(tr);
5100 
5101 	return ret;
5102 }
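/*
 * User-visible effect of the O_TRUNC handling above (illustrative):
 *
 *	# echo > /sys/kernel/tracing/trace
 *
 * opens the file write-only with O_TRUNC and therefore clears the ring
 * buffer (all CPUs, or just one when done through a per_cpu trace file)
 * without producing any output.
 */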
5103 
5104 /*
5105  * Some tracers are not suitable for instance buffers.
5106  * A tracer is always available for the global array (top level),
5107  * and for an instance only if it explicitly states that it is.
5108  */
5109 static bool
5110 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5111 {
5112 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5113 }
5114 
5115 /* Find the next tracer that this trace array may use */
5116 static struct tracer *
5117 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5118 {
5119 	while (t && !trace_ok_for_array(t, tr))
5120 		t = t->next;
5121 
5122 	return t;
5123 }
5124 
5125 static void *
5126 t_next(struct seq_file *m, void *v, loff_t *pos)
5127 {
5128 	struct trace_array *tr = m->private;
5129 	struct tracer *t = v;
5130 
5131 	(*pos)++;
5132 
5133 	if (t)
5134 		t = get_tracer_for_array(tr, t->next);
5135 
5136 	return t;
5137 }
5138 
5139 static void *t_start(struct seq_file *m, loff_t *pos)
5140 {
5141 	struct trace_array *tr = m->private;
5142 	struct tracer *t;
5143 	loff_t l = 0;
5144 
5145 	mutex_lock(&trace_types_lock);
5146 
5147 	t = get_tracer_for_array(tr, trace_types);
5148 	for (; t && l < *pos; t = t_next(m, t, &l))
5149 			;
5150 
5151 	return t;
5152 }
5153 
5154 static void t_stop(struct seq_file *m, void *p)
5155 {
5156 	mutex_unlock(&trace_types_lock);
5157 }
5158 
5159 static int t_show(struct seq_file *m, void *v)
5160 {
5161 	struct tracer *t = v;
5162 
5163 	if (!t)
5164 		return 0;
5165 
5166 	seq_puts(m, t->name);
5167 	if (t->next)
5168 		seq_putc(m, ' ');
5169 	else
5170 		seq_putc(m, '\n');
5171 
5172 	return 0;
5173 }
5174 
5175 static const struct seq_operations show_traces_seq_ops = {
5176 	.start		= t_start,
5177 	.next		= t_next,
5178 	.stop		= t_stop,
5179 	.show		= t_show,
5180 };
5181 
5182 static int show_traces_open(struct inode *inode, struct file *file)
5183 {
5184 	struct trace_array *tr = inode->i_private;
5185 	struct seq_file *m;
5186 	int ret;
5187 
5188 	ret = tracing_check_open_get_tr(tr);
5189 	if (ret)
5190 		return ret;
5191 
5192 	ret = seq_open(file, &show_traces_seq_ops);
5193 	if (ret) {
5194 		trace_array_put(tr);
5195 		return ret;
5196 	}
5197 
5198 	m = file->private_data;
5199 	m->private = tr;
5200 
5201 	return 0;
5202 }
5203 
5204 static int show_traces_release(struct inode *inode, struct file *file)
5205 {
5206 	struct trace_array *tr = inode->i_private;
5207 
5208 	trace_array_put(tr);
5209 	return seq_release(inode, file);
5210 }
5211 
5212 static ssize_t
5213 tracing_write_stub(struct file *filp, const char __user *ubuf,
5214 		   size_t count, loff_t *ppos)
5215 {
5216 	return count;
5217 }
5218 
5219 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5220 {
5221 	int ret;
5222 
5223 	if (file->f_mode & FMODE_READ)
5224 		ret = seq_lseek(file, offset, whence);
5225 	else
5226 		file->f_pos = ret = 0;
5227 
5228 	return ret;
5229 }
5230 
5231 static const struct file_operations tracing_fops = {
5232 	.open		= tracing_open,
5233 	.read		= seq_read,
5234 	.read_iter	= seq_read_iter,
5235 	.splice_read	= copy_splice_read,
5236 	.write		= tracing_write_stub,
5237 	.llseek		= tracing_lseek,
5238 	.release	= tracing_release,
5239 };
5240 
5241 static const struct file_operations show_traces_fops = {
5242 	.open		= show_traces_open,
5243 	.read		= seq_read,
5244 	.llseek		= seq_lseek,
5245 	.release	= show_traces_release,
5246 };
5247 
5248 static ssize_t
5249 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5250 		     size_t count, loff_t *ppos)
5251 {
5252 	struct trace_array *tr = file_inode(filp)->i_private;
5253 	char *mask_str;
5254 	int len;
5255 
5256 	len = snprintf(NULL, 0, "%*pb\n",
5257 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
5258 	mask_str = kmalloc(len, GFP_KERNEL);
5259 	if (!mask_str)
5260 		return -ENOMEM;
5261 
5262 	len = snprintf(mask_str, len, "%*pb\n",
5263 		       cpumask_pr_args(tr->tracing_cpumask));
5264 	if (len >= count) {
5265 		count = -EINVAL;
5266 		goto out_err;
5267 	}
5268 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5269 
5270 out_err:
5271 	kfree(mask_str);
5272 
5273 	return count;
5274 }
5275 
5276 int tracing_set_cpumask(struct trace_array *tr,
5277 			cpumask_var_t tracing_cpumask_new)
5278 {
5279 	int cpu;
5280 
5281 	if (!tr)
5282 		return -EINVAL;
5283 
5284 	local_irq_disable();
5285 	arch_spin_lock(&tr->max_lock);
5286 	for_each_tracing_cpu(cpu) {
5287 		/*
5288 		 * Increase/decrease the disabled counter if we are
5289 		 * about to flip a bit in the cpumask:
5290 		 */
5291 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5292 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5293 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5294 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5295 #ifdef CONFIG_TRACER_MAX_TRACE
5296 			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5297 #endif
5298 		}
5299 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5300 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5301 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5302 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5303 #ifdef CONFIG_TRACER_MAX_TRACE
5304 			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5305 #endif
5306 		}
5307 	}
5308 	arch_spin_unlock(&tr->max_lock);
5309 	local_irq_enable();
5310 
5311 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5312 
5313 	return 0;
5314 }
5315 
5316 static ssize_t
5317 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5318 		      size_t count, loff_t *ppos)
5319 {
5320 	struct trace_array *tr = file_inode(filp)->i_private;
5321 	cpumask_var_t tracing_cpumask_new;
5322 	int err;
5323 
5324 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5325 		return -ENOMEM;
5326 
5327 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5328 	if (err)
5329 		goto err_free;
5330 
5331 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
5332 	if (err)
5333 		goto err_free;
5334 
5335 	free_cpumask_var(tracing_cpumask_new);
5336 
5337 	return count;
5338 
5339 err_free:
5340 	free_cpumask_var(tracing_cpumask_new);
5341 
5342 	return err;
5343 }
5344 
5345 static const struct file_operations tracing_cpumask_fops = {
5346 	.open		= tracing_open_generic_tr,
5347 	.read		= tracing_cpumask_read,
5348 	.write		= tracing_cpumask_write,
5349 	.release	= tracing_release_generic_tr,
5350 	.llseek		= generic_file_llseek,
5351 };
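
/*
 * Illustrative usage of the tracing_cpumask file (assuming tracefs is
 * mounted at /sys/kernel/tracing); the written mask is parsed by
 * cpumask_parse_user() and applied by tracing_set_cpumask() above:
 *
 *   # cat /sys/kernel/tracing/tracing_cpumask
 *   ff
 *   # echo 3 > /sys/kernel/tracing/tracing_cpumask   # trace only CPUs 0-1
 *
 * (The "ff" output is just an example value for an 8-CPU machine.)
 */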
5352 
5353 static int tracing_trace_options_show(struct seq_file *m, void *v)
5354 {
5355 	struct tracer_opt *trace_opts;
5356 	struct trace_array *tr = m->private;
5357 	u32 tracer_flags;
5358 	int i;
5359 
5360 	mutex_lock(&trace_types_lock);
5361 	tracer_flags = tr->current_trace->flags->val;
5362 	trace_opts = tr->current_trace->flags->opts;
5363 
5364 	for (i = 0; trace_options[i]; i++) {
5365 		if (tr->trace_flags & (1 << i))
5366 			seq_printf(m, "%s\n", trace_options[i]);
5367 		else
5368 			seq_printf(m, "no%s\n", trace_options[i]);
5369 	}
5370 
5371 	for (i = 0; trace_opts[i].name; i++) {
5372 		if (tracer_flags & trace_opts[i].bit)
5373 			seq_printf(m, "%s\n", trace_opts[i].name);
5374 		else
5375 			seq_printf(m, "no%s\n", trace_opts[i].name);
5376 	}
5377 	mutex_unlock(&trace_types_lock);
5378 
5379 	return 0;
5380 }
5381 
5382 static int __set_tracer_option(struct trace_array *tr,
5383 			       struct tracer_flags *tracer_flags,
5384 			       struct tracer_opt *opts, int neg)
5385 {
5386 	struct tracer *trace = tracer_flags->trace;
5387 	int ret;
5388 
5389 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5390 	if (ret)
5391 		return ret;
5392 
5393 	if (neg)
5394 		tracer_flags->val &= ~opts->bit;
5395 	else
5396 		tracer_flags->val |= opts->bit;
5397 	return 0;
5398 }
5399 
5400 /* Try to assign a tracer specific option */
5401 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5402 {
5403 	struct tracer *trace = tr->current_trace;
5404 	struct tracer_flags *tracer_flags = trace->flags;
5405 	struct tracer_opt *opts = NULL;
5406 	int i;
5407 
5408 	for (i = 0; tracer_flags->opts[i].name; i++) {
5409 		opts = &tracer_flags->opts[i];
5410 
5411 		if (strcmp(cmp, opts->name) == 0)
5412 			return __set_tracer_option(tr, trace->flags, opts, neg);
5413 	}
5414 
5415 	return -EINVAL;
5416 }
5417 
5418 /* Some tracers require overwrite to stay enabled */
5419 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5420 {
5421 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5422 		return -1;
5423 
5424 	return 0;
5425 }
5426 
5427 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5428 {
5429 	int *map;
5430 
5431 	if ((mask == TRACE_ITER_RECORD_TGID) ||
5432 	    (mask == TRACE_ITER_RECORD_CMD))
5433 		lockdep_assert_held(&event_mutex);
5434 
5435 	/* do nothing if flag is already set */
5436 	if (!!(tr->trace_flags & mask) == !!enabled)
5437 		return 0;
5438 
5439 	/* Give the tracer a chance to approve the change */
5440 	if (tr->current_trace->flag_changed)
5441 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5442 			return -EINVAL;
5443 
5444 	if (enabled)
5445 		tr->trace_flags |= mask;
5446 	else
5447 		tr->trace_flags &= ~mask;
5448 
5449 	if (mask == TRACE_ITER_RECORD_CMD)
5450 		trace_event_enable_cmd_record(enabled);
5451 
5452 	if (mask == TRACE_ITER_RECORD_TGID) {
5453 		if (!tgid_map) {
5454 			tgid_map_max = pid_max;
5455 			map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5456 				       GFP_KERNEL);
5457 
5458 			/*
5459 			 * Pairs with smp_load_acquire() in
5460 			 * trace_find_tgid_ptr() to ensure that if it observes
5461 			 * the tgid_map we just allocated then it also observes
5462 			 * the corresponding tgid_map_max value.
5463 			 */
5464 			smp_store_release(&tgid_map, map);
5465 		}
5466 		if (!tgid_map) {
5467 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5468 			return -ENOMEM;
5469 		}
5470 
5471 		trace_event_enable_tgid_record(enabled);
5472 	}
5473 
5474 	if (mask == TRACE_ITER_EVENT_FORK)
5475 		trace_event_follow_fork(tr, enabled);
5476 
5477 	if (mask == TRACE_ITER_FUNC_FORK)
5478 		ftrace_pid_follow_fork(tr, enabled);
5479 
5480 	if (mask == TRACE_ITER_OVERWRITE) {
5481 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5482 #ifdef CONFIG_TRACER_MAX_TRACE
5483 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5484 #endif
5485 	}
5486 
5487 	if (mask == TRACE_ITER_PRINTK) {
5488 		trace_printk_start_stop_comm(enabled);
5489 		trace_printk_control(enabled);
5490 	}
5491 
5492 	return 0;
5493 }
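
/*
 * Note: besides the trace_options file handled below, each of these flags
 * is also exposed as an individual boolean file under the tracefs
 * "options/" directory created elsewhere in this file; e.g. writing 1 to
 * options/record-tgid ends up here with mask == TRACE_ITER_RECORD_TGID.
 */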
5494 
5495 int trace_set_options(struct trace_array *tr, char *option)
5496 {
5497 	char *cmp;
5498 	int neg = 0;
5499 	int ret;
5500 	size_t orig_len = strlen(option);
5501 	int len;
5502 
5503 	cmp = strstrip(option);
5504 
5505 	len = str_has_prefix(cmp, "no");
5506 	if (len)
5507 		neg = 1;
5508 
5509 	cmp += len;
5510 
5511 	mutex_lock(&event_mutex);
5512 	mutex_lock(&trace_types_lock);
5513 
5514 	ret = match_string(trace_options, -1, cmp);
5515 	/* If no option could be set, test the specific tracer options */
5516 	if (ret < 0)
5517 		ret = set_tracer_option(tr, cmp, neg);
5518 	else
5519 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5520 
5521 	mutex_unlock(&trace_types_lock);
5522 	mutex_unlock(&event_mutex);
5523 
5524 	/*
5525 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5526 	 * turn it back into a space.
5527 	 */
5528 	if (orig_len > strlen(option))
5529 		option[strlen(option)] = ' ';
5530 
5531 	return ret;
5532 }
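
/*
 * Illustrative use of the option syntax handled above via the trace_options
 * file: a bare name sets a flag, a "no" prefix clears it, e.g.:
 *
 *   # echo print-parent > /sys/kernel/tracing/trace_options
 *   # echo noprint-parent > /sys/kernel/tracing/trace_options
 */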
5533 
5534 static void __init apply_trace_boot_options(void)
5535 {
5536 	char *buf = trace_boot_options_buf;
5537 	char *option;
5538 
5539 	while (true) {
5540 		option = strsep(&buf, ",");
5541 
5542 		if (!option)
5543 			break;
5544 
5545 		if (*option)
5546 			trace_set_options(&global_trace, option);
5547 
5548 		/* Put back the comma to allow this to be called again */
5549 		if (buf)
5550 			*(buf - 1) = ',';
5551 	}
5552 }
5553 
5554 static ssize_t
5555 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5556 			size_t cnt, loff_t *ppos)
5557 {
5558 	struct seq_file *m = filp->private_data;
5559 	struct trace_array *tr = m->private;
5560 	char buf[64];
5561 	int ret;
5562 
5563 	if (cnt >= sizeof(buf))
5564 		return -EINVAL;
5565 
5566 	if (copy_from_user(buf, ubuf, cnt))
5567 		return -EFAULT;
5568 
5569 	buf[cnt] = 0;
5570 
5571 	ret = trace_set_options(tr, buf);
5572 	if (ret < 0)
5573 		return ret;
5574 
5575 	*ppos += cnt;
5576 
5577 	return cnt;
5578 }
5579 
5580 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5581 {
5582 	struct trace_array *tr = inode->i_private;
5583 	int ret;
5584 
5585 	ret = tracing_check_open_get_tr(tr);
5586 	if (ret)
5587 		return ret;
5588 
5589 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5590 	if (ret < 0)
5591 		trace_array_put(tr);
5592 
5593 	return ret;
5594 }
5595 
5596 static const struct file_operations tracing_iter_fops = {
5597 	.open		= tracing_trace_options_open,
5598 	.read		= seq_read,
5599 	.llseek		= seq_lseek,
5600 	.release	= tracing_single_release_tr,
5601 	.write		= tracing_trace_options_write,
5602 };
5603 
5604 static const char readme_msg[] =
5605 	"tracing mini-HOWTO:\n\n"
5606 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5607 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5608 	" Important files:\n"
5609 	"  trace\t\t\t- The static contents of the buffer\n"
5610 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
5611 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5612 	"  current_tracer\t- function and latency tracers\n"
5613 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5614 	"  error_log\t- error log for failed commands (that support it)\n"
5615 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5616 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5617 	"  trace_clock\t\t- change the clock used to order events\n"
5618 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5619 	"      global:   Synced across CPUs but slows tracing down.\n"
5620 	"     counter:   Not a clock, but just an increment\n"
5621 	"      uptime:   Jiffy counter from time of boot\n"
5622 	"        perf:   Same clock that perf events use\n"
5623 #ifdef CONFIG_X86_64
5624 	"     x86-tsc:   TSC cycle counter\n"
5625 #endif
5626 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5627 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5628 	"    absolute:   Absolute (standalone) timestamp\n"
5629 	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5630 	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
5631 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5632 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5633 	"\t\t\t  Remove sub-buffer with rmdir\n"
5634 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5635 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5636 	"\t\t\t  option name\n"
5637 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5638 #ifdef CONFIG_DYNAMIC_FTRACE
5639 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5640 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5641 	"\t\t\t  functions\n"
5642 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5643 	"\t     modules: Can select a group via module\n"
5644 	"\t      Format: :mod:<module-name>\n"
5645 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5646 	"\t    triggers: a command to perform when function is hit\n"
5647 	"\t      Format: <function>:<trigger>[:count]\n"
5648 	"\t     trigger: traceon, traceoff\n"
5649 	"\t\t      enable_event:<system>:<event>\n"
5650 	"\t\t      disable_event:<system>:<event>\n"
5651 #ifdef CONFIG_STACKTRACE
5652 	"\t\t      stacktrace\n"
5653 #endif
5654 #ifdef CONFIG_TRACER_SNAPSHOT
5655 	"\t\t      snapshot\n"
5656 #endif
5657 	"\t\t      dump\n"
5658 	"\t\t      cpudump\n"
5659 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5660 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5661 	"\t     The first one will disable tracing every time do_fault is hit\n"
5662 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5663 	"\t       The first time do_trap is hit and it disables tracing, the\n"
5664 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5665 	"\t       the counter will not decrement. It only decrements when the\n"
5666 	"\t       trigger did work\n"
5667 	"\t     To remove trigger without count:\n"
5668 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5669 	"\t     To remove trigger with a count:\n"
5670 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5671 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5672 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5673 	"\t    modules: Can select a group via module command :mod:\n"
5674 	"\t    Does not accept triggers\n"
5675 #endif /* CONFIG_DYNAMIC_FTRACE */
5676 #ifdef CONFIG_FUNCTION_TRACER
5677 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5678 	"\t\t    (function)\n"
5679 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5680 	"\t\t    (function)\n"
5681 #endif
5682 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5683 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5684 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5685 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5686 #endif
5687 #ifdef CONFIG_TRACER_SNAPSHOT
5688 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5689 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5690 	"\t\t\t  information\n"
5691 #endif
5692 #ifdef CONFIG_STACK_TRACER
5693 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5694 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5695 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5696 	"\t\t\t  new trace)\n"
5697 #ifdef CONFIG_DYNAMIC_FTRACE
5698 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5699 	"\t\t\t  traces\n"
5700 #endif
5701 #endif /* CONFIG_STACK_TRACER */
5702 #ifdef CONFIG_DYNAMIC_EVENTS
5703 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5704 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5705 #endif
5706 #ifdef CONFIG_KPROBE_EVENTS
5707 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5708 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5709 #endif
5710 #ifdef CONFIG_UPROBE_EVENTS
5711 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5712 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5713 #endif
5714 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5715     defined(CONFIG_FPROBE_EVENTS)
5716 	"\t  accepts: event-definitions (one definition per line)\n"
5717 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5718 	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5719 	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5720 #endif
5721 #ifdef CONFIG_FPROBE_EVENTS
5722 	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5723 	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5724 #endif
5725 #ifdef CONFIG_HIST_TRIGGERS
5726 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5727 #endif
5728 	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5729 	"\t           -:[<group>/][<event>]\n"
5730 #ifdef CONFIG_KPROBE_EVENTS
5731 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5732   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5733 #endif
5734 #ifdef CONFIG_UPROBE_EVENTS
5735   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5736 #endif
5737 	"\t     args: <name>=fetcharg[:type]\n"
5738 	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5739 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5740 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5741 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5742 	"\t           <argname>[->field[->field|.field...]],\n"
5743 #else
5744 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5745 #endif
5746 #else
5747 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5748 #endif
5749 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5750 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5751 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5752 	"\t           symstr, <type>\\[<array-size>\\]\n"
5753 #ifdef CONFIG_HIST_TRIGGERS
5754 	"\t    field: <stype> <name>;\n"
5755 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5756 	"\t           [unsigned] char/int/long\n"
5757 #endif
5758 	"\t    efield: For event probes ('e' types), the field is one of the fields\n"
5759 	"\t            of the <attached-group>/<attached-event>.\n"
5760 #endif
5761 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5762 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5763 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5764 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5765 	"\t\t\t  events\n"
5766 	"      filter\t\t- If set, only events passing filter are traced\n"
5767 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5768 	"\t\t\t  <event>:\n"
5769 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5770 	"      filter\t\t- If set, only events passing filter are traced\n"
5771 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5772 	"\t    Format: <trigger>[:count][if <filter>]\n"
5773 	"\t   trigger: traceon, traceoff\n"
5774 	"\t            enable_event:<system>:<event>\n"
5775 	"\t            disable_event:<system>:<event>\n"
5776 #ifdef CONFIG_HIST_TRIGGERS
5777 	"\t            enable_hist:<system>:<event>\n"
5778 	"\t            disable_hist:<system>:<event>\n"
5779 #endif
5780 #ifdef CONFIG_STACKTRACE
5781 	"\t\t    stacktrace\n"
5782 #endif
5783 #ifdef CONFIG_TRACER_SNAPSHOT
5784 	"\t\t    snapshot\n"
5785 #endif
5786 #ifdef CONFIG_HIST_TRIGGERS
5787 	"\t\t    hist (see below)\n"
5788 #endif
5789 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5790 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5791 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5792 	"\t                  events/block/block_unplug/trigger\n"
5793 	"\t   The first disables tracing every time block_unplug is hit.\n"
5794 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5795 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5796 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5797 	"\t   Like function triggers, the counter is only decremented if it\n"
5798 	"\t    enabled or disabled tracing.\n"
5799 	"\t   To remove a trigger without a count:\n"
5800 	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
5801 	"\t   To remove a trigger with a count:\n"
5802 	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
5803 	"\t   Filters can be ignored when removing a trigger.\n"
5804 #ifdef CONFIG_HIST_TRIGGERS
5805 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5806 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5807 	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5808 	"\t            [:values=<field1[,field2,...]>]\n"
5809 	"\t            [:sort=<field1[,field2,...]>]\n"
5810 	"\t            [:size=#entries]\n"
5811 	"\t            [:pause][:continue][:clear]\n"
5812 	"\t            [:name=histname1]\n"
5813 	"\t            [:nohitcount]\n"
5814 	"\t            [:<handler>.<action>]\n"
5815 	"\t            [if <filter>]\n\n"
5816 	"\t    Note, special fields can be used as well:\n"
5817 	"\t            common_timestamp - to record current timestamp\n"
5818 	"\t            common_cpu - to record the CPU the event happened on\n"
5819 	"\n"
5820 	"\t    A hist trigger variable can be:\n"
5821 	"\t        - a reference to a field e.g. x=current_timestamp,\n"
5822 	"\t        - a reference to another variable e.g. y=$x,\n"
5823 	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5824 	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5825 	"\n"
5826 	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5827 	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5828 	"\t    variable reference, field or numeric literal.\n"
5829 	"\n"
5830 	"\t    When a matching event is hit, an entry is added to a hash\n"
5831 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5832 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5833 	"\t    correspond to fields in the event's format description.  Keys\n"
5834 	"\t    can be any field, or the special string 'common_stacktrace'.\n"
5835 	"\t    Compound keys consisting of up to two fields can be specified\n"
5836 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5837 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5838 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5839 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5840 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5841 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5842 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5843 	"\t    its histogram data will be shared with other triggers of the\n"
5844 	"\t    same name, and trigger hits will update this common data.\n\n"
5845 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5846 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5847 	"\t    triggers attached to an event, there will be a table for each\n"
5848 	"\t    trigger in the output.  The table displayed for a named\n"
5849 	"\t    trigger will be the same as any other instance having the\n"
5850 	"\t    same name.  The default format used to display a given field\n"
5851 	"\t    can be modified by appending any of the following modifiers\n"
5852 	"\t    to the field name, as applicable:\n\n"
5853 	"\t            .hex        display a number as a hex value\n"
5854 	"\t            .sym        display an address as a symbol\n"
5855 	"\t            .sym-offset display an address as a symbol and offset\n"
5856 	"\t            .execname   display a common_pid as a program name\n"
5857 	"\t            .syscall    display a syscall id as a syscall name\n"
5858 	"\t            .log2       display log2 value rather than raw number\n"
5859 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
5860 	"\t            .usecs      display a common_timestamp in microseconds\n"
5861 	"\t            .percent    display a number as a percentage value\n"
5862 	"\t            .graph      display a bar-graph of a value\n\n"
5863 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5864 	"\t    trigger or to start a hist trigger but not log any events\n"
5865 	"\t    until told to do so.  'continue' can be used to start or\n"
5866 	"\t    restart a paused hist trigger.\n\n"
5867 	"\t    The 'clear' parameter will clear the contents of a running\n"
5868 	"\t    hist trigger and leave its current paused/active state\n"
5869 	"\t    unchanged.\n\n"
5870 	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5871 	"\t    raw hitcount in the histogram.\n\n"
5872 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5873 	"\t    have one event conditionally start and stop another event's\n"
5874 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5875 	"\t    the enable_event and disable_event triggers.\n\n"
5876 	"\t    Hist trigger handlers and actions are executed whenever\n"
5877 	"\t    a histogram entry is added or updated.  They take the form:\n\n"
5878 	"\t        <handler>.<action>\n\n"
5879 	"\t    The available handlers are:\n\n"
5880 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5881 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5882 	"\t        onchange(var)            - invoke action if var changes\n\n"
5883 	"\t    The available actions are:\n\n"
5884 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5885 	"\t        save(field,...)                      - save current event fields\n"
5886 #ifdef CONFIG_TRACER_SNAPSHOT
5887 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5888 #endif
5889 #ifdef CONFIG_SYNTH_EVENTS
5890 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5891 	"\t  Write into this file to define/undefine new synthetic events.\n"
5892 	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5893 #endif
5894 #endif
5895 ;
5896 
5897 static ssize_t
5898 tracing_readme_read(struct file *filp, char __user *ubuf,
5899 		       size_t cnt, loff_t *ppos)
5900 {
5901 	return simple_read_from_buffer(ubuf, cnt, ppos,
5902 					readme_msg, strlen(readme_msg));
5903 }
5904 
5905 static const struct file_operations tracing_readme_fops = {
5906 	.open		= tracing_open_generic,
5907 	.read		= tracing_readme_read,
5908 	.llseek		= generic_file_llseek,
5909 };
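
/*
 * The readme_msg above is exposed read-only as the "README" file in
 * tracefs, so "cat /sys/kernel/tracing/README" dumps this mini-HOWTO.
 */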
5910 
5911 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5912 {
5913 	int pid = ++(*pos);
5914 
5915 	return trace_find_tgid_ptr(pid);
5916 }
5917 
5918 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5919 {
5920 	int pid = *pos;
5921 
5922 	return trace_find_tgid_ptr(pid);
5923 }
5924 
5925 static void saved_tgids_stop(struct seq_file *m, void *v)
5926 {
5927 }
5928 
5929 static int saved_tgids_show(struct seq_file *m, void *v)
5930 {
5931 	int *entry = (int *)v;
5932 	int pid = entry - tgid_map;
5933 	int tgid = *entry;
5934 
5935 	if (tgid == 0)
5936 		return SEQ_SKIP;
5937 
5938 	seq_printf(m, "%d %d\n", pid, tgid);
5939 	return 0;
5940 }
5941 
5942 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5943 	.start		= saved_tgids_start,
5944 	.stop		= saved_tgids_stop,
5945 	.next		= saved_tgids_next,
5946 	.show		= saved_tgids_show,
5947 };
5948 
5949 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5950 {
5951 	int ret;
5952 
5953 	ret = tracing_check_open_get_tr(NULL);
5954 	if (ret)
5955 		return ret;
5956 
5957 	return seq_open(filp, &tracing_saved_tgids_seq_ops);
5958 }
5959 
5960 
5961 static const struct file_operations tracing_saved_tgids_fops = {
5962 	.open		= tracing_saved_tgids_open,
5963 	.read		= seq_read,
5964 	.llseek		= seq_lseek,
5965 	.release	= seq_release,
5966 };
5967 
5968 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5969 {
5970 	unsigned int *ptr = v;
5971 
5972 	if (*pos || m->count)
5973 		ptr++;
5974 
5975 	(*pos)++;
5976 
5977 	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5978 	     ptr++) {
5979 		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5980 			continue;
5981 
5982 		return ptr;
5983 	}
5984 
5985 	return NULL;
5986 }
5987 
5988 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5989 {
5990 	void *v;
5991 	loff_t l = 0;
5992 
5993 	preempt_disable();
5994 	arch_spin_lock(&trace_cmdline_lock);
5995 
5996 	v = &savedcmd->map_cmdline_to_pid[0];
5997 	while (l <= *pos) {
5998 		v = saved_cmdlines_next(m, v, &l);
5999 		if (!v)
6000 			return NULL;
6001 	}
6002 
6003 	return v;
6004 }
6005 
6006 static void saved_cmdlines_stop(struct seq_file *m, void *v)
6007 {
6008 	arch_spin_unlock(&trace_cmdline_lock);
6009 	preempt_enable();
6010 }
6011 
6012 static int saved_cmdlines_show(struct seq_file *m, void *v)
6013 {
6014 	char buf[TASK_COMM_LEN];
6015 	unsigned int *pid = v;
6016 
6017 	__trace_find_cmdline(*pid, buf);
6018 	seq_printf(m, "%d %s\n", *pid, buf);
6019 	return 0;
6020 }
6021 
6022 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
6023 	.start		= saved_cmdlines_start,
6024 	.next		= saved_cmdlines_next,
6025 	.stop		= saved_cmdlines_stop,
6026 	.show		= saved_cmdlines_show,
6027 };
6028 
6029 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6030 {
6031 	int ret;
6032 
6033 	ret = tracing_check_open_get_tr(NULL);
6034 	if (ret)
6035 		return ret;
6036 
6037 	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6038 }
6039 
6040 static const struct file_operations tracing_saved_cmdlines_fops = {
6041 	.open		= tracing_saved_cmdlines_open,
6042 	.read		= seq_read,
6043 	.llseek		= seq_lseek,
6044 	.release	= seq_release,
6045 };
6046 
6047 static ssize_t
6048 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6049 				 size_t cnt, loff_t *ppos)
6050 {
6051 	char buf[64];
6052 	int r;
6053 
6054 	preempt_disable();
6055 	arch_spin_lock(&trace_cmdline_lock);
6056 	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6057 	arch_spin_unlock(&trace_cmdline_lock);
6058 	preempt_enable();
6059 
6060 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6061 }
6062 
6063 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6064 {
6065 	kfree(s->saved_cmdlines);
6066 	kfree(s->map_cmdline_to_pid);
6067 	kfree(s);
6068 }
6069 
6070 static int tracing_resize_saved_cmdlines(unsigned int val)
6071 {
6072 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
6073 
6074 	s = kmalloc(sizeof(*s), GFP_KERNEL);
6075 	if (!s)
6076 		return -ENOMEM;
6077 
6078 	if (allocate_cmdlines_buffer(val, s) < 0) {
6079 		kfree(s);
6080 		return -ENOMEM;
6081 	}
6082 
6083 	preempt_disable();
6084 	arch_spin_lock(&trace_cmdline_lock);
6085 	savedcmd_temp = savedcmd;
6086 	savedcmd = s;
6087 	arch_spin_unlock(&trace_cmdline_lock);
6088 	preempt_enable();
6089 	free_saved_cmdlines_buffer(savedcmd_temp);
6090 
6091 	return 0;
6092 }
6093 
6094 static ssize_t
6095 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6096 				  size_t cnt, loff_t *ppos)
6097 {
6098 	unsigned long val;
6099 	int ret;
6100 
6101 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6102 	if (ret)
6103 		return ret;
6104 
6105 	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
6106 	if (!val || val > PID_MAX_DEFAULT)
6107 		return -EINVAL;
6108 
6109 	ret = tracing_resize_saved_cmdlines((unsigned int)val);
6110 	if (ret < 0)
6111 		return ret;
6112 
6113 	*ppos += cnt;
6114 
6115 	return cnt;
6116 }
6117 
6118 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6119 	.open		= tracing_open_generic,
6120 	.read		= tracing_saved_cmdlines_size_read,
6121 	.write		= tracing_saved_cmdlines_size_write,
6122 };
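
/*
 * Illustrative usage: enlarge the saved comm/pid cache so that more task
 * names survive in the trace output (bounded by PID_MAX_DEFAULT, as checked
 * in the write handler above):
 *
 *   # echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *   # cat /sys/kernel/tracing/saved_cmdlines_size
 *   1024
 */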
6123 
6124 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6125 static union trace_eval_map_item *
6126 update_eval_map(union trace_eval_map_item *ptr)
6127 {
6128 	if (!ptr->map.eval_string) {
6129 		if (ptr->tail.next) {
6130 			ptr = ptr->tail.next;
6131 			/* Set ptr to the next real item (skip head) */
6132 			ptr++;
6133 		} else
6134 			return NULL;
6135 	}
6136 	return ptr;
6137 }
6138 
6139 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6140 {
6141 	union trace_eval_map_item *ptr = v;
6142 
6143 	/*
6144 	 * Paranoid! If ptr points to end, we don't want to increment past it.
6145 	 * This really should never happen.
6146 	 */
6147 	(*pos)++;
6148 	ptr = update_eval_map(ptr);
6149 	if (WARN_ON_ONCE(!ptr))
6150 		return NULL;
6151 
6152 	ptr++;
6153 	ptr = update_eval_map(ptr);
6154 
6155 	return ptr;
6156 }
6157 
6158 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6159 {
6160 	union trace_eval_map_item *v;
6161 	loff_t l = 0;
6162 
6163 	mutex_lock(&trace_eval_mutex);
6164 
6165 	v = trace_eval_maps;
6166 	if (v)
6167 		v++;
6168 
6169 	while (v && l < *pos) {
6170 		v = eval_map_next(m, v, &l);
6171 	}
6172 
6173 	return v;
6174 }
6175 
6176 static void eval_map_stop(struct seq_file *m, void *v)
6177 {
6178 	mutex_unlock(&trace_eval_mutex);
6179 }
6180 
6181 static int eval_map_show(struct seq_file *m, void *v)
6182 {
6183 	union trace_eval_map_item *ptr = v;
6184 
6185 	seq_printf(m, "%s %ld (%s)\n",
6186 		   ptr->map.eval_string, ptr->map.eval_value,
6187 		   ptr->map.system);
6188 
6189 	return 0;
6190 }
6191 
6192 static const struct seq_operations tracing_eval_map_seq_ops = {
6193 	.start		= eval_map_start,
6194 	.next		= eval_map_next,
6195 	.stop		= eval_map_stop,
6196 	.show		= eval_map_show,
6197 };
6198 
6199 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6200 {
6201 	int ret;
6202 
6203 	ret = tracing_check_open_get_tr(NULL);
6204 	if (ret)
6205 		return ret;
6206 
6207 	return seq_open(filp, &tracing_eval_map_seq_ops);
6208 }
6209 
6210 static const struct file_operations tracing_eval_map_fops = {
6211 	.open		= tracing_eval_map_open,
6212 	.read		= seq_read,
6213 	.llseek		= seq_lseek,
6214 	.release	= seq_release,
6215 };
6216 
6217 static inline union trace_eval_map_item *
6218 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6219 {
6220 	/* Return tail of array given the head */
6221 	return ptr + ptr->head.length + 1;
6222 }
6223 
6224 static void
6225 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6226 			   int len)
6227 {
6228 	struct trace_eval_map **stop;
6229 	struct trace_eval_map **map;
6230 	union trace_eval_map_item *map_array;
6231 	union trace_eval_map_item *ptr;
6232 
6233 	stop = start + len;
6234 
6235 	/*
6236 	 * The trace_eval_maps contains the map plus a head and tail item,
6237 	 * where the head holds the module and length of array, and the
6238 	 * tail holds a pointer to the next list.
6239 	 */
6240 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6241 	if (!map_array) {
6242 		pr_warn("Unable to allocate trace eval mapping\n");
6243 		return;
6244 	}
6245 
6246 	mutex_lock(&trace_eval_mutex);
6247 
6248 	if (!trace_eval_maps)
6249 		trace_eval_maps = map_array;
6250 	else {
6251 		ptr = trace_eval_maps;
6252 		for (;;) {
6253 			ptr = trace_eval_jmp_to_tail(ptr);
6254 			if (!ptr->tail.next)
6255 				break;
6256 			ptr = ptr->tail.next;
6257 
6258 		}
6259 		ptr->tail.next = map_array;
6260 	}
6261 	map_array->head.mod = mod;
6262 	map_array->head.length = len;
6263 	map_array++;
6264 
6265 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6266 		map_array->map = **map;
6267 		map_array++;
6268 	}
6269 	memset(map_array, 0, sizeof(*map_array));
6270 
6271 	mutex_unlock(&trace_eval_mutex);
6272 }
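
/*
 * Resulting layout of one map_array allocation (descriptive sketch):
 *
 *   [ head: mod, length = len ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * The head records the owning module and the number of map entries, the
 * zeroed last element acts as the tail whose ->tail.next links to the next
 * allocation, and trace_eval_jmp_to_tail() jumps from a head straight to
 * its tail.
 */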
6273 
6274 static void trace_create_eval_file(struct dentry *d_tracer)
6275 {
6276 	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6277 			  NULL, &tracing_eval_map_fops);
6278 }
6279 
6280 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6281 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6282 static inline void trace_insert_eval_map_file(struct module *mod,
6283 			      struct trace_eval_map **start, int len) { }
6284 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6285 
6286 static void trace_insert_eval_map(struct module *mod,
6287 				  struct trace_eval_map **start, int len)
6288 {
6289 	struct trace_eval_map **map;
6290 
6291 	if (len <= 0)
6292 		return;
6293 
6294 	map = start;
6295 
6296 	trace_event_eval_update(map, len);
6297 
6298 	trace_insert_eval_map_file(mod, start, len);
6299 }
6300 
6301 static ssize_t
6302 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6303 		       size_t cnt, loff_t *ppos)
6304 {
6305 	struct trace_array *tr = filp->private_data;
6306 	char buf[MAX_TRACER_SIZE+2];
6307 	int r;
6308 
6309 	mutex_lock(&trace_types_lock);
6310 	r = sprintf(buf, "%s\n", tr->current_trace->name);
6311 	mutex_unlock(&trace_types_lock);
6312 
6313 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6314 }
6315 
6316 int tracer_init(struct tracer *t, struct trace_array *tr)
6317 {
6318 	tracing_reset_online_cpus(&tr->array_buffer);
6319 	return t->init(tr);
6320 }
6321 
6322 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6323 {
6324 	int cpu;
6325 
6326 	for_each_tracing_cpu(cpu)
6327 		per_cpu_ptr(buf->data, cpu)->entries = val;
6328 }
6329 
6330 static void update_buffer_entries(struct array_buffer *buf, int cpu)
6331 {
6332 	if (cpu == RING_BUFFER_ALL_CPUS) {
6333 		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6334 	} else {
6335 		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6336 	}
6337 }
6338 
6339 #ifdef CONFIG_TRACER_MAX_TRACE
6340 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6341 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6342 					struct array_buffer *size_buf, int cpu_id)
6343 {
6344 	int cpu, ret = 0;
6345 
6346 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
6347 		for_each_tracing_cpu(cpu) {
6348 			ret = ring_buffer_resize(trace_buf->buffer,
6349 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6350 			if (ret < 0)
6351 				break;
6352 			per_cpu_ptr(trace_buf->data, cpu)->entries =
6353 				per_cpu_ptr(size_buf->data, cpu)->entries;
6354 		}
6355 	} else {
6356 		ret = ring_buffer_resize(trace_buf->buffer,
6357 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6358 		if (ret == 0)
6359 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6360 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
6361 	}
6362 
6363 	return ret;
6364 }
6365 #endif /* CONFIG_TRACER_MAX_TRACE */
6366 
6367 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6368 					unsigned long size, int cpu)
6369 {
6370 	int ret;
6371 
6372 	/*
6373 	 * If kernel or user changes the size of the ring buffer
6374 	 * we use the size that was given, and we can forget about
6375 	 * expanding it later.
6376 	 */
6377 	ring_buffer_expanded = true;
6378 
6379 	/* May be called before buffers are initialized */
6380 	if (!tr->array_buffer.buffer)
6381 		return 0;
6382 
6383 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6384 	if (ret < 0)
6385 		return ret;
6386 
6387 #ifdef CONFIG_TRACER_MAX_TRACE
6388 	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6389 	    !tr->current_trace->use_max_tr)
6390 		goto out;
6391 
6392 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6393 	if (ret < 0) {
6394 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
6395 						     &tr->array_buffer, cpu);
6396 		if (r < 0) {
6397 			/*
6398 			 * AARGH! We are left with different
6399 			 * size max buffer!!!!
6400 			 * The max buffer is our "snapshot" buffer.
6401 			 * When a tracer needs a snapshot (one of the
6402 			 * latency tracers), it swaps the max buffer
6403 			 * with the saved snap shot. We succeeded to
6404 			 * update the size of the main buffer, but failed to
6405 			 * update the size of the max buffer. But when we tried
6406 			 * to reset the main buffer to the original size, we
6407 			 * failed there too. This is very unlikely to
6408 			 * happen, but if it does, warn and kill all
6409 			 * tracing.
6410 			 */
6411 			WARN_ON(1);
6412 			tracing_disabled = 1;
6413 		}
6414 		return ret;
6415 	}
6416 
6417 	update_buffer_entries(&tr->max_buffer, cpu);
6418 
6419  out:
6420 #endif /* CONFIG_TRACER_MAX_TRACE */
6421 
6422 	update_buffer_entries(&tr->array_buffer, cpu);
6423 
6424 	return ret;
6425 }
6426 
6427 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6428 				  unsigned long size, int cpu_id)
6429 {
6430 	int ret;
6431 
6432 	mutex_lock(&trace_types_lock);
6433 
6434 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
6435 		/* make sure, this cpu is enabled in the mask */
6436 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6437 			ret = -EINVAL;
6438 			goto out;
6439 		}
6440 	}
6441 
6442 	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6443 	if (ret < 0)
6444 		ret = -ENOMEM;
6445 
6446 out:
6447 	mutex_unlock(&trace_types_lock);
6448 
6449 	return ret;
6450 }
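
/*
 * Illustrative tracefs usage of the resize path above: writing to
 * buffer_size_kb resizes either every per-CPU buffer or a single CPU's
 * buffer, e.g.:
 *
 *   # echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *   # echo 4096 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb
 */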
6451 
6452 
6453 /**
6454  * tracing_update_buffers - used by tracing facility to expand ring buffers
6455  *
6456  * To save memory when tracing is never used on a system that has it
6457  * configured in, the ring buffers are set to a minimum size. But once
6458  * a user starts to use the tracing facility, they need to grow
6459  * to their default size.
6460  *
6461  * This function is to be called when a tracer is about to be used.
6462  */
6463 int tracing_update_buffers(void)
6464 {
6465 	int ret = 0;
6466 
6467 	mutex_lock(&trace_types_lock);
6468 	if (!ring_buffer_expanded)
6469 		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6470 						RING_BUFFER_ALL_CPUS);
6471 	mutex_unlock(&trace_types_lock);
6472 
6473 	return ret;
6474 }
6475 
6476 struct trace_option_dentry;
6477 
6478 static void
6479 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6480 
6481 /*
6482  * Used to clear out the tracer before deletion of an instance.
6483  * Must have trace_types_lock held.
6484  */
6485 static void tracing_set_nop(struct trace_array *tr)
6486 {
6487 	if (tr->current_trace == &nop_trace)
6488 		return;
6489 
6490 	tr->current_trace->enabled--;
6491 
6492 	if (tr->current_trace->reset)
6493 		tr->current_trace->reset(tr);
6494 
6495 	tr->current_trace = &nop_trace;
6496 }
6497 
6498 static bool tracer_options_updated;
6499 
6500 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6501 {
6502 	/* Only enable if the directory has been created already. */
6503 	if (!tr->dir)
6504 		return;
6505 
6506 	/* Only create trace option files after update_tracer_options finish */
6507 	if (!tracer_options_updated)
6508 		return;
6509 
6510 	create_trace_option_files(tr, t);
6511 }
6512 
6513 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6514 {
6515 	struct tracer *t;
6516 #ifdef CONFIG_TRACER_MAX_TRACE
6517 	bool had_max_tr;
6518 #endif
6519 	int ret = 0;
6520 
6521 	mutex_lock(&trace_types_lock);
6522 
6523 	if (!ring_buffer_expanded) {
6524 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6525 						RING_BUFFER_ALL_CPUS);
6526 		if (ret < 0)
6527 			goto out;
6528 		ret = 0;
6529 	}
6530 
6531 	for (t = trace_types; t; t = t->next) {
6532 		if (strcmp(t->name, buf) == 0)
6533 			break;
6534 	}
6535 	if (!t) {
6536 		ret = -EINVAL;
6537 		goto out;
6538 	}
6539 	if (t == tr->current_trace)
6540 		goto out;
6541 
6542 #ifdef CONFIG_TRACER_SNAPSHOT
6543 	if (t->use_max_tr) {
6544 		local_irq_disable();
6545 		arch_spin_lock(&tr->max_lock);
6546 		if (tr->cond_snapshot)
6547 			ret = -EBUSY;
6548 		arch_spin_unlock(&tr->max_lock);
6549 		local_irq_enable();
6550 		if (ret)
6551 			goto out;
6552 	}
6553 #endif
6554 	/* Some tracers won't work on kernel command line */
6555 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6556 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6557 			t->name);
6558 		goto out;
6559 	}
6560 
6561 	/* Some tracers are only allowed for the top level buffer */
6562 	if (!trace_ok_for_array(t, tr)) {
6563 		ret = -EINVAL;
6564 		goto out;
6565 	}
6566 
6567 	/* If trace pipe files are being read, we can't change the tracer */
6568 	if (tr->trace_ref) {
6569 		ret = -EBUSY;
6570 		goto out;
6571 	}
6572 
6573 	trace_branch_disable();
6574 
6575 	tr->current_trace->enabled--;
6576 
6577 	if (tr->current_trace->reset)
6578 		tr->current_trace->reset(tr);
6579 
6580 #ifdef CONFIG_TRACER_MAX_TRACE
6581 	had_max_tr = tr->current_trace->use_max_tr;
6582 
6583 	/* Current trace needs to be nop_trace before synchronize_rcu */
6584 	tr->current_trace = &nop_trace;
6585 
6586 	if (had_max_tr && !t->use_max_tr) {
6587 		/*
6588 		 * We need to make sure that the update_max_tr sees that
6589 		 * current_trace changed to nop_trace to keep it from
6590 		 * swapping the buffers after we resize it.
6591 		 * The update_max_tr is called with interrupts disabled,
6592 		 * so a synchronize_rcu() is sufficient.
6593 		 */
6594 		synchronize_rcu();
6595 		free_snapshot(tr);
6596 	}
6597 
6598 	if (t->use_max_tr && !tr->allocated_snapshot) {
6599 		ret = tracing_alloc_snapshot_instance(tr);
6600 		if (ret < 0)
6601 			goto out;
6602 	}
6603 #else
6604 	tr->current_trace = &nop_trace;
6605 #endif
6606 
6607 	if (t->init) {
6608 		ret = tracer_init(t, tr);
6609 		if (ret)
6610 			goto out;
6611 	}
6612 
6613 	tr->current_trace = t;
6614 	tr->current_trace->enabled++;
6615 	trace_branch_enable(tr);
6616  out:
6617 	mutex_unlock(&trace_types_lock);
6618 
6619 	return ret;
6620 }
6621 
6622 static ssize_t
6623 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6624 			size_t cnt, loff_t *ppos)
6625 {
6626 	struct trace_array *tr = filp->private_data;
6627 	char buf[MAX_TRACER_SIZE+1];
6628 	char *name;
6629 	size_t ret;
6630 	int err;
6631 
6632 	ret = cnt;
6633 
6634 	if (cnt > MAX_TRACER_SIZE)
6635 		cnt = MAX_TRACER_SIZE;
6636 
6637 	if (copy_from_user(buf, ubuf, cnt))
6638 		return -EFAULT;
6639 
6640 	buf[cnt] = 0;
6641 
6642 	name = strim(buf);
6643 
6644 	err = tracing_set_tracer(tr, name);
6645 	if (err)
6646 		return err;
6647 
6648 	*ppos += ret;
6649 
6650 	return ret;
6651 }
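
/*
 * Illustrative usage of the current_tracer file handled above: any name
 * listed in available_tracers may be written, e.g.:
 *
 *   # cat /sys/kernel/tracing/available_tracers
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # echo nop > /sys/kernel/tracing/current_tracer
 */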
6652 
6653 static ssize_t
6654 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6655 		   size_t cnt, loff_t *ppos)
6656 {
6657 	char buf[64];
6658 	int r;
6659 
6660 	r = snprintf(buf, sizeof(buf), "%ld\n",
6661 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6662 	if (r > sizeof(buf))
6663 		r = sizeof(buf);
6664 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6665 }
6666 
6667 static ssize_t
6668 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6669 		    size_t cnt, loff_t *ppos)
6670 {
6671 	unsigned long val;
6672 	int ret;
6673 
6674 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6675 	if (ret)
6676 		return ret;
6677 
6678 	*ptr = val * 1000;
6679 
6680 	return cnt;
6681 }
6682 
6683 static ssize_t
6684 tracing_thresh_read(struct file *filp, char __user *ubuf,
6685 		    size_t cnt, loff_t *ppos)
6686 {
6687 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6688 }
6689 
6690 static ssize_t
6691 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6692 		     size_t cnt, loff_t *ppos)
6693 {
6694 	struct trace_array *tr = filp->private_data;
6695 	int ret;
6696 
6697 	mutex_lock(&trace_types_lock);
6698 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6699 	if (ret < 0)
6700 		goto out;
6701 
6702 	if (tr->current_trace->update_thresh) {
6703 		ret = tr->current_trace->update_thresh(tr);
6704 		if (ret < 0)
6705 			goto out;
6706 	}
6707 
6708 	ret = cnt;
6709 out:
6710 	mutex_unlock(&trace_types_lock);
6711 
6712 	return ret;
6713 }
6714 
6715 #ifdef CONFIG_TRACER_MAX_TRACE
6716 
6717 static ssize_t
6718 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6719 		     size_t cnt, loff_t *ppos)
6720 {
6721 	struct trace_array *tr = filp->private_data;
6722 
6723 	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6724 }
6725 
6726 static ssize_t
6727 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6728 		      size_t cnt, loff_t *ppos)
6729 {
6730 	struct trace_array *tr = filp->private_data;
6731 
6732 	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6733 }
6734 
6735 #endif
6736 
6737 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6738 {
6739 	if (cpu == RING_BUFFER_ALL_CPUS) {
6740 		if (cpumask_empty(tr->pipe_cpumask)) {
6741 			cpumask_setall(tr->pipe_cpumask);
6742 			return 0;
6743 		}
6744 	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6745 		cpumask_set_cpu(cpu, tr->pipe_cpumask);
6746 		return 0;
6747 	}
6748 	return -EBUSY;
6749 }
6750 
6751 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6752 {
6753 	if (cpu == RING_BUFFER_ALL_CPUS) {
6754 		WARN_ON(!cpumask_full(tr->pipe_cpumask));
6755 		cpumask_clear(tr->pipe_cpumask);
6756 	} else {
6757 		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6758 		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6759 	}
6760 }
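
/*
 * Descriptive note: the pipe_cpumask bookkeeping above makes trace_pipe
 * readers mutually exclusive: either one reader opens the global trace_pipe
 * (claiming every CPU), or individual readers open the per-CPU trace_pipe
 * files, but not both at once; conflicting opens fail with -EBUSY.
 */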
6761 
6762 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6763 {
6764 	struct trace_array *tr = inode->i_private;
6765 	struct trace_iterator *iter;
6766 	int cpu;
6767 	int ret;
6768 
6769 	ret = tracing_check_open_get_tr(tr);
6770 	if (ret)
6771 		return ret;
6772 
6773 	mutex_lock(&trace_types_lock);
6774 	cpu = tracing_get_cpu(inode);
6775 	ret = open_pipe_on_cpu(tr, cpu);
6776 	if (ret)
6777 		goto fail_pipe_on_cpu;
6778 
6779 	/* create a buffer to store the information to pass to userspace */
6780 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6781 	if (!iter) {
6782 		ret = -ENOMEM;
6783 		goto fail_alloc_iter;
6784 	}
6785 
6786 	trace_seq_init(&iter->seq);
6787 	iter->trace = tr->current_trace;
6788 
6789 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6790 		ret = -ENOMEM;
6791 		goto fail;
6792 	}
6793 
6794 	/* trace pipe does not show start of buffer */
6795 	cpumask_setall(iter->started);
6796 
6797 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6798 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6799 
6800 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6801 	if (trace_clocks[tr->clock_id].in_ns)
6802 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6803 
6804 	iter->tr = tr;
6805 	iter->array_buffer = &tr->array_buffer;
6806 	iter->cpu_file = cpu;
6807 	mutex_init(&iter->mutex);
6808 	filp->private_data = iter;
6809 
6810 	if (iter->trace->pipe_open)
6811 		iter->trace->pipe_open(iter);
6812 
6813 	nonseekable_open(inode, filp);
6814 
6815 	tr->trace_ref++;
6816 
6817 	mutex_unlock(&trace_types_lock);
6818 	return ret;
6819 
6820 fail:
6821 	kfree(iter);
6822 fail_alloc_iter:
6823 	close_pipe_on_cpu(tr, cpu);
6824 fail_pipe_on_cpu:
6825 	__trace_array_put(tr);
6826 	mutex_unlock(&trace_types_lock);
6827 	return ret;
6828 }
6829 
6830 static int tracing_release_pipe(struct inode *inode, struct file *file)
6831 {
6832 	struct trace_iterator *iter = file->private_data;
6833 	struct trace_array *tr = inode->i_private;
6834 
6835 	mutex_lock(&trace_types_lock);
6836 
6837 	tr->trace_ref--;
6838 
6839 	if (iter->trace->pipe_close)
6840 		iter->trace->pipe_close(iter);
6841 	close_pipe_on_cpu(tr, iter->cpu_file);
6842 	mutex_unlock(&trace_types_lock);
6843 
6844 	free_trace_iter_content(iter);
6845 	kfree(iter);
6846 
6847 	trace_array_put(tr);
6848 
6849 	return 0;
6850 }
6851 
6852 static __poll_t
6853 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6854 {
6855 	struct trace_array *tr = iter->tr;
6856 
6857 	/* Iterators are static, they should be filled or empty */
6858 	if (trace_buffer_iter(iter, iter->cpu_file))
6859 		return EPOLLIN | EPOLLRDNORM;
6860 
6861 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6862 		/*
6863 		 * Always select as readable when in blocking mode
6864 		 */
6865 		return EPOLLIN | EPOLLRDNORM;
6866 	else
6867 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6868 					     filp, poll_table, iter->tr->buffer_percent);
6869 }
6870 
6871 static __poll_t
6872 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6873 {
6874 	struct trace_iterator *iter = filp->private_data;
6875 
6876 	return trace_poll(iter, filp, poll_table);
6877 }
6878 
6879 /* Must be called with iter->mutex held. */
6880 static int tracing_wait_pipe(struct file *filp)
6881 {
6882 	struct trace_iterator *iter = filp->private_data;
6883 	int ret;
6884 
6885 	while (trace_empty(iter)) {
6886 
6887 		if ((filp->f_flags & O_NONBLOCK)) {
6888 			return -EAGAIN;
6889 		}
6890 
6891 		/*
6892 		 * We only return EOF after we have read something and tracing
6893 		 * has been disabled. If tracing is disabled but we have never
6894 		 * read anything, we keep blocking; this allows a user to cat
6895 		 * this file and then enable tracing. But after we have read
6896 		 * something, we give an EOF when tracing is disabled again.
6897 		 *
6898 		 * iter->pos will be 0 if we haven't read anything.
6899 		 */
6900 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6901 			break;
6902 
6903 		mutex_unlock(&iter->mutex);
6904 
6905 		ret = wait_on_pipe(iter, 0);
6906 
6907 		mutex_lock(&iter->mutex);
6908 
6909 		if (ret)
6910 			return ret;
6911 	}
6912 
6913 	return 1;
6914 }
6915 
6916 /*
6917  * Consumer reader.
6918  */
6919 static ssize_t
6920 tracing_read_pipe(struct file *filp, char __user *ubuf,
6921 		  size_t cnt, loff_t *ppos)
6922 {
6923 	struct trace_iterator *iter = filp->private_data;
6924 	ssize_t sret;
6925 
6926 	/*
6927 	 * Avoid more than one consumer on a single file descriptor.
6928 	 * This is just a matter of trace coherency; the ring buffer
6929 	 * itself is protected.
6930 	 */
6931 	mutex_lock(&iter->mutex);
6932 
6933 	/* return any leftover data */
6934 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6935 	if (sret != -EBUSY)
6936 		goto out;
6937 
6938 	trace_seq_init(&iter->seq);
6939 
6940 	if (iter->trace->read) {
6941 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6942 		if (sret)
6943 			goto out;
6944 	}
6945 
6946 waitagain:
6947 	sret = tracing_wait_pipe(filp);
6948 	if (sret <= 0)
6949 		goto out;
6950 
6951 	/* stop when tracing is finished */
6952 	if (trace_empty(iter)) {
6953 		sret = 0;
6954 		goto out;
6955 	}
6956 
6957 	if (cnt >= PAGE_SIZE)
6958 		cnt = PAGE_SIZE - 1;
6959 
6960 	/* reset all but tr, trace, and overruns */
6961 	trace_iterator_reset(iter);
6962 	cpumask_clear(iter->started);
6963 	trace_seq_init(&iter->seq);
6964 
6965 	trace_event_read_lock();
6966 	trace_access_lock(iter->cpu_file);
6967 	while (trace_find_next_entry_inc(iter) != NULL) {
6968 		enum print_line_t ret;
6969 		int save_len = iter->seq.seq.len;
6970 
6971 		ret = print_trace_line(iter);
6972 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6973 			/*
6974 			 * If one print_trace_line() fills the entire trace_seq in one shot,
6975 			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6976 			 * In this case, we need to consume it; otherwise the loop will peek
6977 			 * at this event again next time, resulting in an infinite loop.
6978 			 */
6979 			if (save_len == 0) {
6980 				iter->seq.full = 0;
6981 				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6982 				trace_consume(iter);
6983 				break;
6984 			}
6985 
6986 			/* In other cases, don't print partial lines */
6987 			iter->seq.seq.len = save_len;
6988 			break;
6989 		}
6990 		if (ret != TRACE_TYPE_NO_CONSUME)
6991 			trace_consume(iter);
6992 
6993 		if (trace_seq_used(&iter->seq) >= cnt)
6994 			break;
6995 
6996 		/*
6997 		 * Setting the full flag means we reached the trace_seq buffer
6998 		 * size and should have left via the partial-output condition
6999 		 * above; one of the trace_seq_* functions is not used properly.
7000 		 */
7001 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
7002 			  iter->ent->type);
7003 	}
7004 	trace_access_unlock(iter->cpu_file);
7005 	trace_event_read_unlock();
7006 
7007 	/* Now copy what we have to the user */
7008 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
7009 	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
7010 		trace_seq_init(&iter->seq);
7011 
7012 	/*
7013 	 * If there was nothing to send to user, in spite of consuming trace
7014 	 * entries, go back to wait for more entries.
7015 	 */
7016 	if (sret == -EBUSY)
7017 		goto waitagain;
7018 
7019 out:
7020 	mutex_unlock(&iter->mutex);
7021 
7022 	return sret;
7023 }
7024 
7025 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
7026 				     unsigned int idx)
7027 {
7028 	__free_page(spd->pages[idx]);
7029 }
7030 
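/*
 * Format consumed trace entries into iter->seq for one pipe page.
 * @rem is the number of bytes the caller still wants; the return value
 * is the number of bytes remaining after this page has been filled
 * (0 when the request or the buffer has been exhausted).
 */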
7031 static size_t
7032 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
7033 {
7034 	size_t count;
7035 	int save_len;
7036 	int ret;
7037 
7038 	/* Seq buffer is page-sized, exactly what we need. */
7039 	for (;;) {
7040 		save_len = iter->seq.seq.len;
7041 		ret = print_trace_line(iter);
7042 
7043 		if (trace_seq_has_overflowed(&iter->seq)) {
7044 			iter->seq.seq.len = save_len;
7045 			break;
7046 		}
7047 
7048 		/*
7049 		 * This should not be hit, because it should only
7050 		 * be set if the iter->seq overflowed. But check it
7051 		 * anyway to be safe.
7052 		 */
7053 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
7054 			iter->seq.seq.len = save_len;
7055 			break;
7056 		}
7057 
7058 		count = trace_seq_used(&iter->seq) - save_len;
7059 		if (rem < count) {
7060 			rem = 0;
7061 			iter->seq.seq.len = save_len;
7062 			break;
7063 		}
7064 
7065 		if (ret != TRACE_TYPE_NO_CONSUME)
7066 			trace_consume(iter);
7067 		rem -= count;
7068 		if (!trace_find_next_entry_inc(iter))	{
7069 			rem = 0;
7070 			iter->ent = NULL;
7071 			break;
7072 		}
7073 	}
7074 
7075 	return rem;
7076 }
7077 
7078 static ssize_t tracing_splice_read_pipe(struct file *filp,
7079 					loff_t *ppos,
7080 					struct pipe_inode_info *pipe,
7081 					size_t len,
7082 					unsigned int flags)
7083 {
7084 	struct page *pages_def[PIPE_DEF_BUFFERS];
7085 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
7086 	struct trace_iterator *iter = filp->private_data;
7087 	struct splice_pipe_desc spd = {
7088 		.pages		= pages_def,
7089 		.partial	= partial_def,
7090 		.nr_pages	= 0, /* This gets updated below. */
7091 		.nr_pages_max	= PIPE_DEF_BUFFERS,
7092 		.ops		= &default_pipe_buf_ops,
7093 		.spd_release	= tracing_spd_release_pipe,
7094 	};
7095 	ssize_t ret;
7096 	size_t rem;
7097 	unsigned int i;
7098 
7099 	if (splice_grow_spd(pipe, &spd))
7100 		return -ENOMEM;
7101 
7102 	mutex_lock(&iter->mutex);
7103 
7104 	if (iter->trace->splice_read) {
7105 		ret = iter->trace->splice_read(iter, filp,
7106 					       ppos, pipe, len, flags);
7107 		if (ret)
7108 			goto out_err;
7109 	}
7110 
7111 	ret = tracing_wait_pipe(filp);
7112 	if (ret <= 0)
7113 		goto out_err;
7114 
7115 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7116 		ret = -EFAULT;
7117 		goto out_err;
7118 	}
7119 
7120 	trace_event_read_lock();
7121 	trace_access_lock(iter->cpu_file);
7122 
7123 	/* Fill as many pages as possible. */
7124 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7125 		spd.pages[i] = alloc_page(GFP_KERNEL);
7126 		if (!spd.pages[i])
7127 			break;
7128 
7129 		rem = tracing_fill_pipe_page(rem, iter);
7130 
7131 		/* Copy the data into the page, so we can start over. */
7132 		ret = trace_seq_to_buffer(&iter->seq,
7133 					  page_address(spd.pages[i]),
7134 					  trace_seq_used(&iter->seq));
7135 		if (ret < 0) {
7136 			__free_page(spd.pages[i]);
7137 			break;
7138 		}
7139 		spd.partial[i].offset = 0;
7140 		spd.partial[i].len = trace_seq_used(&iter->seq);
7141 
7142 		trace_seq_init(&iter->seq);
7143 	}
7144 
7145 	trace_access_unlock(iter->cpu_file);
7146 	trace_event_read_unlock();
7147 	mutex_unlock(&iter->mutex);
7148 
7149 	spd.nr_pages = i;
7150 
7151 	if (i)
7152 		ret = splice_to_pipe(pipe, &spd);
7153 	else
7154 		ret = 0;
7155 out:
7156 	splice_shrink_spd(&spd);
7157 	return ret;
7158 
7159 out_err:
7160 	mutex_unlock(&iter->mutex);
7161 	goto out;
7162 }
7163 
7164 static ssize_t
7165 tracing_entries_read(struct file *filp, char __user *ubuf,
7166 		     size_t cnt, loff_t *ppos)
7167 {
7168 	struct inode *inode = file_inode(filp);
7169 	struct trace_array *tr = inode->i_private;
7170 	int cpu = tracing_get_cpu(inode);
7171 	char buf[64];
7172 	int r = 0;
7173 	ssize_t ret;
7174 
7175 	mutex_lock(&trace_types_lock);
7176 
7177 	if (cpu == RING_BUFFER_ALL_CPUS) {
7178 		int cpu, buf_size_same;
7179 		unsigned long size;
7180 
7181 		size = 0;
7182 		buf_size_same = 1;
7183 		/* check if all per-CPU buffer sizes are the same */
7184 		for_each_tracing_cpu(cpu) {
7185 			/* fill in the size from first enabled cpu */
7186 			if (size == 0)
7187 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7188 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7189 				buf_size_same = 0;
7190 				break;
7191 			}
7192 		}
7193 
7194 		if (buf_size_same) {
7195 			if (!ring_buffer_expanded)
7196 				r = sprintf(buf, "%lu (expanded: %lu)\n",
7197 					    size >> 10,
7198 					    trace_buf_size >> 10);
7199 			else
7200 				r = sprintf(buf, "%lu\n", size >> 10);
7201 		} else
7202 			r = sprintf(buf, "X\n");
7203 	} else
7204 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7205 
7206 	mutex_unlock(&trace_types_lock);
7207 
7208 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7209 	return ret;
7210 }
7211 
7212 static ssize_t
7213 tracing_entries_write(struct file *filp, const char __user *ubuf,
7214 		      size_t cnt, loff_t *ppos)
7215 {
7216 	struct inode *inode = file_inode(filp);
7217 	struct trace_array *tr = inode->i_private;
7218 	unsigned long val;
7219 	int ret;
7220 
7221 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7222 	if (ret)
7223 		return ret;
7224 
7225 	/* must have at least 1 entry */
7226 	if (!val)
7227 		return -EINVAL;
7228 
7229 	/* value is in KB */
7230 	val <<= 10;
7231 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7232 	if (ret < 0)
7233 		return ret;
7234 
7235 	*ppos += cnt;
7236 
7237 	return cnt;
7238 }
7239 
7240 static ssize_t
7241 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7242 				size_t cnt, loff_t *ppos)
7243 {
7244 	struct trace_array *tr = filp->private_data;
7245 	char buf[64];
7246 	int r, cpu;
7247 	unsigned long size = 0, expanded_size = 0;
7248 
7249 	mutex_lock(&trace_types_lock);
7250 	for_each_tracing_cpu(cpu) {
7251 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7252 		if (!ring_buffer_expanded)
7253 			expanded_size += trace_buf_size >> 10;
7254 	}
7255 	if (ring_buffer_expanded)
7256 		r = sprintf(buf, "%lu\n", size);
7257 	else
7258 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7259 	mutex_unlock(&trace_types_lock);
7260 
7261 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7262 }
7263 
7264 static ssize_t
7265 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7266 			  size_t cnt, loff_t *ppos)
7267 {
7268 	/*
7269 	 * There is no need to read what the user has written; this function
7270 	 * only exists so that using "echo" on this file does not return an error
7271 	 */
7272 
7273 	*ppos += cnt;
7274 
7275 	return cnt;
7276 }
7277 
7278 static int
7279 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7280 {
7281 	struct trace_array *tr = inode->i_private;
7282 
7283 	/* disable tracing ? */
7284 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7285 		tracer_tracing_off(tr);
7286 	/* resize the ring buffer to 0 */
7287 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7288 
7289 	trace_array_put(tr);
7290 
7291 	return 0;
7292 }
7293 
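/*
 * Write handler for the trace_marker file.  From userspace (assuming
 * tracefs is mounted at /sys/kernel/tracing), a marker can be injected
 * into the ring buffer as a TRACE_PRINT event with, for example:
 *
 *	echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 */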
7294 static ssize_t
7295 tracing_mark_write(struct file *filp, const char __user *ubuf,
7296 					size_t cnt, loff_t *fpos)
7297 {
7298 	struct trace_array *tr = filp->private_data;
7299 	struct ring_buffer_event *event;
7300 	enum event_trigger_type tt = ETT_NONE;
7301 	struct trace_buffer *buffer;
7302 	struct print_entry *entry;
7303 	ssize_t written;
7304 	int size;
7305 	int len;
7306 
7307 /* Used in tracing_mark_raw_write() as well */
7308 #define FAULTED_STR "<faulted>"
7309 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7310 
7311 	if (tracing_disabled)
7312 		return -EINVAL;
7313 
7314 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7315 		return -EINVAL;
7316 
7317 	if (cnt > TRACE_BUF_SIZE)
7318 		cnt = TRACE_BUF_SIZE;
7319 
7320 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7321 
7322 	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7323 
7324 	/* If less than "<faulted>", then make sure we can still add that */
7325 	if (cnt < FAULTED_SIZE)
7326 		size += FAULTED_SIZE - cnt;
7327 
7328 	buffer = tr->array_buffer.buffer;
7329 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7330 					    tracing_gen_ctx());
7331 	if (unlikely(!event))
7332 		/* Ring buffer disabled, return as if not open for write */
7333 		return -EBADF;
7334 
7335 	entry = ring_buffer_event_data(event);
7336 	entry->ip = _THIS_IP_;
7337 
7338 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7339 	if (len) {
7340 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7341 		cnt = FAULTED_SIZE;
7342 		written = -EFAULT;
7343 	} else
7344 		written = cnt;
7345 
7346 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7347 		/* do not add \n before testing triggers, but add \0 */
7348 		entry->buf[cnt] = '\0';
7349 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7350 	}
7351 
7352 	if (entry->buf[cnt - 1] != '\n') {
7353 		entry->buf[cnt] = '\n';
7354 		entry->buf[cnt + 1] = '\0';
7355 	} else
7356 		entry->buf[cnt] = '\0';
7357 
7358 	if (static_branch_unlikely(&trace_marker_exports_enabled))
7359 		ftrace_exports(event, TRACE_EXPORT_MARKER);
7360 	__buffer_unlock_commit(buffer, event);
7361 
7362 	if (tt)
7363 		event_triggers_post_call(tr->trace_marker_file, tt);
7364 
7365 	return written;
7366 }
7367 
7368 /* Limit it for now to 3K (including tag) */
7369 #define RAW_DATA_MAX_SIZE (1024*3)
7370 
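/*
 * Write handler for the trace_marker_raw file.  The user buffer must
 * start with a binary tag id (an int) followed by the raw payload,
 * which is recorded verbatim as a TRACE_RAW_DATA event.
 */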
7371 static ssize_t
7372 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7373 					size_t cnt, loff_t *fpos)
7374 {
7375 	struct trace_array *tr = filp->private_data;
7376 	struct ring_buffer_event *event;
7377 	struct trace_buffer *buffer;
7378 	struct raw_data_entry *entry;
7379 	ssize_t written;
7380 	int size;
7381 	int len;
7382 
7383 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7384 
7385 	if (tracing_disabled)
7386 		return -EINVAL;
7387 
7388 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7389 		return -EINVAL;
7390 
7391 	/* The marker must at least have a tag id */
7392 	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7393 		return -EINVAL;
7394 
7395 	if (cnt > TRACE_BUF_SIZE)
7396 		cnt = TRACE_BUF_SIZE;
7397 
7398 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7399 
7400 	size = sizeof(*entry) + cnt;
7401 	if (cnt < FAULT_SIZE_ID)
7402 		size += FAULT_SIZE_ID - cnt;
7403 
7404 	buffer = tr->array_buffer.buffer;
7405 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7406 					    tracing_gen_ctx());
7407 	if (!event)
7408 		/* Ring buffer disabled, return as if not open for write */
7409 		return -EBADF;
7410 
7411 	entry = ring_buffer_event_data(event);
7412 
7413 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7414 	if (len) {
7415 		entry->id = -1;
7416 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7417 		written = -EFAULT;
7418 	} else
7419 		written = cnt;
7420 
7421 	__buffer_unlock_commit(buffer, event);
7422 
7423 	return written;
7424 }
7425 
7426 static int tracing_clock_show(struct seq_file *m, void *v)
7427 {
7428 	struct trace_array *tr = m->private;
7429 	int i;
7430 
7431 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7432 		seq_printf(m,
7433 			"%s%s%s%s", i ? " " : "",
7434 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7435 			i == tr->clock_id ? "]" : "");
7436 	seq_putc(m, '\n');
7437 
7438 	return 0;
7439 }
7440 
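/*
 * Switch the trace clock used to timestamp events.  This is what a write
 * to the trace_clock file ends up calling; from userspace (assuming
 * tracefs is mounted at /sys/kernel/tracing), for example:
 *
 *	echo mono > /sys/kernel/tracing/trace_clock
 */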
7441 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7442 {
7443 	int i;
7444 
7445 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7446 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
7447 			break;
7448 	}
7449 	if (i == ARRAY_SIZE(trace_clocks))
7450 		return -EINVAL;
7451 
7452 	mutex_lock(&trace_types_lock);
7453 
7454 	tr->clock_id = i;
7455 
7456 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7457 
7458 	/*
7459 	 * New clock may not be consistent with the previous clock.
7460 	 * Reset the buffer so that it doesn't have incomparable timestamps.
7461 	 */
7462 	tracing_reset_online_cpus(&tr->array_buffer);
7463 
7464 #ifdef CONFIG_TRACER_MAX_TRACE
7465 	if (tr->max_buffer.buffer)
7466 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7467 	tracing_reset_online_cpus(&tr->max_buffer);
7468 #endif
7469 
7470 	mutex_unlock(&trace_types_lock);
7471 
7472 	return 0;
7473 }
7474 
7475 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7476 				   size_t cnt, loff_t *fpos)
7477 {
7478 	struct seq_file *m = filp->private_data;
7479 	struct trace_array *tr = m->private;
7480 	char buf[64];
7481 	const char *clockstr;
7482 	int ret;
7483 
7484 	if (cnt >= sizeof(buf))
7485 		return -EINVAL;
7486 
7487 	if (copy_from_user(buf, ubuf, cnt))
7488 		return -EFAULT;
7489 
7490 	buf[cnt] = 0;
7491 
7492 	clockstr = strstrip(buf);
7493 
7494 	ret = tracing_set_clock(tr, clockstr);
7495 	if (ret)
7496 		return ret;
7497 
7498 	*fpos += cnt;
7499 
7500 	return cnt;
7501 }
7502 
7503 static int tracing_clock_open(struct inode *inode, struct file *file)
7504 {
7505 	struct trace_array *tr = inode->i_private;
7506 	int ret;
7507 
7508 	ret = tracing_check_open_get_tr(tr);
7509 	if (ret)
7510 		return ret;
7511 
7512 	ret = single_open(file, tracing_clock_show, inode->i_private);
7513 	if (ret < 0)
7514 		trace_array_put(tr);
7515 
7516 	return ret;
7517 }
7518 
7519 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7520 {
7521 	struct trace_array *tr = m->private;
7522 
7523 	mutex_lock(&trace_types_lock);
7524 
7525 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7526 		seq_puts(m, "delta [absolute]\n");
7527 	else
7528 		seq_puts(m, "[delta] absolute\n");
7529 
7530 	mutex_unlock(&trace_types_lock);
7531 
7532 	return 0;
7533 }
7534 
7535 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7536 {
7537 	struct trace_array *tr = inode->i_private;
7538 	int ret;
7539 
7540 	ret = tracing_check_open_get_tr(tr);
7541 	if (ret)
7542 		return ret;
7543 
7544 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7545 	if (ret < 0)
7546 		trace_array_put(tr);
7547 
7548 	return ret;
7549 }
7550 
7551 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7552 {
7553 	if (rbe == this_cpu_read(trace_buffered_event))
7554 		return ring_buffer_time_stamp(buffer);
7555 
7556 	return ring_buffer_event_time_stamp(buffer, rbe);
7557 }
7558 
7559 /*
7560  * Set or disable use of the per-CPU trace_buffered_event when possible.
7561  */
7562 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7563 {
7564 	int ret = 0;
7565 
7566 	mutex_lock(&trace_types_lock);
7567 
7568 	if (set && tr->no_filter_buffering_ref++)
7569 		goto out;
7570 
7571 	if (!set) {
7572 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7573 			ret = -EINVAL;
7574 			goto out;
7575 		}
7576 
7577 		--tr->no_filter_buffering_ref;
7578 	}
7579  out:
7580 	mutex_unlock(&trace_types_lock);
7581 
7582 	return ret;
7583 }
7584 
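/*
 * Per-open state for the binary per-CPU buffer files: the iterator plus
 * a spare ring buffer page (and the CPU it came from) cached between reads.
 */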
7585 struct ftrace_buffer_info {
7586 	struct trace_iterator	iter;
7587 	void			*spare;
7588 	unsigned int		spare_cpu;
7589 	unsigned int		read;
7590 };
7591 
7592 #ifdef CONFIG_TRACER_SNAPSHOT
7593 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7594 {
7595 	struct trace_array *tr = inode->i_private;
7596 	struct trace_iterator *iter;
7597 	struct seq_file *m;
7598 	int ret;
7599 
7600 	ret = tracing_check_open_get_tr(tr);
7601 	if (ret)
7602 		return ret;
7603 
7604 	if (file->f_mode & FMODE_READ) {
7605 		iter = __tracing_open(inode, file, true);
7606 		if (IS_ERR(iter))
7607 			ret = PTR_ERR(iter);
7608 	} else {
7609 		/* Writes still need the seq_file to hold the private data */
7610 		ret = -ENOMEM;
7611 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7612 		if (!m)
7613 			goto out;
7614 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7615 		if (!iter) {
7616 			kfree(m);
7617 			goto out;
7618 		}
7619 		ret = 0;
7620 
7621 		iter->tr = tr;
7622 		iter->array_buffer = &tr->max_buffer;
7623 		iter->cpu_file = tracing_get_cpu(inode);
7624 		m->private = iter;
7625 		file->private_data = m;
7626 	}
7627 out:
7628 	if (ret < 0)
7629 		trace_array_put(tr);
7630 
7631 	return ret;
7632 }
7633 
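/* Run on the target CPU via smp_call_function_single() to snapshot its buffer. */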
7634 static void tracing_swap_cpu_buffer(void *tr)
7635 {
7636 	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7637 }
7638 
7639 static ssize_t
7640 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7641 		       loff_t *ppos)
7642 {
7643 	struct seq_file *m = filp->private_data;
7644 	struct trace_iterator *iter = m->private;
7645 	struct trace_array *tr = iter->tr;
7646 	unsigned long val;
7647 	int ret;
7648 
7649 	ret = tracing_update_buffers();
7650 	if (ret < 0)
7651 		return ret;
7652 
7653 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7654 	if (ret)
7655 		return ret;
7656 
7657 	mutex_lock(&trace_types_lock);
7658 
7659 	if (tr->current_trace->use_max_tr) {
7660 		ret = -EBUSY;
7661 		goto out;
7662 	}
7663 
7664 	local_irq_disable();
7665 	arch_spin_lock(&tr->max_lock);
7666 	if (tr->cond_snapshot)
7667 		ret = -EBUSY;
7668 	arch_spin_unlock(&tr->max_lock);
7669 	local_irq_enable();
7670 	if (ret)
7671 		goto out;
7672 
7673 	switch (val) {
7674 	case 0:
7675 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7676 			ret = -EINVAL;
7677 			break;
7678 		}
7679 		if (tr->allocated_snapshot)
7680 			free_snapshot(tr);
7681 		break;
7682 	case 1:
7683 /* Only allow per-cpu swap if the ring buffer supports it */
7684 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7685 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7686 			ret = -EINVAL;
7687 			break;
7688 		}
7689 #endif
7690 		if (tr->allocated_snapshot)
7691 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7692 					&tr->array_buffer, iter->cpu_file);
7693 		else
7694 			ret = tracing_alloc_snapshot_instance(tr);
7695 		if (ret < 0)
7696 			break;
7697 		/* Now, we're going to swap */
7698 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7699 			local_irq_disable();
7700 			update_max_tr(tr, current, smp_processor_id(), NULL);
7701 			local_irq_enable();
7702 		} else {
7703 			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7704 						 (void *)tr, 1);
7705 		}
7706 		break;
7707 	default:
7708 		if (tr->allocated_snapshot) {
7709 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7710 				tracing_reset_online_cpus(&tr->max_buffer);
7711 			else
7712 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7713 		}
7714 		break;
7715 	}
7716 
7717 	if (ret >= 0) {
7718 		*ppos += cnt;
7719 		ret = cnt;
7720 	}
7721 out:
7722 	mutex_unlock(&trace_types_lock);
7723 	return ret;
7724 }
7725 
7726 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7727 {
7728 	struct seq_file *m = file->private_data;
7729 	int ret;
7730 
7731 	ret = tracing_release(inode, file);
7732 
7733 	if (file->f_mode & FMODE_READ)
7734 		return ret;
7735 
7736 	/* If write only, the seq_file is just a stub */
7737 	if (m)
7738 		kfree(m->private);
7739 	kfree(m);
7740 
7741 	return 0;
7742 }
7743 
7744 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7745 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7746 				    size_t count, loff_t *ppos);
7747 static int tracing_buffers_release(struct inode *inode, struct file *file);
7748 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7749 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7750 
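/*
 * Open for the raw snapshot file: reuse the buffer-file open and then
 * point the iterator at the max (snapshot) buffer instead of the live one.
 */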
7751 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7752 {
7753 	struct ftrace_buffer_info *info;
7754 	int ret;
7755 
7756 	/* The following checks for tracefs lockdown */
7757 	ret = tracing_buffers_open(inode, filp);
7758 	if (ret < 0)
7759 		return ret;
7760 
7761 	info = filp->private_data;
7762 
7763 	if (info->iter.trace->use_max_tr) {
7764 		tracing_buffers_release(inode, filp);
7765 		return -EBUSY;
7766 	}
7767 
7768 	info->iter.snapshot = true;
7769 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7770 
7771 	return ret;
7772 }
7773 
7774 #endif /* CONFIG_TRACER_SNAPSHOT */
7775 
7776 
7777 static const struct file_operations tracing_thresh_fops = {
7778 	.open		= tracing_open_generic,
7779 	.read		= tracing_thresh_read,
7780 	.write		= tracing_thresh_write,
7781 	.llseek		= generic_file_llseek,
7782 };
7783 
7784 #ifdef CONFIG_TRACER_MAX_TRACE
7785 static const struct file_operations tracing_max_lat_fops = {
7786 	.open		= tracing_open_generic_tr,
7787 	.read		= tracing_max_lat_read,
7788 	.write		= tracing_max_lat_write,
7789 	.llseek		= generic_file_llseek,
7790 	.release	= tracing_release_generic_tr,
7791 };
7792 #endif
7793 
7794 static const struct file_operations set_tracer_fops = {
7795 	.open		= tracing_open_generic_tr,
7796 	.read		= tracing_set_trace_read,
7797 	.write		= tracing_set_trace_write,
7798 	.llseek		= generic_file_llseek,
7799 	.release	= tracing_release_generic_tr,
7800 };
7801 
7802 static const struct file_operations tracing_pipe_fops = {
7803 	.open		= tracing_open_pipe,
7804 	.poll		= tracing_poll_pipe,
7805 	.read		= tracing_read_pipe,
7806 	.splice_read	= tracing_splice_read_pipe,
7807 	.release	= tracing_release_pipe,
7808 	.llseek		= no_llseek,
7809 };
7810 
7811 static const struct file_operations tracing_entries_fops = {
7812 	.open		= tracing_open_generic_tr,
7813 	.read		= tracing_entries_read,
7814 	.write		= tracing_entries_write,
7815 	.llseek		= generic_file_llseek,
7816 	.release	= tracing_release_generic_tr,
7817 };
7818 
7819 static const struct file_operations tracing_total_entries_fops = {
7820 	.open		= tracing_open_generic_tr,
7821 	.read		= tracing_total_entries_read,
7822 	.llseek		= generic_file_llseek,
7823 	.release	= tracing_release_generic_tr,
7824 };
7825 
7826 static const struct file_operations tracing_free_buffer_fops = {
7827 	.open		= tracing_open_generic_tr,
7828 	.write		= tracing_free_buffer_write,
7829 	.release	= tracing_free_buffer_release,
7830 };
7831 
7832 static const struct file_operations tracing_mark_fops = {
7833 	.open		= tracing_mark_open,
7834 	.write		= tracing_mark_write,
7835 	.release	= tracing_release_generic_tr,
7836 };
7837 
7838 static const struct file_operations tracing_mark_raw_fops = {
7839 	.open		= tracing_mark_open,
7840 	.write		= tracing_mark_raw_write,
7841 	.release	= tracing_release_generic_tr,
7842 };
7843 
7844 static const struct file_operations trace_clock_fops = {
7845 	.open		= tracing_clock_open,
7846 	.read		= seq_read,
7847 	.llseek		= seq_lseek,
7848 	.release	= tracing_single_release_tr,
7849 	.write		= tracing_clock_write,
7850 };
7851 
7852 static const struct file_operations trace_time_stamp_mode_fops = {
7853 	.open		= tracing_time_stamp_mode_open,
7854 	.read		= seq_read,
7855 	.llseek		= seq_lseek,
7856 	.release	= tracing_single_release_tr,
7857 };
7858 
7859 #ifdef CONFIG_TRACER_SNAPSHOT
7860 static const struct file_operations snapshot_fops = {
7861 	.open		= tracing_snapshot_open,
7862 	.read		= seq_read,
7863 	.write		= tracing_snapshot_write,
7864 	.llseek		= tracing_lseek,
7865 	.release	= tracing_snapshot_release,
7866 };
7867 
7868 static const struct file_operations snapshot_raw_fops = {
7869 	.open		= snapshot_raw_open,
7870 	.read		= tracing_buffers_read,
7871 	.release	= tracing_buffers_release,
7872 	.splice_read	= tracing_buffers_splice_read,
7873 	.llseek		= no_llseek,
7874 };
7875 
7876 #endif /* CONFIG_TRACER_SNAPSHOT */
7877 
7878 /*
7879  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7880  * @filp: The active open file structure
7881  * @ubuf: The userspace provided buffer containing the value to write
7882  * @cnt: The number of bytes to write
7883  * @ppos: The current "file" position
7884  *
7885  * This function implements the write interface for a struct trace_min_max_param.
7886  * The filp->private_data must point to a trace_min_max_param structure that
7887  * defines where to write the value, the min and the max acceptable values,
7888  * and a lock to protect the write.
7889  */
7890 static ssize_t
7891 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7892 {
7893 	struct trace_min_max_param *param = filp->private_data;
7894 	u64 val;
7895 	int err;
7896 
7897 	if (!param)
7898 		return -EFAULT;
7899 
7900 	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7901 	if (err)
7902 		return err;
7903 
7904 	if (param->lock)
7905 		mutex_lock(param->lock);
7906 
7907 	if (param->min && val < *param->min)
7908 		err = -EINVAL;
7909 
7910 	if (param->max && val > *param->max)
7911 		err = -EINVAL;
7912 
7913 	if (!err)
7914 		*param->val = val;
7915 
7916 	if (param->lock)
7917 		mutex_unlock(param->lock);
7918 
7919 	if (err)
7920 		return err;
7921 
7922 	return cnt;
7923 }
7924 
7925 /*
7926  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7927  * @filp: The active open file structure
7928  * @ubuf: The userspace provided buffer to read value into
7929  * @cnt: The maximum number of bytes to read
7930  * @ppos: The current "file" position
7931  *
7932  * This function implements the read interface for a struct trace_min_max_param.
7933  * The filp->private_data must point to a trace_min_max_param struct with valid
7934  * data.
7935  */
7936 static ssize_t
7937 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7938 {
7939 	struct trace_min_max_param *param = filp->private_data;
7940 	char buf[U64_STR_SIZE];
7941 	int len;
7942 	u64 val;
7943 
7944 	if (!param)
7945 		return -EFAULT;
7946 
7947 	val = *param->val;
7948 
7949 	if (cnt > sizeof(buf))
7950 		cnt = sizeof(buf);
7951 
7952 	len = snprintf(buf, sizeof(buf), "%llu\n", val);
7953 
7954 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7955 }
7956 
7957 const struct file_operations trace_min_max_fops = {
7958 	.open		= tracing_open_generic,
7959 	.read		= trace_min_max_read,
7960 	.write		= trace_min_max_write,
7961 };
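/*
 * A minimal sketch of how a u64 knob could be wired up with these fops.
 * The names and values below are hypothetical, not taken from an existing
 * caller, and assume the usual trace_create_file() helper:
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static struct trace_min_max_param my_param = {
 *		.lock	= NULL,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", 0644, parent, &my_param,
 *			  &trace_min_max_fops);
 */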
7962 
7963 #define TRACING_LOG_ERRS_MAX	8
7964 #define TRACING_LOG_LOC_MAX	128
7965 
7966 #define CMD_PREFIX "  Command: "
7967 
7968 struct err_info {
7969 	const char	**errs;	/* ptr to loc-specific array of err strings */
7970 	u8		type;	/* index into errs -> specific err string */
7971 	u16		pos;	/* caret position */
7972 	u64		ts;
7973 };
7974 
7975 struct tracing_log_err {
7976 	struct list_head	list;
7977 	struct err_info		info;
7978 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7979 	char			*cmd;                     /* what caused err */
7980 };
7981 
7982 static DEFINE_MUTEX(tracing_err_log_lock);
7983 
7984 static struct tracing_log_err *alloc_tracing_log_err(int len)
7985 {
7986 	struct tracing_log_err *err;
7987 
7988 	err = kzalloc(sizeof(*err), GFP_KERNEL);
7989 	if (!err)
7990 		return ERR_PTR(-ENOMEM);
7991 
7992 	err->cmd = kzalloc(len, GFP_KERNEL);
7993 	if (!err->cmd) {
7994 		kfree(err);
7995 		return ERR_PTR(-ENOMEM);
7996 	}
7997 
7998 	return err;
7999 }
8000 
8001 static void free_tracing_log_err(struct tracing_log_err *err)
8002 {
8003 	kfree(err->cmd);
8004 	kfree(err);
8005 }
8006 
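/*
 * Return a tracing_log_err to fill in: allocate a new one until
 * TRACING_LOG_ERRS_MAX entries exist, then recycle the oldest entry.
 * Called with tracing_err_log_lock held.
 */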
8007 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
8008 						   int len)
8009 {
8010 	struct tracing_log_err *err;
8011 	char *cmd;
8012 
8013 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
8014 		err = alloc_tracing_log_err(len);
8015 		if (PTR_ERR(err) != -ENOMEM)
8016 			tr->n_err_log_entries++;
8017 
8018 		return err;
8019 	}
8020 	cmd = kzalloc(len, GFP_KERNEL);
8021 	if (!cmd)
8022 		return ERR_PTR(-ENOMEM);
8023 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8024 	kfree(err->cmd);
8025 	err->cmd = cmd;
8026 	list_del(&err->list);
8027 
8028 	return err;
8029 }
8030 
8031 /**
8032  * err_pos - find the position of a string within a command for error careting
8033  * @cmd: The tracing command that caused the error
8034  * @str: The string to position the caret at within @cmd
8035  *
8036  * Finds the position of the first occurrence of @str within @cmd.  The
8037  * return value can be passed to tracing_log_err() for caret placement
8038  * within @cmd.
8039  *
8040  * Returns the index within @cmd of the first occurrence of @str or 0
8041  * if @str was not found.
8042  */
8043 unsigned int err_pos(char *cmd, const char *str)
8044 {
8045 	char *found;
8046 
8047 	if (WARN_ON(!strlen(cmd)))
8048 		return 0;
8049 
8050 	found = strstr(cmd, str);
8051 	if (found)
8052 		return found - cmd;
8053 
8054 	return 0;
8055 }
8056 
8057 /**
8058  * tracing_log_err - write an error to the tracing error log
8059  * @tr: The associated trace array for the error (NULL for top level array)
8060  * @loc: A string describing where the error occurred
8061  * @cmd: The tracing command that caused the error
8062  * @errs: The array of loc-specific static error strings
8063  * @type: The index into errs[], which produces the specific static err string
8064  * @pos: The position the caret should be placed in the cmd
8065  *
8066  * Writes an error into tracing/error_log of the form:
8067  *
8068  * <loc>: error: <text>
8069  *   Command: <cmd>
8070  *              ^
8071  *
8072  * tracing/error_log is a small log file containing the last
8073  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
8074  * unless there has been a tracing error.  The error log can be cleared,
8075  * and its memory freed, by writing the empty string to it in truncation
8076  * mode, i.e. echo > tracing/error_log.
8077  *
8078  * NOTE: the @errs array along with the @type param are used to
8079  * produce a static error string - this string is not copied and saved
8080  * when the error is logged - only a pointer to it is saved.  See
8081  * existing callers for examples of how static strings are typically
8082  * defined for use with tracing_log_err().
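 *
 * As a purely illustrative sketch (the location string, command and
 * error array below are made up, not taken from a real caller):
 *
 *	static const char *errs[] = { "Invalid field", "Too many arguments" };
 *
 *	tracing_log_err(tr, "my_subsys", cmd, errs, 0,
 *			err_pos(cmd, "bad_field"));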
8083  */
8084 void tracing_log_err(struct trace_array *tr,
8085 		     const char *loc, const char *cmd,
8086 		     const char **errs, u8 type, u16 pos)
8087 {
8088 	struct tracing_log_err *err;
8089 	int len = 0;
8090 
8091 	if (!tr)
8092 		tr = &global_trace;
8093 
8094 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8095 
8096 	mutex_lock(&tracing_err_log_lock);
8097 	err = get_tracing_log_err(tr, len);
8098 	if (PTR_ERR(err) == -ENOMEM) {
8099 		mutex_unlock(&tracing_err_log_lock);
8100 		return;
8101 	}
8102 
8103 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8104 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8105 
8106 	err->info.errs = errs;
8107 	err->info.type = type;
8108 	err->info.pos = pos;
8109 	err->info.ts = local_clock();
8110 
8111 	list_add_tail(&err->list, &tr->err_log);
8112 	mutex_unlock(&tracing_err_log_lock);
8113 }
8114 
8115 static void clear_tracing_err_log(struct trace_array *tr)
8116 {
8117 	struct tracing_log_err *err, *next;
8118 
8119 	mutex_lock(&tracing_err_log_lock);
8120 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
8121 		list_del(&err->list);
8122 		free_tracing_log_err(err);
8123 	}
8124 
8125 	tr->n_err_log_entries = 0;
8126 	mutex_unlock(&tracing_err_log_lock);
8127 }
8128 
8129 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8130 {
8131 	struct trace_array *tr = m->private;
8132 
8133 	mutex_lock(&tracing_err_log_lock);
8134 
8135 	return seq_list_start(&tr->err_log, *pos);
8136 }
8137 
8138 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8139 {
8140 	struct trace_array *tr = m->private;
8141 
8142 	return seq_list_next(v, &tr->err_log, pos);
8143 }
8144 
8145 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8146 {
8147 	mutex_unlock(&tracing_err_log_lock);
8148 }
8149 
8150 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8151 {
8152 	u16 i;
8153 
8154 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8155 		seq_putc(m, ' ');
8156 	for (i = 0; i < pos; i++)
8157 		seq_putc(m, ' ');
8158 	seq_puts(m, "^\n");
8159 }
8160 
8161 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8162 {
8163 	struct tracing_log_err *err = v;
8164 
8165 	if (err) {
8166 		const char *err_text = err->info.errs[err->info.type];
8167 		u64 sec = err->info.ts;
8168 		u32 nsec;
8169 
8170 		nsec = do_div(sec, NSEC_PER_SEC);
8171 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8172 			   err->loc, err_text);
8173 		seq_printf(m, "%s", err->cmd);
8174 		tracing_err_log_show_pos(m, err->info.pos);
8175 	}
8176 
8177 	return 0;
8178 }
8179 
8180 static const struct seq_operations tracing_err_log_seq_ops = {
8181 	.start  = tracing_err_log_seq_start,
8182 	.next   = tracing_err_log_seq_next,
8183 	.stop   = tracing_err_log_seq_stop,
8184 	.show   = tracing_err_log_seq_show
8185 };
8186 
8187 static int tracing_err_log_open(struct inode *inode, struct file *file)
8188 {
8189 	struct trace_array *tr = inode->i_private;
8190 	int ret = 0;
8191 
8192 	ret = tracing_check_open_get_tr(tr);
8193 	if (ret)
8194 		return ret;
8195 
8196 	/* If this file was opened for write, then erase contents */
8197 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8198 		clear_tracing_err_log(tr);
8199 
8200 	if (file->f_mode & FMODE_READ) {
8201 		ret = seq_open(file, &tracing_err_log_seq_ops);
8202 		if (!ret) {
8203 			struct seq_file *m = file->private_data;
8204 			m->private = tr;
8205 		} else {
8206 			trace_array_put(tr);
8207 		}
8208 	}
8209 	return ret;
8210 }
8211 
8212 static ssize_t tracing_err_log_write(struct file *file,
8213 				     const char __user *buffer,
8214 				     size_t count, loff_t *ppos)
8215 {
8216 	return count;
8217 }
8218 
8219 static int tracing_err_log_release(struct inode *inode, struct file *file)
8220 {
8221 	struct trace_array *tr = inode->i_private;
8222 
8223 	trace_array_put(tr);
8224 
8225 	if (file->f_mode & FMODE_READ)
8226 		seq_release(inode, file);
8227 
8228 	return 0;
8229 }
8230 
8231 static const struct file_operations tracing_err_log_fops = {
8232 	.open           = tracing_err_log_open,
8233 	.write		= tracing_err_log_write,
8234 	.read           = seq_read,
8235 	.llseek         = tracing_lseek,
8236 	.release        = tracing_err_log_release,
8237 };
8238 
8239 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8240 {
8241 	struct trace_array *tr = inode->i_private;
8242 	struct ftrace_buffer_info *info;
8243 	int ret;
8244 
8245 	ret = tracing_check_open_get_tr(tr);
8246 	if (ret)
8247 		return ret;
8248 
8249 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
8250 	if (!info) {
8251 		trace_array_put(tr);
8252 		return -ENOMEM;
8253 	}
8254 
8255 	mutex_lock(&trace_types_lock);
8256 
8257 	info->iter.tr		= tr;
8258 	info->iter.cpu_file	= tracing_get_cpu(inode);
8259 	info->iter.trace	= tr->current_trace;
8260 	info->iter.array_buffer = &tr->array_buffer;
8261 	info->spare		= NULL;
8262 	/* Force reading ring buffer for first read */
8263 	info->read		= (unsigned int)-1;
8264 
8265 	filp->private_data = info;
8266 
8267 	tr->trace_ref++;
8268 
8269 	mutex_unlock(&trace_types_lock);
8270 
8271 	ret = nonseekable_open(inode, filp);
8272 	if (ret < 0)
8273 		trace_array_put(tr);
8274 
8275 	return ret;
8276 }
8277 
8278 static __poll_t
8279 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8280 {
8281 	struct ftrace_buffer_info *info = filp->private_data;
8282 	struct trace_iterator *iter = &info->iter;
8283 
8284 	return trace_poll(iter, filp, poll_table);
8285 }
8286 
8287 static ssize_t
8288 tracing_buffers_read(struct file *filp, char __user *ubuf,
8289 		     size_t count, loff_t *ppos)
8290 {
8291 	struct ftrace_buffer_info *info = filp->private_data;
8292 	struct trace_iterator *iter = &info->iter;
8293 	ssize_t ret = 0;
8294 	ssize_t size;
8295 
8296 	if (!count)
8297 		return 0;
8298 
8299 #ifdef CONFIG_TRACER_MAX_TRACE
8300 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8301 		return -EBUSY;
8302 #endif
8303 
8304 	if (!info->spare) {
8305 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8306 							  iter->cpu_file);
8307 		if (IS_ERR(info->spare)) {
8308 			ret = PTR_ERR(info->spare);
8309 			info->spare = NULL;
8310 		} else {
8311 			info->spare_cpu = iter->cpu_file;
8312 		}
8313 	}
8314 	if (!info->spare)
8315 		return ret;
8316 
8317 	/* Do we have previous read data to read? */
8318 	if (info->read < PAGE_SIZE)
8319 		goto read;
8320 
8321  again:
8322 	trace_access_lock(iter->cpu_file);
8323 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
8324 				    &info->spare,
8325 				    count,
8326 				    iter->cpu_file, 0);
8327 	trace_access_unlock(iter->cpu_file);
8328 
8329 	if (ret < 0) {
8330 		if (trace_empty(iter)) {
8331 			if ((filp->f_flags & O_NONBLOCK))
8332 				return -EAGAIN;
8333 
8334 			ret = wait_on_pipe(iter, 0);
8335 			if (ret)
8336 				return ret;
8337 
8338 			goto again;
8339 		}
8340 		return 0;
8341 	}
8342 
8343 	info->read = 0;
8344  read:
8345 	size = PAGE_SIZE - info->read;
8346 	if (size > count)
8347 		size = count;
8348 
8349 	ret = copy_to_user(ubuf, info->spare + info->read, size);
8350 	if (ret == size)
8351 		return -EFAULT;
8352 
8353 	size -= ret;
8354 
8355 	*ppos += size;
8356 	info->read += size;
8357 
8358 	return size;
8359 }
8360 
8361 static int tracing_buffers_release(struct inode *inode, struct file *file)
8362 {
8363 	struct ftrace_buffer_info *info = file->private_data;
8364 	struct trace_iterator *iter = &info->iter;
8365 
8366 	mutex_lock(&trace_types_lock);
8367 
8368 	iter->tr->trace_ref--;
8369 
8370 	__trace_array_put(iter->tr);
8371 
8372 	iter->wait_index++;
8373 	/* Make sure the waiters see the new wait_index */
8374 	smp_wmb();
8375 
8376 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8377 
8378 	if (info->spare)
8379 		ring_buffer_free_read_page(iter->array_buffer->buffer,
8380 					   info->spare_cpu, info->spare);
8381 	kvfree(info);
8382 
8383 	mutex_unlock(&trace_types_lock);
8384 
8385 	return 0;
8386 }
8387 
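/*
 * Reference-counted wrapper around a ring buffer read page that has been
 * handed to a pipe; the page is only returned to the ring buffer once the
 * last pipe buffer referencing it is released.
 */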
8388 struct buffer_ref {
8389 	struct trace_buffer	*buffer;
8390 	void			*page;
8391 	int			cpu;
8392 	refcount_t		refcount;
8393 };
8394 
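/* Drop one reference; free the read page and the ref itself on the last put. */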
8395 static void buffer_ref_release(struct buffer_ref *ref)
8396 {
8397 	if (!refcount_dec_and_test(&ref->refcount))
8398 		return;
8399 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8400 	kfree(ref);
8401 }
8402 
8403 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8404 				    struct pipe_buffer *buf)
8405 {
8406 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8407 
8408 	buffer_ref_release(ref);
8409 	buf->private = 0;
8410 }
8411 
8412 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8413 				struct pipe_buffer *buf)
8414 {
8415 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8416 
8417 	if (refcount_read(&ref->refcount) > INT_MAX/2)
8418 		return false;
8419 
8420 	refcount_inc(&ref->refcount);
8421 	return true;
8422 }
8423 
8424 /* Pipe buffer operations for a buffer. */
8425 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8426 	.release		= buffer_pipe_buf_release,
8427 	.get			= buffer_pipe_buf_get,
8428 };
8429 
8430 /*
8431  * Callback from splice_to_pipe(): release any leftover pages at the
8432  * end of the spd if we errored out while filling the pipe.
8433  */
8434 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8435 {
8436 	struct buffer_ref *ref =
8437 		(struct buffer_ref *)spd->partial[i].private;
8438 
8439 	buffer_ref_release(ref);
8440 	spd->partial[i].private = 0;
8441 }
8442 
8443 static ssize_t
8444 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8445 			    struct pipe_inode_info *pipe, size_t len,
8446 			    unsigned int flags)
8447 {
8448 	struct ftrace_buffer_info *info = file->private_data;
8449 	struct trace_iterator *iter = &info->iter;
8450 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
8451 	struct page *pages_def[PIPE_DEF_BUFFERS];
8452 	struct splice_pipe_desc spd = {
8453 		.pages		= pages_def,
8454 		.partial	= partial_def,
8455 		.nr_pages_max	= PIPE_DEF_BUFFERS,
8456 		.ops		= &buffer_pipe_buf_ops,
8457 		.spd_release	= buffer_spd_release,
8458 	};
8459 	struct buffer_ref *ref;
8460 	int entries, i;
8461 	ssize_t ret = 0;
8462 
8463 #ifdef CONFIG_TRACER_MAX_TRACE
8464 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8465 		return -EBUSY;
8466 #endif
8467 
8468 	if (*ppos & (PAGE_SIZE - 1))
8469 		return -EINVAL;
8470 
8471 	if (len & (PAGE_SIZE - 1)) {
8472 		if (len < PAGE_SIZE)
8473 			return -EINVAL;
8474 		len &= PAGE_MASK;
8475 	}
8476 
8477 	if (splice_grow_spd(pipe, &spd))
8478 		return -ENOMEM;
8479 
8480  again:
8481 	trace_access_lock(iter->cpu_file);
8482 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8483 
8484 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8485 		struct page *page;
8486 		int r;
8487 
8488 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8489 		if (!ref) {
8490 			ret = -ENOMEM;
8491 			break;
8492 		}
8493 
8494 		refcount_set(&ref->refcount, 1);
8495 		ref->buffer = iter->array_buffer->buffer;
8496 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8497 		if (IS_ERR(ref->page)) {
8498 			ret = PTR_ERR(ref->page);
8499 			ref->page = NULL;
8500 			kfree(ref);
8501 			break;
8502 		}
8503 		ref->cpu = iter->cpu_file;
8504 
8505 		r = ring_buffer_read_page(ref->buffer, &ref->page,
8506 					  len, iter->cpu_file, 1);
8507 		if (r < 0) {
8508 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
8509 						   ref->page);
8510 			kfree(ref);
8511 			break;
8512 		}
8513 
8514 		page = virt_to_page(ref->page);
8515 
8516 		spd.pages[i] = page;
8517 		spd.partial[i].len = PAGE_SIZE;
8518 		spd.partial[i].offset = 0;
8519 		spd.partial[i].private = (unsigned long)ref;
8520 		spd.nr_pages++;
8521 		*ppos += PAGE_SIZE;
8522 
8523 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8524 	}
8525 
8526 	trace_access_unlock(iter->cpu_file);
8527 	spd.nr_pages = i;
8528 
8529 	/* did we read anything? */
8530 	if (!spd.nr_pages) {
8531 		long wait_index;
8532 
8533 		if (ret)
8534 			goto out;
8535 
8536 		ret = -EAGAIN;
8537 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8538 			goto out;
8539 
8540 		wait_index = READ_ONCE(iter->wait_index);
8541 
8542 		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8543 		if (ret)
8544 			goto out;
8545 
8546 		/* No need to wait after waking up when tracing is off */
8547 		if (!tracer_tracing_is_on(iter->tr))
8548 			goto out;
8549 
8550 		/* Make sure we see the new wait_index */
8551 		smp_rmb();
8552 		if (wait_index != iter->wait_index)
8553 			goto out;
8554 
8555 		goto again;
8556 	}
8557 
8558 	ret = splice_to_pipe(pipe, &spd);
8559 out:
8560 	splice_shrink_spd(&spd);
8561 
8562 	return ret;
8563 }
8564 
8565 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8566 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8567 {
8568 	struct ftrace_buffer_info *info = file->private_data;
8569 	struct trace_iterator *iter = &info->iter;
8570 
8571 	if (cmd)
8572 		return -ENOIOCTLCMD;
8573 
8574 	mutex_lock(&trace_types_lock);
8575 
8576 	iter->wait_index++;
8577 	/* Make sure the waiters see the new wait_index */
8578 	smp_wmb();
8579 
8580 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8581 
8582 	mutex_unlock(&trace_types_lock);
8583 	return 0;
8584 }
8585 
8586 static const struct file_operations tracing_buffers_fops = {
8587 	.open		= tracing_buffers_open,
8588 	.read		= tracing_buffers_read,
8589 	.poll		= tracing_buffers_poll,
8590 	.release	= tracing_buffers_release,
8591 	.splice_read	= tracing_buffers_splice_read,
8592 	.unlocked_ioctl = tracing_buffers_ioctl,
8593 	.llseek		= no_llseek,
8594 };
8595 
8596 static ssize_t
8597 tracing_stats_read(struct file *filp, char __user *ubuf,
8598 		   size_t count, loff_t *ppos)
8599 {
8600 	struct inode *inode = file_inode(filp);
8601 	struct trace_array *tr = inode->i_private;
8602 	struct array_buffer *trace_buf = &tr->array_buffer;
8603 	int cpu = tracing_get_cpu(inode);
8604 	struct trace_seq *s;
8605 	unsigned long cnt;
8606 	unsigned long long t;
8607 	unsigned long usec_rem;
8608 
8609 	s = kmalloc(sizeof(*s), GFP_KERNEL);
8610 	if (!s)
8611 		return -ENOMEM;
8612 
8613 	trace_seq_init(s);
8614 
8615 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8616 	trace_seq_printf(s, "entries: %ld\n", cnt);
8617 
8618 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8619 	trace_seq_printf(s, "overrun: %ld\n", cnt);
8620 
8621 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8622 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8623 
8624 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8625 	trace_seq_printf(s, "bytes: %ld\n", cnt);
8626 
8627 	if (trace_clocks[tr->clock_id].in_ns) {
8628 		/* local or global for trace_clock */
8629 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8630 		usec_rem = do_div(t, USEC_PER_SEC);
8631 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8632 								t, usec_rem);
8633 
8634 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8635 		usec_rem = do_div(t, USEC_PER_SEC);
8636 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8637 	} else {
8638 		/* counter or tsc mode for trace_clock */
8639 		trace_seq_printf(s, "oldest event ts: %llu\n",
8640 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8641 
8642 		trace_seq_printf(s, "now ts: %llu\n",
8643 				ring_buffer_time_stamp(trace_buf->buffer));
8644 	}
8645 
8646 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8647 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
8648 
8649 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8650 	trace_seq_printf(s, "read events: %ld\n", cnt);
8651 
8652 	count = simple_read_from_buffer(ubuf, count, ppos,
8653 					s->buffer, trace_seq_used(s));
8654 
8655 	kfree(s);
8656 
8657 	return count;
8658 }
8659 
8660 static const struct file_operations tracing_stats_fops = {
8661 	.open		= tracing_open_generic_tr,
8662 	.read		= tracing_stats_read,
8663 	.llseek		= generic_file_llseek,
8664 	.release	= tracing_release_generic_tr,
8665 };
8666 
8667 #ifdef CONFIG_DYNAMIC_FTRACE
8668 
8669 static ssize_t
8670 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8671 		  size_t cnt, loff_t *ppos)
8672 {
8673 	ssize_t ret;
8674 	char *buf;
8675 	int r;
8676 
8677 	/* 256 should be plenty to hold the amount needed */
8678 	buf = kmalloc(256, GFP_KERNEL);
8679 	if (!buf)
8680 		return -ENOMEM;
8681 
8682 	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8683 		      ftrace_update_tot_cnt,
8684 		      ftrace_number_of_pages,
8685 		      ftrace_number_of_groups);
8686 
8687 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8688 	kfree(buf);
8689 	return ret;
8690 }
8691 
8692 static const struct file_operations tracing_dyn_info_fops = {
8693 	.open		= tracing_open_generic,
8694 	.read		= tracing_read_dyn_info,
8695 	.llseek		= generic_file_llseek,
8696 };
8697 #endif /* CONFIG_DYNAMIC_FTRACE */
8698 
8699 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8700 static void
8701 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8702 		struct trace_array *tr, struct ftrace_probe_ops *ops,
8703 		void *data)
8704 {
8705 	tracing_snapshot_instance(tr);
8706 }
8707 
8708 static void
8709 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8710 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
8711 		      void *data)
8712 {
8713 	struct ftrace_func_mapper *mapper = data;
8714 	long *count = NULL;
8715 
8716 	if (mapper)
8717 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8718 
8719 	if (count) {
8720 
8721 		if (*count <= 0)
8722 			return;
8723 
8724 		(*count)--;
8725 	}
8726 
8727 	tracing_snapshot_instance(tr);
8728 }
8729 
8730 static int
8731 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8732 		      struct ftrace_probe_ops *ops, void *data)
8733 {
8734 	struct ftrace_func_mapper *mapper = data;
8735 	long *count = NULL;
8736 
8737 	seq_printf(m, "%ps:", (void *)ip);
8738 
8739 	seq_puts(m, "snapshot");
8740 
8741 	if (mapper)
8742 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8743 
8744 	if (count)
8745 		seq_printf(m, ":count=%ld\n", *count);
8746 	else
8747 		seq_puts(m, ":unlimited\n");
8748 
8749 	return 0;
8750 }
8751 
8752 static int
8753 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8754 		     unsigned long ip, void *init_data, void **data)
8755 {
8756 	struct ftrace_func_mapper *mapper = *data;
8757 
8758 	if (!mapper) {
8759 		mapper = allocate_ftrace_func_mapper();
8760 		if (!mapper)
8761 			return -ENOMEM;
8762 		*data = mapper;
8763 	}
8764 
8765 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8766 }
8767 
8768 static void
8769 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8770 		     unsigned long ip, void *data)
8771 {
8772 	struct ftrace_func_mapper *mapper = data;
8773 
8774 	if (!ip) {
8775 		if (!mapper)
8776 			return;
8777 		free_ftrace_func_mapper(mapper, NULL);
8778 		return;
8779 	}
8780 
8781 	ftrace_func_mapper_remove_ip(mapper, ip);
8782 }
8783 
8784 static struct ftrace_probe_ops snapshot_probe_ops = {
8785 	.func			= ftrace_snapshot,
8786 	.print			= ftrace_snapshot_print,
8787 };
8788 
8789 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8790 	.func			= ftrace_count_snapshot,
8791 	.print			= ftrace_snapshot_print,
8792 	.init			= ftrace_snapshot_init,
8793 	.free			= ftrace_snapshot_free,
8794 };
8795 
8796 static int
8797 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8798 			       char *glob, char *cmd, char *param, int enable)
8799 {
8800 	struct ftrace_probe_ops *ops;
8801 	void *count = (void *)-1;
8802 	char *number;
8803 	int ret;
8804 
8805 	if (!tr)
8806 		return -ENODEV;
8807 
8808 	/* hash funcs only work with set_ftrace_filter */
8809 	if (!enable)
8810 		return -EINVAL;
8811 
8812 	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8813 
8814 	if (glob[0] == '!')
8815 		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8816 
8817 	if (!param)
8818 		goto out_reg;
8819 
8820 	number = strsep(&param, ":");
8821 
8822 	if (!strlen(number))
8823 		goto out_reg;
8824 
8825 	/*
8826 	 * We use the callback data field (which is a pointer)
8827 	 * as our counter.
8828 	 */
8829 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8830 	if (ret)
8831 		return ret;
8832 
8833  out_reg:
8834 	ret = tracing_alloc_snapshot_instance(tr);
8835 	if (ret < 0)
8836 		goto out;
8837 
8838 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8839 
8840  out:
8841 	return ret < 0 ? ret : 0;
8842 }
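
/*
 * For illustration, the command parsed above is typically driven from user
 * space like this (paths assume the default tracefs mount at
 * /sys/kernel/tracing; "schedule" is just an example function):
 *
 *   # snapshot every time schedule() is hit
 *   echo 'schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 *   # only snapshot on the first 5 hits (":5" arrives here as @param)
 *   echo 'schedule:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 *   # remove the probe again (the leading '!' is handled above)
 *   echo '!schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 */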
8843 
8844 static struct ftrace_func_command ftrace_snapshot_cmd = {
8845 	.name			= "snapshot",
8846 	.func			= ftrace_trace_snapshot_callback,
8847 };
8848 
8849 static __init int register_snapshot_cmd(void)
8850 {
8851 	return register_ftrace_command(&ftrace_snapshot_cmd);
8852 }
8853 #else
8854 static inline __init int register_snapshot_cmd(void) { return 0; }
8855 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8856 
8857 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8858 {
8859 	if (WARN_ON(!tr->dir))
8860 		return ERR_PTR(-ENODEV);
8861 
8862 	/* Top directory uses NULL as the parent */
8863 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8864 		return NULL;
8865 
8866 	/* All sub buffers have a descriptor */
8867 	return tr->dir;
8868 }
8869 
8870 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8871 {
8872 	struct dentry *d_tracer;
8873 
8874 	if (tr->percpu_dir)
8875 		return tr->percpu_dir;
8876 
8877 	d_tracer = tracing_get_dentry(tr);
8878 	if (IS_ERR(d_tracer))
8879 		return NULL;
8880 
8881 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8882 
8883 	MEM_FAIL(!tr->percpu_dir,
8884 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8885 
8886 	return tr->percpu_dir;
8887 }
8888 
8889 static struct dentry *
8890 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8891 		      void *data, long cpu, const struct file_operations *fops)
8892 {
8893 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8894 
8895 	if (ret) /* See tracing_get_cpu() */
8896 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8897 	return ret;
8898 }
8899 
8900 static void
8901 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8902 {
8903 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8904 	struct dentry *d_cpu;
8905 	char cpu_dir[30]; /* 30 characters should be more than enough */
8906 
8907 	if (!d_percpu)
8908 		return;
8909 
8910 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8911 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8912 	if (!d_cpu) {
8913 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8914 		return;
8915 	}
8916 
8917 	/* per cpu trace_pipe */
8918 	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8919 				tr, cpu, &tracing_pipe_fops);
8920 
8921 	/* per cpu trace */
8922 	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8923 				tr, cpu, &tracing_fops);
8924 
8925 	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8926 				tr, cpu, &tracing_buffers_fops);
8927 
8928 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8929 				tr, cpu, &tracing_stats_fops);
8930 
8931 	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8932 				tr, cpu, &tracing_entries_fops);
8933 
8934 #ifdef CONFIG_TRACER_SNAPSHOT
8935 	trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8936 				tr, cpu, &snapshot_fops);
8937 
8938 	trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8939 				tr, cpu, &snapshot_raw_fops);
8940 #endif
8941 }
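
/*
 * For illustration, the per-CPU files created above end up laid out roughly
 * like this (assuming the default tracefs mount point):
 *
 *   /sys/kernel/tracing/per_cpu/cpu0/trace
 *   /sys/kernel/tracing/per_cpu/cpu0/trace_pipe
 *   /sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw
 *   /sys/kernel/tracing/per_cpu/cpu0/stats
 *   /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 */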
8942 
8943 #ifdef CONFIG_FTRACE_SELFTEST
8944 /* Let selftest have access to static functions in this file */
8945 #include "trace_selftest.c"
8946 #endif
8947 
8948 static ssize_t
8949 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8950 			loff_t *ppos)
8951 {
8952 	struct trace_option_dentry *topt = filp->private_data;
8953 	char *buf;
8954 
8955 	if (topt->flags->val & topt->opt->bit)
8956 		buf = "1\n";
8957 	else
8958 		buf = "0\n";
8959 
8960 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8961 }
8962 
8963 static ssize_t
8964 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8965 			 loff_t *ppos)
8966 {
8967 	struct trace_option_dentry *topt = filp->private_data;
8968 	unsigned long val;
8969 	int ret;
8970 
8971 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8972 	if (ret)
8973 		return ret;
8974 
8975 	if (val != 0 && val != 1)
8976 		return -EINVAL;
8977 
8978 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8979 		mutex_lock(&trace_types_lock);
8980 		ret = __set_tracer_option(topt->tr, topt->flags,
8981 					  topt->opt, !val);
8982 		mutex_unlock(&trace_types_lock);
8983 		if (ret)
8984 			return ret;
8985 	}
8986 
8987 	*ppos += cnt;
8988 
8989 	return cnt;
8990 }
8991 
8992 static int tracing_open_options(struct inode *inode, struct file *filp)
8993 {
8994 	struct trace_option_dentry *topt = inode->i_private;
8995 	int ret;
8996 
8997 	ret = tracing_check_open_get_tr(topt->tr);
8998 	if (ret)
8999 		return ret;
9000 
9001 	filp->private_data = inode->i_private;
9002 	return 0;
9003 }
9004 
9005 static int tracing_release_options(struct inode *inode, struct file *file)
9006 {
9007 	struct trace_option_dentry *topt = file->private_data;
9008 
9009 	trace_array_put(topt->tr);
9010 	return 0;
9011 }
9012 
9013 static const struct file_operations trace_options_fops = {
9014 	.open = tracing_open_options,
9015 	.read = trace_options_read,
9016 	.write = trace_options_write,
9017 	.llseek	= generic_file_llseek,
9018 	.release = tracing_release_options,
9019 };
9020 
9021 /*
9022  * In order to pass in both the trace_array descriptor as well as the index
9023  * to the flag that the trace option file represents, the trace_array
9024  * has a character array of trace_flags_index[], which holds the index
9025  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
9026  * The address of this character array is passed to the flag option file
9027  * read/write callbacks.
9028  *
9029  * In order to extract both the index and the trace_array descriptor,
9030  * get_tr_index() uses the following algorithm.
9031  *
9032  *   idx = *ptr;
9033  *
9034  * As the pointer points at an entry of the index array and each entry
9035  * holds its own position (remember index[1] == 1), *ptr yields the index.
9036  *
9037  * Then to get the trace_array descriptor, by subtracting that index
9038  * from the ptr, we get to the start of the index itself.
9039  *
9040  *   ptr - idx == &index[0]
9041  *
9042  * Then a simple container_of() from that pointer gets us to the
9043  * trace_array descriptor.
9044  */
9045 static void get_tr_index(void *data, struct trace_array **ptr,
9046 			 unsigned int *pindex)
9047 {
9048 	*pindex = *(unsigned char *)data;
9049 
9050 	*ptr = container_of(data - *pindex, struct trace_array,
9051 			    trace_flags_index);
9052 }
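
/*
 * Worked example: if @data points at tr->trace_flags_index[3], then
 * *(unsigned char *)data == 3 (each entry holds its own position), so
 * data - 3 == &tr->trace_flags_index[0], and container_of() on that
 * address recovers the enclosing trace_array.
 */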
9053 
9054 static ssize_t
9055 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9056 			loff_t *ppos)
9057 {
9058 	void *tr_index = filp->private_data;
9059 	struct trace_array *tr;
9060 	unsigned int index;
9061 	char *buf;
9062 
9063 	get_tr_index(tr_index, &tr, &index);
9064 
9065 	if (tr->trace_flags & (1 << index))
9066 		buf = "1\n";
9067 	else
9068 		buf = "0\n";
9069 
9070 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9071 }
9072 
9073 static ssize_t
9074 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9075 			 loff_t *ppos)
9076 {
9077 	void *tr_index = filp->private_data;
9078 	struct trace_array *tr;
9079 	unsigned int index;
9080 	unsigned long val;
9081 	int ret;
9082 
9083 	get_tr_index(tr_index, &tr, &index);
9084 
9085 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9086 	if (ret)
9087 		return ret;
9088 
9089 	if (val != 0 && val != 1)
9090 		return -EINVAL;
9091 
9092 	mutex_lock(&event_mutex);
9093 	mutex_lock(&trace_types_lock);
9094 	ret = set_tracer_flag(tr, 1 << index, val);
9095 	mutex_unlock(&trace_types_lock);
9096 	mutex_unlock(&event_mutex);
9097 
9098 	if (ret < 0)
9099 		return ret;
9100 
9101 	*ppos += cnt;
9102 
9103 	return cnt;
9104 }
9105 
9106 static const struct file_operations trace_options_core_fops = {
9107 	.open = tracing_open_generic,
9108 	.read = trace_options_core_read,
9109 	.write = trace_options_core_write,
9110 	.llseek = generic_file_llseek,
9111 };
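
/*
 * These operations back the per-flag files under "options/"; for example
 * (assuming the default tracefs mount):
 *
 *   echo 1 > /sys/kernel/tracing/options/stacktrace
 *   echo 0 > /sys/kernel/tracing/options/sym-offset
 */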
9112 
9113 struct dentry *trace_create_file(const char *name,
9114 				 umode_t mode,
9115 				 struct dentry *parent,
9116 				 void *data,
9117 				 const struct file_operations *fops)
9118 {
9119 	struct dentry *ret;
9120 
9121 	ret = tracefs_create_file(name, mode, parent, data, fops);
9122 	if (!ret)
9123 		pr_warn("Could not create tracefs '%s' entry\n", name);
9124 
9125 	return ret;
9126 }
9127 
9128 
9129 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9130 {
9131 	struct dentry *d_tracer;
9132 
9133 	if (tr->options)
9134 		return tr->options;
9135 
9136 	d_tracer = tracing_get_dentry(tr);
9137 	if (IS_ERR(d_tracer))
9138 		return NULL;
9139 
9140 	tr->options = tracefs_create_dir("options", d_tracer);
9141 	if (!tr->options) {
9142 		pr_warn("Could not create tracefs directory 'options'\n");
9143 		return NULL;
9144 	}
9145 
9146 	return tr->options;
9147 }
9148 
9149 static void
9150 create_trace_option_file(struct trace_array *tr,
9151 			 struct trace_option_dentry *topt,
9152 			 struct tracer_flags *flags,
9153 			 struct tracer_opt *opt)
9154 {
9155 	struct dentry *t_options;
9156 
9157 	t_options = trace_options_init_dentry(tr);
9158 	if (!t_options)
9159 		return;
9160 
9161 	topt->flags = flags;
9162 	topt->opt = opt;
9163 	topt->tr = tr;
9164 
9165 	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9166 					t_options, topt, &trace_options_fops);
9167 
9168 }
9169 
9170 static void
9171 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9172 {
9173 	struct trace_option_dentry *topts;
9174 	struct trace_options *tr_topts;
9175 	struct tracer_flags *flags;
9176 	struct tracer_opt *opts;
9177 	int cnt;
9178 	int i;
9179 
9180 	if (!tracer)
9181 		return;
9182 
9183 	flags = tracer->flags;
9184 
9185 	if (!flags || !flags->opts)
9186 		return;
9187 
9188 	/*
9189 	 * If this is an instance, only create flags for tracers
9190 	 * the instance may have.
9191 	 */
9192 	if (!trace_ok_for_array(tracer, tr))
9193 		return;
9194 
9195 	for (i = 0; i < tr->nr_topts; i++) {
9196 		/* Make sure there are no duplicate flags. */
9197 		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9198 			return;
9199 	}
9200 
9201 	opts = flags->opts;
9202 
9203 	for (cnt = 0; opts[cnt].name; cnt++)
9204 		;
9205 
9206 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9207 	if (!topts)
9208 		return;
9209 
9210 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9211 			    GFP_KERNEL);
9212 	if (!tr_topts) {
9213 		kfree(topts);
9214 		return;
9215 	}
9216 
9217 	tr->topts = tr_topts;
9218 	tr->topts[tr->nr_topts].tracer = tracer;
9219 	tr->topts[tr->nr_topts].topts = topts;
9220 	tr->nr_topts++;
9221 
9222 	for (cnt = 0; opts[cnt].name; cnt++) {
9223 		create_trace_option_file(tr, &topts[cnt], flags,
9224 					 &opts[cnt]);
9225 		MEM_FAIL(topts[cnt].entry == NULL,
9226 			  "Failed to create trace option: %s",
9227 			  opts[cnt].name);
9228 	}
9229 }
9230 
9231 static struct dentry *
9232 create_trace_option_core_file(struct trace_array *tr,
9233 			      const char *option, long index)
9234 {
9235 	struct dentry *t_options;
9236 
9237 	t_options = trace_options_init_dentry(tr);
9238 	if (!t_options)
9239 		return NULL;
9240 
9241 	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9242 				 (void *)&tr->trace_flags_index[index],
9243 				 &trace_options_core_fops);
9244 }
9245 
9246 static void create_trace_options_dir(struct trace_array *tr)
9247 {
9248 	struct dentry *t_options;
9249 	bool top_level = tr == &global_trace;
9250 	int i;
9251 
9252 	t_options = trace_options_init_dentry(tr);
9253 	if (!t_options)
9254 		return;
9255 
9256 	for (i = 0; trace_options[i]; i++) {
9257 		if (top_level ||
9258 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9259 			create_trace_option_core_file(tr, trace_options[i], i);
9260 	}
9261 }
9262 
9263 static ssize_t
9264 rb_simple_read(struct file *filp, char __user *ubuf,
9265 	       size_t cnt, loff_t *ppos)
9266 {
9267 	struct trace_array *tr = filp->private_data;
9268 	char buf[64];
9269 	int r;
9270 
9271 	r = tracer_tracing_is_on(tr);
9272 	r = sprintf(buf, "%d\n", r);
9273 
9274 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9275 }
9276 
9277 static ssize_t
9278 rb_simple_write(struct file *filp, const char __user *ubuf,
9279 		size_t cnt, loff_t *ppos)
9280 {
9281 	struct trace_array *tr = filp->private_data;
9282 	struct trace_buffer *buffer = tr->array_buffer.buffer;
9283 	unsigned long val;
9284 	int ret;
9285 
9286 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9287 	if (ret)
9288 		return ret;
9289 
9290 	if (buffer) {
9291 		mutex_lock(&trace_types_lock);
9292 		if (!!val == tracer_tracing_is_on(tr)) {
9293 			val = 0; /* do nothing */
9294 		} else if (val) {
9295 			tracer_tracing_on(tr);
9296 			if (tr->current_trace->start)
9297 				tr->current_trace->start(tr);
9298 		} else {
9299 			tracer_tracing_off(tr);
9300 			if (tr->current_trace->stop)
9301 				tr->current_trace->stop(tr);
9302 			/* Wake up any waiters */
9303 			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9304 		}
9305 		mutex_unlock(&trace_types_lock);
9306 	}
9307 
9308 	(*ppos)++;
9309 
9310 	return cnt;
9311 }
9312 
9313 static const struct file_operations rb_simple_fops = {
9314 	.open		= tracing_open_generic_tr,
9315 	.read		= rb_simple_read,
9316 	.write		= rb_simple_write,
9317 	.release	= tracing_release_generic_tr,
9318 	.llseek		= default_llseek,
9319 };
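
/*
 * rb_simple_fops backs the "tracing_on" file; typical use from user space
 * (assuming the default tracefs mount):
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on   # stop writing to the buffer
 *   echo 1 > /sys/kernel/tracing/tracing_on   # resume writing
 *   cat /sys/kernel/tracing/tracing_on        # read the current state
 */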
9320 
9321 static ssize_t
9322 buffer_percent_read(struct file *filp, char __user *ubuf,
9323 		    size_t cnt, loff_t *ppos)
9324 {
9325 	struct trace_array *tr = filp->private_data;
9326 	char buf[64];
9327 	int r;
9328 
9329 	r = tr->buffer_percent;
9330 	r = sprintf(buf, "%d\n", r);
9331 
9332 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9333 }
9334 
9335 static ssize_t
9336 buffer_percent_write(struct file *filp, const char __user *ubuf,
9337 		     size_t cnt, loff_t *ppos)
9338 {
9339 	struct trace_array *tr = filp->private_data;
9340 	unsigned long val;
9341 	int ret;
9342 
9343 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9344 	if (ret)
9345 		return ret;
9346 
9347 	if (val > 100)
9348 		return -EINVAL;
9349 
9350 	tr->buffer_percent = val;
9351 
9352 	(*ppos)++;
9353 
9354 	return cnt;
9355 }
9356 
9357 static const struct file_operations buffer_percent_fops = {
9358 	.open		= tracing_open_generic_tr,
9359 	.read		= buffer_percent_read,
9360 	.write		= buffer_percent_write,
9361 	.release	= tracing_release_generic_tr,
9362 	.llseek		= default_llseek,
9363 };
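
/*
 * "buffer_percent" sets how full the ring buffer must be before a blocked
 * reader of trace_pipe is woken; for example (default tracefs mount assumed):
 *
 *   echo 0   > /sys/kernel/tracing/buffer_percent   # wake on any data
 *   echo 50  > /sys/kernel/tracing/buffer_percent   # wake at half full
 *   echo 100 > /sys/kernel/tracing/buffer_percent   # wake only when full
 */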
9364 
9365 static struct dentry *trace_instance_dir;
9366 
9367 static void
9368 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9369 
9370 static int
9371 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9372 {
9373 	enum ring_buffer_flags rb_flags;
9374 
9375 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9376 
9377 	buf->tr = tr;
9378 
9379 	buf->buffer = ring_buffer_alloc(size, rb_flags);
9380 	if (!buf->buffer)
9381 		return -ENOMEM;
9382 
9383 	buf->data = alloc_percpu(struct trace_array_cpu);
9384 	if (!buf->data) {
9385 		ring_buffer_free(buf->buffer);
9386 		buf->buffer = NULL;
9387 		return -ENOMEM;
9388 	}
9389 
9390 	/* Allocate the first page for all buffers */
9391 	set_buffer_entries(&tr->array_buffer,
9392 			   ring_buffer_size(tr->array_buffer.buffer, 0));
9393 
9394 	return 0;
9395 }
9396 
9397 static void free_trace_buffer(struct array_buffer *buf)
9398 {
9399 	if (buf->buffer) {
9400 		ring_buffer_free(buf->buffer);
9401 		buf->buffer = NULL;
9402 		free_percpu(buf->data);
9403 		buf->data = NULL;
9404 	}
9405 }
9406 
9407 static int allocate_trace_buffers(struct trace_array *tr, int size)
9408 {
9409 	int ret;
9410 
9411 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9412 	if (ret)
9413 		return ret;
9414 
9415 #ifdef CONFIG_TRACER_MAX_TRACE
9416 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
9417 				    allocate_snapshot ? size : 1);
9418 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9419 		free_trace_buffer(&tr->array_buffer);
9420 		return -ENOMEM;
9421 	}
9422 	tr->allocated_snapshot = allocate_snapshot;
9423 
9424 	allocate_snapshot = false;
9425 #endif
9426 
9427 	return 0;
9428 }
9429 
9430 static void free_trace_buffers(struct trace_array *tr)
9431 {
9432 	if (!tr)
9433 		return;
9434 
9435 	free_trace_buffer(&tr->array_buffer);
9436 
9437 #ifdef CONFIG_TRACER_MAX_TRACE
9438 	free_trace_buffer(&tr->max_buffer);
9439 #endif
9440 }
9441 
9442 static void init_trace_flags_index(struct trace_array *tr)
9443 {
9444 	int i;
9445 
9446 	/* Used by the trace options files */
9447 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9448 		tr->trace_flags_index[i] = i;
9449 }
9450 
9451 static void __update_tracer_options(struct trace_array *tr)
9452 {
9453 	struct tracer *t;
9454 
9455 	for (t = trace_types; t; t = t->next)
9456 		add_tracer_options(tr, t);
9457 }
9458 
9459 static void update_tracer_options(struct trace_array *tr)
9460 {
9461 	mutex_lock(&trace_types_lock);
9462 	tracer_options_updated = true;
9463 	__update_tracer_options(tr);
9464 	mutex_unlock(&trace_types_lock);
9465 }
9466 
9467 /* Must have trace_types_lock held */
9468 struct trace_array *trace_array_find(const char *instance)
9469 {
9470 	struct trace_array *tr, *found = NULL;
9471 
9472 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9473 		if (tr->name && strcmp(tr->name, instance) == 0) {
9474 			found = tr;
9475 			break;
9476 		}
9477 	}
9478 
9479 	return found;
9480 }
9481 
9482 struct trace_array *trace_array_find_get(const char *instance)
9483 {
9484 	struct trace_array *tr;
9485 
9486 	mutex_lock(&trace_types_lock);
9487 	tr = trace_array_find(instance);
9488 	if (tr)
9489 		tr->ref++;
9490 	mutex_unlock(&trace_types_lock);
9491 
9492 	return tr;
9493 }
9494 
9495 static int trace_array_create_dir(struct trace_array *tr)
9496 {
9497 	int ret;
9498 
9499 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9500 	if (!tr->dir)
9501 		return -EINVAL;
9502 
9503 	ret = event_trace_add_tracer(tr->dir, tr);
9504 	if (ret) {
9505 		tracefs_remove(tr->dir);
9506 		return ret;
9507 	}
9508 
9509 	init_tracer_tracefs(tr, tr->dir);
9510 	__update_tracer_options(tr);
9511 
9512 	return ret;
9513 }
9514 
9515 static struct trace_array *trace_array_create(const char *name)
9516 {
9517 	struct trace_array *tr;
9518 	int ret;
9519 
9520 	ret = -ENOMEM;
9521 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9522 	if (!tr)
9523 		return ERR_PTR(ret);
9524 
9525 	tr->name = kstrdup(name, GFP_KERNEL);
9526 	if (!tr->name)
9527 		goto out_free_tr;
9528 
9529 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9530 		goto out_free_tr;
9531 
9532 	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9533 		goto out_free_tr;
9534 
9535 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9536 
9537 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9538 
9539 	raw_spin_lock_init(&tr->start_lock);
9540 
9541 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9542 
9543 	tr->current_trace = &nop_trace;
9544 
9545 	INIT_LIST_HEAD(&tr->systems);
9546 	INIT_LIST_HEAD(&tr->events);
9547 	INIT_LIST_HEAD(&tr->hist_vars);
9548 	INIT_LIST_HEAD(&tr->err_log);
9549 
9550 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9551 		goto out_free_tr;
9552 
9553 	if (ftrace_allocate_ftrace_ops(tr) < 0)
9554 		goto out_free_tr;
9555 
9556 	ftrace_init_trace_array(tr);
9557 
9558 	init_trace_flags_index(tr);
9559 
9560 	if (trace_instance_dir) {
9561 		ret = trace_array_create_dir(tr);
9562 		if (ret)
9563 			goto out_free_tr;
9564 	} else
9565 		__trace_early_add_events(tr);
9566 
9567 	list_add(&tr->list, &ftrace_trace_arrays);
9568 
9569 	tr->ref++;
9570 
9571 	return tr;
9572 
9573  out_free_tr:
9574 	ftrace_free_ftrace_ops(tr);
9575 	free_trace_buffers(tr);
9576 	free_cpumask_var(tr->pipe_cpumask);
9577 	free_cpumask_var(tr->tracing_cpumask);
9578 	kfree(tr->name);
9579 	kfree(tr);
9580 
9581 	return ERR_PTR(ret);
9582 }
9583 
9584 static int instance_mkdir(const char *name)
9585 {
9586 	struct trace_array *tr;
9587 	int ret;
9588 
9589 	mutex_lock(&event_mutex);
9590 	mutex_lock(&trace_types_lock);
9591 
9592 	ret = -EEXIST;
9593 	if (trace_array_find(name))
9594 		goto out_unlock;
9595 
9596 	tr = trace_array_create(name);
9597 
9598 	ret = PTR_ERR_OR_ZERO(tr);
9599 
9600 out_unlock:
9601 	mutex_unlock(&trace_types_lock);
9602 	mutex_unlock(&event_mutex);
9603 	return ret;
9604 }
9605 
9606 /**
9607  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9608  * @name: The name of the trace array to be looked up/created.
9609  *
9610  * Returns a pointer to the trace array with the given name, or NULL
9611  * if it cannot be created.
9612  *
9613  * NOTE: This function increments the reference counter associated with the
9614  * trace array returned. This makes sure it cannot be freed while in use.
9615  * Use trace_array_put() once the trace array is no longer needed.
9616  * If the trace_array is to be freed, trace_array_destroy() needs to
9617  * be called after the trace_array_put(), or simply let user space delete
9618  * it from the tracefs instances directory. But until the
9619  * trace_array_put() is called, user space can not delete it.
9620  *
9621  */
9622 struct trace_array *trace_array_get_by_name(const char *name)
9623 {
9624 	struct trace_array *tr;
9625 
9626 	mutex_lock(&event_mutex);
9627 	mutex_lock(&trace_types_lock);
9628 
9629 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9630 		if (tr->name && strcmp(tr->name, name) == 0)
9631 			goto out_unlock;
9632 	}
9633 
9634 	tr = trace_array_create(name);
9635 
9636 	if (IS_ERR(tr))
9637 		tr = NULL;
9638 out_unlock:
9639 	if (tr)
9640 		tr->ref++;
9641 
9642 	mutex_unlock(&trace_types_lock);
9643 	mutex_unlock(&event_mutex);
9644 	return tr;
9645 }
9646 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
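
/*
 * A minimal usage sketch (hypothetical caller, e.g. a module wanting its own
 * instance; the instance name is made up):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr) {
 *		trace_array_puts(tr, "hello from my_instance\n");
 *		trace_array_put(tr);
 *	}
 *
 * The instance then appears under the tracefs "instances" directory and can
 * be deleted from user space once the reference has been dropped.
 */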
9647 
9648 static int __remove_instance(struct trace_array *tr)
9649 {
9650 	int i;
9651 
9652 	/* Reference counter for a newly created trace array = 1. */
9653 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9654 		return -EBUSY;
9655 
9656 	list_del(&tr->list);
9657 
9658 	/* Disable all the flags that were enabled coming in */
9659 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9660 		if ((1 << i) & ZEROED_TRACE_FLAGS)
9661 			set_tracer_flag(tr, 1 << i, 0);
9662 	}
9663 
9664 	tracing_set_nop(tr);
9665 	clear_ftrace_function_probes(tr);
9666 	event_trace_del_tracer(tr);
9667 	ftrace_clear_pids(tr);
9668 	ftrace_destroy_function_files(tr);
9669 	tracefs_remove(tr->dir);
9670 	free_percpu(tr->last_func_repeats);
9671 	free_trace_buffers(tr);
9672 	clear_tracing_err_log(tr);
9673 
9674 	for (i = 0; i < tr->nr_topts; i++) {
9675 		kfree(tr->topts[i].topts);
9676 	}
9677 	kfree(tr->topts);
9678 
9679 	free_cpumask_var(tr->pipe_cpumask);
9680 	free_cpumask_var(tr->tracing_cpumask);
9681 	kfree(tr->name);
9682 	kfree(tr);
9683 
9684 	return 0;
9685 }
9686 
9687 int trace_array_destroy(struct trace_array *this_tr)
9688 {
9689 	struct trace_array *tr;
9690 	int ret;
9691 
9692 	if (!this_tr)
9693 		return -EINVAL;
9694 
9695 	mutex_lock(&event_mutex);
9696 	mutex_lock(&trace_types_lock);
9697 
9698 	ret = -ENODEV;
9699 
9700 	/* Making sure trace array exists before destroying it. */
9701 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9702 		if (tr == this_tr) {
9703 			ret = __remove_instance(tr);
9704 			break;
9705 		}
9706 	}
9707 
9708 	mutex_unlock(&trace_types_lock);
9709 	mutex_unlock(&event_mutex);
9710 
9711 	return ret;
9712 }
9713 EXPORT_SYMBOL_GPL(trace_array_destroy);
9714 
9715 static int instance_rmdir(const char *name)
9716 {
9717 	struct trace_array *tr;
9718 	int ret;
9719 
9720 	mutex_lock(&event_mutex);
9721 	mutex_lock(&trace_types_lock);
9722 
9723 	ret = -ENODEV;
9724 	tr = trace_array_find(name);
9725 	if (tr)
9726 		ret = __remove_instance(tr);
9727 
9728 	mutex_unlock(&trace_types_lock);
9729 	mutex_unlock(&event_mutex);
9730 
9731 	return ret;
9732 }
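
/*
 * instance_mkdir()/instance_rmdir() are wired up below so that user space
 * can manage instances with plain mkdir/rmdir, e.g. (default mount assumed):
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   rmdir /sys/kernel/tracing/instances/foo
 */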
9733 
9734 static __init void create_trace_instances(struct dentry *d_tracer)
9735 {
9736 	struct trace_array *tr;
9737 
9738 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9739 							 instance_mkdir,
9740 							 instance_rmdir);
9741 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9742 		return;
9743 
9744 	mutex_lock(&event_mutex);
9745 	mutex_lock(&trace_types_lock);
9746 
9747 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9748 		if (!tr->name)
9749 			continue;
9750 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9751 			     "Failed to create instance directory\n"))
9752 			break;
9753 	}
9754 
9755 	mutex_unlock(&trace_types_lock);
9756 	mutex_unlock(&event_mutex);
9757 }
9758 
9759 static void
9760 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9761 {
9762 	struct trace_event_file *file;
9763 	int cpu;
9764 
9765 	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9766 			tr, &show_traces_fops);
9767 
9768 	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9769 			tr, &set_tracer_fops);
9770 
9771 	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9772 			  tr, &tracing_cpumask_fops);
9773 
9774 	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9775 			  tr, &tracing_iter_fops);
9776 
9777 	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9778 			  tr, &tracing_fops);
9779 
9780 	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9781 			  tr, &tracing_pipe_fops);
9782 
9783 	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9784 			  tr, &tracing_entries_fops);
9785 
9786 	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9787 			  tr, &tracing_total_entries_fops);
9788 
9789 	trace_create_file("free_buffer", 0200, d_tracer,
9790 			  tr, &tracing_free_buffer_fops);
9791 
9792 	trace_create_file("trace_marker", 0220, d_tracer,
9793 			  tr, &tracing_mark_fops);
9794 
9795 	file = __find_event_file(tr, "ftrace", "print");
9796 	if (file && file->ef)
9797 		eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
9798 				  file, &event_trigger_fops);
9799 	tr->trace_marker_file = file;
9800 
9801 	trace_create_file("trace_marker_raw", 0220, d_tracer,
9802 			  tr, &tracing_mark_raw_fops);
9803 
9804 	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9805 			  &trace_clock_fops);
9806 
9807 	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9808 			  tr, &rb_simple_fops);
9809 
9810 	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9811 			  &trace_time_stamp_mode_fops);
9812 
9813 	tr->buffer_percent = 50;
9814 
9815 	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9816 			tr, &buffer_percent_fops);
9817 
9818 	create_trace_options_dir(tr);
9819 
9820 #ifdef CONFIG_TRACER_MAX_TRACE
9821 	trace_create_maxlat_file(tr, d_tracer);
9822 #endif
9823 
9824 	if (ftrace_create_function_files(tr, d_tracer))
9825 		MEM_FAIL(1, "Could not allocate function filter files");
9826 
9827 #ifdef CONFIG_TRACER_SNAPSHOT
9828 	trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9829 			  tr, &snapshot_fops);
9830 #endif
9831 
9832 	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9833 			  tr, &tracing_err_log_fops);
9834 
9835 	for_each_tracing_cpu(cpu)
9836 		tracing_init_tracefs_percpu(tr, cpu);
9837 
9838 	ftrace_init_tracefs(tr, d_tracer);
9839 }
9840 
9841 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9842 {
9843 	struct vfsmount *mnt;
9844 	struct file_system_type *type;
9845 
9846 	/*
9847 	 * To maintain backward compatibility for tools that mount
9848 	 * debugfs to get to the tracing facility, tracefs is automatically
9849 	 * mounted to the debugfs/tracing directory.
9850 	 */
9851 	type = get_fs_type("tracefs");
9852 	if (!type)
9853 		return NULL;
9854 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9855 	put_filesystem(type);
9856 	if (IS_ERR(mnt))
9857 		return NULL;
9858 	mntget(mnt);
9859 
9860 	return mnt;
9861 }
9862 
9863 /**
9864  * tracing_init_dentry - initialize top level trace array
9865  *
9866  * This is called when creating files or directories in the tracing
9867  * directory. It is called via fs_initcall() by any of the boot up code
9868  * and expects to return the dentry of the top level tracing directory.
9869  */
9870 int tracing_init_dentry(void)
9871 {
9872 	struct trace_array *tr = &global_trace;
9873 
9874 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9875 		pr_warn("Tracing disabled due to lockdown\n");
9876 		return -EPERM;
9877 	}
9878 
9879 	/* The top level trace array uses NULL as parent */
9880 	if (tr->dir)
9881 		return 0;
9882 
9883 	if (WARN_ON(!tracefs_initialized()))
9884 		return -ENODEV;
9885 
9886 	/*
9887 	 * As there may still be users that expect the tracing
9888 	 * files to exist in debugfs/tracing, we must automount
9889 	 * the tracefs file system there, so older tools still
9890 	 * work with the newer kernel.
9891 	 */
9892 	tr->dir = debugfs_create_automount("tracing", NULL,
9893 					   trace_automount, NULL);
9894 
9895 	return 0;
9896 }
9897 
9898 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9899 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9900 
9901 static struct workqueue_struct *eval_map_wq __initdata;
9902 static struct work_struct eval_map_work __initdata;
9903 static struct work_struct tracerfs_init_work __initdata;
9904 
9905 static void __init eval_map_work_func(struct work_struct *work)
9906 {
9907 	int len;
9908 
9909 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9910 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9911 }
9912 
9913 static int __init trace_eval_init(void)
9914 {
9915 	INIT_WORK(&eval_map_work, eval_map_work_func);
9916 
9917 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9918 	if (!eval_map_wq) {
9919 		pr_err("Unable to allocate eval_map_wq\n");
9920 		/* Do work here */
9921 		eval_map_work_func(&eval_map_work);
9922 		return -ENOMEM;
9923 	}
9924 
9925 	queue_work(eval_map_wq, &eval_map_work);
9926 	return 0;
9927 }
9928 
9929 subsys_initcall(trace_eval_init);
9930 
9931 static int __init trace_eval_sync(void)
9932 {
9933 	/* Make sure the eval map updates are finished */
9934 	if (eval_map_wq)
9935 		destroy_workqueue(eval_map_wq);
9936 	return 0;
9937 }
9938 
9939 late_initcall_sync(trace_eval_sync);
9940 
9941 
9942 #ifdef CONFIG_MODULES
9943 static void trace_module_add_evals(struct module *mod)
9944 {
9945 	if (!mod->num_trace_evals)
9946 		return;
9947 
9948 	/*
9949 	 * Modules with bad taint do not have events created, do
9950 	 * not bother with enums either.
9951 	 */
9952 	if (trace_module_has_bad_taint(mod))
9953 		return;
9954 
9955 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9956 }
9957 
9958 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9959 static void trace_module_remove_evals(struct module *mod)
9960 {
9961 	union trace_eval_map_item *map;
9962 	union trace_eval_map_item **last = &trace_eval_maps;
9963 
9964 	if (!mod->num_trace_evals)
9965 		return;
9966 
9967 	mutex_lock(&trace_eval_mutex);
9968 
9969 	map = trace_eval_maps;
9970 
9971 	while (map) {
9972 		if (map->head.mod == mod)
9973 			break;
9974 		map = trace_eval_jmp_to_tail(map);
9975 		last = &map->tail.next;
9976 		map = map->tail.next;
9977 	}
9978 	if (!map)
9979 		goto out;
9980 
9981 	*last = trace_eval_jmp_to_tail(map)->tail.next;
9982 	kfree(map);
9983  out:
9984 	mutex_unlock(&trace_eval_mutex);
9985 }
9986 #else
9987 static inline void trace_module_remove_evals(struct module *mod) { }
9988 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9989 
9990 static int trace_module_notify(struct notifier_block *self,
9991 			       unsigned long val, void *data)
9992 {
9993 	struct module *mod = data;
9994 
9995 	switch (val) {
9996 	case MODULE_STATE_COMING:
9997 		trace_module_add_evals(mod);
9998 		break;
9999 	case MODULE_STATE_GOING:
10000 		trace_module_remove_evals(mod);
10001 		break;
10002 	}
10003 
10004 	return NOTIFY_OK;
10005 }
10006 
10007 static struct notifier_block trace_module_nb = {
10008 	.notifier_call = trace_module_notify,
10009 	.priority = 0,
10010 };
10011 #endif /* CONFIG_MODULES */
10012 
10013 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
10014 {
10015 
10016 	event_trace_init();
10017 
10018 	init_tracer_tracefs(&global_trace, NULL);
10019 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
10020 
10021 	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
10022 			&global_trace, &tracing_thresh_fops);
10023 
10024 	trace_create_file("README", TRACE_MODE_READ, NULL,
10025 			NULL, &tracing_readme_fops);
10026 
10027 	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
10028 			NULL, &tracing_saved_cmdlines_fops);
10029 
10030 	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
10031 			  NULL, &tracing_saved_cmdlines_size_fops);
10032 
10033 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
10034 			NULL, &tracing_saved_tgids_fops);
10035 
10036 	trace_create_eval_file(NULL);
10037 
10038 #ifdef CONFIG_MODULES
10039 	register_module_notifier(&trace_module_nb);
10040 #endif
10041 
10042 #ifdef CONFIG_DYNAMIC_FTRACE
10043 	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
10044 			NULL, &tracing_dyn_info_fops);
10045 #endif
10046 
10047 	create_trace_instances(NULL);
10048 
10049 	update_tracer_options(&global_trace);
10050 }
10051 
10052 static __init int tracer_init_tracefs(void)
10053 {
10054 	int ret;
10055 
10056 	trace_access_lock_init();
10057 
10058 	ret = tracing_init_dentry();
10059 	if (ret)
10060 		return 0;
10061 
10062 	if (eval_map_wq) {
10063 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
10064 		queue_work(eval_map_wq, &tracerfs_init_work);
10065 	} else {
10066 		tracer_init_tracefs_work_func(NULL);
10067 	}
10068 
10069 	rv_init_interface();
10070 
10071 	return 0;
10072 }
10073 
10074 fs_initcall(tracer_init_tracefs);
10075 
10076 static int trace_die_panic_handler(struct notifier_block *self,
10077 				unsigned long ev, void *unused);
10078 
10079 static struct notifier_block trace_panic_notifier = {
10080 	.notifier_call = trace_die_panic_handler,
10081 	.priority = INT_MAX - 1,
10082 };
10083 
10084 static struct notifier_block trace_die_notifier = {
10085 	.notifier_call = trace_die_panic_handler,
10086 	.priority = INT_MAX - 1,
10087 };
10088 
10089 /*
10090  * The idea is to execute the following die/panic callback early, in order
10091  * to avoid showing irrelevant information in the trace (like other panic
10092  * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
10093  * warnings get disabled (to prevent potential log flooding).
10094  */
10095 static int trace_die_panic_handler(struct notifier_block *self,
10096 				unsigned long ev, void *unused)
10097 {
10098 	if (!ftrace_dump_on_oops)
10099 		return NOTIFY_DONE;
10100 
10101 	/* The die notifier requires DIE_OOPS to trigger */
10102 	if (self == &trace_die_notifier && ev != DIE_OOPS)
10103 		return NOTIFY_DONE;
10104 
10105 	ftrace_dump(ftrace_dump_on_oops);
10106 
10107 	return NOTIFY_DONE;
10108 }
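
/*
 * ftrace_dump_on_oops is normally set from the kernel command line, e.g.:
 *
 *   ftrace_dump_on_oops            # dump all CPU buffers on oops/panic
 *   ftrace_dump_on_oops=orig_cpu   # dump only the CPU that triggered it
 */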
10109 
10110 /*
10111  * printk is set to max of 1024, we really don't need it that big.
10112  * Nothing should be printing 1000 characters anyway.
10113  */
10114 #define TRACE_MAX_PRINT		1000
10115 
10116 /*
10117  * Define here KERN_TRACE so that we have one place to modify
10118  * it if we decide to change what log level the ftrace dump
10119  * should be at.
10120  */
10121 #define KERN_TRACE		KERN_EMERG
10122 
10123 void
10124 trace_printk_seq(struct trace_seq *s)
10125 {
10126 	/* Probably should print a warning here. */
10127 	if (s->seq.len >= TRACE_MAX_PRINT)
10128 		s->seq.len = TRACE_MAX_PRINT;
10129 
10130 	/*
10131 	 * More paranoid code. Although the buffer size is set to
10132 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10133 	 * an extra layer of protection.
10134 	 */
10135 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10136 		s->seq.len = s->seq.size - 1;
10137 
10138 	/* should be zero ended, but we are paranoid. */
10139 	s->buffer[s->seq.len] = 0;
10140 
10141 	printk(KERN_TRACE "%s", s->buffer);
10142 
10143 	trace_seq_init(s);
10144 }
10145 
10146 void trace_init_global_iter(struct trace_iterator *iter)
10147 {
10148 	iter->tr = &global_trace;
10149 	iter->trace = iter->tr->current_trace;
10150 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10151 	iter->array_buffer = &global_trace.array_buffer;
10152 
10153 	if (iter->trace && iter->trace->open)
10154 		iter->trace->open(iter);
10155 
10156 	/* Annotate start of buffers if we had overruns */
10157 	if (ring_buffer_overruns(iter->array_buffer->buffer))
10158 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10159 
10160 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10161 	if (trace_clocks[iter->tr->clock_id].in_ns)
10162 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10163 
10164 	/* Can not use kmalloc for iter.temp and iter.fmt */
10165 	iter->temp = static_temp_buf;
10166 	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10167 	iter->fmt = static_fmt_buf;
10168 	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10169 }
10170 
10171 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10172 {
10173 	/* use static because iter can be a bit big for the stack */
10174 	static struct trace_iterator iter;
10175 	static atomic_t dump_running;
10176 	struct trace_array *tr = &global_trace;
10177 	unsigned int old_userobj;
10178 	unsigned long flags;
10179 	int cnt = 0, cpu;
10180 
10181 	/* Only allow one dump user at a time. */
10182 	if (atomic_inc_return(&dump_running) != 1) {
10183 		atomic_dec(&dump_running);
10184 		return;
10185 	}
10186 
10187 	/*
10188 	 * Always turn off tracing when we dump.
10189 	 * We don't need to show trace output of what happens
10190 	 * between multiple crashes.
10191 	 *
10192 	 * If the user does a sysrq-z, then they can re-enable
10193 	 * tracing with echo 1 > tracing_on.
10194 	 */
10195 	tracing_off();
10196 
10197 	local_irq_save(flags);
10198 
10199 	/* Simulate the iterator */
10200 	trace_init_global_iter(&iter);
10201 
10202 	for_each_tracing_cpu(cpu) {
10203 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10204 	}
10205 
10206 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10207 
10208 	/* don't look at user memory in panic mode */
10209 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10210 
10211 	switch (oops_dump_mode) {
10212 	case DUMP_ALL:
10213 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10214 		break;
10215 	case DUMP_ORIG:
10216 		iter.cpu_file = raw_smp_processor_id();
10217 		break;
10218 	case DUMP_NONE:
10219 		goto out_enable;
10220 	default:
10221 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10222 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10223 	}
10224 
10225 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
10226 
10227 	/* Did function tracer already get disabled? */
10228 	if (ftrace_is_dead()) {
10229 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10230 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10231 	}
10232 
10233 	/*
10234 	 * We need to stop all tracing on all CPUS to read
10235 	 * the next buffer. This is a bit expensive, but is
10236 	 * not done often. We fill in all that we can read,
10237 	 * and then release the locks again.
10238 	 */
10239 
10240 	while (!trace_empty(&iter)) {
10241 
10242 		if (!cnt)
10243 			printk(KERN_TRACE "---------------------------------\n");
10244 
10245 		cnt++;
10246 
10247 		trace_iterator_reset(&iter);
10248 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10249 
10250 		if (trace_find_next_entry_inc(&iter) != NULL) {
10251 			int ret;
10252 
10253 			ret = print_trace_line(&iter);
10254 			if (ret != TRACE_TYPE_NO_CONSUME)
10255 				trace_consume(&iter);
10256 		}
10257 		touch_nmi_watchdog();
10258 
10259 		trace_printk_seq(&iter.seq);
10260 	}
10261 
10262 	if (!cnt)
10263 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10264 	else
10265 		printk(KERN_TRACE "---------------------------------\n");
10266 
10267  out_enable:
10268 	tr->trace_flags |= old_userobj;
10269 
10270 	for_each_tracing_cpu(cpu) {
10271 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10272 	}
10273 	atomic_dec(&dump_running);
10274 	local_irq_restore(flags);
10275 }
10276 EXPORT_SYMBOL_GPL(ftrace_dump);
10277 
10278 #define WRITE_BUFSIZE  4096
10279 
10280 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10281 				size_t count, loff_t *ppos,
10282 				int (*createfn)(const char *))
10283 {
10284 	char *kbuf, *buf, *tmp;
10285 	int ret = 0;
10286 	size_t done = 0;
10287 	size_t size;
10288 
10289 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10290 	if (!kbuf)
10291 		return -ENOMEM;
10292 
10293 	while (done < count) {
10294 		size = count - done;
10295 
10296 		if (size >= WRITE_BUFSIZE)
10297 			size = WRITE_BUFSIZE - 1;
10298 
10299 		if (copy_from_user(kbuf, buffer + done, size)) {
10300 			ret = -EFAULT;
10301 			goto out;
10302 		}
10303 		kbuf[size] = '\0';
10304 		buf = kbuf;
10305 		do {
10306 			tmp = strchr(buf, '\n');
10307 			if (tmp) {
10308 				*tmp = '\0';
10309 				size = tmp - buf + 1;
10310 			} else {
10311 				size = strlen(buf);
10312 				if (done + size < count) {
10313 					if (buf != kbuf)
10314 						break;
10315 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10316 					pr_warn("Line length is too long: Should be less than %d\n",
10317 						WRITE_BUFSIZE - 2);
10318 					ret = -EINVAL;
10319 					goto out;
10320 				}
10321 			}
10322 			done += size;
10323 
10324 			/* Remove comments */
10325 			tmp = strchr(buf, '#');
10326 
10327 			if (tmp)
10328 				*tmp = '\0';
10329 
10330 			ret = createfn(buf);
10331 			if (ret)
10332 				goto out;
10333 			buf += size;
10334 
10335 		} while (done < count);
10336 	}
10337 	ret = done;
10338 
10339 out:
10340 	kfree(kbuf);
10341 
10342 	return ret;
10343 }
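
/*
 * trace_parse_run_command() is used by files that accept several
 * newline-separated commands per write, such as kprobe_events; an
 * illustrative example (probe name and symbol are arbitrary):
 *
 *   echo 'p:myprobe do_sys_open' >> /sys/kernel/tracing/kprobe_events
 */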
10344 
10345 #ifdef CONFIG_TRACER_MAX_TRACE
10346 __init static bool tr_needs_alloc_snapshot(const char *name)
10347 {
10348 	char *test;
10349 	int len = strlen(name);
10350 	bool ret;
10351 
10352 	if (!boot_snapshot_index)
10353 		return false;
10354 
10355 	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10356 	    boot_snapshot_info[len] == '\t')
10357 		return true;
10358 
10359 	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10360 	if (!test)
10361 		return false;
10362 
10363 	sprintf(test, "\t%s\t", name);
10364 	ret = strstr(boot_snapshot_info, test) == NULL;
10365 	kfree(test);
10366 	return ret;
10367 }
10368 
10369 __init static void do_allocate_snapshot(const char *name)
10370 {
10371 	if (!tr_needs_alloc_snapshot(name))
10372 		return;
10373 
10374 	/*
10375 	 * When allocate_snapshot is set, the next call to
10376 	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10377 	 * will allocate the snapshot buffer. That will also clear
10378 	 * this flag.
10379 	 */
10380 	allocate_snapshot = true;
10381 }
10382 #else
10383 static inline void do_allocate_snapshot(const char *name) { }
10384 #endif
10385 
10386 __init static void enable_instances(void)
10387 {
10388 	struct trace_array *tr;
10389 	char *curr_str;
10390 	char *str;
10391 	char *tok;
10392 
10393 	/* A tab is always appended */
10394 	boot_instance_info[boot_instance_index - 1] = '\0';
10395 	str = boot_instance_info;
10396 
10397 	while ((curr_str = strsep(&str, "\t"))) {
10398 
10399 		tok = strsep(&curr_str, ",");
10400 
10401 		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10402 			do_allocate_snapshot(tok);
10403 
10404 		tr = trace_array_get_by_name(tok);
10405 		if (!tr) {
10406 			pr_warn("Failed to create instance buffer %s\n", curr_str);
10407 			continue;
10408 		}
10409 		/* Allow user space to delete it */
10410 		trace_array_put(tr);
10411 
10412 		while ((tok = strsep(&curr_str, ","))) {
10413 			early_enable_events(tr, tok, true);
10414 		}
10415 	}
10416 }
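
/*
 * enable_instances() consumes the "trace_instance=" boot parameter; a sketch
 * of the format handled above (instance and event names are examples):
 *
 *   trace_instance=foo,sched:sched_switch,irq:irq_handler_entry
 *
 * which creates instance "foo" early in boot and enables the listed events
 * in it.
 */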
10417 
10418 __init static int tracer_alloc_buffers(void)
10419 {
10420 	int ring_buf_size;
10421 	int ret = -ENOMEM;
10422 
10423 
10424 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10425 		pr_warn("Tracing disabled due to lockdown\n");
10426 		return -EPERM;
10427 	}
10428 
10429 	/*
10430 	 * Make sure we don't accidentally add more trace options
10431 	 * than we have bits for.
10432 	 */
10433 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10434 
10435 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10436 		goto out;
10437 
10438 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10439 		goto out_free_buffer_mask;
10440 
10441 	/* Only allocate trace_printk buffers if a trace_printk exists */
10442 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10443 		/* Must be called before global_trace.buffer is allocated */
10444 		trace_printk_init_buffers();
10445 
10446 	/* To save memory, keep the ring buffer size to its minimum */
10447 	if (ring_buffer_expanded)
10448 		ring_buf_size = trace_buf_size;
10449 	else
10450 		ring_buf_size = 1;
10451 
10452 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10453 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10454 
10455 	raw_spin_lock_init(&global_trace.start_lock);
10456 
10457 	/*
10458 	 * The prepare callback allocates some memory for the ring buffer. We
10459 	 * don't free the buffer if the CPU goes down. If we were to free
10460 	 * the buffer, then the user would lose any trace that was in the
10461 	 * buffer. The memory will be removed once the "instance" is removed.
10462 	 */
10463 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10464 				      "trace/RB:prepare", trace_rb_cpu_prepare,
10465 				      NULL);
10466 	if (ret < 0)
10467 		goto out_free_cpumask;
10468 	/* Used for event triggers */
10469 	ret = -ENOMEM;
10470 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10471 	if (!temp_buffer)
10472 		goto out_rm_hp_state;
10473 
10474 	if (trace_create_savedcmd() < 0)
10475 		goto out_free_temp_buffer;
10476 
10477 	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10478 		goto out_free_savedcmd;
10479 
10480 	/* TODO: make the number of buffers hot pluggable with CPUS */
10481 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10482 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10483 		goto out_free_pipe_cpumask;
10484 	}
10485 	if (global_trace.buffer_disabled)
10486 		tracing_off();
10487 
10488 	if (trace_boot_clock) {
10489 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10490 		if (ret < 0)
10491 			pr_warn("Trace clock %s not defined, going back to default\n",
10492 				trace_boot_clock);
10493 	}
10494 
10495 	/*
10496 	 * register_tracer() might reference current_trace, so it
10497 	 * needs to be set before we register anything. This is
10498 	 * just a bootstrap of current_trace anyway.
10499 	 */
10500 	global_trace.current_trace = &nop_trace;
10501 
10502 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10503 
10504 	ftrace_init_global_array_ops(&global_trace);
10505 
10506 	init_trace_flags_index(&global_trace);
10507 
10508 	register_tracer(&nop_trace);
10509 
10510 	/* Function tracing may start here (via kernel command line) */
10511 	init_function_trace();
10512 
10513 	/* All seems OK, enable tracing */
10514 	tracing_disabled = 0;
10515 
10516 	atomic_notifier_chain_register(&panic_notifier_list,
10517 				       &trace_panic_notifier);
10518 
10519 	register_die_notifier(&trace_die_notifier);
10520 
10521 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10522 
10523 	INIT_LIST_HEAD(&global_trace.systems);
10524 	INIT_LIST_HEAD(&global_trace.events);
10525 	INIT_LIST_HEAD(&global_trace.hist_vars);
10526 	INIT_LIST_HEAD(&global_trace.err_log);
10527 	list_add(&global_trace.list, &ftrace_trace_arrays);
10528 
10529 	apply_trace_boot_options();
10530 
10531 	register_snapshot_cmd();
10532 
10533 	test_can_verify();
10534 
10535 	return 0;
10536 
10537 out_free_pipe_cpumask:
10538 	free_cpumask_var(global_trace.pipe_cpumask);
10539 out_free_savedcmd:
10540 	free_saved_cmdlines_buffer(savedcmd);
10541 out_free_temp_buffer:
10542 	ring_buffer_free(temp_buffer);
10543 out_rm_hp_state:
10544 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10545 out_free_cpumask:
10546 	free_cpumask_var(global_trace.tracing_cpumask);
10547 out_free_buffer_mask:
10548 	free_cpumask_var(tracing_buffer_mask);
10549 out:
10550 	return ret;
10551 }
10552 
10553 void __init ftrace_boot_snapshot(void)
10554 {
10555 #ifdef CONFIG_TRACER_MAX_TRACE
10556 	struct trace_array *tr;
10557 
10558 	if (!snapshot_at_boot)
10559 		return;
10560 
10561 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10562 		if (!tr->allocated_snapshot)
10563 			continue;
10564 
10565 		tracing_snapshot_instance(tr);
10566 		trace_array_puts(tr, "** Boot snapshot taken **\n");
10567 	}
10568 #endif
10569 }
10570 
10571 void __init early_trace_init(void)
10572 {
10573 	if (tracepoint_printk) {
10574 		tracepoint_print_iter =
10575 			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10576 		if (MEM_FAIL(!tracepoint_print_iter,
10577 			     "Failed to allocate trace iterator\n"))
10578 			tracepoint_printk = 0;
10579 		else
10580 			static_key_enable(&tracepoint_printk_key.key);
10581 	}
10582 	tracer_alloc_buffers();
10583 
10584 	init_events();
10585 }
10586 
10587 void __init trace_init(void)
10588 {
10589 	trace_event_init();
10590 
10591 	if (boot_instance_index)
10592 		enable_instances();
10593 }
10594 
10595 __init static void clear_boot_tracer(void)
10596 {
10597 	/*
10598 	 * The default bootup tracer name points into an init section buffer.
10599 	 * This function is called from a late initcall. If we did not
10600 	 * find the boot tracer, then clear it out, to prevent
10601 	 * later registration from accessing the buffer that is
10602 	 * about to be freed.
10603 	 */
10604 	if (!default_bootup_tracer)
10605 		return;
10606 
10607 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10608 	       default_bootup_tracer);
10609 	default_bootup_tracer = NULL;
10610 }
10611 
10612 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10613 __init static void tracing_set_default_clock(void)
10614 {
10615 	/* sched_clock_stable() is determined in late_initcall */
10616 	if (!trace_boot_clock && !sched_clock_stable()) {
10617 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10618 			pr_warn("Can not set tracing clock due to lockdown\n");
10619 			return;
10620 		}
10621 
10622 		printk(KERN_WARNING
10623 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10624 		       "If you want to keep using the local clock, then add:\n"
10625 		       "  \"trace_clock=local\"\n"
10626 		       "on the kernel command line\n");
10627 		tracing_set_clock(&global_trace, "global");
10628 	}
10629 }
10630 #else
10631 static inline void tracing_set_default_clock(void) { }
10632 #endif
10633 
10634 __init static int late_trace_init(void)
10635 {
10636 	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10637 		static_key_disable(&tracepoint_printk_key.key);
10638 		tracepoint_printk = 0;
10639 	}
10640 
10641 	tracing_set_default_clock();
10642 	clear_boot_tracer();
10643 	return 0;
10644 }
10645 
10646 late_initcall_sync(late_trace_init);
10647