// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
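
/*
 * Every caller of func_prolog_dec() that gets a nonzero return must
 * pair it with an atomic_dec() of data->disabled once its event has
 * been written, as the three users below do:
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	...write the event...
 *	atomic_dec(&data->disabled);
 */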

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
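
/*
 * Example (the tracing_thresh file, in usecs, lives in tracefs,
 * normally /sys/kernel/tracing): after "echo 100 > tracing_thresh",
 * every critical section longer than 100 usecs is recorded, regardless
 * of the current max; with tracing_thresh left at zero, only a new
 * maximum (bigger than tracing_max_latency) is recorded.
 */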

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
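
/*
 * Note that the per-CPU tracing_cpu flag collapses nested disables into
 * a single measurement: while it is set, further start_critical_timing()
 * calls on that CPU return early, and the first stop_critical_timing()
 * clears the flag and reports the section that began at the outermost
 * disable.
 */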

/* start and stop critical timings, used to stop tracing (in idle) */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
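
/*
 * The idle path is the typical caller of these exports: it invokes
 * stop_critical_timings() before entering a low-power state, so a long
 * idle period with interrupts off is not reported as a latency, and
 * start_critical_timings() re-arms the measurement on wakeup.
 */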

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}
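
/*
 * tracer_hardirqs_on/off() are deliberately not static: they are called
 * from the trace_hardirqs_on()/trace_hardirqs_off() hooks (see
 * kernel/trace/trace_preemptirq.c), i.e. on every interrupt disable and
 * enable, which is why the early exits above need to stay cheap.
 */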

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
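
/*
 * Typical use from user space, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	echo irqsoff > /sys/kernel/tracing/current_tracer
 *	echo 1 > /sys/kernel/tracing/tracing_on
 *	...run the workload...
 *	cat /sys/kernel/tracing/tracing_max_latency
 *	cat /sys/kernel/tracing/trace
 */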
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
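
/*
 * With both bits set in trace_type, a critical section only ends once
 * *both* interrupts and preemption have been re-enabled, so this tracer
 * measures the union of overlapping irqs-off and preempt-off regions:
 *
 *	local_irq_disable();	// section starts
 *	preempt_disable();
 *	local_irq_enable();	// preemption still off: section continues
 *	preempt_enable();	// section ends here
 */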
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */