Lines matching references to tr; hits inside a function body also show the enclosing function and whether tr is an argument or a local variable there.
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) in dummy_set_flag() argument
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
276 struct trace_array *tr; in trace_array_get() local
280 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_get()
281 if (tr == this_tr) { in trace_array_get()
282 tr->ref++; in trace_array_get()
715 static inline void ftrace_trace_stack(struct trace_array *tr,
726 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
760 void tracer_tracing_on(struct trace_array *tr) in tracer_tracing_on() argument
762 if (tr->trace_buffer.buffer) in tracer_tracing_on()
763 ring_buffer_record_on(tr->trace_buffer.buffer); in tracer_tracing_on()
772 tr->buffer_disabled = 0; in tracer_tracing_on()
897 void tracing_snapshot_instance(struct trace_array *tr) in tracing_snapshot_instance() argument
899 struct tracer *tracer = tr->current_trace; in tracing_snapshot_instance()
908 if (!tr->allocated_snapshot) { in tracing_snapshot_instance()
923 update_max_tr(tr, current, smp_processor_id()); in tracing_snapshot_instance()
943 struct trace_array *tr = &global_trace; in tracing_snapshot() local
945 tracing_snapshot_instance(tr); in tracing_snapshot()
953 int tracing_alloc_snapshot_instance(struct trace_array *tr) in tracing_alloc_snapshot_instance() argument
957 if (!tr->allocated_snapshot) { in tracing_alloc_snapshot_instance()
960 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_alloc_snapshot_instance()
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS); in tracing_alloc_snapshot_instance()
965 tr->allocated_snapshot = true; in tracing_alloc_snapshot_instance()
971 static void free_snapshot(struct trace_array *tr) in free_snapshot() argument
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); in free_snapshot()
979 set_buffer_entries(&tr->max_buffer, 1); in free_snapshot()
980 tracing_reset_online_cpus(&tr->max_buffer); in free_snapshot()
981 tr->allocated_snapshot = false; in free_snapshot()
996 struct trace_array *tr = &global_trace; in tracing_alloc_snapshot() local
999 ret = tracing_alloc_snapshot_instance(tr); in tracing_alloc_snapshot()
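The snapshot entry points above (tracing_snapshot(), tracing_alloc_snapshot() and their _instance variants) are exported for other kernel code. A minimal usage sketch, assuming CONFIG_TRACER_SNAPSHOT=y and the declarations that kernels of this vintage keep in linux/kernel.h; the wrapper name is illustrative:

	#include <linux/kernel.h>

	static int example_arm_and_take_snapshot(void)
	{
		int ret;

		/* Allocate the max/snapshot buffer up front; may sleep. */
		ret = tracing_alloc_snapshot();
		if (ret < 0)
			return ret;

		/* Later, at the point of interest: swap the live buffer aside. */
		tracing_snapshot();
		return 0;
	}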
1048 void tracer_tracing_off(struct trace_array *tr) in tracer_tracing_off() argument
1050 if (tr->trace_buffer.buffer) in tracer_tracing_off()
1051 ring_buffer_record_off(tr->trace_buffer.buffer); in tracer_tracing_off()
1060 tr->buffer_disabled = 1; in tracer_tracing_off()
1091 bool tracer_tracing_is_on(struct trace_array *tr) in tracer_tracing_is_on() argument
1093 if (tr->trace_buffer.buffer) in tracer_tracing_is_on()
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer); in tracer_tracing_is_on()
1095 return !tr->buffer_disabled; in tracer_tracing_is_on()
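tracer_tracing_on()/tracer_tracing_off()/tracer_tracing_is_on() operate on one trace_array; the exported tracing_on()/tracing_off() helpers apply them to the global buffer. A sketch of the usual debugging pattern, where do_suspect_work() is a stand-in for whatever code is being inspected:

	tracing_off();            /* freeze the global ring buffer          */
	do_suspect_work();        /* placeholder for the code under study   */
	tracing_on();             /* resume recording                       */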
1173 bool trace_clock_in_ns(struct trace_array *tr) in trace_clock_in_ns() argument
1175 if (trace_clocks[tr->clock_id].in_ns) in trace_clock_in_ns()
1319 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) in __update_max_tr() argument
1321 struct trace_buffer *trace_buf = &tr->trace_buffer; in __update_max_tr()
1322 struct trace_buffer *max_buf = &tr->max_buffer; in __update_max_tr()
1329 max_data->saved_latency = tr->max_latency; in __update_max_tr()
1362 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) in update_max_tr() argument
1364 if (tr->stop_count) in update_max_tr()
1369 if (!tr->allocated_snapshot) { in update_max_tr()
1371 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr()
1375 arch_spin_lock(&tr->max_lock); in update_max_tr()
1378 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) in update_max_tr()
1379 ring_buffer_record_on(tr->max_buffer.buffer); in update_max_tr()
1381 ring_buffer_record_off(tr->max_buffer.buffer); in update_max_tr()
1383 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); in update_max_tr()
1385 __update_max_tr(tr, tsk, cpu); in update_max_tr()
1386 arch_spin_unlock(&tr->max_lock); in update_max_tr()
1398 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) in update_max_tr_single() argument
1402 if (tr->stop_count) in update_max_tr_single()
1406 if (!tr->allocated_snapshot) { in update_max_tr_single()
1408 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr_single()
1412 arch_spin_lock(&tr->max_lock); in update_max_tr_single()
1414 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); in update_max_tr_single()
1423 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, in update_max_tr_single()
1429 __update_max_tr(tr, tsk, cpu); in update_max_tr_single()
1430 arch_spin_unlock(&tr->max_lock); in update_max_tr_single()
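update_max_tr() and update_max_tr_single() are what the latency tracers call when a new worst case is seen; tr->max_lock serializes the buffer swap. Roughly, a sketch modeled on the irqsoff/wakeup tracers (delta and cpu are assumed locals, not lines from this listing):

	if (delta > tr->max_latency) {
		/* New worst-case latency: preserve the trace that led up to it. */
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}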
1469 struct trace_array *tr = &global_trace; in run_tracer_selftest() local
1470 struct tracer *saved_tracer = tr->current_trace; in run_tracer_selftest()
1491 tracing_reset_online_cpus(&tr->trace_buffer); in run_tracer_selftest()
1493 tr->current_trace = type; in run_tracer_selftest()
1499 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, in run_tracer_selftest()
1501 tr->allocated_snapshot = true; in run_tracer_selftest()
1507 ret = type->selftest(type, tr); in run_tracer_selftest()
1509 tr->current_trace = saved_tracer; in run_tracer_selftest()
1517 tracing_reset_online_cpus(&tr->trace_buffer); in run_tracer_selftest()
1521 tr->allocated_snapshot = false; in run_tracer_selftest()
1525 ring_buffer_resize(tr->max_buffer.buffer, 1, in run_tracer_selftest()
1581 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1714 struct trace_array *tr; in tracing_reset_all_online_cpus() local
1716 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in tracing_reset_all_online_cpus()
1717 if (!tr->clear_trace) in tracing_reset_all_online_cpus()
1719 tr->clear_trace = false; in tracing_reset_all_online_cpus()
1720 tracing_reset_online_cpus(&tr->trace_buffer); in tracing_reset_all_online_cpus()
1722 tracing_reset_online_cpus(&tr->max_buffer); in tracing_reset_all_online_cpus()
1845 static void tracing_start_tr(struct trace_array *tr) in tracing_start_tr() argument
1854 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_start_tr()
1857 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_start_tr()
1859 if (--tr->stop_count) { in tracing_start_tr()
1860 if (tr->stop_count < 0) { in tracing_start_tr()
1863 tr->stop_count = 0; in tracing_start_tr()
1868 buffer = tr->trace_buffer.buffer; in tracing_start_tr()
1873 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_start_tr()
1910 static void tracing_stop_tr(struct trace_array *tr) in tracing_stop_tr() argument
1916 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_stop_tr()
1919 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_stop_tr()
1920 if (tr->stop_count++) in tracing_stop_tr()
1923 buffer = tr->trace_buffer.buffer; in tracing_stop_tr()
1928 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_stop_tr()
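tracing_stop_tr() and tracing_start_tr() nest through tr->stop_count: stop increments it, start decrements it, and recording is only re-enabled once the count falls back to zero. A sketch of that internal contract:

	tracing_stop_tr(tr);     /* stop_count 0 -> 1, recording disabled      */
	tracing_stop_tr(tr);     /* stop_count 1 -> 2                          */
	tracing_start_tr(tr);    /* stop_count 2 -> 1, still disabled          */
	tracing_start_tr(tr);    /* stop_count 1 -> 0, recording enabled again */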
2283 *current_rb = trace_file->tr->trace_buffer.buffer; in trace_event_buffer_lock_reserve()
2399 void trace_buffer_unlock_commit_regs(struct trace_array *tr, in trace_buffer_unlock_commit_regs() argument
2413 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); in trace_buffer_unlock_commit_regs()
2551 trace_function(struct trace_array *tr, in trace_function() argument
2556 struct ring_buffer *buffer = tr->trace_buffer.buffer; in trace_function()
2675 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
2680 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) in ftrace_trace_stack()
2686 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, in __trace_stack() argument
2689 struct ring_buffer *buffer = tr->trace_buffer.buffer; in __trace_stack()
2786 static void __trace_userstack(struct trace_array *tr, unsigned long flags) in __trace_userstack() argument
2788 ftrace_trace_userstack(tr, flags, preempt_count()); in __trace_userstack()
2909 struct trace_array *tr = &global_trace; in trace_vbprintk() local
2937 buffer = tr->trace_buffer.buffer; in trace_vbprintk()
2949 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); in trace_vbprintk()
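trace_vbprintk() is the backend trace_printk() reaches when its format string is a compile-time constant; as the hit above shows, it can also append a stack trace. A typical call site (sketch; irq and delta_ns are placeholder variables):

	trace_printk("handled irq %d in %llu ns\n", irq, delta_ns);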
3019 int trace_array_vprintk(struct trace_array *tr, in trace_array_vprintk() argument
3022 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); in trace_array_vprintk()
3026 int trace_array_printk(struct trace_array *tr, in trace_array_printk() argument
3036 ret = trace_array_vprintk(tr, ip, fmt, ap); in trace_array_printk()
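trace_array_printk() is the per-instance counterpart: it takes an explicit struct trace_array and writes into that instance's buffer. A sketch, assuming the caller already has a valid tr and that id and state are its own variables:

	trace_array_printk(tr, _THIS_IP_, "widget %d entered state %d\n", id, state);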
3243 struct trace_array *tr = iter->tr; in s_start() local
3256 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) in s_start()
3257 *iter->trace = *tr->current_trace; in s_start()
3467 struct trace_array *tr = iter->tr; in test_cpu_buff_start() local
3469 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) in test_cpu_buff_start()
3493 struct trace_array *tr = iter->tr; in print_trace_fmt() local
3495 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); in print_trace_fmt()
3505 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_trace_fmt()
3525 struct trace_array *tr = iter->tr; in print_raw_fmt() local
3532 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) in print_raw_fmt()
3550 struct trace_array *tr = iter->tr; in print_hex_fmt() local
3558 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_hex_fmt()
3580 struct trace_array *tr = iter->tr; in print_bin_fmt() local
3587 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_bin_fmt()
3636 struct trace_array *tr = iter->tr; in print_trace_line() local
3637 unsigned long trace_flags = tr->trace_flags; in print_trace_line()
3683 struct trace_array *tr = iter->tr; in trace_latency_header() local
3692 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) in trace_latency_header()
3699 struct trace_array *tr = iter->tr; in trace_default_header() local
3700 unsigned long trace_flags = tr->trace_flags; in trace_default_header()
3760 if (iter->tr->allocated_snapshot) in print_snapshot_help()
3782 if (iter->tr) { in s_show()
3841 struct trace_array *tr = inode->i_private; in __tracing_open() local
3866 *iter->trace = *tr->current_trace; in __tracing_open()
3871 iter->tr = tr; in __tracing_open()
3875 if (tr->current_trace->print_max || snapshot) in __tracing_open()
3876 iter->trace_buffer = &tr->max_buffer; in __tracing_open()
3879 iter->trace_buffer = &tr->trace_buffer; in __tracing_open()
3894 if (trace_clocks[tr->clock_id].in_ns) in __tracing_open()
3899 tracing_stop_tr(tr); in __tracing_open()
3953 struct trace_array *tr = inode->i_private; in tracing_open_generic_tr() local
3958 if (trace_array_get(tr) < 0) in tracing_open_generic_tr()
3968 struct trace_array *tr = inode->i_private; in tracing_release() local
3974 trace_array_put(tr); in tracing_release()
3992 tracing_start_tr(tr); in tracing_release()
3994 __trace_array_put(tr); in tracing_release()
4009 struct trace_array *tr = inode->i_private; in tracing_release_generic_tr() local
4011 trace_array_put(tr); in tracing_release_generic_tr()
4017 struct trace_array *tr = inode->i_private; in tracing_single_release_tr() local
4019 trace_array_put(tr); in tracing_single_release_tr()
4026 struct trace_array *tr = inode->i_private; in tracing_open() local
4030 if (trace_array_get(tr) < 0) in tracing_open()
4036 struct trace_buffer *trace_buf = &tr->trace_buffer; in tracing_open()
4039 if (tr->current_trace->print_max) in tracing_open()
4040 trace_buf = &tr->max_buffer; in tracing_open()
4053 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open()
4058 trace_array_put(tr); in tracing_open()
4069 trace_ok_for_array(struct tracer *t, struct trace_array *tr) in trace_ok_for_array() argument
4071 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; in trace_ok_for_array()
4076 get_tracer_for_array(struct trace_array *tr, struct tracer *t) in get_tracer_for_array() argument
4078 while (t && !trace_ok_for_array(t, tr)) in get_tracer_for_array()
4087 struct trace_array *tr = m->private; in t_next() local
4093 t = get_tracer_for_array(tr, t->next); in t_next()
4100 struct trace_array *tr = m->private; in t_start() local
4106 t = get_tracer_for_array(tr, trace_types); in t_start()
4143 struct trace_array *tr = inode->i_private; in show_traces_open() local
4155 m->private = tr; in show_traces_open()
4198 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_read() local
4203 cpumask_pr_args(tr->tracing_cpumask)) + 1; in tracing_cpumask_read()
4209 cpumask_pr_args(tr->tracing_cpumask)); in tracing_cpumask_read()
4226 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_write() local
4238 arch_spin_lock(&tr->max_lock); in tracing_cpumask_write()
4244 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_cpumask_write()
4246 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
4247 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); in tracing_cpumask_write()
4249 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_cpumask_write()
4251 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
4252 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); in tracing_cpumask_write()
4255 arch_spin_unlock(&tr->max_lock); in tracing_cpumask_write()
4258 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); in tracing_cpumask_write()
4280 struct trace_array *tr = m->private; in tracing_trace_options_show() local
4285 tracer_flags = tr->current_trace->flags->val; in tracing_trace_options_show()
4286 trace_opts = tr->current_trace->flags->opts; in tracing_trace_options_show()
4289 if (tr->trace_flags & (1 << i)) in tracing_trace_options_show()
4306 static int __set_tracer_option(struct trace_array *tr, in __set_tracer_option() argument
4313 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); in __set_tracer_option()
4325 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) in set_tracer_option() argument
4327 struct tracer *trace = tr->current_trace; in set_tracer_option()
4336 return __set_tracer_option(tr, trace->flags, opts, neg); in set_tracer_option()
4351 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) in set_tracer_flag() argument
4354 if (!!(tr->trace_flags & mask) == !!enabled) in set_tracer_flag()
4358 if (tr->current_trace->flag_changed) in set_tracer_flag()
4359 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) in set_tracer_flag()
4363 tr->trace_flags |= mask; in set_tracer_flag()
4365 tr->trace_flags &= ~mask; in set_tracer_flag()
4376 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; in set_tracer_flag()
4384 trace_event_follow_fork(tr, enabled); in set_tracer_flag()
4387 ftrace_pid_follow_fork(tr, enabled); in set_tracer_flag()
4390 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); in set_tracer_flag()
4392 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); in set_tracer_flag()
4404 static int trace_set_options(struct trace_array *tr, char *option) in trace_set_options() argument
4423 ret = set_tracer_option(tr, cmp, neg); in trace_set_options()
4425 ret = set_tracer_flag(tr, 1 << ret, !neg); in trace_set_options()
4464 struct trace_array *tr = m->private; in tracing_trace_options_write() local
4476 ret = trace_set_options(tr, buf); in tracing_trace_options_write()
4487 struct trace_array *tr = inode->i_private; in tracing_trace_options_open() local
4493 if (trace_array_get(tr) < 0) in tracing_trace_options_open()
4498 trace_array_put(tr); in tracing_trace_options_open()
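Writes to the trace_options file land in tracing_trace_options_write() and from there in trace_set_options()/set_tracer_flag() above. From userspace that is just a write to tracefs; a minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing:

	#include <fcntl.h>
	#include <unistd.h>

	static void disable_stacktrace_option(void)
	{
		int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);

		if (fd < 0)
			return;
		write(fd, "nostacktrace", 12);	/* clears TRACE_ITER_STACKTRACE */
		close(fd);
	}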
5141 struct trace_array *tr = filp->private_data; in tracing_set_trace_read() local
5146 r = sprintf(buf, "%s\n", tr->current_trace->name); in tracing_set_trace_read()
5152 int tracer_init(struct tracer *t, struct trace_array *tr) in tracer_init() argument
5154 tracing_reset_online_cpus(&tr->trace_buffer); in tracer_init()
5155 return t->init(tr); in tracer_init()
5194 static int __tracing_resize_ring_buffer(struct trace_array *tr, in __tracing_resize_ring_buffer() argument
5207 if (!tr->trace_buffer.buffer) in __tracing_resize_ring_buffer()
5210 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5215 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || in __tracing_resize_ring_buffer()
5216 !tr->current_trace->use_max_tr) in __tracing_resize_ring_buffer()
5219 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5221 int r = resize_buffer_duplicate_size(&tr->trace_buffer, in __tracing_resize_ring_buffer()
5222 &tr->trace_buffer, cpu); in __tracing_resize_ring_buffer()
5245 set_buffer_entries(&tr->max_buffer, size); in __tracing_resize_ring_buffer()
5247 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; in __tracing_resize_ring_buffer()
5253 set_buffer_entries(&tr->trace_buffer, size); in __tracing_resize_ring_buffer()
5255 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; in __tracing_resize_ring_buffer()
5260 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, in tracing_resize_ring_buffer() argument
5275 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); in tracing_resize_ring_buffer()
5312 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5318 static void tracing_set_nop(struct trace_array *tr) in tracing_set_nop() argument
5320 if (tr->current_trace == &nop_trace) in tracing_set_nop()
5323 tr->current_trace->enabled--; in tracing_set_nop()
5325 if (tr->current_trace->reset) in tracing_set_nop()
5326 tr->current_trace->reset(tr); in tracing_set_nop()
5328 tr->current_trace = &nop_trace; in tracing_set_nop()
5331 static void add_tracer_options(struct trace_array *tr, struct tracer *t) in add_tracer_options() argument
5334 if (!tr->dir) in add_tracer_options()
5337 create_trace_option_files(tr, t); in add_tracer_options()
5340 static int tracing_set_tracer(struct trace_array *tr, const char *buf) in tracing_set_tracer() argument
5351 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, in tracing_set_tracer()
5366 if (t == tr->current_trace) in tracing_set_tracer()
5377 if (!trace_ok_for_array(t, tr)) { in tracing_set_tracer()
5383 if (tr->current_trace->ref) { in tracing_set_tracer()
5390 tr->current_trace->enabled--; in tracing_set_tracer()
5392 if (tr->current_trace->reset) in tracing_set_tracer()
5393 tr->current_trace->reset(tr); in tracing_set_tracer()
5396 tr->current_trace = &nop_trace; in tracing_set_tracer()
5399 had_max_tr = tr->allocated_snapshot; in tracing_set_tracer()
5410 free_snapshot(tr); in tracing_set_tracer()
5416 ret = tracing_alloc_snapshot_instance(tr); in tracing_set_tracer()
5423 ret = tracer_init(t, tr); in tracing_set_tracer()
5428 tr->current_trace = t; in tracing_set_tracer()
5429 tr->current_trace->enabled++; in tracing_set_tracer()
5430 trace_branch_enable(tr); in tracing_set_tracer()
5441 struct trace_array *tr = filp->private_data; in tracing_set_trace_write() local
5461 err = tracing_set_tracer(tr, buf); in tracing_set_trace_write()
5511 struct trace_array *tr = filp->private_data; in tracing_thresh_write() local
5519 if (tr->current_trace->update_thresh) { in tracing_thresh_write()
5520 ret = tr->current_trace->update_thresh(tr); in tracing_thresh_write()
5552 struct trace_array *tr = inode->i_private; in tracing_open_pipe() local
5559 if (trace_array_get(tr) < 0) in tracing_open_pipe()
5568 __trace_array_put(tr); in tracing_open_pipe()
5573 iter->trace = tr->current_trace; in tracing_open_pipe()
5583 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open_pipe()
5587 if (trace_clocks[tr->clock_id].in_ns) in tracing_open_pipe()
5590 iter->tr = tr; in tracing_open_pipe()
5591 iter->trace_buffer = &tr->trace_buffer; in tracing_open_pipe()
5601 tr->current_trace->ref++; in tracing_open_pipe()
5609 __trace_array_put(tr); in tracing_open_pipe()
5617 struct trace_array *tr = inode->i_private; in tracing_release_pipe() local
5621 tr->current_trace->ref--; in tracing_release_pipe()
5632 trace_array_put(tr); in tracing_release_pipe()
5640 struct trace_array *tr = iter->tr; in trace_poll() local
5646 if (tr->trace_flags & TRACE_ITER_BLOCK) in trace_poll()
5685 if (!tracer_tracing_is_on(iter->tr) && iter->pos) in tracing_wait_pipe()
5951 struct trace_array *tr = inode->i_private; in tracing_entries_read() local
5969 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; in tracing_entries_read()
5970 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { in tracing_entries_read()
5986 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); in tracing_entries_read()
5999 struct trace_array *tr = inode->i_private; in tracing_entries_write() local
6013 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); in tracing_entries_write()
6026 struct trace_array *tr = filp->private_data; in tracing_total_entries_read() local
6033 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; in tracing_total_entries_read()
6063 struct trace_array *tr = inode->i_private; in tracing_free_buffer_release() local
6066 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) in tracing_free_buffer_release()
6067 tracer_tracing_off(tr); in tracing_free_buffer_release()
6069 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); in tracing_free_buffer_release()
6071 trace_array_put(tr); in tracing_free_buffer_release()
6080 struct trace_array *tr = filp->private_data; in tracing_mark_write() local
6097 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_write()
6112 buffer = tr->trace_buffer.buffer; in tracing_mark_write()
6131 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { in tracing_mark_write()
6134 tt = event_triggers_call(tr->trace_marker_file, entry, event); in tracing_mark_write()
6146 event_triggers_post_call(tr->trace_marker_file, tt); in tracing_mark_write()
6161 struct trace_array *tr = filp->private_data; in tracing_mark_raw_write() local
6176 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_raw_write()
6193 buffer = tr->trace_buffer.buffer; in tracing_mark_raw_write()
6220 struct trace_array *tr = m->private; in tracing_clock_show() local
6226 i == tr->clock_id ? "[" : "", trace_clocks[i].name, in tracing_clock_show()
6227 i == tr->clock_id ? "]" : ""); in tracing_clock_show()
6233 int tracing_set_clock(struct trace_array *tr, const char *clockstr) in tracing_set_clock() argument
6246 tr->clock_id = i; in tracing_set_clock()
6248 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
6254 tracing_reset_online_cpus(&tr->trace_buffer); in tracing_set_clock()
6257 if (tr->max_buffer.buffer) in tracing_set_clock()
6258 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
6259 tracing_reset_online_cpus(&tr->max_buffer); in tracing_set_clock()
6271 struct trace_array *tr = m->private; in tracing_clock_write() local
6286 ret = tracing_set_clock(tr, clockstr); in tracing_clock_write()
6297 struct trace_array *tr = inode->i_private; in tracing_clock_open() local
6303 if (trace_array_get(tr)) in tracing_clock_open()
6308 trace_array_put(tr); in tracing_clock_open()
6315 struct trace_array *tr = m->private; in tracing_time_stamp_mode_show() local
6319 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) in tracing_time_stamp_mode_show()
6331 struct trace_array *tr = inode->i_private; in tracing_time_stamp_mode_open() local
6337 if (trace_array_get(tr)) in tracing_time_stamp_mode_open()
6342 trace_array_put(tr); in tracing_time_stamp_mode_open()
6347 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) in tracing_set_time_stamp_abs() argument
6353 if (abs && tr->time_stamp_abs_ref++) in tracing_set_time_stamp_abs()
6357 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { in tracing_set_time_stamp_abs()
6362 if (--tr->time_stamp_abs_ref) in tracing_set_time_stamp_abs()
6366 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); in tracing_set_time_stamp_abs()
6369 if (tr->max_buffer.buffer) in tracing_set_time_stamp_abs()
6370 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); in tracing_set_time_stamp_abs()
6388 struct trace_array *tr = inode->i_private; in tracing_snapshot_open() local
6393 if (trace_array_get(tr) < 0) in tracing_snapshot_open()
6413 iter->tr = tr; in tracing_snapshot_open()
6414 iter->trace_buffer = &tr->max_buffer; in tracing_snapshot_open()
6421 trace_array_put(tr); in tracing_snapshot_open()
6432 struct trace_array *tr = iter->tr; in tracing_snapshot_write() local
6446 if (tr->current_trace->use_max_tr) { in tracing_snapshot_write()
6457 if (tr->allocated_snapshot) in tracing_snapshot_write()
6458 free_snapshot(tr); in tracing_snapshot_write()
6468 if (!tr->allocated_snapshot) { in tracing_snapshot_write()
6469 ret = tracing_alloc_snapshot_instance(tr); in tracing_snapshot_write()
6476 update_max_tr(tr, current, smp_processor_id()); in tracing_snapshot_write()
6478 update_max_tr_single(tr, current, iter->cpu_file); in tracing_snapshot_write()
6482 if (tr->allocated_snapshot) { in tracing_snapshot_write()
6484 tracing_reset_online_cpus(&tr->max_buffer); in tracing_snapshot_write()
6486 tracing_reset(&tr->max_buffer, iter->cpu_file); in tracing_snapshot_write()
6542 info->iter.trace_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
6653 struct trace_array *tr = inode->i_private; in tracing_buffers_open() local
6660 if (trace_array_get(tr) < 0) in tracing_buffers_open()
6665 trace_array_put(tr); in tracing_buffers_open()
6671 info->iter.tr = tr; in tracing_buffers_open()
6673 info->iter.trace = tr->current_trace; in tracing_buffers_open()
6674 info->iter.trace_buffer = &tr->trace_buffer; in tracing_buffers_open()
6681 tr->current_trace->ref++; in tracing_buffers_open()
6687 trace_array_put(tr); in tracing_buffers_open()
6714 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_read()
6782 iter->tr->current_trace->ref--; in tracing_buffers_release()
6784 __trace_array_put(iter->tr); in tracing_buffers_release()
6871 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_splice_read()
6973 struct trace_array *tr = inode->i_private; in tracing_stats_read() local
6974 struct trace_buffer *trace_buf = &tr->trace_buffer; in tracing_stats_read()
6999 if (trace_clocks[tr->clock_id].in_ns) { in tracing_stats_read()
7065 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_snapshot() argument
7068 tracing_snapshot_instance(tr); in ftrace_snapshot()
7073 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_count_snapshot() argument
7090 tracing_snapshot_instance(tr); in ftrace_count_snapshot()
7116 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_init() argument
7132 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_free() argument
7160 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, in ftrace_trace_snapshot_callback() argument
7168 if (!tr) in ftrace_trace_snapshot_callback()
7178 return unregister_ftrace_function_probe_func(glob+1, tr, ops); in ftrace_trace_snapshot_callback()
7197 ret = tracing_alloc_snapshot_instance(tr); in ftrace_trace_snapshot_callback()
7201 ret = register_ftrace_function_probe(glob, tr, ops, count); in ftrace_trace_snapshot_callback()
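ftrace_trace_snapshot_callback() implements the '<function>:snapshot[:count]' command accepted by set_ftrace_filter, wiring a function hit to tracing_snapshot_instance(). A userspace sketch; the traced function name and the path are only examples:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static void arm_snapshot_on_schedule(void)
	{
		const char *cmd = "schedule:snapshot:1";
		int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

		if (fd < 0)
			return;
		write(fd, cmd, strlen(cmd));
		close(fd);
	}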
7220 static struct dentry *tracing_get_dentry(struct trace_array *tr) in tracing_get_dentry() argument
7222 if (WARN_ON(!tr->dir)) in tracing_get_dentry()
7226 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_get_dentry()
7230 return tr->dir; in tracing_get_dentry()
7233 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) in tracing_dentry_percpu() argument
7237 if (tr->percpu_dir) in tracing_dentry_percpu()
7238 return tr->percpu_dir; in tracing_dentry_percpu()
7240 d_tracer = tracing_get_dentry(tr); in tracing_dentry_percpu()
7244 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); in tracing_dentry_percpu()
7246 WARN_ONCE(!tr->percpu_dir, in tracing_dentry_percpu()
7249 return tr->percpu_dir; in tracing_dentry_percpu()
7264 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) in tracing_init_tracefs_percpu() argument
7266 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); in tracing_init_tracefs_percpu()
7282 tr, cpu, &tracing_pipe_fops); in tracing_init_tracefs_percpu()
7286 tr, cpu, &tracing_fops); in tracing_init_tracefs_percpu()
7289 tr, cpu, &tracing_buffers_fops); in tracing_init_tracefs_percpu()
7292 tr, cpu, &tracing_stats_fops); in tracing_init_tracefs_percpu()
7295 tr, cpu, &tracing_entries_fops); in tracing_init_tracefs_percpu()
7299 tr, cpu, &snapshot_fops); in tracing_init_tracefs_percpu()
7302 tr, cpu, &snapshot_raw_fops); in tracing_init_tracefs_percpu()
7343 ret = __set_tracer_option(topt->tr, topt->flags, in trace_options_write()
7401 struct trace_array *tr; in trace_options_core_read() local
7405 get_tr_index(tr_index, &tr, &index); in trace_options_core_read()
7407 if (tr->trace_flags & (1 << index)) in trace_options_core_read()
7420 struct trace_array *tr; in trace_options_core_write() local
7425 get_tr_index(tr_index, &tr, &index); in trace_options_core_write()
7435 ret = set_tracer_flag(tr, 1 << index, val); in trace_options_core_write()
7469 static struct dentry *trace_options_init_dentry(struct trace_array *tr) in trace_options_init_dentry() argument
7473 if (tr->options) in trace_options_init_dentry()
7474 return tr->options; in trace_options_init_dentry()
7476 d_tracer = tracing_get_dentry(tr); in trace_options_init_dentry()
7480 tr->options = tracefs_create_dir("options", d_tracer); in trace_options_init_dentry()
7481 if (!tr->options) { in trace_options_init_dentry()
7486 return tr->options; in trace_options_init_dentry()
7490 create_trace_option_file(struct trace_array *tr, in create_trace_option_file() argument
7497 t_options = trace_options_init_dentry(tr); in create_trace_option_file()
7503 topt->tr = tr; in create_trace_option_file()
7511 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) in create_trace_option_files() argument
7532 if (!trace_ok_for_array(tracer, tr)) in create_trace_option_files()
7535 for (i = 0; i < tr->nr_topts; i++) { in create_trace_option_files()
7537 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) in create_trace_option_files()
7550 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), in create_trace_option_files()
7557 tr->topts = tr_topts; in create_trace_option_files()
7558 tr->topts[tr->nr_topts].tracer = tracer; in create_trace_option_files()
7559 tr->topts[tr->nr_topts].topts = topts; in create_trace_option_files()
7560 tr->nr_topts++; in create_trace_option_files()
7563 create_trace_option_file(tr, &topts[cnt], flags, in create_trace_option_files()
7572 create_trace_option_core_file(struct trace_array *tr, in create_trace_option_core_file() argument
7577 t_options = trace_options_init_dentry(tr); in create_trace_option_core_file()
7582 (void *)&tr->trace_flags_index[index], in create_trace_option_core_file()
7586 static void create_trace_options_dir(struct trace_array *tr) in create_trace_options_dir() argument
7589 bool top_level = tr == &global_trace; in create_trace_options_dir()
7592 t_options = trace_options_init_dentry(tr); in create_trace_options_dir()
7599 create_trace_option_core_file(tr, trace_options[i], i); in create_trace_options_dir()
7607 struct trace_array *tr = filp->private_data; in rb_simple_read() local
7611 r = tracer_tracing_is_on(tr); in rb_simple_read()
7621 struct trace_array *tr = filp->private_data; in rb_simple_write() local
7622 struct ring_buffer *buffer = tr->trace_buffer.buffer; in rb_simple_write()
7632 if (!!val == tracer_tracing_is_on(tr)) { in rb_simple_write()
7635 tracer_tracing_on(tr); in rb_simple_write()
7636 if (tr->current_trace->start) in rb_simple_write()
7637 tr->current_trace->start(tr); in rb_simple_write()
7639 tracer_tracing_off(tr); in rb_simple_write()
7640 if (tr->current_trace->stop) in rb_simple_write()
7641 tr->current_trace->stop(tr); in rb_simple_write()
7662 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7665 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) in allocate_trace_buffer() argument
7669 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; in allocate_trace_buffer()
7671 buf->tr = tr; in allocate_trace_buffer()
7685 set_buffer_entries(&tr->trace_buffer, in allocate_trace_buffer()
7686 ring_buffer_size(tr->trace_buffer.buffer, 0)); in allocate_trace_buffer()
7691 static int allocate_trace_buffers(struct trace_array *tr, int size) in allocate_trace_buffers() argument
7695 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); in allocate_trace_buffers()
7700 ret = allocate_trace_buffer(tr, &tr->max_buffer, in allocate_trace_buffers()
7703 ring_buffer_free(tr->trace_buffer.buffer); in allocate_trace_buffers()
7704 tr->trace_buffer.buffer = NULL; in allocate_trace_buffers()
7705 free_percpu(tr->trace_buffer.data); in allocate_trace_buffers()
7706 tr->trace_buffer.data = NULL; in allocate_trace_buffers()
7709 tr->allocated_snapshot = allocate_snapshot; in allocate_trace_buffers()
7730 static void free_trace_buffers(struct trace_array *tr) in free_trace_buffers() argument
7732 if (!tr) in free_trace_buffers()
7735 free_trace_buffer(&tr->trace_buffer); in free_trace_buffers()
7738 free_trace_buffer(&tr->max_buffer); in free_trace_buffers()
7742 static void init_trace_flags_index(struct trace_array *tr) in init_trace_flags_index() argument
7748 tr->trace_flags_index[i] = i; in init_trace_flags_index()
7751 static void __update_tracer_options(struct trace_array *tr) in __update_tracer_options() argument
7756 add_tracer_options(tr, t); in __update_tracer_options()
7759 static void update_tracer_options(struct trace_array *tr) in update_tracer_options() argument
7762 __update_tracer_options(tr); in update_tracer_options()
7768 struct trace_array *tr; in instance_mkdir() local
7775 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in instance_mkdir()
7776 if (tr->name && strcmp(tr->name, name) == 0) in instance_mkdir()
7781 tr = kzalloc(sizeof(*tr), GFP_KERNEL); in instance_mkdir()
7782 if (!tr) in instance_mkdir()
7785 tr->name = kstrdup(name, GFP_KERNEL); in instance_mkdir()
7786 if (!tr->name) in instance_mkdir()
7789 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) in instance_mkdir()
7792 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; in instance_mkdir()
7794 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); in instance_mkdir()
7796 raw_spin_lock_init(&tr->start_lock); in instance_mkdir()
7798 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in instance_mkdir()
7800 tr->current_trace = &nop_trace; in instance_mkdir()
7802 INIT_LIST_HEAD(&tr->systems); in instance_mkdir()
7803 INIT_LIST_HEAD(&tr->events); in instance_mkdir()
7804 INIT_LIST_HEAD(&tr->hist_vars); in instance_mkdir()
7806 if (allocate_trace_buffers(tr, trace_buf_size) < 0) in instance_mkdir()
7809 tr->dir = tracefs_create_dir(name, trace_instance_dir); in instance_mkdir()
7810 if (!tr->dir) in instance_mkdir()
7813 ret = event_trace_add_tracer(tr->dir, tr); in instance_mkdir()
7815 tracefs_remove_recursive(tr->dir); in instance_mkdir()
7819 ftrace_init_trace_array(tr); in instance_mkdir()
7821 init_tracer_tracefs(tr, tr->dir); in instance_mkdir()
7822 init_trace_flags_index(tr); in instance_mkdir()
7823 __update_tracer_options(tr); in instance_mkdir()
7825 list_add(&tr->list, &ftrace_trace_arrays); in instance_mkdir()
7833 free_trace_buffers(tr); in instance_mkdir()
7834 free_cpumask_var(tr->tracing_cpumask); in instance_mkdir()
7835 kfree(tr->name); in instance_mkdir()
7836 kfree(tr); in instance_mkdir()
7848 struct trace_array *tr; in instance_rmdir() local
7857 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in instance_rmdir()
7858 if (tr->name && strcmp(tr->name, name) == 0) { in instance_rmdir()
7867 if (tr->ref || (tr->current_trace && tr->current_trace->ref)) in instance_rmdir()
7870 list_del(&tr->list); in instance_rmdir()
7875 set_tracer_flag(tr, 1 << i, 0); in instance_rmdir()
7878 tracing_set_nop(tr); in instance_rmdir()
7879 clear_ftrace_function_probes(tr); in instance_rmdir()
7880 event_trace_del_tracer(tr); in instance_rmdir()
7881 ftrace_clear_pids(tr); in instance_rmdir()
7882 ftrace_destroy_function_files(tr); in instance_rmdir()
7883 tracefs_remove_recursive(tr->dir); in instance_rmdir()
7884 free_trace_buffers(tr); in instance_rmdir()
7886 for (i = 0; i < tr->nr_topts; i++) { in instance_rmdir()
7887 kfree(tr->topts[i].topts); in instance_rmdir()
7889 kfree(tr->topts); in instance_rmdir()
7891 free_cpumask_var(tr->tracing_cpumask); in instance_rmdir()
7892 kfree(tr->name); in instance_rmdir()
7893 kfree(tr); in instance_rmdir()
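instance_mkdir() and instance_rmdir() back the instances/ directory: creating a subdirectory allocates a fresh trace_array with its own buffers and files, and removing it tears everything down (refused while the instance or its current tracer is still referenced). From userspace (sketch; the instance name and tracefs path are assumptions):

	#include <sys/stat.h>
	#include <unistd.h>

	static void create_and_remove_instance(void)
	{
		mkdir("/sys/kernel/tracing/instances/foo", 0755);  /* -> instance_mkdir() */
		/* ... use the new instance's trace, trace_options, etc. ... */
		rmdir("/sys/kernel/tracing/instances/foo");        /* -> instance_rmdir() */
	}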
7914 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) in init_tracer_tracefs() argument
7920 tr, &show_traces_fops); in init_tracer_tracefs()
7923 tr, &set_tracer_fops); in init_tracer_tracefs()
7926 tr, &tracing_cpumask_fops); in init_tracer_tracefs()
7929 tr, &tracing_iter_fops); in init_tracer_tracefs()
7932 tr, &tracing_fops); in init_tracer_tracefs()
7935 tr, &tracing_pipe_fops); in init_tracer_tracefs()
7938 tr, &tracing_entries_fops); in init_tracer_tracefs()
7941 tr, &tracing_total_entries_fops); in init_tracer_tracefs()
7944 tr, &tracing_free_buffer_fops); in init_tracer_tracefs()
7947 tr, &tracing_mark_fops); in init_tracer_tracefs()
7949 file = __find_event_file(tr, "ftrace", "print"); in init_tracer_tracefs()
7953 tr->trace_marker_file = file; in init_tracer_tracefs()
7956 tr, &tracing_mark_raw_fops); in init_tracer_tracefs()
7958 trace_create_file("trace_clock", 0644, d_tracer, tr, in init_tracer_tracefs()
7962 tr, &rb_simple_fops); in init_tracer_tracefs()
7964 trace_create_file("timestamp_mode", 0444, d_tracer, tr, in init_tracer_tracefs()
7967 create_trace_options_dir(tr); in init_tracer_tracefs()
7971 &tr->max_latency, &tracing_max_lat_fops); in init_tracer_tracefs()
7974 if (ftrace_create_function_files(tr, d_tracer)) in init_tracer_tracefs()
7979 tr, &snapshot_fops); in init_tracer_tracefs()
7983 tracing_init_tracefs_percpu(tr, cpu); in init_tracer_tracefs()
7985 ftrace_init_tracefs(tr, d_tracer); in init_tracer_tracefs()
8019 struct trace_array *tr = &global_trace; in tracing_init_dentry() local
8022 if (tr->dir) in tracing_init_dentry()
8036 tr->dir = debugfs_create_automount("tracing", NULL, in tracing_init_dentry()
8038 if (!tr->dir) { in tracing_init_dentry()
8250 iter->tr = &global_trace; in trace_init_global_iter()
8251 iter->trace = iter->tr->current_trace; in trace_init_global_iter()
8263 if (trace_clocks[iter->tr->clock_id].in_ns) in trace_init_global_iter()
8272 struct trace_array *tr = &global_trace; in ftrace_dump() local
8303 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
8306 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
8369 tr->trace_flags |= old_userobj; in ftrace_dump()
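ftrace_dump() spills the global buffer to the console, typically from panic or oops paths; the hits above show it temporarily clearing TRACE_ITER_SYM_USEROBJ and restoring it afterwards. The usual call is simply:

	ftrace_dump(DUMP_ALL);	/* dump every CPU's buffer to the console */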