Lines matching refs: tr. Each hit below shows the line number in the source file, the matching source line, and the enclosing function; a trailing "argument" or "local" marks lines where tr is a function parameter or a local variable declaration.
86 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) in dummy_set_flag() argument
165 int tracing_set_tracer(struct trace_array *tr, const char *buf);
166 static void ftrace_trace_userstack(struct trace_array *tr,
422 struct trace_array *tr; in trace_array_get() local
426 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_get()
427 if (tr == this_tr) { in trace_array_get()
428 tr->ref++; in trace_array_get()
463 int tracing_check_open_get_tr(struct trace_array *tr) in tracing_check_open_get_tr() argument
474 if (tr && trace_array_get(tr) < 0) in tracing_check_open_get_tr()
899 static inline void ftrace_trace_stack(struct trace_array *tr,
910 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
943 void tracer_tracing_on(struct trace_array *tr) in tracer_tracing_on() argument
945 if (tr->array_buffer.buffer) in tracer_tracing_on()
946 ring_buffer_record_on(tr->array_buffer.buffer); in tracer_tracing_on()
955 tr->buffer_disabled = 0; in tracer_tracing_on()
1090 static void tracing_snapshot_instance_cond(struct trace_array *tr, in tracing_snapshot_instance_cond() argument
1093 struct tracer *tracer = tr->current_trace; in tracing_snapshot_instance_cond()
1102 if (!tr->allocated_snapshot) { in tracing_snapshot_instance_cond()
1117 update_max_tr(tr, current, smp_processor_id(), cond_data); in tracing_snapshot_instance_cond()
1121 void tracing_snapshot_instance(struct trace_array *tr) in tracing_snapshot_instance() argument
1123 tracing_snapshot_instance_cond(tr, NULL); in tracing_snapshot_instance()
1142 struct trace_array *tr = &global_trace; in tracing_snapshot() local
1144 tracing_snapshot_instance(tr); in tracing_snapshot()
1161 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) in tracing_snapshot_cond() argument
1163 tracing_snapshot_instance_cond(tr, cond_data); in tracing_snapshot_cond()
1181 void *tracing_cond_snapshot_data(struct trace_array *tr) in tracing_cond_snapshot_data() argument
1185 arch_spin_lock(&tr->max_lock); in tracing_cond_snapshot_data()
1187 if (tr->cond_snapshot) in tracing_cond_snapshot_data()
1188 cond_data = tr->cond_snapshot->cond_data; in tracing_cond_snapshot_data()
1190 arch_spin_unlock(&tr->max_lock); in tracing_cond_snapshot_data()
1200 int tracing_alloc_snapshot_instance(struct trace_array *tr) in tracing_alloc_snapshot_instance() argument
1204 if (!tr->allocated_snapshot) { in tracing_alloc_snapshot_instance()
1207 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_alloc_snapshot_instance()
1208 &tr->array_buffer, RING_BUFFER_ALL_CPUS); in tracing_alloc_snapshot_instance()
1212 tr->allocated_snapshot = true; in tracing_alloc_snapshot_instance()
1218 static void free_snapshot(struct trace_array *tr) in free_snapshot() argument
1225 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); in free_snapshot()
1226 set_buffer_entries(&tr->max_buffer, 1); in free_snapshot()
1227 tracing_reset_online_cpus(&tr->max_buffer); in free_snapshot()
1228 tr->allocated_snapshot = false; in free_snapshot()
1243 struct trace_array *tr = &global_trace; in tracing_alloc_snapshot() local
1246 ret = tracing_alloc_snapshot_instance(tr); in tracing_alloc_snapshot()
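The hits at 1090-1246 are the snapshot allocation and capture path. A minimal usage sketch of the exported wrappers, assuming CONFIG_TRACER_SNAPSHOT=y and that the declarations are visible via <linux/kernel.h> (where they have historically lived); my_debug_snapshot() is a hypothetical caller:

```c
/* Sketch, not taken from this file: allocate the snapshot ("max") buffer once,
 * then trigger a snapshot when the interesting condition hits.
 * tracing_alloc_snapshot() and tracing_snapshot() are the exported wrappers
 * around the *_instance() helpers listed above and act on global_trace. */
#include <linux/kernel.h>

static void my_debug_snapshot(int looks_bad)	/* hypothetical hook */
{
	static bool allocated;

	if (!allocated && tracing_alloc_snapshot() == 0)
		allocated = true;

	if (allocated && looks_bad)
		tracing_snapshot();
}
```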
1289 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, in tracing_snapshot_cond_enable() argument
1304 ret = tracing_alloc_snapshot_instance(tr); in tracing_snapshot_cond_enable()
1308 if (tr->current_trace->use_max_tr) { in tracing_snapshot_cond_enable()
1321 if (tr->cond_snapshot) { in tracing_snapshot_cond_enable()
1326 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_enable()
1327 tr->cond_snapshot = cond_snapshot; in tracing_snapshot_cond_enable()
1328 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_enable()
1351 int tracing_snapshot_cond_disable(struct trace_array *tr) in tracing_snapshot_cond_disable() argument
1355 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_disable()
1357 if (!tr->cond_snapshot) in tracing_snapshot_cond_disable()
1360 kfree(tr->cond_snapshot); in tracing_snapshot_cond_disable()
1361 tr->cond_snapshot = NULL; in tracing_snapshot_cond_disable()
1364 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_disable()
1375 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) in tracing_snapshot_cond() argument
1392 void *tracing_cond_snapshot_data(struct trace_array *tr) in tracing_cond_snapshot_data() argument
1397 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) in tracing_snapshot_cond_enable() argument
1402 int tracing_snapshot_cond_disable(struct trace_array *tr) in tracing_snapshot_cond_disable() argument
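tracing_snapshot_cond_enable()/disable() and tracing_cond_snapshot_data() (hits 1289-1402) implement the conditional snapshot used by, for example, the hist-trigger snapshot action. A hedged sketch of the calling pattern, assuming the declarations from the tracing headers are in scope; all my_* names are hypothetical. The update callback is invoked from update_max_tr() (hit 1818) with the cond_data passed at snapshot time, and returning false skips the buffer swap:

```c
/* Sketch only: a conditional snapshot on a trace_array *tr obtained elsewhere. */
static bool my_update(struct trace_array *tr, void *cond_data)
{
	return cond_data && *(int *)cond_data > 0;
}

static void my_cond_snapshot_demo(struct trace_array *tr)
{
	static int interesting = 1;

	if (tracing_snapshot_cond_enable(tr, &interesting, my_update))
		return;			/* e.g. -EBUSY if already enabled */

	/* Taken only if my_update() returns true for this cond_data. */
	tracing_snapshot_cond(tr, &interesting);

	tracing_snapshot_cond_disable(tr);
}
```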
1409 void tracer_tracing_off(struct trace_array *tr) in tracer_tracing_off() argument
1411 if (tr->array_buffer.buffer) in tracer_tracing_off()
1412 ring_buffer_record_off(tr->array_buffer.buffer); in tracer_tracing_off()
1421 tr->buffer_disabled = 1; in tracer_tracing_off()
1455 bool tracer_tracing_is_on(struct trace_array *tr) in tracer_tracing_is_on() argument
1457 if (tr->array_buffer.buffer) in tracer_tracing_is_on()
1458 return ring_buffer_record_is_on(tr->array_buffer.buffer); in tracer_tracing_is_on()
1459 return !tr->buffer_disabled; in tracer_tracing_is_on()
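tracer_tracing_on()/off()/is_on() above are the per-instance forms behind the exported tracing_on()/tracing_off()/tracing_is_on(), which operate on global_trace. A common debugging sketch, with my_validate() as a hypothetical hook:

```c
/* Sketch: tracing_off() is the exported, global-trace form of
 * tracer_tracing_off() above; calling it when an anomaly is detected freezes
 * the ring buffer so the events leading up to the problem survive for
 * inspection via tracefs. */
#include <linux/kernel.h>

static void my_validate(int status)
{
	if (unlikely(status < 0)) {
		pr_err("unexpected status %d, freezing trace buffer\n", status);
		tracing_off();
	}
}
```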
1537 bool trace_clock_in_ns(struct trace_array *tr) in trace_clock_in_ns() argument
1539 if (trace_clocks[tr->clock_id].in_ns) in trace_clock_in_ns()
1684 struct trace_array *tr = container_of(work, struct trace_array, in latency_fsnotify_workfn() local
1686 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); in latency_fsnotify_workfn()
1691 struct trace_array *tr = container_of(iwork, struct trace_array, in latency_fsnotify_workfn_irq() local
1693 queue_work(fsnotify_wq, &tr->fsnotify_work); in latency_fsnotify_workfn_irq()
1696 static void trace_create_maxlat_file(struct trace_array *tr, in trace_create_maxlat_file() argument
1699 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); in trace_create_maxlat_file()
1700 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); in trace_create_maxlat_file()
1701 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644, in trace_create_maxlat_file()
1702 d_tracer, &tr->max_latency, in trace_create_maxlat_file()
1719 void latency_fsnotify(struct trace_array *tr) in latency_fsnotify() argument
1728 irq_work_queue(&tr->fsnotify_irqwork); in latency_fsnotify()
1737 #define trace_create_maxlat_file(tr, d_tracer) \ argument
1739 &tr->max_latency, &tracing_max_lat_fops)
1750 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) in __update_max_tr() argument
1752 struct array_buffer *trace_buf = &tr->array_buffer; in __update_max_tr()
1753 struct array_buffer *max_buf = &tr->max_buffer; in __update_max_tr()
1760 max_data->saved_latency = tr->max_latency; in __update_max_tr()
1781 latency_fsnotify(tr); in __update_max_tr()
1795 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, in update_max_tr() argument
1798 if (tr->stop_count) in update_max_tr()
1803 if (!tr->allocated_snapshot) { in update_max_tr()
1805 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr()
1809 arch_spin_lock(&tr->max_lock); in update_max_tr()
1812 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) in update_max_tr()
1813 ring_buffer_record_on(tr->max_buffer.buffer); in update_max_tr()
1815 ring_buffer_record_off(tr->max_buffer.buffer); in update_max_tr()
1818 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) in update_max_tr()
1821 swap(tr->array_buffer.buffer, tr->max_buffer.buffer); in update_max_tr()
1823 __update_max_tr(tr, tsk, cpu); in update_max_tr()
1826 arch_spin_unlock(&tr->max_lock); in update_max_tr()
1838 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) in update_max_tr_single() argument
1842 if (tr->stop_count) in update_max_tr_single()
1846 if (!tr->allocated_snapshot) { in update_max_tr_single()
1848 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr_single()
1852 arch_spin_lock(&tr->max_lock); in update_max_tr_single()
1854 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); in update_max_tr_single()
1863 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, in update_max_tr_single()
1869 __update_max_tr(tr, tsk, cpu); in update_max_tr_single()
1870 arch_spin_unlock(&tr->max_lock); in update_max_tr_single()
1909 struct trace_array *tr = &global_trace; in run_tracer_selftest() local
1910 struct tracer *saved_tracer = tr->current_trace; in run_tracer_selftest()
1931 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
1933 tr->current_trace = type; in run_tracer_selftest()
1939 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, in run_tracer_selftest()
1941 tr->allocated_snapshot = true; in run_tracer_selftest()
1947 ret = type->selftest(type, tr); in run_tracer_selftest()
1949 tr->current_trace = saved_tracer; in run_tracer_selftest()
1957 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
1961 tr->allocated_snapshot = false; in run_tracer_selftest()
1965 ring_buffer_resize(tr->max_buffer.buffer, 1, in run_tracer_selftest()
2027 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2164 struct trace_array *tr; in tracing_reset_all_online_cpus() local
2166 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in tracing_reset_all_online_cpus()
2167 if (!tr->clear_trace) in tracing_reset_all_online_cpus()
2169 tr->clear_trace = false; in tracing_reset_all_online_cpus()
2170 tracing_reset_online_cpus(&tr->array_buffer); in tracing_reset_all_online_cpus()
2172 tracing_reset_online_cpus(&tr->max_buffer); in tracing_reset_all_online_cpus()
2295 static void tracing_start_tr(struct trace_array *tr) in tracing_start_tr() argument
2304 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_start_tr()
2307 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_start_tr()
2309 if (--tr->stop_count) { in tracing_start_tr()
2310 if (tr->stop_count < 0) { in tracing_start_tr()
2313 tr->stop_count = 0; in tracing_start_tr()
2318 buffer = tr->array_buffer.buffer; in tracing_start_tr()
2323 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_start_tr()
2360 static void tracing_stop_tr(struct trace_array *tr) in tracing_stop_tr() argument
2366 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_stop_tr()
2369 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_stop_tr()
2370 if (tr->stop_count++) in tracing_stop_tr()
2373 buffer = tr->array_buffer.buffer; in tracing_stop_tr()
2378 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_stop_tr()
2734 *current_rb = trace_file->tr->array_buffer.buffer; in trace_event_buffer_lock_reserve()
2859 void trace_buffer_unlock_commit_regs(struct trace_array *tr, in trace_buffer_unlock_commit_regs() argument
2873 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); in trace_buffer_unlock_commit_regs()
2874 ftrace_trace_userstack(tr, buffer, flags, pc); in trace_buffer_unlock_commit_regs()
2888 trace_function(struct trace_array *tr, in trace_function() argument
2893 struct trace_buffer *buffer = tr->array_buffer.buffer; in trace_function()
2999 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
3004 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) in ftrace_trace_stack()
3010 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, in __trace_stack() argument
3013 struct trace_buffer *buffer = tr->array_buffer.buffer; in __trace_stack()
3060 ftrace_trace_userstack(struct trace_array *tr, in ftrace_trace_userstack() argument
3067 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) in ftrace_trace_userstack()
3106 static void ftrace_trace_userstack(struct trace_array *tr, in ftrace_trace_userstack() argument
3236 struct trace_array *tr = &global_trace; in trace_vbprintk() local
3264 buffer = tr->array_buffer.buffer; in trace_vbprintk()
3277 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); in trace_vbprintk()
3351 int trace_array_vprintk(struct trace_array *tr, in trace_array_vprintk() argument
3354 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); in trace_array_vprintk()
3378 int trace_array_printk(struct trace_array *tr, in trace_array_printk() argument
3384 if (!tr) in trace_array_printk()
3388 if (tr == &global_trace) in trace_array_printk()
3391 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) in trace_array_printk()
3395 ret = trace_array_vprintk(tr, ip, fmt, ap); in trace_array_printk()
3409 int trace_array_init_printk(struct trace_array *tr) in trace_array_init_printk() argument
3411 if (!tr) in trace_array_init_printk()
3415 if (tr == &global_trace) in trace_array_init_printk()
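trace_array_printk() and trace_array_init_printk() (hits 3378-3415) are the instance-aware counterparts of trace_printk(). A sketch of module-side use, assuming the declarations in <linux/trace.h> on recent kernels; "my-instance" and the my_* names are hypothetical, and the full pattern is demonstrated by samples/ftrace/sample-trace-array.c in the kernel tree:

```c
/* Sketch: print into a named trace instance rather than the global buffer.
 * Error-path cleanup (dropping the reference) is elided for brevity. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *my_tr;

static int __init my_init(void)
{
	my_tr = trace_array_get_by_name("my-instance");	/* ref taken */
	if (!my_tr)
		return -ENOMEM;

	/* Must precede trace_array_printk() on an instance (hit 3409). */
	if (trace_array_init_printk(my_tr))
		return -EINVAL;

	trace_array_printk(my_tr, _THIS_IP_, "loaded %s\n", KBUILD_MODNAME);
	return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");
```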
3669 struct trace_array *tr = iter->tr; in s_start() local
3682 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) in s_start()
3683 *iter->trace = *tr->current_trace; in s_start()
3782 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) in trace_total_entries_cpu() argument
3786 if (!tr) in trace_total_entries_cpu()
3787 tr = &global_trace; in trace_total_entries_cpu()
3789 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); in trace_total_entries_cpu()
3794 unsigned long trace_total_entries(struct trace_array *tr) in trace_total_entries() argument
3798 if (!tr) in trace_total_entries()
3799 tr = &global_trace; in trace_total_entries()
3801 get_total_entries(&tr->array_buffer, &total, &entries); in trace_total_entries()
3925 struct trace_array *tr = iter->tr; in test_cpu_buff_start() local
3927 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) in test_cpu_buff_start()
3951 struct trace_array *tr = iter->tr; in print_trace_fmt() local
3953 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); in print_trace_fmt()
3963 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_trace_fmt()
3983 struct trace_array *tr = iter->tr; in print_raw_fmt() local
3990 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) in print_raw_fmt()
4008 struct trace_array *tr = iter->tr; in print_hex_fmt() local
4016 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_hex_fmt()
4038 struct trace_array *tr = iter->tr; in print_bin_fmt() local
4045 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_bin_fmt()
4094 struct trace_array *tr = iter->tr; in print_trace_line() local
4095 unsigned long trace_flags = tr->trace_flags; in print_trace_line()
4145 struct trace_array *tr = iter->tr; in trace_latency_header() local
4154 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) in trace_latency_header()
4161 struct trace_array *tr = iter->tr; in trace_default_header() local
4162 unsigned long trace_flags = tr->trace_flags; in trace_default_header()
4222 if (iter->tr->allocated_snapshot) in print_snapshot_help()
4244 if (iter->tr) { in s_show()
4303 struct trace_array *tr = inode->i_private; in __tracing_open() local
4340 *iter->trace = *tr->current_trace; in __tracing_open()
4345 iter->tr = tr; in __tracing_open()
4349 if (tr->current_trace->print_max || snapshot) in __tracing_open()
4350 iter->array_buffer = &tr->max_buffer; in __tracing_open()
4353 iter->array_buffer = &tr->array_buffer; in __tracing_open()
4368 if (trace_clocks[tr->clock_id].in_ns) in __tracing_open()
4375 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) in __tracing_open()
4376 tracing_stop_tr(tr); in __tracing_open()
4436 struct trace_array *tr = inode->i_private; in tracing_open_generic_tr() local
4439 ret = tracing_check_open_get_tr(tr); in tracing_open_generic_tr()
4450 struct trace_array *tr = inode->i_private; in tracing_release() local
4456 trace_array_put(tr); in tracing_release()
4472 if (!iter->snapshot && tr->stop_count) in tracing_release()
4474 tracing_start_tr(tr); in tracing_release()
4476 __trace_array_put(tr); in tracing_release()
4492 struct trace_array *tr = inode->i_private; in tracing_release_generic_tr() local
4494 trace_array_put(tr); in tracing_release_generic_tr()
4500 struct trace_array *tr = inode->i_private; in tracing_single_release_tr() local
4502 trace_array_put(tr); in tracing_single_release_tr()
4509 struct trace_array *tr = inode->i_private; in tracing_open() local
4513 ret = tracing_check_open_get_tr(tr); in tracing_open()
4520 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_open()
4523 if (tr->current_trace->print_max) in tracing_open()
4524 trace_buf = &tr->max_buffer; in tracing_open()
4537 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open()
4542 trace_array_put(tr); in tracing_open()
4553 trace_ok_for_array(struct tracer *t, struct trace_array *tr) in trace_ok_for_array() argument
4555 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; in trace_ok_for_array()
4560 get_tracer_for_array(struct trace_array *tr, struct tracer *t) in get_tracer_for_array() argument
4562 while (t && !trace_ok_for_array(t, tr)) in get_tracer_for_array()
4571 struct trace_array *tr = m->private; in t_next() local
4577 t = get_tracer_for_array(tr, t->next); in t_next()
4584 struct trace_array *tr = m->private; in t_start() local
4590 t = get_tracer_for_array(tr, trace_types); in t_start()
4627 struct trace_array *tr = inode->i_private; in show_traces_open() local
4631 ret = tracing_check_open_get_tr(tr); in show_traces_open()
4637 trace_array_put(tr); in show_traces_open()
4642 m->private = tr; in show_traces_open()
4649 struct trace_array *tr = inode->i_private; in show_traces_release() local
4651 trace_array_put(tr); in show_traces_release()
4693 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_read() local
4698 cpumask_pr_args(tr->tracing_cpumask)) + 1; in tracing_cpumask_read()
4704 cpumask_pr_args(tr->tracing_cpumask)); in tracing_cpumask_read()
4717 int tracing_set_cpumask(struct trace_array *tr, in tracing_set_cpumask() argument
4722 if (!tr) in tracing_set_cpumask()
4726 arch_spin_lock(&tr->max_lock); in tracing_set_cpumask()
4732 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
4734 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
4735 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
4737 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
4739 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
4740 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
4743 arch_spin_unlock(&tr->max_lock); in tracing_set_cpumask()
4746 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); in tracing_set_cpumask()
4755 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_write() local
4766 err = tracing_set_cpumask(tr, tracing_cpumask_new); in tracing_cpumask_write()
4791 struct trace_array *tr = m->private; in tracing_trace_options_show() local
4796 tracer_flags = tr->current_trace->flags->val; in tracing_trace_options_show()
4797 trace_opts = tr->current_trace->flags->opts; in tracing_trace_options_show()
4800 if (tr->trace_flags & (1 << i)) in tracing_trace_options_show()
4817 static int __set_tracer_option(struct trace_array *tr, in __set_tracer_option() argument
4824 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); in __set_tracer_option()
4836 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) in set_tracer_option() argument
4838 struct tracer *trace = tr->current_trace; in set_tracer_option()
4847 return __set_tracer_option(tr, trace->flags, opts, neg); in set_tracer_option()
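__set_tracer_option() forwards to the tracer's ->set_flag() callback, whose signature is the one dummy_set_flag() (hit 86) stubs out. A hedged sketch of the tracer side, usable only inside kernel/trace/ since struct tracer_opt, struct tracer_flags and TRACER_OPT() live in the private trace.h; all MY_*/my_* names are hypothetical:

```c
#define MY_FLAG_VERBOSE	0x1	/* hypothetical tracer-private option bit */

static struct tracer_opt my_tracer_opts[] = {
	{ TRACER_OPT(my_verbose, MY_FLAG_VERBOSE) },	/* shows up as options/my_verbose */
	{ }	/* terminator */
};

static struct tracer_flags my_tracer_flags = {
	.val  = 0,			/* everything off by default */
	.opts = my_tracer_opts,
};

static int my_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == MY_FLAG_VERBOSE)
		pr_info("my tracer: verbose %s\n", set ? "on" : "off");

	return 0;	/* non-zero would make __set_tracer_option() reject the change */
}
```

The tracer would then point .flags at &my_tracer_flags and .set_flag at my_set_flag in its struct tracer, as the in-tree tracers do.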
4862 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) in set_tracer_flag() argument
4869 if (!!(tr->trace_flags & mask) == !!enabled) in set_tracer_flag()
4873 if (tr->current_trace->flag_changed) in set_tracer_flag()
4874 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) in set_tracer_flag()
4878 tr->trace_flags |= mask; in set_tracer_flag()
4880 tr->trace_flags &= ~mask; in set_tracer_flag()
4891 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; in set_tracer_flag()
4899 trace_event_follow_fork(tr, enabled); in set_tracer_flag()
4902 ftrace_pid_follow_fork(tr, enabled); in set_tracer_flag()
4905 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); in set_tracer_flag()
4907 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); in set_tracer_flag()
4919 int trace_set_options(struct trace_array *tr, char *option) in trace_set_options() argument
4941 ret = set_tracer_option(tr, cmp, neg); in trace_set_options()
4943 ret = set_tracer_flag(tr, 1 << ret, !neg); in trace_set_options()
4983 struct trace_array *tr = m->private; in tracing_trace_options_write() local
4995 ret = trace_set_options(tr, buf); in tracing_trace_options_write()
5006 struct trace_array *tr = inode->i_private; in tracing_trace_options_open() local
5009 ret = tracing_check_open_get_tr(tr); in tracing_trace_options_open()
5015 trace_array_put(tr); in tracing_trace_options_open()
5705 struct trace_array *tr = filp->private_data; in tracing_set_trace_read() local
5710 r = sprintf(buf, "%s\n", tr->current_trace->name); in tracing_set_trace_read()
5716 int tracer_init(struct tracer *t, struct trace_array *tr) in tracer_init() argument
5718 tracing_reset_online_cpus(&tr->array_buffer); in tracer_init()
5719 return t->init(tr); in tracer_init()
5758 static int __tracing_resize_ring_buffer(struct trace_array *tr, in __tracing_resize_ring_buffer() argument
5771 if (!tr->array_buffer.buffer) in __tracing_resize_ring_buffer()
5774 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5779 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || in __tracing_resize_ring_buffer()
5780 !tr->current_trace->use_max_tr) in __tracing_resize_ring_buffer()
5783 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5785 int r = resize_buffer_duplicate_size(&tr->array_buffer, in __tracing_resize_ring_buffer()
5786 &tr->array_buffer, cpu); in __tracing_resize_ring_buffer()
5809 set_buffer_entries(&tr->max_buffer, size); in __tracing_resize_ring_buffer()
5811 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; in __tracing_resize_ring_buffer()
5817 set_buffer_entries(&tr->array_buffer, size); in __tracing_resize_ring_buffer()
5819 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size; in __tracing_resize_ring_buffer()
5824 ssize_t tracing_resize_ring_buffer(struct trace_array *tr, in tracing_resize_ring_buffer() argument
5839 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); in tracing_resize_ring_buffer()
5876 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5882 static void tracing_set_nop(struct trace_array *tr) in tracing_set_nop() argument
5884 if (tr->current_trace == &nop_trace) in tracing_set_nop()
5887 tr->current_trace->enabled--; in tracing_set_nop()
5889 if (tr->current_trace->reset) in tracing_set_nop()
5890 tr->current_trace->reset(tr); in tracing_set_nop()
5892 tr->current_trace = &nop_trace; in tracing_set_nop()
5895 static void add_tracer_options(struct trace_array *tr, struct tracer *t) in add_tracer_options() argument
5898 if (!tr->dir) in add_tracer_options()
5901 create_trace_option_files(tr, t); in add_tracer_options()
5904 int tracing_set_tracer(struct trace_array *tr, const char *buf) in tracing_set_tracer() argument
5915 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, in tracing_set_tracer()
5930 if (t == tr->current_trace) in tracing_set_tracer()
5935 arch_spin_lock(&tr->max_lock); in tracing_set_tracer()
5936 if (tr->cond_snapshot) in tracing_set_tracer()
5938 arch_spin_unlock(&tr->max_lock); in tracing_set_tracer()
5951 if (!trace_ok_for_array(t, tr)) { in tracing_set_tracer()
5957 if (tr->trace_ref) { in tracing_set_tracer()
5964 tr->current_trace->enabled--; in tracing_set_tracer()
5966 if (tr->current_trace->reset) in tracing_set_tracer()
5967 tr->current_trace->reset(tr); in tracing_set_tracer()
5970 tr->current_trace = &nop_trace; in tracing_set_tracer()
5973 had_max_tr = tr->allocated_snapshot; in tracing_set_tracer()
5984 free_snapshot(tr); in tracing_set_tracer()
5990 ret = tracing_alloc_snapshot_instance(tr); in tracing_set_tracer()
5997 ret = tracer_init(t, tr); in tracing_set_tracer()
6002 tr->current_trace = t; in tracing_set_tracer()
6003 tr->current_trace->enabled++; in tracing_set_tracer()
6004 trace_branch_enable(tr); in tracing_set_tracer()
6015 struct trace_array *tr = filp->private_data; in tracing_set_trace_write() local
6035 err = tracing_set_tracer(tr, buf); in tracing_set_trace_write()
6085 struct trace_array *tr = filp->private_data; in tracing_thresh_write() local
6093 if (tr->current_trace->update_thresh) { in tracing_thresh_write()
6094 ret = tr->current_trace->update_thresh(tr); in tracing_thresh_write()
6126 struct trace_array *tr = inode->i_private; in tracing_open_pipe() local
6130 ret = tracing_check_open_get_tr(tr); in tracing_open_pipe()
6140 __trace_array_put(tr); in tracing_open_pipe()
6145 iter->trace = tr->current_trace; in tracing_open_pipe()
6155 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open_pipe()
6159 if (trace_clocks[tr->clock_id].in_ns) in tracing_open_pipe()
6162 iter->tr = tr; in tracing_open_pipe()
6163 iter->array_buffer = &tr->array_buffer; in tracing_open_pipe()
6173 tr->trace_ref++; in tracing_open_pipe()
6180 __trace_array_put(tr); in tracing_open_pipe()
6188 struct trace_array *tr = inode->i_private; in tracing_release_pipe() local
6192 tr->trace_ref--; in tracing_release_pipe()
6203 trace_array_put(tr); in tracing_release_pipe()
6211 struct trace_array *tr = iter->tr; in trace_poll() local
6217 if (tr->trace_flags & TRACE_ITER_BLOCK) in trace_poll()
6256 if (!tracer_tracing_is_on(iter->tr) && iter->pos) in tracing_wait_pipe()
6515 struct trace_array *tr = inode->i_private; in tracing_entries_read() local
6533 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; in tracing_entries_read()
6534 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { in tracing_entries_read()
6550 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); in tracing_entries_read()
6563 struct trace_array *tr = inode->i_private; in tracing_entries_write() local
6577 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); in tracing_entries_write()
6590 struct trace_array *tr = filp->private_data; in tracing_total_entries_read() local
6597 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; in tracing_total_entries_read()
6627 struct trace_array *tr = inode->i_private; in tracing_free_buffer_release() local
6630 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) in tracing_free_buffer_release()
6631 tracer_tracing_off(tr); in tracing_free_buffer_release()
6633 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); in tracing_free_buffer_release()
6635 trace_array_put(tr); in tracing_free_buffer_release()
6644 struct trace_array *tr = filp->private_data; in tracing_mark_write() local
6661 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_write()
6676 buffer = tr->array_buffer.buffer; in tracing_mark_write()
6694 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { in tracing_mark_write()
6697 tt = event_triggers_call(tr->trace_marker_file, entry, event); in tracing_mark_write()
6711 event_triggers_post_call(tr->trace_marker_file, tt); in tracing_mark_write()
6726 struct trace_array *tr = filp->private_data; in tracing_mark_raw_write() local
6740 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_raw_write()
6757 buffer = tr->array_buffer.buffer; in tracing_mark_raw_write()
6784 struct trace_array *tr = m->private; in tracing_clock_show() local
6790 i == tr->clock_id ? "[" : "", trace_clocks[i].name, in tracing_clock_show()
6791 i == tr->clock_id ? "]" : ""); in tracing_clock_show()
6797 int tracing_set_clock(struct trace_array *tr, const char *clockstr) in tracing_set_clock() argument
6810 tr->clock_id = i; in tracing_set_clock()
6812 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
6818 tracing_reset_online_cpus(&tr->array_buffer); in tracing_set_clock()
6821 if (tr->max_buffer.buffer) in tracing_set_clock()
6822 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
6823 tracing_reset_online_cpus(&tr->max_buffer); in tracing_set_clock()
6835 struct trace_array *tr = m->private; in tracing_clock_write() local
6850 ret = tracing_set_clock(tr, clockstr); in tracing_clock_write()
6861 struct trace_array *tr = inode->i_private; in tracing_clock_open() local
6864 ret = tracing_check_open_get_tr(tr); in tracing_clock_open()
6870 trace_array_put(tr); in tracing_clock_open()
6877 struct trace_array *tr = m->private; in tracing_time_stamp_mode_show() local
6881 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) in tracing_time_stamp_mode_show()
6893 struct trace_array *tr = inode->i_private; in tracing_time_stamp_mode_open() local
6896 ret = tracing_check_open_get_tr(tr); in tracing_time_stamp_mode_open()
6902 trace_array_put(tr); in tracing_time_stamp_mode_open()
6907 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) in tracing_set_time_stamp_abs() argument
6913 if (abs && tr->time_stamp_abs_ref++) in tracing_set_time_stamp_abs()
6917 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { in tracing_set_time_stamp_abs()
6922 if (--tr->time_stamp_abs_ref) in tracing_set_time_stamp_abs()
6926 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs); in tracing_set_time_stamp_abs()
6929 if (tr->max_buffer.buffer) in tracing_set_time_stamp_abs()
6930 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); in tracing_set_time_stamp_abs()
6948 struct trace_array *tr = inode->i_private; in tracing_snapshot_open() local
6953 ret = tracing_check_open_get_tr(tr); in tracing_snapshot_open()
6974 iter->tr = tr; in tracing_snapshot_open()
6975 iter->array_buffer = &tr->max_buffer; in tracing_snapshot_open()
6982 trace_array_put(tr); in tracing_snapshot_open()
6993 struct trace_array *tr = iter->tr; in tracing_snapshot_write() local
7007 if (tr->current_trace->use_max_tr) { in tracing_snapshot_write()
7012 arch_spin_lock(&tr->max_lock); in tracing_snapshot_write()
7013 if (tr->cond_snapshot) in tracing_snapshot_write()
7015 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_write()
7025 if (tr->allocated_snapshot) in tracing_snapshot_write()
7026 free_snapshot(tr); in tracing_snapshot_write()
7036 if (tr->allocated_snapshot) in tracing_snapshot_write()
7037 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_snapshot_write()
7038 &tr->array_buffer, iter->cpu_file); in tracing_snapshot_write()
7040 ret = tracing_alloc_snapshot_instance(tr); in tracing_snapshot_write()
7046 update_max_tr(tr, current, smp_processor_id(), NULL); in tracing_snapshot_write()
7048 update_max_tr_single(tr, current, iter->cpu_file); in tracing_snapshot_write()
7052 if (tr->allocated_snapshot) { in tracing_snapshot_write()
7054 tracing_reset_online_cpus(&tr->max_buffer); in tracing_snapshot_write()
7056 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); in tracing_snapshot_write()
7113 info->iter.array_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
7243 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) in get_tracing_log_err() argument
7247 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { in get_tracing_log_err()
7251 tr->n_err_log_entries++; in get_tracing_log_err()
7256 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); in get_tracing_log_err()
7315 void tracing_log_err(struct trace_array *tr, in tracing_log_err() argument
7321 if (!tr) in tracing_log_err()
7322 tr = &global_trace; in tracing_log_err()
7325 err = get_tracing_log_err(tr); in tracing_log_err()
7339 list_add_tail(&err->list, &tr->err_log); in tracing_log_err()
7343 static void clear_tracing_err_log(struct trace_array *tr) in clear_tracing_err_log() argument
7348 list_for_each_entry_safe(err, next, &tr->err_log, list) { in clear_tracing_err_log()
7353 tr->n_err_log_entries = 0; in clear_tracing_err_log()
7359 struct trace_array *tr = m->private; in tracing_err_log_seq_start() local
7363 return seq_list_start(&tr->err_log, *pos); in tracing_err_log_seq_start()
7368 struct trace_array *tr = m->private; in tracing_err_log_seq_next() local
7370 return seq_list_next(v, &tr->err_log, pos); in tracing_err_log_seq_next()
7417 struct trace_array *tr = inode->i_private; in tracing_err_log_open() local
7420 ret = tracing_check_open_get_tr(tr); in tracing_err_log_open()
7426 clear_tracing_err_log(tr); in tracing_err_log_open()
7432 m->private = tr; in tracing_err_log_open()
7434 trace_array_put(tr); in tracing_err_log_open()
7449 struct trace_array *tr = inode->i_private; in tracing_err_log_release() local
7451 trace_array_put(tr); in tracing_err_log_release()
7469 struct trace_array *tr = inode->i_private; in tracing_buffers_open() local
7473 ret = tracing_check_open_get_tr(tr); in tracing_buffers_open()
7479 trace_array_put(tr); in tracing_buffers_open()
7485 info->iter.tr = tr; in tracing_buffers_open()
7487 info->iter.trace = tr->current_trace; in tracing_buffers_open()
7488 info->iter.array_buffer = &tr->array_buffer; in tracing_buffers_open()
7495 tr->trace_ref++; in tracing_buffers_open()
7501 trace_array_put(tr); in tracing_buffers_open()
7528 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_read()
7596 iter->tr->trace_ref--; in tracing_buffers_release()
7598 __trace_array_put(iter->tr); in tracing_buffers_release()
7686 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_splice_read()
7760 ret = wait_on_pipe(iter, iter->tr->buffer_percent); in tracing_buffers_splice_read()
7788 struct trace_array *tr = inode->i_private; in tracing_stats_read() local
7789 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_stats_read()
7814 if (trace_clocks[tr->clock_id].in_ns) { in tracing_stats_read()
7889 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_snapshot() argument
7892 tracing_snapshot_instance(tr); in ftrace_snapshot()
7897 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_count_snapshot() argument
7914 tracing_snapshot_instance(tr); in ftrace_count_snapshot()
7940 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_init() argument
7956 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_free() argument
7984 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, in ftrace_trace_snapshot_callback() argument
7992 if (!tr) in ftrace_trace_snapshot_callback()
8002 return unregister_ftrace_function_probe_func(glob+1, tr, ops); in ftrace_trace_snapshot_callback()
8021 ret = tracing_alloc_snapshot_instance(tr); in ftrace_trace_snapshot_callback()
8025 ret = register_ftrace_function_probe(glob, tr, ops, count); in ftrace_trace_snapshot_callback()
8044 static struct dentry *tracing_get_dentry(struct trace_array *tr) in tracing_get_dentry() argument
8046 if (WARN_ON(!tr->dir)) in tracing_get_dentry()
8050 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_get_dentry()
8054 return tr->dir; in tracing_get_dentry()
8057 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) in tracing_dentry_percpu() argument
8061 if (tr->percpu_dir) in tracing_dentry_percpu()
8062 return tr->percpu_dir; in tracing_dentry_percpu()
8064 d_tracer = tracing_get_dentry(tr); in tracing_dentry_percpu()
8068 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); in tracing_dentry_percpu()
8070 MEM_FAIL(!tr->percpu_dir, in tracing_dentry_percpu()
8073 return tr->percpu_dir; in tracing_dentry_percpu()
8088 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) in tracing_init_tracefs_percpu() argument
8090 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); in tracing_init_tracefs_percpu()
8106 tr, cpu, &tracing_pipe_fops); in tracing_init_tracefs_percpu()
8110 tr, cpu, &tracing_fops); in tracing_init_tracefs_percpu()
8113 tr, cpu, &tracing_buffers_fops); in tracing_init_tracefs_percpu()
8116 tr, cpu, &tracing_stats_fops); in tracing_init_tracefs_percpu()
8119 tr, cpu, &tracing_entries_fops); in tracing_init_tracefs_percpu()
8123 tr, cpu, &snapshot_fops); in tracing_init_tracefs_percpu()
8126 tr, cpu, &snapshot_raw_fops); in tracing_init_tracefs_percpu()
8167 ret = __set_tracer_option(topt->tr, topt->flags, in trace_options_write()
8225 struct trace_array *tr; in trace_options_core_read() local
8229 get_tr_index(tr_index, &tr, &index); in trace_options_core_read()
8231 if (tr->trace_flags & (1 << index)) in trace_options_core_read()
8244 struct trace_array *tr; in trace_options_core_write() local
8249 get_tr_index(tr_index, &tr, &index); in trace_options_core_write()
8260 ret = set_tracer_flag(tr, 1 << index, val); in trace_options_core_write()
8295 static struct dentry *trace_options_init_dentry(struct trace_array *tr) in trace_options_init_dentry() argument
8299 if (tr->options) in trace_options_init_dentry()
8300 return tr->options; in trace_options_init_dentry()
8302 d_tracer = tracing_get_dentry(tr); in trace_options_init_dentry()
8306 tr->options = tracefs_create_dir("options", d_tracer); in trace_options_init_dentry()
8307 if (!tr->options) { in trace_options_init_dentry()
8312 return tr->options; in trace_options_init_dentry()
8316 create_trace_option_file(struct trace_array *tr, in create_trace_option_file() argument
8323 t_options = trace_options_init_dentry(tr); in create_trace_option_file()
8329 topt->tr = tr; in create_trace_option_file()
8337 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) in create_trace_option_files() argument
8358 if (!trace_ok_for_array(tracer, tr)) in create_trace_option_files()
8361 for (i = 0; i < tr->nr_topts; i++) { in create_trace_option_files()
8363 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) in create_trace_option_files()
8376 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), in create_trace_option_files()
8383 tr->topts = tr_topts; in create_trace_option_files()
8384 tr->topts[tr->nr_topts].tracer = tracer; in create_trace_option_files()
8385 tr->topts[tr->nr_topts].topts = topts; in create_trace_option_files()
8386 tr->nr_topts++; in create_trace_option_files()
8389 create_trace_option_file(tr, &topts[cnt], flags, in create_trace_option_files()
8398 create_trace_option_core_file(struct trace_array *tr, in create_trace_option_core_file() argument
8403 t_options = trace_options_init_dentry(tr); in create_trace_option_core_file()
8408 (void *)&tr->trace_flags_index[index], in create_trace_option_core_file()
8412 static void create_trace_options_dir(struct trace_array *tr) in create_trace_options_dir() argument
8415 bool top_level = tr == &global_trace; in create_trace_options_dir()
8418 t_options = trace_options_init_dentry(tr); in create_trace_options_dir()
8425 create_trace_option_core_file(tr, trace_options[i], i); in create_trace_options_dir()
8433 struct trace_array *tr = filp->private_data; in rb_simple_read() local
8437 r = tracer_tracing_is_on(tr); in rb_simple_read()
8447 struct trace_array *tr = filp->private_data; in rb_simple_write() local
8448 struct trace_buffer *buffer = tr->array_buffer.buffer; in rb_simple_write()
8458 if (!!val == tracer_tracing_is_on(tr)) { in rb_simple_write()
8461 tracer_tracing_on(tr); in rb_simple_write()
8462 if (tr->current_trace->start) in rb_simple_write()
8463 tr->current_trace->start(tr); in rb_simple_write()
8465 tracer_tracing_off(tr); in rb_simple_write()
8466 if (tr->current_trace->stop) in rb_simple_write()
8467 tr->current_trace->stop(tr); in rb_simple_write()
8489 struct trace_array *tr = filp->private_data; in buffer_percent_read() local
8493 r = tr->buffer_percent; in buffer_percent_read()
8503 struct trace_array *tr = filp->private_data; in buffer_percent_write() local
8517 tr->buffer_percent = val; in buffer_percent_write()
8535 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8538 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) in allocate_trace_buffer() argument
8542 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; in allocate_trace_buffer()
8544 buf->tr = tr; in allocate_trace_buffer()
8558 set_buffer_entries(&tr->array_buffer, in allocate_trace_buffer()
8559 ring_buffer_size(tr->array_buffer.buffer, 0)); in allocate_trace_buffer()
8564 static int allocate_trace_buffers(struct trace_array *tr, int size) in allocate_trace_buffers() argument
8568 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); in allocate_trace_buffers()
8573 ret = allocate_trace_buffer(tr, &tr->max_buffer, in allocate_trace_buffers()
8576 ring_buffer_free(tr->array_buffer.buffer); in allocate_trace_buffers()
8577 tr->array_buffer.buffer = NULL; in allocate_trace_buffers()
8578 free_percpu(tr->array_buffer.data); in allocate_trace_buffers()
8579 tr->array_buffer.data = NULL; in allocate_trace_buffers()
8582 tr->allocated_snapshot = allocate_snapshot; in allocate_trace_buffers()
8604 static void free_trace_buffers(struct trace_array *tr) in free_trace_buffers() argument
8606 if (!tr) in free_trace_buffers()
8609 free_trace_buffer(&tr->array_buffer); in free_trace_buffers()
8612 free_trace_buffer(&tr->max_buffer); in free_trace_buffers()
8616 static void init_trace_flags_index(struct trace_array *tr) in init_trace_flags_index() argument
8622 tr->trace_flags_index[i] = i; in init_trace_flags_index()
8625 static void __update_tracer_options(struct trace_array *tr) in __update_tracer_options() argument
8630 add_tracer_options(tr, t); in __update_tracer_options()
8633 static void update_tracer_options(struct trace_array *tr) in update_tracer_options() argument
8636 __update_tracer_options(tr); in update_tracer_options()
8643 struct trace_array *tr, *found = NULL; in trace_array_find() local
8645 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_find()
8646 if (tr->name && strcmp(tr->name, instance) == 0) { in trace_array_find()
8647 found = tr; in trace_array_find()
8657 struct trace_array *tr; in trace_array_find_get() local
8660 tr = trace_array_find(instance); in trace_array_find_get()
8661 if (tr) in trace_array_find_get()
8662 tr->ref++; in trace_array_find_get()
8665 return tr; in trace_array_find_get()
8668 static int trace_array_create_dir(struct trace_array *tr) in trace_array_create_dir() argument
8672 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); in trace_array_create_dir()
8673 if (!tr->dir) in trace_array_create_dir()
8676 ret = event_trace_add_tracer(tr->dir, tr); in trace_array_create_dir()
8678 tracefs_remove(tr->dir); in trace_array_create_dir()
8680 init_tracer_tracefs(tr, tr->dir); in trace_array_create_dir()
8681 __update_tracer_options(tr); in trace_array_create_dir()
8688 struct trace_array *tr; in trace_array_create() local
8692 tr = kzalloc(sizeof(*tr), GFP_KERNEL); in trace_array_create()
8693 if (!tr) in trace_array_create()
8696 tr->name = kstrdup(name, GFP_KERNEL); in trace_array_create()
8697 if (!tr->name) in trace_array_create()
8700 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) in trace_array_create()
8703 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; in trace_array_create()
8705 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); in trace_array_create()
8707 raw_spin_lock_init(&tr->start_lock); in trace_array_create()
8709 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in trace_array_create()
8711 tr->current_trace = &nop_trace; in trace_array_create()
8713 INIT_LIST_HEAD(&tr->systems); in trace_array_create()
8714 INIT_LIST_HEAD(&tr->events); in trace_array_create()
8715 INIT_LIST_HEAD(&tr->hist_vars); in trace_array_create()
8716 INIT_LIST_HEAD(&tr->err_log); in trace_array_create()
8718 if (allocate_trace_buffers(tr, trace_buf_size) < 0) in trace_array_create()
8721 if (ftrace_allocate_ftrace_ops(tr) < 0) in trace_array_create()
8724 ftrace_init_trace_array(tr); in trace_array_create()
8726 init_trace_flags_index(tr); in trace_array_create()
8729 ret = trace_array_create_dir(tr); in trace_array_create()
8733 __trace_early_add_events(tr); in trace_array_create()
8735 list_add(&tr->list, &ftrace_trace_arrays); in trace_array_create()
8737 tr->ref++; in trace_array_create()
8739 return tr; in trace_array_create()
8742 ftrace_free_ftrace_ops(tr); in trace_array_create()
8743 free_trace_buffers(tr); in trace_array_create()
8744 free_cpumask_var(tr->tracing_cpumask); in trace_array_create()
8745 kfree(tr->name); in trace_array_create()
8746 kfree(tr); in trace_array_create()
8753 struct trace_array *tr; in instance_mkdir() local
8763 tr = trace_array_create(name); in instance_mkdir()
8765 ret = PTR_ERR_OR_ZERO(tr); in instance_mkdir()
8791 struct trace_array *tr; in trace_array_get_by_name() local
8796 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_get_by_name()
8797 if (tr->name && strcmp(tr->name, name) == 0) in trace_array_get_by_name()
8801 tr = trace_array_create(name); in trace_array_get_by_name()
8803 if (IS_ERR(tr)) in trace_array_get_by_name()
8804 tr = NULL; in trace_array_get_by_name()
8806 if (tr) in trace_array_get_by_name()
8807 tr->ref++; in trace_array_get_by_name()
8811 return tr; in trace_array_get_by_name()
8815 static int __remove_instance(struct trace_array *tr) in __remove_instance() argument
8820 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) in __remove_instance()
8823 list_del(&tr->list); in __remove_instance()
8828 set_tracer_flag(tr, 1 << i, 0); in __remove_instance()
8831 tracing_set_nop(tr); in __remove_instance()
8832 clear_ftrace_function_probes(tr); in __remove_instance()
8833 event_trace_del_tracer(tr); in __remove_instance()
8834 ftrace_clear_pids(tr); in __remove_instance()
8835 ftrace_destroy_function_files(tr); in __remove_instance()
8836 tracefs_remove(tr->dir); in __remove_instance()
8837 free_trace_buffers(tr); in __remove_instance()
8839 for (i = 0; i < tr->nr_topts; i++) { in __remove_instance()
8840 kfree(tr->topts[i].topts); in __remove_instance()
8842 kfree(tr->topts); in __remove_instance()
8844 free_cpumask_var(tr->tracing_cpumask); in __remove_instance()
8845 kfree(tr->name); in __remove_instance()
8846 kfree(tr); in __remove_instance()
8853 struct trace_array *tr; in trace_array_destroy() local
8865 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_destroy()
8866 if (tr == this_tr) { in trace_array_destroy()
8867 ret = __remove_instance(tr); in trace_array_destroy()
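trace_array_destroy() (hits 8853-8867) pairs with trace_array_get_by_name() above; __remove_instance() refuses with -EBUSY while extra references or open files (tr->trace_ref) remain. A sketch of the module-side teardown order, with my_teardown() as a hypothetical helper:

```c
/* Sketch: tear down a private instance created with trace_array_get_by_name().
 * The creation reference keeps tr valid across trace_array_put(); destroy
 * returns -EBUSY while extra references or open tracefs files remain. */
#include <linux/kernel.h>
#include <linux/trace.h>

static void my_teardown(struct trace_array *tr)
{
	trace_array_put(tr);		/* drop the reference get_by_name() took */

	if (trace_array_destroy(tr))
		pr_warn("instance still busy, leaving it in place\n");
}
```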
8881 struct trace_array *tr; in instance_rmdir() local
8888 tr = trace_array_find(name); in instance_rmdir()
8889 if (tr) in instance_rmdir()
8890 ret = __remove_instance(tr); in instance_rmdir()
8900 struct trace_array *tr; in create_trace_instances() local
8911 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in create_trace_instances()
8912 if (!tr->name) in create_trace_instances()
8914 if (MEM_FAIL(trace_array_create_dir(tr) < 0, in create_trace_instances()
8924 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) in init_tracer_tracefs() argument
8930 tr, &show_traces_fops); in init_tracer_tracefs()
8933 tr, &set_tracer_fops); in init_tracer_tracefs()
8936 tr, &tracing_cpumask_fops); in init_tracer_tracefs()
8939 tr, &tracing_iter_fops); in init_tracer_tracefs()
8942 tr, &tracing_fops); in init_tracer_tracefs()
8945 tr, &tracing_pipe_fops); in init_tracer_tracefs()
8948 tr, &tracing_entries_fops); in init_tracer_tracefs()
8951 tr, &tracing_total_entries_fops); in init_tracer_tracefs()
8954 tr, &tracing_free_buffer_fops); in init_tracer_tracefs()
8957 tr, &tracing_mark_fops); in init_tracer_tracefs()
8959 file = __find_event_file(tr, "ftrace", "print"); in init_tracer_tracefs()
8963 tr->trace_marker_file = file; in init_tracer_tracefs()
8966 tr, &tracing_mark_raw_fops); in init_tracer_tracefs()
8968 trace_create_file("trace_clock", 0644, d_tracer, tr, in init_tracer_tracefs()
8972 tr, &rb_simple_fops); in init_tracer_tracefs()
8974 trace_create_file("timestamp_mode", 0444, d_tracer, tr, in init_tracer_tracefs()
8977 tr->buffer_percent = 50; in init_tracer_tracefs()
8980 tr, &buffer_percent_fops); in init_tracer_tracefs()
8982 create_trace_options_dir(tr); in init_tracer_tracefs()
8985 trace_create_maxlat_file(tr, d_tracer); in init_tracer_tracefs()
8988 if (ftrace_create_function_files(tr, d_tracer)) in init_tracer_tracefs()
8993 tr, &snapshot_fops); in init_tracer_tracefs()
8997 tr, &tracing_err_log_fops); in init_tracer_tracefs()
9000 tracing_init_tracefs_percpu(tr, cpu); in init_tracer_tracefs()
9002 ftrace_init_tracefs(tr, d_tracer); in init_tracer_tracefs()
9036 struct trace_array *tr = &global_trace; in tracing_init_dentry() local
9044 if (tr->dir) in tracing_init_dentry()
9056 tr->dir = debugfs_create_automount("tracing", NULL, in tracing_init_dentry()
9266 iter->tr = &global_trace; in trace_init_global_iter()
9267 iter->trace = iter->tr->current_trace; in trace_init_global_iter()
9279 if (trace_clocks[iter->tr->clock_id].in_ns) in trace_init_global_iter()
9288 struct trace_array *tr = &global_trace; in ftrace_dump() local
9322 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
9325 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
9384 tr->trace_flags |= old_userobj; in ftrace_dump()