Lines Matching refs:tr

103 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  in dummy_set_flag()  argument
182 int tracing_set_tracer(struct trace_array *tr, const char *buf);
183 static void ftrace_trace_userstack(struct trace_array *tr,
497 struct trace_array *tr; in trace_array_get() local
501 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_get()
502 if (tr == this_tr) { in trace_array_get()
503 tr->ref++; in trace_array_get()
539 int tracing_check_open_get_tr(struct trace_array *tr) in tracing_check_open_get_tr() argument
550 if (tr && trace_array_get(tr) < 0) in tracing_check_open_get_tr()
954 static inline void ftrace_trace_stack(struct trace_array *tr,
965 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
998 void tracer_tracing_on(struct trace_array *tr) in tracer_tracing_on() argument
1000 if (tr->array_buffer.buffer) in tracer_tracing_on()
1001 ring_buffer_record_on(tr->array_buffer.buffer); in tracer_tracing_on()
1010 tr->buffer_disabled = 0; in tracer_tracing_on()
1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip, in __trace_array_puts() argument
1054 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) in __trace_array_puts()
1057 if (unlikely(tracing_selftest_running && tr == &global_trace)) in __trace_array_puts()
1066 buffer = tr->array_buffer.buffer; in __trace_array_puts()
1088 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL); in __trace_array_puts()
1151 static void tracing_snapshot_instance_cond(struct trace_array *tr, in tracing_snapshot_instance_cond() argument
1154 struct tracer *tracer = tr->current_trace; in tracing_snapshot_instance_cond()
1158 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); in tracing_snapshot_instance_cond()
1159 trace_array_puts(tr, "*** snapshot is being ignored ***\n"); in tracing_snapshot_instance_cond()
1163 if (!tr->allocated_snapshot) { in tracing_snapshot_instance_cond()
1164 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n"); in tracing_snapshot_instance_cond()
1165 trace_array_puts(tr, "*** stopping trace here! ***\n"); in tracing_snapshot_instance_cond()
1166 tracer_tracing_off(tr); in tracing_snapshot_instance_cond()
1172 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n"); in tracing_snapshot_instance_cond()
1173 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); in tracing_snapshot_instance_cond()
1178 update_max_tr(tr, current, smp_processor_id(), cond_data); in tracing_snapshot_instance_cond()
1182 void tracing_snapshot_instance(struct trace_array *tr) in tracing_snapshot_instance() argument
1184 tracing_snapshot_instance_cond(tr, NULL); in tracing_snapshot_instance()
1203 struct trace_array *tr = &global_trace; in tracing_snapshot() local
1205 tracing_snapshot_instance(tr); in tracing_snapshot()
1222 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) in tracing_snapshot_cond() argument
1224 tracing_snapshot_instance_cond(tr, cond_data); in tracing_snapshot_cond()
1242 void *tracing_cond_snapshot_data(struct trace_array *tr) in tracing_cond_snapshot_data() argument
1247 arch_spin_lock(&tr->max_lock); in tracing_cond_snapshot_data()
1249 if (tr->cond_snapshot) in tracing_cond_snapshot_data()
1250 cond_data = tr->cond_snapshot->cond_data; in tracing_cond_snapshot_data()
1252 arch_spin_unlock(&tr->max_lock); in tracing_cond_snapshot_data()
1263 int tracing_alloc_snapshot_instance(struct trace_array *tr) in tracing_alloc_snapshot_instance() argument
1267 if (!tr->allocated_snapshot) { in tracing_alloc_snapshot_instance()
1270 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_alloc_snapshot_instance()
1271 &tr->array_buffer, RING_BUFFER_ALL_CPUS); in tracing_alloc_snapshot_instance()
1275 tr->allocated_snapshot = true; in tracing_alloc_snapshot_instance()
1281 static void free_snapshot(struct trace_array *tr) in free_snapshot() argument
1288 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); in free_snapshot()
1289 set_buffer_entries(&tr->max_buffer, 1); in free_snapshot()
1290 tracing_reset_online_cpus(&tr->max_buffer); in free_snapshot()
1291 tr->allocated_snapshot = false; in free_snapshot()
1306 struct trace_array *tr = &global_trace; in tracing_alloc_snapshot() local
1309 ret = tracing_alloc_snapshot_instance(tr); in tracing_alloc_snapshot()
1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, in tracing_snapshot_cond_enable() argument
1367 ret = tracing_alloc_snapshot_instance(tr); in tracing_snapshot_cond_enable()
1371 if (tr->current_trace->use_max_tr) { in tracing_snapshot_cond_enable()
1384 if (tr->cond_snapshot) { in tracing_snapshot_cond_enable()
1390 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_enable()
1391 tr->cond_snapshot = cond_snapshot; in tracing_snapshot_cond_enable()
1392 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_enable()
1416 int tracing_snapshot_cond_disable(struct trace_array *tr) in tracing_snapshot_cond_disable() argument
1421 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_disable()
1423 if (!tr->cond_snapshot) in tracing_snapshot_cond_disable()
1426 kfree(tr->cond_snapshot); in tracing_snapshot_cond_disable()
1427 tr->cond_snapshot = NULL; in tracing_snapshot_cond_disable()
1430 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_disable()
1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) in tracing_snapshot_cond() argument
1459 void *tracing_cond_snapshot_data(struct trace_array *tr) in tracing_cond_snapshot_data() argument
1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) in tracing_snapshot_cond_enable() argument
1469 int tracing_snapshot_cond_disable(struct trace_array *tr) in tracing_snapshot_cond_disable() argument
1474 #define free_snapshot(tr) do { } while (0) argument
1477 void tracer_tracing_off(struct trace_array *tr) in tracer_tracing_off() argument
1479 if (tr->array_buffer.buffer) in tracer_tracing_off()
1480 ring_buffer_record_off(tr->array_buffer.buffer); in tracer_tracing_off()
1489 tr->buffer_disabled = 1; in tracer_tracing_off()
1523 bool tracer_tracing_is_on(struct trace_array *tr) in tracer_tracing_is_on() argument
1525 if (tr->array_buffer.buffer) in tracer_tracing_is_on()
1526 return ring_buffer_record_is_on(tr->array_buffer.buffer); in tracer_tracing_is_on()
1527 return !tr->buffer_disabled; in tracer_tracing_is_on()
1608 bool trace_clock_in_ns(struct trace_array *tr) in trace_clock_in_ns() argument
1610 if (trace_clocks[tr->clock_id].in_ns) in trace_clock_in_ns()
1756 struct trace_array *tr = container_of(work, struct trace_array, in latency_fsnotify_workfn() local
1758 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); in latency_fsnotify_workfn()
1763 struct trace_array *tr = container_of(iwork, struct trace_array, in latency_fsnotify_workfn_irq() local
1765 queue_work(fsnotify_wq, &tr->fsnotify_work); in latency_fsnotify_workfn_irq()
1768 static void trace_create_maxlat_file(struct trace_array *tr, in trace_create_maxlat_file() argument
1771 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); in trace_create_maxlat_file()
1772 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); in trace_create_maxlat_file()
1773 tr->d_max_latency = trace_create_file("tracing_max_latency", in trace_create_maxlat_file()
1775 d_tracer, tr, in trace_create_maxlat_file()
1792 void latency_fsnotify(struct trace_array *tr) in latency_fsnotify() argument
1801 irq_work_queue(&tr->fsnotify_irqwork); in latency_fsnotify()
1806 #define trace_create_maxlat_file(tr, d_tracer) \ argument
1808 d_tracer, tr, &tracing_max_lat_fops)
1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) in __update_max_tr() argument
1820 struct array_buffer *trace_buf = &tr->array_buffer; in __update_max_tr()
1821 struct array_buffer *max_buf = &tr->max_buffer; in __update_max_tr()
1828 max_data->saved_latency = tr->max_latency; in __update_max_tr()
1849 latency_fsnotify(tr); in __update_max_tr()
1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, in update_max_tr() argument
1866 if (tr->stop_count) in update_max_tr()
1871 if (!tr->allocated_snapshot) { in update_max_tr()
1873 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr()
1877 arch_spin_lock(&tr->max_lock); in update_max_tr()
1880 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) in update_max_tr()
1881 ring_buffer_record_on(tr->max_buffer.buffer); in update_max_tr()
1883 ring_buffer_record_off(tr->max_buffer.buffer); in update_max_tr()
1886 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { in update_max_tr()
1887 arch_spin_unlock(&tr->max_lock); in update_max_tr()
1891 swap(tr->array_buffer.buffer, tr->max_buffer.buffer); in update_max_tr()
1893 __update_max_tr(tr, tsk, cpu); in update_max_tr()
1895 arch_spin_unlock(&tr->max_lock); in update_max_tr()
1907 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) in update_max_tr_single() argument
1911 if (tr->stop_count) in update_max_tr_single()
1915 if (!tr->allocated_snapshot) { in update_max_tr_single()
1917 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr_single()
1921 arch_spin_lock(&tr->max_lock); in update_max_tr_single()
1923 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); in update_max_tr_single()
1933 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, in update_max_tr_single()
1939 __update_max_tr(tr, tsk, cpu); in update_max_tr_single()
1940 arch_spin_unlock(&tr->max_lock); in update_max_tr_single()
1980 struct trace_array *tr = &global_trace; in run_tracer_selftest() local
1981 struct tracer *saved_tracer = tr->current_trace; in run_tracer_selftest()
2008 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
2010 tr->current_trace = type; in run_tracer_selftest()
2016 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, in run_tracer_selftest()
2018 tr->allocated_snapshot = true; in run_tracer_selftest()
2024 ret = type->selftest(type, tr); in run_tracer_selftest()
2026 tr->current_trace = saved_tracer; in run_tracer_selftest()
2034 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
2038 tr->allocated_snapshot = false; in run_tracer_selftest()
2042 ring_buffer_resize(tr->max_buffer.buffer, 1, in run_tracer_selftest()
2126 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2256 struct trace_array *tr; in tracing_reset_all_online_cpus_unlocked() local
2260 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in tracing_reset_all_online_cpus_unlocked()
2261 if (!tr->clear_trace) in tracing_reset_all_online_cpus_unlocked()
2263 tr->clear_trace = false; in tracing_reset_all_online_cpus_unlocked()
2264 tracing_reset_online_cpus(&tr->array_buffer); in tracing_reset_all_online_cpus_unlocked()
2266 tracing_reset_online_cpus(&tr->max_buffer); in tracing_reset_all_online_cpus_unlocked()
2405 static void tracing_start_tr(struct trace_array *tr) in tracing_start_tr() argument
2414 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_start_tr()
2417 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_start_tr()
2419 if (--tr->stop_count) { in tracing_start_tr()
2420 if (tr->stop_count < 0) { in tracing_start_tr()
2423 tr->stop_count = 0; in tracing_start_tr()
2428 buffer = tr->array_buffer.buffer; in tracing_start_tr()
2433 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_start_tr()
2470 static void tracing_stop_tr(struct trace_array *tr) in tracing_stop_tr() argument
2476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_stop_tr()
2479 raw_spin_lock_irqsave(&tr->start_lock, flags); in tracing_stop_tr()
2480 if (tr->stop_count++) in tracing_stop_tr()
2483 buffer = tr->array_buffer.buffer; in tracing_stop_tr()
2488 raw_spin_unlock_irqrestore(&tr->start_lock, flags); in tracing_stop_tr()
2860 struct trace_array *tr = trace_file->tr; in trace_event_buffer_lock_reserve() local
2863 *current_rb = tr->array_buffer.buffer; in trace_event_buffer_lock_reserve()
2865 if (!tr->no_filter_buffering_ref && in trace_event_buffer_lock_reserve()
3022 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, in trace_event_buffer_commit()
3041 void trace_buffer_unlock_commit_regs(struct trace_array *tr, in trace_buffer_unlock_commit_regs() argument
3055 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); in trace_buffer_unlock_commit_regs()
3056 ftrace_trace_userstack(tr, buffer, trace_ctx); in trace_buffer_unlock_commit_regs()
3070 trace_function(struct trace_array *tr, unsigned long ip, unsigned long in trace_function() argument
3074 struct trace_buffer *buffer = tr->array_buffer.buffer; in trace_function()
3181 static inline void ftrace_trace_stack(struct trace_array *tr, in ftrace_trace_stack() argument
3186 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) in ftrace_trace_stack()
3192 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, in __trace_stack() argument
3195 struct trace_buffer *buffer = tr->array_buffer.buffer; in __trace_stack()
3241 ftrace_trace_userstack(struct trace_array *tr, in ftrace_trace_userstack() argument
3248 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) in ftrace_trace_userstack()
3287 static void ftrace_trace_userstack(struct trace_array *tr, in ftrace_trace_userstack() argument
3304 void trace_last_func_repeats(struct trace_array *tr, in trace_last_func_repeats() argument
3308 struct trace_buffer *buffer = tr->array_buffer.buffer; in trace_last_func_repeats()
3451 struct trace_array *tr = &global_trace; in trace_vbprintk() local
3478 buffer = tr->array_buffer.buffer; in trace_vbprintk()
3491 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); in trace_vbprintk()
3564 int trace_array_vprintk(struct trace_array *tr, in trace_array_vprintk() argument
3567 if (tracing_selftest_running && tr == &global_trace) in trace_array_vprintk()
3570 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); in trace_array_vprintk()
3594 int trace_array_printk(struct trace_array *tr, in trace_array_printk() argument
3600 if (!tr) in trace_array_printk()
3604 if (tr == &global_trace) in trace_array_printk()
3607 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) in trace_array_printk()
3611 ret = trace_array_vprintk(tr, ip, fmt, ap); in trace_array_printk()
3625 int trace_array_init_printk(struct trace_array *tr) in trace_array_init_printk() argument
3627 if (!tr) in trace_array_init_printk()
3631 if (tr == &global_trace) in trace_array_init_printk()
3766 if (!iter->tr || iter->fmt == static_fmt_buf) in trace_iter_expand_format()
4025 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) in trace_event_format()
4186 struct trace_array *tr = iter->tr; in s_start() local
4193 if (unlikely(tr->current_trace != iter->trace)) { in s_start()
4197 iter->trace = tr->current_trace; in s_start()
4294 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) in trace_total_entries_cpu() argument
4298 if (!tr) in trace_total_entries_cpu()
4299 tr = &global_trace; in trace_total_entries_cpu()
4301 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); in trace_total_entries_cpu()
4306 unsigned long trace_total_entries(struct trace_array *tr) in trace_total_entries() argument
4310 if (!tr) in trace_total_entries()
4311 tr = &global_trace; in trace_total_entries()
4313 get_total_entries(&tr->array_buffer, &total, &entries); in trace_total_entries()
4431 struct trace_array *tr = iter->tr; in test_cpu_buff_start() local
4433 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) in test_cpu_buff_start()
4457 struct trace_array *tr = iter->tr; in print_trace_fmt() local
4459 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); in print_trace_fmt()
4469 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_trace_fmt()
4480 if (tr->trace_flags & TRACE_ITER_FIELDS) in print_trace_fmt()
4492 struct trace_array *tr = iter->tr; in print_raw_fmt() local
4499 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) in print_raw_fmt()
4517 struct trace_array *tr = iter->tr; in print_hex_fmt() local
4525 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_hex_fmt()
4547 struct trace_array *tr = iter->tr; in print_bin_fmt() local
4554 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_bin_fmt()
4603 struct trace_array *tr = iter->tr; in print_trace_line() local
4604 unsigned long trace_flags = tr->trace_flags; in print_trace_line()
4654 struct trace_array *tr = iter->tr; in trace_latency_header() local
4663 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) in trace_latency_header()
4670 struct trace_array *tr = iter->tr; in trace_default_header() local
4671 unsigned long trace_flags = tr->trace_flags; in trace_default_header()
4731 if (iter->tr->allocated_snapshot) in print_snapshot_help()
4753 if (iter->tr) { in s_show()
4831 struct trace_array *tr = inode->i_private; in __tracing_open() local
4870 iter->trace = tr->current_trace; in __tracing_open()
4875 iter->tr = tr; in __tracing_open()
4879 if (tr->current_trace->print_max || snapshot) in __tracing_open()
4880 iter->array_buffer = &tr->max_buffer; in __tracing_open()
4883 iter->array_buffer = &tr->array_buffer; in __tracing_open()
4898 if (trace_clocks[tr->clock_id].in_ns) in __tracing_open()
4905 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) in __tracing_open()
4906 tracing_stop_tr(tr); in __tracing_open()
4964 struct trace_array *tr = inode->i_private; in tracing_open_generic_tr() local
4967 ret = tracing_check_open_get_tr(tr); in tracing_open_generic_tr()
4985 ret = tracing_check_open_get_tr(file->tr); in tracing_open_file_tr()
4998 trace_array_put(file->tr); in tracing_release_file_tr()
5011 struct trace_array *tr = inode->i_private; in tracing_release() local
5017 trace_array_put(tr); in tracing_release()
5033 if (!iter->snapshot && tr->stop_count) in tracing_release()
5035 tracing_start_tr(tr); in tracing_release()
5037 __trace_array_put(tr); in tracing_release()
5049 struct trace_array *tr = inode->i_private; in tracing_release_generic_tr() local
5051 trace_array_put(tr); in tracing_release_generic_tr()
5057 struct trace_array *tr = inode->i_private; in tracing_single_release_tr() local
5059 trace_array_put(tr); in tracing_single_release_tr()
5066 struct trace_array *tr = inode->i_private; in tracing_open() local
5070 ret = tracing_check_open_get_tr(tr); in tracing_open()
5077 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_open()
5080 if (tr->current_trace->print_max) in tracing_open()
5081 trace_buf = &tr->max_buffer; in tracing_open()
5094 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open()
5099 trace_array_put(tr); in tracing_open()
5110 trace_ok_for_array(struct tracer *t, struct trace_array *tr) in trace_ok_for_array() argument
5112 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; in trace_ok_for_array()
5117 get_tracer_for_array(struct trace_array *tr, struct tracer *t) in get_tracer_for_array() argument
5119 while (t && !trace_ok_for_array(t, tr)) in get_tracer_for_array()
5128 struct trace_array *tr = m->private; in t_next() local
5134 t = get_tracer_for_array(tr, t->next); in t_next()
5141 struct trace_array *tr = m->private; in t_start() local
5147 t = get_tracer_for_array(tr, trace_types); in t_start()
5184 struct trace_array *tr = inode->i_private; in show_traces_open() local
5188 ret = tracing_check_open_get_tr(tr); in show_traces_open()
5194 trace_array_put(tr); in show_traces_open()
5199 m->private = tr; in show_traces_open()
5206 struct trace_array *tr = inode->i_private; in show_traces_release() local
5208 trace_array_put(tr); in show_traces_release()
5252 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_read() local
5257 cpumask_pr_args(tr->tracing_cpumask)) + 1; in tracing_cpumask_read()
5263 cpumask_pr_args(tr->tracing_cpumask)); in tracing_cpumask_read()
5276 int tracing_set_cpumask(struct trace_array *tr, in tracing_set_cpumask() argument
5281 if (!tr) in tracing_set_cpumask()
5285 arch_spin_lock(&tr->max_lock); in tracing_set_cpumask()
5291 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
5293 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
5294 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
5296 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); in tracing_set_cpumask()
5299 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
5301 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
5302 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
5304 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); in tracing_set_cpumask()
5308 arch_spin_unlock(&tr->max_lock); in tracing_set_cpumask()
5311 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); in tracing_set_cpumask()
5320 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_write() local
5331 err = tracing_set_cpumask(tr, tracing_cpumask_new); in tracing_cpumask_write()
5356 struct trace_array *tr = m->private; in tracing_trace_options_show() local
5361 tracer_flags = tr->current_trace->flags->val; in tracing_trace_options_show()
5362 trace_opts = tr->current_trace->flags->opts; in tracing_trace_options_show()
5365 if (tr->trace_flags & (1 << i)) in tracing_trace_options_show()
5382 static int __set_tracer_option(struct trace_array *tr, in __set_tracer_option() argument
5389 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); in __set_tracer_option()
5401 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) in set_tracer_option() argument
5403 struct tracer *trace = tr->current_trace; in set_tracer_option()
5412 return __set_tracer_option(tr, trace->flags, opts, neg); in set_tracer_option()
5427 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) in set_tracer_flag() argument
5436 if (!!(tr->trace_flags & mask) == !!enabled) in set_tracer_flag()
5440 if (tr->current_trace->flag_changed) in set_tracer_flag()
5441 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) in set_tracer_flag()
5445 tr->trace_flags |= mask; in set_tracer_flag()
5447 tr->trace_flags &= ~mask; in set_tracer_flag()
5467 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; in set_tracer_flag()
5475 trace_event_follow_fork(tr, enabled); in set_tracer_flag()
5478 ftrace_pid_follow_fork(tr, enabled); in set_tracer_flag()
5481 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); in set_tracer_flag()
5483 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); in set_tracer_flag()
5495 int trace_set_options(struct trace_array *tr, char *option) in trace_set_options() argument
5517 ret = set_tracer_option(tr, cmp, neg); in trace_set_options()
5519 ret = set_tracer_flag(tr, 1 << ret, !neg); in trace_set_options()
5559 struct trace_array *tr = m->private; in tracing_trace_options_write() local
5571 ret = trace_set_options(tr, buf); in tracing_trace_options_write()
5582 struct trace_array *tr = inode->i_private; in tracing_trace_options_open() local
5585 ret = tracing_check_open_get_tr(tr); in tracing_trace_options_open()
5591 trace_array_put(tr); in tracing_trace_options_open()
6305 struct trace_array *tr = filp->private_data; in tracing_set_trace_read() local
6310 r = sprintf(buf, "%s\n", tr->current_trace->name); in tracing_set_trace_read()
6316 int tracer_init(struct tracer *t, struct trace_array *tr) in tracer_init() argument
6318 tracing_reset_online_cpus(&tr->array_buffer); in tracer_init()
6319 return t->init(tr); in tracer_init()
6367 static int __tracing_resize_ring_buffer(struct trace_array *tr, in __tracing_resize_ring_buffer() argument
6380 if (!tr->array_buffer.buffer) in __tracing_resize_ring_buffer()
6383 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
6388 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || in __tracing_resize_ring_buffer()
6389 !tr->current_trace->use_max_tr) in __tracing_resize_ring_buffer()
6392 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
6394 int r = resize_buffer_duplicate_size(&tr->array_buffer, in __tracing_resize_ring_buffer()
6395 &tr->array_buffer, cpu); in __tracing_resize_ring_buffer()
6417 update_buffer_entries(&tr->max_buffer, cpu); in __tracing_resize_ring_buffer()
6422 update_buffer_entries(&tr->array_buffer, cpu); in __tracing_resize_ring_buffer()
6427 ssize_t tracing_resize_ring_buffer(struct trace_array *tr, in tracing_resize_ring_buffer() argument
6442 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); in tracing_resize_ring_buffer()
6479 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6485 static void tracing_set_nop(struct trace_array *tr) in tracing_set_nop() argument
6487 if (tr->current_trace == &nop_trace) in tracing_set_nop()
6490 tr->current_trace->enabled--; in tracing_set_nop()
6492 if (tr->current_trace->reset) in tracing_set_nop()
6493 tr->current_trace->reset(tr); in tracing_set_nop()
6495 tr->current_trace = &nop_trace; in tracing_set_nop()
6500 static void add_tracer_options(struct trace_array *tr, struct tracer *t) in add_tracer_options() argument
6503 if (!tr->dir) in add_tracer_options()
6510 create_trace_option_files(tr, t); in add_tracer_options()
6513 int tracing_set_tracer(struct trace_array *tr, const char *buf) in tracing_set_tracer() argument
6524 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, in tracing_set_tracer()
6539 if (t == tr->current_trace) in tracing_set_tracer()
6545 arch_spin_lock(&tr->max_lock); in tracing_set_tracer()
6546 if (tr->cond_snapshot) in tracing_set_tracer()
6548 arch_spin_unlock(&tr->max_lock); in tracing_set_tracer()
6562 if (!trace_ok_for_array(t, tr)) { in tracing_set_tracer()
6568 if (tr->trace_ref) { in tracing_set_tracer()
6575 tr->current_trace->enabled--; in tracing_set_tracer()
6577 if (tr->current_trace->reset) in tracing_set_tracer()
6578 tr->current_trace->reset(tr); in tracing_set_tracer()
6581 had_max_tr = tr->current_trace->use_max_tr; in tracing_set_tracer()
6584 tr->current_trace = &nop_trace; in tracing_set_tracer()
6595 free_snapshot(tr); in tracing_set_tracer()
6598 if (t->use_max_tr && !tr->allocated_snapshot) { in tracing_set_tracer()
6599 ret = tracing_alloc_snapshot_instance(tr); in tracing_set_tracer()
6604 tr->current_trace = &nop_trace; in tracing_set_tracer()
6608 ret = tracer_init(t, tr); in tracing_set_tracer()
6613 tr->current_trace = t; in tracing_set_tracer()
6614 tr->current_trace->enabled++; in tracing_set_tracer()
6615 trace_branch_enable(tr); in tracing_set_tracer()
6626 struct trace_array *tr = filp->private_data; in tracing_set_trace_write() local
6644 err = tracing_set_tracer(tr, name); in tracing_set_trace_write()
6694 struct trace_array *tr = filp->private_data; in tracing_thresh_write() local
6702 if (tr->current_trace->update_thresh) { in tracing_thresh_write()
6703 ret = tr->current_trace->update_thresh(tr); in tracing_thresh_write()
6721 struct trace_array *tr = filp->private_data; in tracing_max_lat_read() local
6723 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); in tracing_max_lat_read()
6730 struct trace_array *tr = filp->private_data; in tracing_max_lat_write() local
6732 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); in tracing_max_lat_write()
6737 static int open_pipe_on_cpu(struct trace_array *tr, int cpu) in open_pipe_on_cpu() argument
6740 if (cpumask_empty(tr->pipe_cpumask)) { in open_pipe_on_cpu()
6741 cpumask_setall(tr->pipe_cpumask); in open_pipe_on_cpu()
6744 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { in open_pipe_on_cpu()
6745 cpumask_set_cpu(cpu, tr->pipe_cpumask); in open_pipe_on_cpu()
6751 static void close_pipe_on_cpu(struct trace_array *tr, int cpu) in close_pipe_on_cpu() argument
6754 WARN_ON(!cpumask_full(tr->pipe_cpumask)); in close_pipe_on_cpu()
6755 cpumask_clear(tr->pipe_cpumask); in close_pipe_on_cpu()
6757 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); in close_pipe_on_cpu()
6758 cpumask_clear_cpu(cpu, tr->pipe_cpumask); in close_pipe_on_cpu()
6764 struct trace_array *tr = inode->i_private; in tracing_open_pipe() local
6769 ret = tracing_check_open_get_tr(tr); in tracing_open_pipe()
6775 ret = open_pipe_on_cpu(tr, cpu); in tracing_open_pipe()
6787 iter->trace = tr->current_trace; in tracing_open_pipe()
6797 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open_pipe()
6801 if (trace_clocks[tr->clock_id].in_ns) in tracing_open_pipe()
6804 iter->tr = tr; in tracing_open_pipe()
6805 iter->array_buffer = &tr->array_buffer; in tracing_open_pipe()
6815 tr->trace_ref++; in tracing_open_pipe()
6823 close_pipe_on_cpu(tr, cpu); in tracing_open_pipe()
6825 __trace_array_put(tr); in tracing_open_pipe()
6833 struct trace_array *tr = inode->i_private; in tracing_release_pipe() local
6837 tr->trace_ref--; in tracing_release_pipe()
6841 close_pipe_on_cpu(tr, iter->cpu_file); in tracing_release_pipe()
6847 trace_array_put(tr); in tracing_release_pipe()
6855 struct trace_array *tr = iter->tr; in trace_poll() local
6861 if (tr->trace_flags & TRACE_ITER_BLOCK) in trace_poll()
6868 filp, poll_table, iter->tr->buffer_percent); in trace_poll()
6900 if (!tracer_tracing_is_on(iter->tr) && iter->pos) in tracing_wait_pipe()
7169 struct trace_array *tr = inode->i_private; in tracing_entries_read() local
7187 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; in tracing_entries_read()
7188 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { in tracing_entries_read()
7204 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); in tracing_entries_read()
7217 struct trace_array *tr = inode->i_private; in tracing_entries_write() local
7231 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); in tracing_entries_write()
7244 struct trace_array *tr = filp->private_data; in tracing_total_entries_read() local
7251 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; in tracing_total_entries_read()
7281 struct trace_array *tr = inode->i_private; in tracing_free_buffer_release() local
7284 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) in tracing_free_buffer_release()
7285 tracer_tracing_off(tr); in tracing_free_buffer_release()
7287 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); in tracing_free_buffer_release()
7289 trace_array_put(tr); in tracing_free_buffer_release()
7298 struct trace_array *tr = filp->private_data; in tracing_mark_write() local
7314 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_write()
7328 buffer = tr->array_buffer.buffer; in tracing_mark_write()
7346 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { in tracing_mark_write()
7349 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); in tracing_mark_write()
7363 event_triggers_post_call(tr->trace_marker_file, tt); in tracing_mark_write()
7375 struct trace_array *tr = filp->private_data; in tracing_mark_raw_write() local
7388 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_raw_write()
7404 buffer = tr->array_buffer.buffer; in tracing_mark_raw_write()
7428 struct trace_array *tr = m->private; in tracing_clock_show() local
7434 i == tr->clock_id ? "[" : "", trace_clocks[i].name, in tracing_clock_show()
7435 i == tr->clock_id ? "]" : ""); in tracing_clock_show()
7441 int tracing_set_clock(struct trace_array *tr, const char *clockstr) in tracing_set_clock() argument
7454 tr->clock_id = i; in tracing_set_clock()
7456 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
7462 tracing_reset_online_cpus(&tr->array_buffer); in tracing_set_clock()
7465 if (tr->max_buffer.buffer) in tracing_set_clock()
7466 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
7467 tracing_reset_online_cpus(&tr->max_buffer); in tracing_set_clock()
7479 struct trace_array *tr = m->private; in tracing_clock_write() local
7494 ret = tracing_set_clock(tr, clockstr); in tracing_clock_write()
7505 struct trace_array *tr = inode->i_private; in tracing_clock_open() local
7508 ret = tracing_check_open_get_tr(tr); in tracing_clock_open()
7514 trace_array_put(tr); in tracing_clock_open()
7521 struct trace_array *tr = m->private; in tracing_time_stamp_mode_show() local
7525 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) in tracing_time_stamp_mode_show()
7537 struct trace_array *tr = inode->i_private; in tracing_time_stamp_mode_open() local
7540 ret = tracing_check_open_get_tr(tr); in tracing_time_stamp_mode_open()
7546 trace_array_put(tr); in tracing_time_stamp_mode_open()
7562 int tracing_set_filter_buffering(struct trace_array *tr, bool set) in tracing_set_filter_buffering() argument
7568 if (set && tr->no_filter_buffering_ref++) in tracing_set_filter_buffering()
7572 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) { in tracing_set_filter_buffering()
7577 --tr->no_filter_buffering_ref; in tracing_set_filter_buffering()
7595 struct trace_array *tr = inode->i_private; in tracing_snapshot_open() local
7600 ret = tracing_check_open_get_tr(tr); in tracing_snapshot_open()
7621 iter->tr = tr; in tracing_snapshot_open()
7622 iter->array_buffer = &tr->max_buffer; in tracing_snapshot_open()
7629 trace_array_put(tr); in tracing_snapshot_open()
7634 static void tracing_swap_cpu_buffer(void *tr) in tracing_swap_cpu_buffer() argument
7636 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); in tracing_swap_cpu_buffer()
7645 struct trace_array *tr = iter->tr; in tracing_snapshot_write() local
7659 if (tr->current_trace->use_max_tr) { in tracing_snapshot_write()
7665 arch_spin_lock(&tr->max_lock); in tracing_snapshot_write()
7666 if (tr->cond_snapshot) in tracing_snapshot_write()
7668 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_write()
7679 if (tr->allocated_snapshot) in tracing_snapshot_write()
7680 free_snapshot(tr); in tracing_snapshot_write()
7690 if (tr->allocated_snapshot) in tracing_snapshot_write()
7691 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_snapshot_write()
7692 &tr->array_buffer, iter->cpu_file); in tracing_snapshot_write()
7694 ret = tracing_alloc_snapshot_instance(tr); in tracing_snapshot_write()
7700 update_max_tr(tr, current, smp_processor_id(), NULL); in tracing_snapshot_write()
7704 (void *)tr, 1); in tracing_snapshot_write()
7708 if (tr->allocated_snapshot) { in tracing_snapshot_write()
7710 tracing_reset_online_cpus(&tr->max_buffer); in tracing_snapshot_write()
7712 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); in tracing_snapshot_write()
7769 info->iter.array_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
8007 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, in get_tracing_log_err() argument
8013 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { in get_tracing_log_err()
8016 tr->n_err_log_entries++; in get_tracing_log_err()
8023 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); in get_tracing_log_err()
8084 void tracing_log_err(struct trace_array *tr, in tracing_log_err() argument
8091 if (!tr) in tracing_log_err()
8092 tr = &global_trace; in tracing_log_err()
8097 err = get_tracing_log_err(tr, len); in tracing_log_err()
8111 list_add_tail(&err->list, &tr->err_log); in tracing_log_err()
8115 static void clear_tracing_err_log(struct trace_array *tr) in clear_tracing_err_log() argument
8120 list_for_each_entry_safe(err, next, &tr->err_log, list) { in clear_tracing_err_log()
8125 tr->n_err_log_entries = 0; in clear_tracing_err_log()
8131 struct trace_array *tr = m->private; in tracing_err_log_seq_start() local
8135 return seq_list_start(&tr->err_log, *pos); in tracing_err_log_seq_start()
8140 struct trace_array *tr = m->private; in tracing_err_log_seq_next() local
8142 return seq_list_next(v, &tr->err_log, pos); in tracing_err_log_seq_next()
8189 struct trace_array *tr = inode->i_private; in tracing_err_log_open() local
8192 ret = tracing_check_open_get_tr(tr); in tracing_err_log_open()
8198 clear_tracing_err_log(tr); in tracing_err_log_open()
8204 m->private = tr; in tracing_err_log_open()
8206 trace_array_put(tr); in tracing_err_log_open()
8221 struct trace_array *tr = inode->i_private; in tracing_err_log_release() local
8223 trace_array_put(tr); in tracing_err_log_release()
8241 struct trace_array *tr = inode->i_private; in tracing_buffers_open() local
8245 ret = tracing_check_open_get_tr(tr); in tracing_buffers_open()
8251 trace_array_put(tr); in tracing_buffers_open()
8257 info->iter.tr = tr; in tracing_buffers_open()
8259 info->iter.trace = tr->current_trace; in tracing_buffers_open()
8260 info->iter.array_buffer = &tr->array_buffer; in tracing_buffers_open()
8267 tr->trace_ref++; in tracing_buffers_open()
8273 trace_array_put(tr); in tracing_buffers_open()
8300 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_read()
8368 iter->tr->trace_ref--; in tracing_buffers_release()
8370 __trace_array_put(iter->tr); in tracing_buffers_release()
8464 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_splice_read()
8542 ret = wait_on_pipe(iter, iter->tr->buffer_percent); in tracing_buffers_splice_read()
8547 if (!tracer_tracing_is_on(iter->tr)) in tracing_buffers_splice_read()
8601 struct trace_array *tr = inode->i_private; in tracing_stats_read() local
8602 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_stats_read()
8627 if (trace_clocks[tr->clock_id].in_ns) { in tracing_stats_read()
8702 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_snapshot() argument
8705 tracing_snapshot_instance(tr); in ftrace_snapshot()
8710 struct trace_array *tr, struct ftrace_probe_ops *ops, in ftrace_count_snapshot() argument
8727 tracing_snapshot_instance(tr); in ftrace_count_snapshot()
8753 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_init() argument
8769 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, in ftrace_snapshot_free() argument
8797 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, in ftrace_trace_snapshot_callback() argument
8805 if (!tr) in ftrace_trace_snapshot_callback()
8815 return unregister_ftrace_function_probe_func(glob+1, tr, ops); in ftrace_trace_snapshot_callback()
8834 ret = tracing_alloc_snapshot_instance(tr); in ftrace_trace_snapshot_callback()
8838 ret = register_ftrace_function_probe(glob, tr, ops, count); in ftrace_trace_snapshot_callback()
8857 static struct dentry *tracing_get_dentry(struct trace_array *tr) in tracing_get_dentry() argument
8859 if (WARN_ON(!tr->dir)) in tracing_get_dentry()
8863 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_get_dentry()
8867 return tr->dir; in tracing_get_dentry()
8870 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) in tracing_dentry_percpu() argument
8874 if (tr->percpu_dir) in tracing_dentry_percpu()
8875 return tr->percpu_dir; in tracing_dentry_percpu()
8877 d_tracer = tracing_get_dentry(tr); in tracing_dentry_percpu()
8881 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); in tracing_dentry_percpu()
8883 MEM_FAIL(!tr->percpu_dir, in tracing_dentry_percpu()
8886 return tr->percpu_dir; in tracing_dentry_percpu()
8901 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) in tracing_init_tracefs_percpu() argument
8903 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); in tracing_init_tracefs_percpu()
8919 tr, cpu, &tracing_pipe_fops); in tracing_init_tracefs_percpu()
8923 tr, cpu, &tracing_fops); in tracing_init_tracefs_percpu()
8926 tr, cpu, &tracing_buffers_fops); in tracing_init_tracefs_percpu()
8929 tr, cpu, &tracing_stats_fops); in tracing_init_tracefs_percpu()
8932 tr, cpu, &tracing_entries_fops); in tracing_init_tracefs_percpu()
8936 tr, cpu, &snapshot_fops); in tracing_init_tracefs_percpu()
8939 tr, cpu, &snapshot_raw_fops); in tracing_init_tracefs_percpu()
8980 ret = __set_tracer_option(topt->tr, topt->flags, in trace_options_write()
8997 ret = tracing_check_open_get_tr(topt->tr); in tracing_open_options()
9009 trace_array_put(topt->tr); in tracing_release_options()
9059 struct trace_array *tr; in trace_options_core_read() local
9063 get_tr_index(tr_index, &tr, &index); in trace_options_core_read()
9065 if (tr->trace_flags & (1 << index)) in trace_options_core_read()
9078 struct trace_array *tr; in trace_options_core_write() local
9083 get_tr_index(tr_index, &tr, &index); in trace_options_core_write()
9094 ret = set_tracer_flag(tr, 1 << index, val); in trace_options_core_write()
9129 static struct dentry *trace_options_init_dentry(struct trace_array *tr) in trace_options_init_dentry() argument
9133 if (tr->options) in trace_options_init_dentry()
9134 return tr->options; in trace_options_init_dentry()
9136 d_tracer = tracing_get_dentry(tr); in trace_options_init_dentry()
9140 tr->options = tracefs_create_dir("options", d_tracer); in trace_options_init_dentry()
9141 if (!tr->options) { in trace_options_init_dentry()
9146 return tr->options; in trace_options_init_dentry()
9150 create_trace_option_file(struct trace_array *tr, in create_trace_option_file() argument
9157 t_options = trace_options_init_dentry(tr); in create_trace_option_file()
9163 topt->tr = tr; in create_trace_option_file()
9171 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) in create_trace_option_files() argument
9192 if (!trace_ok_for_array(tracer, tr)) in create_trace_option_files()
9195 for (i = 0; i < tr->nr_topts; i++) { in create_trace_option_files()
9197 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) in create_trace_option_files()
9210 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), in create_trace_option_files()
9217 tr->topts = tr_topts; in create_trace_option_files()
9218 tr->topts[tr->nr_topts].tracer = tracer; in create_trace_option_files()
9219 tr->topts[tr->nr_topts].topts = topts; in create_trace_option_files()
9220 tr->nr_topts++; in create_trace_option_files()
9223 create_trace_option_file(tr, &topts[cnt], flags, in create_trace_option_files()
9232 create_trace_option_core_file(struct trace_array *tr, in create_trace_option_core_file() argument
9237 t_options = trace_options_init_dentry(tr); in create_trace_option_core_file()
9242 (void *)&tr->trace_flags_index[index], in create_trace_option_core_file()
9246 static void create_trace_options_dir(struct trace_array *tr) in create_trace_options_dir() argument
9249 bool top_level = tr == &global_trace; in create_trace_options_dir()
9252 t_options = trace_options_init_dentry(tr); in create_trace_options_dir()
9259 create_trace_option_core_file(tr, trace_options[i], i); in create_trace_options_dir()
9267 struct trace_array *tr = filp->private_data; in rb_simple_read() local
9271 r = tracer_tracing_is_on(tr); in rb_simple_read()
9281 struct trace_array *tr = filp->private_data; in rb_simple_write() local
9282 struct trace_buffer *buffer = tr->array_buffer.buffer; in rb_simple_write()
9292 if (!!val == tracer_tracing_is_on(tr)) { in rb_simple_write()
9295 tracer_tracing_on(tr); in rb_simple_write()
9296 if (tr->current_trace->start) in rb_simple_write()
9297 tr->current_trace->start(tr); in rb_simple_write()
9299 tracer_tracing_off(tr); in rb_simple_write()
9300 if (tr->current_trace->stop) in rb_simple_write()
9301 tr->current_trace->stop(tr); in rb_simple_write()
9325 struct trace_array *tr = filp->private_data; in buffer_percent_read() local
9329 r = tr->buffer_percent; in buffer_percent_read()
9339 struct trace_array *tr = filp->private_data; in buffer_percent_write() local
9350 tr->buffer_percent = val; in buffer_percent_write()
9368 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9371 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) in allocate_trace_buffer() argument
9375 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; in allocate_trace_buffer()
9377 buf->tr = tr; in allocate_trace_buffer()
9391 set_buffer_entries(&tr->array_buffer, in allocate_trace_buffer()
9392 ring_buffer_size(tr->array_buffer.buffer, 0)); in allocate_trace_buffer()
9407 static int allocate_trace_buffers(struct trace_array *tr, int size) in allocate_trace_buffers() argument
9411 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); in allocate_trace_buffers()
9416 ret = allocate_trace_buffer(tr, &tr->max_buffer, in allocate_trace_buffers()
9419 free_trace_buffer(&tr->array_buffer); in allocate_trace_buffers()
9422 tr->allocated_snapshot = allocate_snapshot; in allocate_trace_buffers()
9430 static void free_trace_buffers(struct trace_array *tr) in free_trace_buffers() argument
9432 if (!tr) in free_trace_buffers()
9435 free_trace_buffer(&tr->array_buffer); in free_trace_buffers()
9438 free_trace_buffer(&tr->max_buffer); in free_trace_buffers()
9442 static void init_trace_flags_index(struct trace_array *tr) in init_trace_flags_index() argument
9448 tr->trace_flags_index[i] = i; in init_trace_flags_index()
9451 static void __update_tracer_options(struct trace_array *tr) in __update_tracer_options() argument
9456 add_tracer_options(tr, t); in __update_tracer_options()
9459 static void update_tracer_options(struct trace_array *tr) in update_tracer_options() argument
9463 __update_tracer_options(tr); in update_tracer_options()
9470 struct trace_array *tr, *found = NULL; in trace_array_find() local
9472 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_find()
9473 if (tr->name && strcmp(tr->name, instance) == 0) { in trace_array_find()
9474 found = tr; in trace_array_find()
9484 struct trace_array *tr; in trace_array_find_get() local
9487 tr = trace_array_find(instance); in trace_array_find_get()
9488 if (tr) in trace_array_find_get()
9489 tr->ref++; in trace_array_find_get()
9492 return tr; in trace_array_find_get()
9495 static int trace_array_create_dir(struct trace_array *tr) in trace_array_create_dir() argument
9499 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); in trace_array_create_dir()
9500 if (!tr->dir) in trace_array_create_dir()
9503 ret = event_trace_add_tracer(tr->dir, tr); in trace_array_create_dir()
9505 tracefs_remove(tr->dir); in trace_array_create_dir()
9509 init_tracer_tracefs(tr, tr->dir); in trace_array_create_dir()
9510 __update_tracer_options(tr); in trace_array_create_dir()
9517 struct trace_array *tr; in trace_array_create() local
9521 tr = kzalloc(sizeof(*tr), GFP_KERNEL); in trace_array_create()
9522 if (!tr) in trace_array_create()
9525 tr->name = kstrdup(name, GFP_KERNEL); in trace_array_create()
9526 if (!tr->name) in trace_array_create()
9529 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) in trace_array_create()
9532 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) in trace_array_create()
9535 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; in trace_array_create()
9537 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); in trace_array_create()
9539 raw_spin_lock_init(&tr->start_lock); in trace_array_create()
9541 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in trace_array_create()
9543 tr->current_trace = &nop_trace; in trace_array_create()
9545 INIT_LIST_HEAD(&tr->systems); in trace_array_create()
9546 INIT_LIST_HEAD(&tr->events); in trace_array_create()
9547 INIT_LIST_HEAD(&tr->hist_vars); in trace_array_create()
9548 INIT_LIST_HEAD(&tr->err_log); in trace_array_create()
9550 if (allocate_trace_buffers(tr, trace_buf_size) < 0) in trace_array_create()
9553 if (ftrace_allocate_ftrace_ops(tr) < 0) in trace_array_create()
9556 ftrace_init_trace_array(tr); in trace_array_create()
9558 init_trace_flags_index(tr); in trace_array_create()
9561 ret = trace_array_create_dir(tr); in trace_array_create()
9565 __trace_early_add_events(tr); in trace_array_create()
9567 list_add(&tr->list, &ftrace_trace_arrays); in trace_array_create()
9569 tr->ref++; in trace_array_create()
9571 return tr; in trace_array_create()
9574 ftrace_free_ftrace_ops(tr); in trace_array_create()
9575 free_trace_buffers(tr); in trace_array_create()
9576 free_cpumask_var(tr->pipe_cpumask); in trace_array_create()
9577 free_cpumask_var(tr->tracing_cpumask); in trace_array_create()
9578 kfree(tr->name); in trace_array_create()
9579 kfree(tr); in trace_array_create()
9586 struct trace_array *tr; in instance_mkdir() local
9596 tr = trace_array_create(name); in instance_mkdir()
9598 ret = PTR_ERR_OR_ZERO(tr); in instance_mkdir()
9624 struct trace_array *tr; in trace_array_get_by_name() local
9629 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_get_by_name()
9630 if (tr->name && strcmp(tr->name, name) == 0) in trace_array_get_by_name()
9634 tr = trace_array_create(name); in trace_array_get_by_name()
9636 if (IS_ERR(tr)) in trace_array_get_by_name()
9637 tr = NULL; in trace_array_get_by_name()
9639 if (tr) in trace_array_get_by_name()
9640 tr->ref++; in trace_array_get_by_name()
9644 return tr; in trace_array_get_by_name()
9648 static int __remove_instance(struct trace_array *tr) in __remove_instance() argument
9653 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) in __remove_instance()
9656 list_del(&tr->list); in __remove_instance()
9661 set_tracer_flag(tr, 1 << i, 0); in __remove_instance()
9664 tracing_set_nop(tr); in __remove_instance()
9665 clear_ftrace_function_probes(tr); in __remove_instance()
9666 event_trace_del_tracer(tr); in __remove_instance()
9667 ftrace_clear_pids(tr); in __remove_instance()
9668 ftrace_destroy_function_files(tr); in __remove_instance()
9669 tracefs_remove(tr->dir); in __remove_instance()
9670 free_percpu(tr->last_func_repeats); in __remove_instance()
9671 free_trace_buffers(tr); in __remove_instance()
9672 clear_tracing_err_log(tr); in __remove_instance()
9674 for (i = 0; i < tr->nr_topts; i++) { in __remove_instance()
9675 kfree(tr->topts[i].topts); in __remove_instance()
9677 kfree(tr->topts); in __remove_instance()
9679 free_cpumask_var(tr->pipe_cpumask); in __remove_instance()
9680 free_cpumask_var(tr->tracing_cpumask); in __remove_instance()
9681 kfree(tr->name); in __remove_instance()
9682 kfree(tr); in __remove_instance()
9689 struct trace_array *tr; in trace_array_destroy() local
9701 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in trace_array_destroy()
9702 if (tr == this_tr) { in trace_array_destroy()
9703 ret = __remove_instance(tr); in trace_array_destroy()
9717 struct trace_array *tr; in instance_rmdir() local
9724 tr = trace_array_find(name); in instance_rmdir()
9725 if (tr) in instance_rmdir()
9726 ret = __remove_instance(tr); in instance_rmdir()
9736 struct trace_array *tr; in create_trace_instances() local
9747 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in create_trace_instances()
9748 if (!tr->name) in create_trace_instances()
9750 if (MEM_FAIL(trace_array_create_dir(tr) < 0, in create_trace_instances()
9760 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) in init_tracer_tracefs() argument
9766 tr, &show_traces_fops); in init_tracer_tracefs()
9769 tr, &set_tracer_fops); in init_tracer_tracefs()
9772 tr, &tracing_cpumask_fops); in init_tracer_tracefs()
9775 tr, &tracing_iter_fops); in init_tracer_tracefs()
9778 tr, &tracing_fops); in init_tracer_tracefs()
9781 tr, &tracing_pipe_fops); in init_tracer_tracefs()
9784 tr, &tracing_entries_fops); in init_tracer_tracefs()
9787 tr, &tracing_total_entries_fops); in init_tracer_tracefs()
9790 tr, &tracing_free_buffer_fops); in init_tracer_tracefs()
9793 tr, &tracing_mark_fops); in init_tracer_tracefs()
9795 file = __find_event_file(tr, "ftrace", "print"); in init_tracer_tracefs()
9799 tr->trace_marker_file = file; in init_tracer_tracefs()
9802 tr, &tracing_mark_raw_fops); in init_tracer_tracefs()
9804 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, in init_tracer_tracefs()
9808 tr, &rb_simple_fops); in init_tracer_tracefs()
9810 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, in init_tracer_tracefs()
9813 tr->buffer_percent = 50; in init_tracer_tracefs()
9816 tr, &buffer_percent_fops); in init_tracer_tracefs()
9818 create_trace_options_dir(tr); in init_tracer_tracefs()
9821 trace_create_maxlat_file(tr, d_tracer); in init_tracer_tracefs()
9824 if (ftrace_create_function_files(tr, d_tracer)) in init_tracer_tracefs()
9829 tr, &snapshot_fops); in init_tracer_tracefs()
9833 tr, &tracing_err_log_fops); in init_tracer_tracefs()
9836 tracing_init_tracefs_percpu(tr, cpu); in init_tracer_tracefs()
9838 ftrace_init_tracefs(tr, d_tracer); in init_tracer_tracefs()
9872 struct trace_array *tr = &global_trace; in tracing_init_dentry() local
9880 if (tr->dir) in tracing_init_dentry()
9892 tr->dir = debugfs_create_automount("tracing", NULL, in tracing_init_dentry()
10148 iter->tr = &global_trace; in trace_init_global_iter()
10149 iter->trace = iter->tr->current_trace; in trace_init_global_iter()
10161 if (trace_clocks[iter->tr->clock_id].in_ns) in trace_init_global_iter()
10176 struct trace_array *tr = &global_trace; in ftrace_dump() local
10206 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
10209 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; in ftrace_dump()
10268 tr->trace_flags |= old_userobj; in ftrace_dump()
10388 struct trace_array *tr; in enable_instances() local
10404 tr = trace_array_get_by_name(tok); in enable_instances()
10405 if (!tr) { in enable_instances()
10410 trace_array_put(tr); in enable_instances()
10413 early_enable_events(tr, tok, true); in enable_instances()
10556 struct trace_array *tr; in ftrace_boot_snapshot() local
10561 list_for_each_entry(tr, &ftrace_trace_arrays, list) { in ftrace_boot_snapshot()
10562 if (!tr->allocated_snapshot) in ftrace_boot_snapshot()
10565 tracing_snapshot_instance(tr); in ftrace_boot_snapshot()
10566 trace_array_puts(tr, "** Boot snapshot taken **\n"); in ftrace_boot_snapshot()
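
For context, a hedged usage sketch: the references above all revolve around struct trace_array (the "tr" handle of the tracing core), and several of the listed functions (trace_array_get_by_name(), trace_array_init_printk(), trace_array_printk(), trace_array_put(), trace_array_destroy()) form the exported instance API that kernel modules may call. The module below is an illustration only, not taken from the listing: the instance name "sample_instance", the module scaffolding, and the log strings are assumptions made for the example.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *tr;

static int __init sample_instance_init(void)
{
	/* Look up (or create) a named tracing instance and take a reference. */
	tr = trace_array_get_by_name("sample_instance");
	if (!tr)
		return -ENOMEM;

	/* Allocate the per-CPU printk buffers this instance needs. */
	if (trace_array_init_printk(tr)) {
		trace_array_put(tr);
		trace_array_destroy(tr);
		return -EINVAL;
	}

	/* Write into this instance's ring buffer rather than the global one. */
	trace_array_printk(tr, _THIS_IP_, "sample_instance: module loaded\n");
	return 0;
}

static void __exit sample_instance_exit(void)
{
	trace_array_printk(tr, _THIS_IP_, "sample_instance: module unloading\n");
	/* Drop our reference first; destroy then removes the instance. */
	trace_array_put(tr);
	trace_array_destroy(tr);
}

module_init(sample_instance_init);
module_exit(sample_instance_exit);
MODULE_LICENSE("GPL");

Once loaded, the instance shows up under tracefs (e.g. /sys/kernel/tracing/instances/sample_instance/trace), and the trace_array_printk() output lands in that instance's buffer rather than the global trace.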