/Linux-v5.4/tools/testing/selftests/powerpc/pmu/ebb/ |
D | trace.h |
    24   struct trace_buffer  struct
    32   struct trace_buffer *trace_buffer_allocate(u64 size);  argument
    33   int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value);
    34   int trace_log_counter(struct trace_buffer *tb, u64 value);
    35   int trace_log_string(struct trace_buffer *tb, char *str);
    36   int trace_log_indent(struct trace_buffer *tb);
    37   int trace_log_outdent(struct trace_buffer *tb);
    38   void trace_buffer_print(struct trace_buffer *tb);
    39   void trace_print_location(struct trace_buffer *tb);
|
D | trace.c |
    15   struct trace_buffer *trace_buffer_allocate(u64 size)  in trace_buffer_allocate()
    17   struct trace_buffer *tb;  in trace_buffer_allocate()
    38   static bool trace_check_bounds(struct trace_buffer *tb, void *p)  in trace_check_bounds()
    43   static bool trace_check_alloc(struct trace_buffer *tb, void *p)  in trace_check_alloc()
    62   static void *trace_alloc(struct trace_buffer *tb, int bytes)  in trace_alloc()
    76   static struct trace_entry *trace_alloc_entry(struct trace_buffer *tb, int payload_size)  in trace_alloc_entry()
    87   int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value)  in trace_log_reg()
    104  int trace_log_counter(struct trace_buffer *tb, u64 value)  in trace_log_counter()
    120  int trace_log_string(struct trace_buffer *tb, char *str)  in trace_log_string()
    142  int trace_log_indent(struct trace_buffer *tb)  in trace_log_indent()
    [all …]
|
D | ebb.h | 29 struct trace_buffer *trace;
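trace.h and trace.c above make up a small, self-contained event log used by the powerpc EBB selftests, separate from the kernel's ftrace machinery. A minimal sketch of how a test might drive that API, using only the signatures listed above; the buffer size, the register id, and the logged values are illustrative, and u64 is assumed to come from the selftests' usual headers:

/* Hypothetical caller of the EBB selftest trace API; not taken from an
 * actual selftest. "trace.h" here is the header listed above, which
 * declares struct trace_buffer and the trace_log_* helpers.
 */
#include "trace.h"

int log_one_sample(u64 reg_id, u64 reg_value)
{
        struct trace_buffer *tb;

        tb = trace_buffer_allocate(1024 * 1024);        /* 1 MiB, arbitrary */
        if (!tb)
                return -1;

        trace_log_string(tb, "sample");
        trace_log_indent(tb);                   /* nest the entries below */
        trace_log_reg(tb, reg_id, reg_value);   /* e.g. an SPR id + value */
        trace_log_counter(tb, 1);               /* arbitrary counter value */
        trace_log_outdent(tb);

        trace_buffer_print(tb);                 /* dump the accumulated log */
        return 0;
}

Judging by the names in trace.c, trace_check_bounds()/trace_check_alloc() exist so the trace_log_* calls fail gracefully once the flat allocation fills, rather than writing past it.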
|
/Linux-v5.4/arch/powerpc/oprofile/cell/ |
D | spu_profiler.c |
    76   u64 trace_buffer[2];  in spu_pc_extract()  local
    90   cbe_read_trace_buffer(cpu, trace_buffer);  in spu_pc_extract()
    97   = (spu_mask & trace_buffer[0]) << 2;  in spu_pc_extract()
    99   = (spu_mask & trace_buffer[1]) << 2;  in spu_pc_extract()
    101  trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;  in spu_pc_extract()
    102  trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;  in spu_pc_extract()
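The spu_pc_extract() hits outline how the Cell SPU profiler unpacks program counters from the two 64-bit hardware trace-buffer words: each word carries several NUM_SPU_BITS_TRBUF-wide PC fields, and each field is masked out and shifted left by 2 to restore a word-aligned address. A condensed sketch of that unpacking; the constant values (16 bits per field, 4 fields per word) and the samples[] destination are assumptions, not copied from the file:

#include <linux/types.h>

/* Assumed layout: 4 PC fields of NUM_SPU_BITS_TRBUF bits per 64-bit word. */
#define NUM_SPU_BITS_TRBUF      16
#define SPUS_PER_TB_ENTRY       4

static void unpack_spu_pcs(u64 trace_buffer[2], u32 *samples)
{
        u64 spu_mask = (1ULL << NUM_SPU_BITS_TRBUF) - 1;
        int i;

        for (i = 0; i < SPUS_PER_TB_ENTRY; i++) {
                /* PCs are stored shifted right by 2; undo that here */
                samples[i] = (spu_mask & trace_buffer[0]) << 2;
                samples[i + SPUS_PER_TB_ENTRY] = (spu_mask & trace_buffer[1]) << 2;

                /* bring the next packed field into the low bits */
                trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
                trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
        }
}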
|
/Linux-v5.4/kernel/trace/ |
D | trace.c |
    591   static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)  in buffer_ftrace_now()
    607   return buffer_ftrace_now(&global_trace.trace_buffer, cpu);  in ftrace_now()
    784   if (tr->trace_buffer.buffer)  in tracer_tracing_on()
    785   ring_buffer_record_on(tr->trace_buffer.buffer);  in tracer_tracing_on()
    853   buffer = global_trace.trace_buffer.buffer;  in __trace_puts()
    901   buffer = global_trace.trace_buffer.buffer;  in __trace_bputs()
    1024  static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
    1025  struct trace_buffer *size_buf, int cpu_id);
    1026  static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
    1036  &tr->trace_buffer, RING_BUFFER_ALL_CPUS);  in tracing_alloc_snapshot_instance()
    [all …]
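A pattern worth noting in these trace.c hits: struct trace_array embeds its struct trace_buffer by value, and code touching the underlying ring buffer first checks that it was actually allocated. A simplified rendering of the tracer_tracing_on() hit at lines 784-785 (v5.4; the real function also carries a comment on why buffer_disabled is only a best-effort flag):

/* Simplified from tracer_tracing_on() in v5.4; most comments elided. */
static void tracer_tracing_on(struct trace_array *tr)
{
        /* Buffers may not be allocated yet (early boot, or a fresh
         * instance), so the ring buffer pointer is checked first. */
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);

        tr->buffer_disabled = 0;        /* fast-path "tracing is on" flag */
        smp_wmb();                      /* make the flag visible to readers */
}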
|
D | trace_selftest.c |
    26   static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)  in trace_test_buffer_cpu()
    63   static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)  in trace_test_buffer()
    365  ret = trace_test_buffer(&tr->trace_buffer, &count);  in trace_selftest_startup_dynamic_tracing()
    386  ret = trace_test_buffer(&tr->trace_buffer, &count);  in trace_selftest_startup_dynamic_tracing()
    685  ret = trace_test_buffer(&tr->trace_buffer, &count);  in trace_selftest_startup_function()
    771  tracing_reset_online_cpus(&tr->trace_buffer);  in trace_selftest_startup_function_graph()
    793  ret = trace_test_buffer(&tr->trace_buffer, &count);  in trace_selftest_startup_function_graph()
    851  ret = trace_test_buffer(&tr->trace_buffer, NULL);  in trace_selftest_startup_irqsoff()
    913  ret = trace_test_buffer(&tr->trace_buffer, NULL);  in trace_selftest_startup_preemptoff()
    979  ret = trace_test_buffer(&tr->trace_buffer, NULL);  in trace_selftest_startup_preemptirqsoff()
    [all …]
|
D | trace_kdb.c |
    46   ring_buffer_read_prepare(iter.trace_buffer->buffer,  in ftrace_dump_buf()
    54   ring_buffer_read_prepare(iter.trace_buffer->buffer,  in ftrace_dump_buf()
    127  atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);  in kdb_ftdump()
    142  atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);  in kdb_ftdump()
|
D | trace_sched_wakeup.c |
    85   *data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in func_prolog_preempt_disable()
    381  struct ring_buffer *buffer = tr->trace_buffer.buffer;  in tracing_sched_switch_trace()
    411  struct ring_buffer *buffer = tr->trace_buffer.buffer;  in tracing_sched_wakeup_trace()
    462  disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup_sched_switch()
    474  data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);  in probe_wakeup_sched_switch()
    497  atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup_sched_switch()
    516  tracing_reset_online_cpus(&tr->trace_buffer);  in wakeup_reset()
    554  disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup()
    586  data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);  in probe_wakeup()
    601  atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup()
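The probe_wakeup() and probe_wakeup_sched_switch() hits show the reentrancy guard these latency tracers share: each CPU's trace_array_cpu (reached through trace_buffer.data) carries an atomic "disabled" count, and a probe only records if it was the first to raise it on this CPU. A reduced sketch of that shape; record_sample() is a hypothetical stand-in for the real probe bodies, which log the switch/wakeup and update max-latency state:

/* Per-CPU disable guard as used by the wakeup (and irqsoff) probes. */
static void probe_sketch(struct trace_array *tr)
{
        int cpu = raw_smp_processor_id();
        long disabled;

        disabled = atomic_inc_return(&per_cpu_ptr(tr->trace_buffer.data,
                                                  cpu)->disabled);
        if (unlikely(disabled != 1))
                goto out;       /* nested entry on this CPU: skip it */

        record_sample(tr, cpu); /* hypothetical: write the actual entry */

out:
        atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
}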
|
D | trace_mmiotrace.c |
    35   tracing_reset_online_cpus(&tr->trace_buffer);  in mmio_reset_data()
    125  unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);  in count_overruns()
    300  struct ring_buffer *buffer = tr->trace_buffer.buffer;  in __trace_mmiotrace_rw()
    321  struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());  in mmio_trace_rw()
    330  struct ring_buffer *buffer = tr->trace_buffer.buffer;  in __trace_mmiotrace_map()
    354  data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());  in mmio_trace_mapping()
|
D | trace_irqsoff.c |
    125  *data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in func_prolog_dec()
    170  tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);  in irqsoff_display_graph()
    385  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in start_critical_timing()
    423  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in stop_critical_timing()
|
D | trace_functions.c |
    104  tr->trace_buffer.cpu = get_cpu();  in function_trace_init()
    121  tracing_reset_online_cpus(&tr->trace_buffer);  in function_trace_start()
    146  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in function_trace_call()
    195  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in function_stack_trace_call()
|
D | trace_functions_graph.c |
    104  struct ring_buffer *buffer = tr->trace_buffer.buffer;  in __trace_graph_entry()
    174  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in trace_graph_entry()
    224  struct ring_buffer *buffer = tr->trace_buffer.buffer;  in __trace_graph_return()
    255  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in trace_graph_return()
    447  ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,  in get_return_for_leaf()
    449  event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,  in get_return_for_leaf()
    506  usecs = iter->ts - iter->trace_buffer->time_start;  in print_graph_rel_time()
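__trace_graph_entry() at line 104 is one of many hits that follow the same reserve/fill/commit sequence against tr->trace_buffer.buffer (trace_syscalls.c, trace_hwlat.c, and blktrace.c below do likewise). A sketch of that sequence, close to the v5.4 graph-entry writer but with the call_filter_check_discard() step dropped for brevity:

/* Reserve/fill/commit against the instance ring buffer, after
 * __trace_graph_entry() in v5.4; the event-filter discard check
 * of the real function is omitted here.
 */
static int write_graph_ent(struct trace_array *tr,
                           struct ftrace_graph_ent *trace,
                           unsigned long flags, int pc)
{
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;       /* buffer full or recording is off */

        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;      /* payload: function + call depth */

        trace_buffer_unlock_commit_nostack(buffer, event);
        return 1;
}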
|
D | trace_branch.c |
    58   data = this_cpu_ptr(tr->trace_buffer.data);  in probe_likely_condition()
    63   buffer = tr->trace_buffer.buffer;  in probe_likely_condition()
|
D | trace.h |
    176   struct trace_buffer {  struct
    249   struct trace_buffer trace_buffer;  member
    262   struct trace_buffer max_buffer;
    681   void tracing_reset_online_cpus(struct trace_buffer *buf);
    1042  return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);  in ftrace_trace_task()
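Line 176 here is the definition everything else in this directory leans on. Pieced together from the accesses visible in this listing (.buffer, .data, .time_start, .cpu), the v5.4 struct is approximately the following; treat the field order and the tr back-pointer as reconstruction rather than a verbatim quote:

/* Approximate v5.4 kernel/trace/trace.h definition, reconstructed from
 * the usages in this listing. */
struct trace_buffer {
        struct trace_array              *tr;            /* owning instance */
        struct ring_buffer              *buffer;        /* the ring buffer proper */
        struct trace_array_cpu __percpu *data;          /* per-CPU state: ->disabled, ->ignore_pid, ... */
        u64                             time_start;     /* base for relative timestamps */
        int                             cpu;            /* used by e.g. function_trace_init() above */
};

Note the naming collision this listing straddles: in v5.6 this struct was renamed struct array_buffer and the trace_buffer name was handed to the ring buffer itself, so the same search against a newer tree returns a very different picture.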
|
D | trace_events.c |
    240   data = this_cpu_ptr(tr->trace_buffer.data);  in trace_event_ignore_this_pid()
    549   this_cpu_write(tr->trace_buffer.data->ignore_pid,  in event_filter_pid_sched_switch_probe_pre()
    563   this_cpu_write(tr->trace_buffer.data->ignore_pid,  in event_filter_pid_sched_switch_probe_post()
    574   if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))  in event_filter_pid_sched_wakeup_probe_pre()
    579   this_cpu_write(tr->trace_buffer.data->ignore_pid,  in event_filter_pid_sched_wakeup_probe_pre()
    590   if (this_cpu_read(tr->trace_buffer.data->ignore_pid))  in event_filter_pid_sched_wakeup_probe_post()
    596   this_cpu_write(tr->trace_buffer.data->ignore_pid,  in event_filter_pid_sched_wakeup_probe_post()
    628   per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;  in __ftrace_clear_event_pids()
    1570  this_cpu_write(tr->trace_buffer.data->ignore_pid,  in ignore_task_cpu()
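These trace_events.c hits show how per-PID event filtering stays cheap: the sched_switch/sched_wakeup probes recompute the filter verdict for the tasks involved and cache it in the per-CPU ignore_pid flag, so the event fast path only does a this_cpu_read(). A trimmed rendering of the pre-switch probe around line 549, close to the v5.4 source:

/* Trimmed from the sched_switch pre-probe pattern above (v5.4). */
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
                                        struct task_struct *prev,
                                        struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        /* Cache the verdict; trace_event_ignore_this_pid() (line 240)
         * then just reads this per-CPU flag on the hot path. */
        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, prev) &&
                       trace_ignore_this_task(pid_list, next));
}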
|
D | trace_syscalls.c |
    343  buffer = tr->trace_buffer.buffer;  in ftrace_syscall_enter()
    389  buffer = tr->trace_buffer.buffer;  in ftrace_syscall_exit()
|
D | trace_hwlat.c | 107 struct ring_buffer *buffer = tr->trace_buffer.buffer; in trace_hwlat_sample()
|
D | blktrace.c |
    79   buffer = blk_tr->trace_buffer.buffer;  in trace_note()
    252  buffer = blk_tr->trace_buffer.buffer;  in __blk_add_trace()
|
D | ftrace.c |
    149   if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))  in ftrace_pid_func()
    6424  this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,  in ftrace_filter_pid_sched_switch_probe()
    6478  per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;  in clear_ftrace_pids()
    6602  this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,  in ignore_task_cpu()
|
D | trace_output.c | 526 unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start; in lat_print_timestamp()
|
D | trace_events_hist.c | 898 buffer = trace_file->tr->trace_buffer.buffer; in trace_event_raw_event_synth()
|
/Linux-v5.4/include/linux/ |
D | trace_events.h |
    14  struct trace_buffer;
    77  struct trace_buffer *trace_buffer;  member
|
/Linux-v5.4/arch/powerpc/oprofile/ |
D | op_model_cell.c |
    1484  u64 trace_buffer[2];  in cell_handle_interrupt_spu()  local
    1528  cbe_read_trace_buffer(cpu, trace_buffer);  in cell_handle_interrupt_spu()
    1547  trace_entry = trace_buffer[0]  in cell_handle_interrupt_spu()
    1554  last_trace_buffer = trace_buffer[0];  in cell_handle_interrupt_spu()
|
/Linux-v5.4/drivers/net/ethernet/qlogic/qed/ |
D | qed_hsi.h |
    13370  u8 trace_buffer[MCP_TRACE_SIZE];  member
    13371  #define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
|