Lines matching full:trace in tools/perf/builtin-trace.c

2  * builtin-trace.c
4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
52 #include "trace/beauty/beauty.h"
53 #include "trace-event.h"
117 struct trace { struct
289 * The evsel->priv as used by 'perf trace'
719 #include "trace/beauty/generated/fsconfig_arrays.c"
889 #include "trace/beauty/arch_errno_names.c"
890 #include "trace/beauty/eventfd.c"
891 #include "trace/beauty/futex_op.c"
892 #include "trace/beauty/futex_val3.c"
893 #include "trace/beauty/mmap.c"
894 #include "trace/beauty/mode_t.c"
895 #include "trace/beauty/msg_flags.c"
896 #include "trace/beauty/open_flags.c"
897 #include "trace/beauty/perf_event_open.c"
898 #include "trace/beauty/pid.c"
899 #include "trace/beauty/sched_policy.c"
900 #include "trace/beauty/seccomp.c"
901 #include "trace/beauty/signum.c"
902 #include "trace/beauty/socket_type.c"
903 #include "trace/beauty/waitid_options.c"
1398 struct trace *trace) in thread__fd_path() argument
1402 if (ttrace == NULL || trace->fd_path_disabled) in thread__fd_path()
1409 if (!trace->live) in thread__fd_path()
1411 ++trace->stats.proc_getname; in thread__fd_path()
1423 const char *path = thread__fd_path(arg->thread, fd, arg->trace); in syscall_arg__scnprintf_fd()
1431 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size) in pid__scnprintf_fd() argument
1434 struct thread *thread = machine__find_thread(trace->host, pid, pid); in pid__scnprintf_fd()
1437 const char *path = thread__fd_path(thread, fd, trace); in pid__scnprintf_fd()
1494 if (!arg->trace->vfs_getname) in syscall_arg__scnprintf_filename()
1501 static bool trace__filter_duration(struct trace *trace, double t) in trace__filter_duration() argument
1503 return t < (trace->duration_filter * NSEC_PER_MSEC); in trace__filter_duration()
1506 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in __trace__fprintf_tstamp() argument
1508 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; in __trace__fprintf_tstamp()
1519 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in trace__fprintf_tstamp() argument
1522 return __trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_tstamp()
1536 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) in trace__fprintf_comm_tid() argument
1540 if (trace->multiple_threads) { in trace__fprintf_comm_tid()
1541 if (trace->show_comm) in trace__fprintf_comm_tid()
1549 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, in trace__fprintf_entry_head() argument
1554 if (trace->show_tstamp) in trace__fprintf_entry_head()
1555 printed = trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_entry_head()
1556 if (trace->show_duration) in trace__fprintf_entry_head()
1558 return printed + trace__fprintf_comm_tid(trace, thread, fp); in trace__fprintf_entry_head()
1561 static int trace__process_event(struct trace *trace, struct machine *machine, in trace__process_event() argument
1568 color_fprintf(trace->output, PERF_COLOR_RED, in trace__process_event()
1585 struct trace *trace = container_of(tool, struct trace, tool); in trace__tool_process() local
1586 return trace__process_event(trace, machine, event, sample); in trace__tool_process()
1607 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) in trace__symbols_init() argument
1614 trace->host = machine__new_host(); in trace__symbols_init()
1615 if (trace->host == NULL) in trace__symbols_init()
1618 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); in trace__symbols_init()
1622 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, in trace__symbols_init()
1632 static void trace__symbols__exit(struct trace *trace) in trace__symbols__exit() argument
1634 machine__exit(trace->host); in trace__symbols__exit()
1635 trace->host = NULL; in trace__symbols__exit()
1745 static int trace__read_syscall_info(struct trace *trace, int id) in trace__read_syscall_info() argument
1749 const char *name = syscalltbl__name(trace->sctbl, id); in trace__read_syscall_info()
1752 if (trace->syscalls.table == NULL) { in trace__read_syscall_info()
1753 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); in trace__read_syscall_info()
1754 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
1758 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) { in trace__read_syscall_info()
1760 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); in trace__read_syscall_info()
1766 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
1769 …memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof… in trace__read_syscall_info()
1771 trace->syscalls.table = table; in trace__read_syscall_info()
1772 trace->sctbl->syscalls.max_id = id; in trace__read_syscall_info()
1775 sc = trace->syscalls.table + id; in trace__read_syscall_info()
1837 static int trace__validate_ev_qualifier(struct trace *trace) in trace__validate_ev_qualifier() argument
1842 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); in trace__validate_ev_qualifier()
1844 trace->ev_qualifier_ids.entries = malloc(nr_allocated * in trace__validate_ev_qualifier()
1845 sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
1847 if (trace->ev_qualifier_ids.entries == NULL) { in trace__validate_ev_qualifier()
1849 trace->output); in trace__validate_ev_qualifier()
1854 strlist__for_each_entry(pos, trace->ev_qualifier) { in trace__validate_ev_qualifier()
1856 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; in trace__validate_ev_qualifier()
1859 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
1874 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
1879 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
1886 entries = realloc(trace->ev_qualifier_ids.entries, in trace__validate_ev_qualifier()
1887 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
1890 fputs("\nError:\t Not enough memory for parsing\n", trace->output); in trace__validate_ev_qualifier()
1893 trace->ev_qualifier_ids.entries = entries; in trace__validate_ev_qualifier()
1895 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
1899 trace->ev_qualifier_ids.nr = nr_used; in trace__validate_ev_qualifier()
1900 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); in trace__validate_ev_qualifier()
1906 zfree(&trace->ev_qualifier_ids.entries); in trace__validate_ev_qualifier()
1907 trace->ev_qualifier_ids.nr = 0; in trace__validate_ev_qualifier()
1911 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) in trace__syscall_enabled() argument
1915 if (trace->ev_qualifier_ids.nr == 0) in trace__syscall_enabled()
1918 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, in trace__syscall_enabled()
1919 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; in trace__syscall_enabled()
1922 return !trace->not_ev_qualifier; in trace__syscall_enabled()
1924 return trace->not_ev_qualifier; in trace__syscall_enabled()
1956 * in tools/perf/trace/beauty/mount_flags.c
1980 struct trace *trace, struct thread *thread) in syscall__scnprintf_args() argument
1993 .trace = trace, in syscall__scnprintf_args()
1995 .show_string_prefix = trace->show_string_prefix, in syscall__scnprintf_args()
2028 !trace->show_zeros && in syscall__scnprintf_args()
2038 if (trace->show_arg_names) in syscall__scnprintf_args()
2067 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2071 static struct syscall *trace__syscall_info(struct trace *trace, argument
2090 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2099 if (id > trace->sctbl->syscalls.max_id) {
2101 if (id >= trace->sctbl->syscalls.max_id) {
2107 err = trace__read_syscall_info(trace, id);
2113 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2114 (err = trace__read_syscall_info(trace, id)) != 0)
2117 if (trace->syscalls.table[id].name == NULL) {
2118 if (trace->syscalls.table[id].nonexistent)
2123 return &trace->syscalls.table[id];
2128 …fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, s…
2129 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2130 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2131 fputs(" information\n", trace->output);
2198 static int trace__printf_interrupted_entry(struct trace *trace) argument
2204 if (trace->failure_only || trace->current == NULL)
2207 ttrace = thread__priv(trace->current);
2212 …printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->o…
2213 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2215 if (len < trace->args_alignment - 4)
2216 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2218 printed += fprintf(trace->output, " ...\n");
2221 ++trace->nr_events_printed;
2226 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, argument
2231 if (trace->print_sample) {
2234 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2269 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, argument
2280 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2286 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2287 ttrace = thread__trace(thread, trace->output);
2291 trace__fprintf_sample(trace, evsel, sample, thread);
2301 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2302 trace__printf_interrupted_entry(trace);
2313 if (evsel != trace->syscalls.events.sys_enter)
2314 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2320 args, augmented_args, augmented_args_size, trace, thread);
2323 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2326 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2327 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2328 if (trace->args_alignment > printed)
2329 alignment = trace->args_alignment - printed;
2330 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2338 if (trace->current != thread) {
2339 thread__put(trace->current);
2340 trace->current = thread__get(thread);
2348 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, argument
2354 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2362 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2363 ttrace = thread__trace(thread, trace->output);
2372 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2373 …syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, th…
2374 fprintf(trace->output, "%s", msg);
2381 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, argument
2388 trace->max_stack;
2391 if (machine__resolve(trace->host, &al, sample) < 0)
2399 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) argument
2406 …intf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2417 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, argument
2426 int alignment = trace->args_alignment;
2427 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2433 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2434 ttrace = thread__trace(thread, trace->output);
2438 trace__fprintf_sample(trace, evsel, sample, thread);
2442 if (trace->summary)
2443 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2445 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2448 ++trace->stats.vfs_getname;
2453 if (trace__filter_duration(trace, duration))
2456 } else if (trace->duration_filter)
2460 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2462 if (callchain_cursor.nr < trace->min_stack)
2468 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2471 …trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace-…
2474 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2476 printed += fprintf(trace->output, " ... [");
2477 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2479 printed += fprintf(trace->output, "]: %s()", sc->name);
2489 fprintf(trace->output, ")%*s= ", alignment, " ");
2495 fprintf(trace->output, "%ld", ret);
2502 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2505 fprintf(trace->output, "0 (Timeout)");
2511 .trace = trace,
2515 fprintf(trace->output, "%s", bf);
2517 fprintf(trace->output, "%#lx", ret);
2519 struct thread *child = machine__find_thread(trace->host, ret, ret);
2522 fprintf(trace->output, "%ld", ret);
2524 fprintf(trace->output, " (%s)", thread__comm_str(child));
2530 fputc('\n', trace->output);
2536 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2540 trace__fprintf_callchain(trace, sample);
2551 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, argument
2555 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2612 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, argument
2618 struct thread *thread = machine__findnew_thread(trace->host,
2621 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2627 trace->runtime_ms += runtime_ms;
2633 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2666 static void bpf_output__fprintf(struct trace *trace, argument
2670 bpf_output__printer, NULL, trace->output);
2671 ++trace->nr_events_printed;
2674 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample… argument
2691 .trace = trace,
2693 .show_string_prefix = trace->show_string_prefix,
2726 !trace->show_zeros &&
2739 if (1 || trace->show_arg_names)
2745 return printed + fprintf(trace->output, "%s", bf);
2748 static int trace__event_handler(struct trace *trace, struct evsel *evsel, argument
2763 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2766 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2768 if (callchain_cursor.nr < trace->min_stack)
2774 trace__printf_interrupted_entry(trace);
2775 trace__fprintf_tstamp(trace, sample->time, trace->output);
2777 if (trace->trace_syscalls && trace->show_duration)
2778 fprintf(trace->output, "( ): ");
2781 trace__fprintf_comm_tid(trace, thread, trace->output);
2783 if (evsel == trace->syscalls.events.augmented) {
2785 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2788 fprintf(trace->output, "%s(", sc->name);
2789 trace__fprintf_sys_enter(trace, evsel, sample);
2790 fputc(')', trace->output);
2801 fprintf(trace->output, "%s(", evsel->name);
2804 bpf_output__fprintf(trace, sample);
2807 trace__fprintf_sys_enter(trace, evsel, sample)) {
2808 if (trace->libtraceevent_print) {
2811 trace->output);
2813 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2819 fprintf(trace->output, ")\n");
2822 trace__fprintf_callchain(trace, sample);
2826 ++trace->nr_events_printed;
2854 static int trace__pgfault(struct trace *trace, argument
2866 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2869 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2871 if (callchain_cursor.nr < trace->min_stack)
2877 ttrace = thread__trace(thread, trace->output);
2886 if (trace->summary_only)
2891 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2893 fprintf(trace->output, "%sfault [",
2897 print_location(trace->output, sample, &al, false, true);
2899 fprintf(trace->output, "] => ");
2912 print_location(trace->output, sample, &al, true, false);
2914 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2917 trace__fprintf_callchain(trace, sample);
2921 ++trace->nr_events_printed;
2929 static void trace__set_base_time(struct trace *trace, argument
2941 if (trace->base_time == 0 && !trace->full_time &&
2943 trace->base_time = sample->time;
2952 struct trace *trace = container_of(tool, struct trace, tool); local
2958 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2962 trace__set_base_time(trace, evsel, sample);
2965 ++trace->nr_events;
2966 handler(trace, evsel, event, sample);
2973 static int trace__record(struct trace *trace, int argc, const char **argv) argument
3005 if (trace->trace_syscalls) {
3023 if (trace->trace_pgfaults & TRACE_PFMAJ)
3027 if (trace->trace_pgfaults & TRACE_PFMIN)
3041 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3098 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *s… argument
3104 trace__process_event(trace, trace->host, event, sample);
3108 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
3110 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3114 if (evswitch__discard(&trace->evswitch, evsel))
3117 trace__set_base_time(trace, evsel, sample);
3121 …fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3126 handler(trace, evsel, event, sample);
3129 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3133 static int trace__add_syscall_newtp(struct trace *trace) argument
3136 struct evlist *evlist = trace->evlist;
3153 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3154 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3159 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3168 trace->syscalls.events.sys_enter = sys_enter;
3169 trace->syscalls.events.sys_exit = sys_exit;
3182 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) argument
3186 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3187 trace->ev_qualifier_ids.nr,
3188 trace->ev_qualifier_ids.entries);
3193 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3194 sys_exit = trace->syscalls.events.sys_exit;
3207 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name) argument
3209 if (trace->bpf_obj == NULL)
3212 return bpf_object__find_map_by_name(trace->bpf_obj, name);
3215 static void trace__set_bpf_map_filtered_pids(struct trace *trace) argument
3217 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
3220 static void trace__set_bpf_map_syscalls(struct trace *trace) argument
3222 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
3223 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
3224 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
3227 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) argument
3229 if (trace->bpf_obj == NULL)
3232 return bpf_object__find_program_by_title(trace->bpf_obj, name);
3235 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, argument
3243 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3248 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3255 prog = trace__find_bpf_program_by_title(trace, prog_name);
3265 return trace->syscalls.unaugmented_prog;
3268 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) argument
3270 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3275 …sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3276 …sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3279 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) argument
3281 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3282 …return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_…
3285 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) argument
3287 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3288 …return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_p…
3291 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_en… argument
3293 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3310 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace) argument
3312 int fd = bpf_map__fd(trace->syscalls.map);
3314 .enabled = !trace->not_ev_qualifier,
3319 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
3320 int key = trace->ev_qualifier_ids.entries[i];
3323 trace__init_bpf_map_syscall_args(trace, key, &value);
3324 trace__init_syscall_bpf_progs(trace, key);
3335 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled) argument
3337 int fd = bpf_map__fd(trace->syscalls.map);
3343 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3345 trace__init_bpf_map_syscall_args(trace, key, &value);
3355 static int trace__init_syscalls_bpf_map(struct trace *trace) argument
3359 if (trace->ev_qualifier_ids.nr)
3360 enabled = trace->not_ev_qualifier;
3362 return __trace__init_syscalls_bpf_map(trace, enabled);
3365 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *s… argument
3381 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3382 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3387 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3439 …pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_ent…
3440 if (pair_prog == trace->syscalls.unaugmented_prog)
3453 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) argument
3455 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3456 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3459 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3462 if (!trace__syscall_enabled(trace, key))
3465 trace__init_syscall_bpf_progs(trace, key);
3468 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3472 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3506 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3507 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3518 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3525 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3545 static void trace__delete_augmented_syscalls(struct trace *trace) argument
3549 evlist__remove(trace->evlist, trace->syscalls.events.augmented);
3550 evsel__delete(trace->syscalls.events.augmented);
3551 trace->syscalls.events.augmented = NULL;
3553 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
3554 if (evsel->bpf_obj == trace->bpf_obj) {
3555 evlist__remove(trace->evlist, evsel);
3561 bpf_object__close(trace->bpf_obj);
3562 trace->bpf_obj = NULL;
3565 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3571 static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
3575 static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
3579 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3584 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3589 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3595 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3600 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
3605 static bool trace__only_augmented_syscalls_evsels(struct trace *trace) argument
3609 evlist__for_each_entry(trace->evlist, evsel) {
3610 if (evsel == trace->syscalls.events.augmented ||
3611 evsel->bpf_obj == trace->bpf_obj)
3620 static int trace__set_ev_qualifier_filter(struct trace *trace) argument
3622 if (trace->syscalls.map)
3623 return trace__set_ev_qualifier_bpf_filter(trace);
3624 if (trace->syscalls.events.sys_enter)
3625 return trace__set_ev_qualifier_tp_filter(trace);
3647 static int trace__set_filter_loop_pids(struct trace *trace) argument
3653 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3656 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3669 err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3670 if (!err && trace->filter_pids.map)
3671 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3676 static int trace__set_filter_pids(struct trace *trace) argument
3685 if (trace->filter_pids.nr > 0) {
3686 err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3687 trace->filter_pids.entries);
3688 if (!err && trace->filter_pids.map) {
3689 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3690 trace->filter_pids.entries);
3692 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3693 err = trace__set_filter_loop_pids(trace);
3699 static int __trace__deliver_event(struct trace *trace, union perf_event *event) argument
3701 struct evlist *evlist = trace->evlist;
3707 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3709 trace__handle_event(trace, event, &sample);
3714 static int __trace__flush_events(struct trace *trace) argument
3716 u64 first = ordered_events__first_time(&trace->oe.data);
3717 u64 flush = trace->oe.last - NSEC_PER_SEC;
3721 return ordered_events__flush_time(&trace->oe.data, flush);
3726 static int trace__flush_events(struct trace *trace) argument
3728 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3731 static int trace__deliver_event(struct trace *trace, union perf_event *event) argument
3735 if (!trace->sort_events)
3736 return __trace__deliver_event(trace, event);
3738 err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3742 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3746 return trace__flush_events(trace);
3752 struct trace *trace = container_of(oe, struct trace, oe.data); local
3754 return __trace__deliver_event(trace, event->event);
3772 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3870 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) argument
3872 struct evlist *evlist = trace->evlist;
3879 if (trace__expand_filter(trace, evsel)) {
3888 static int trace__run(struct trace *trace, int argc, const char **argv) argument
3890 struct evlist *evlist = trace->evlist;
3897 trace->live = true;
3899 if (!trace->raw_augmented_syscalls) {
3900 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3903 if (trace->trace_syscalls)
3904 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3907 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3911 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3915 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3919 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3923 if (trace->sched &&
3930 * trace -G A -e sched:*switch
3935 * trace -e sched:*switch -G A
3943 * trace -G A -e sched:*switch -G B
3951 if (trace->cgroup)
3952 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3954 err = perf_evlist__create_maps(evlist, &trace->opts.target);
3956 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3960 err = trace__symbols_init(trace, evlist);
3962 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3966 perf_evlist__config(evlist, &trace->opts, &callchain_param);
3972 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
3975 fprintf(trace->output, "Couldn't run the workload!\n");
3994 err = trace__set_filter_pids(trace);
3998 if (trace->syscalls.map)
3999 trace__init_syscalls_bpf_map(trace);
4001 if (trace->syscalls.prog_array.sys_enter)
4002 trace__init_syscalls_bpf_prog_array_maps(trace);
4004 if (trace->ev_qualifier_ids.nr > 0) {
4005 err = trace__set_ev_qualifier_filter(trace);
4009 if (trace->syscalls.events.sys_exit) {
4011 trace->syscalls.events.sys_exit->filter);
4026 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
4028 err = trace__expand_filters(trace, &evsel);
4035 if (trace->dump.map)
4036 bpf_map__fprintf(trace->dump.map, trace->output);
4038 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4042 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
4048 if (trace->opts.initial_delay) {
4049 usleep(trace->opts.initial_delay * 1000);
4053 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4066 evsel->core.attr.sample_max_stack = trace->max_stack;
4069 before = trace->nr_events;
4080 ++trace->nr_events;
4082 err = trace__deliver_event(trace, event);
4099 if (trace->nr_events == before) {
4108 if (trace__flush_events(trace))
4116 thread__zput(trace->current);
4120 if (trace->sort_events)
4121 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4124 if (trace->summary)
4125 trace__fprintf_thread_summary(trace, trace->output);
4127 if (trace->show_tool_stats) {
4128 fprintf(trace->output, "Stats:\n "
4131 trace->stats.vfs_getname,
4132 trace->stats.proc_getname);
4137 trace__symbols__exit(trace);
4140 cgroup__put(trace->cgroup);
4141 trace->evlist = NULL;
4142 trace->live = false;
4163 fprintf(trace->output, "%s\n", errbuf);
4167 fprintf(trace->output,
4174 fprintf(trace->output, "Not enough memory to run!\n");
4178 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4182 static int trace__replay(struct trace *trace) argument
4190 .force = trace->force,
4196 trace->tool.sample = trace__process_sample;
4197 trace->tool.mmap = perf_event__process_mmap;
4198 trace->tool.mmap2 = perf_event__process_mmap2;
4199 trace->tool.comm = perf_event__process_comm;
4200 trace->tool.exit = perf_event__process_exit;
4201 trace->tool.fork = perf_event__process_fork;
4202 trace->tool.attr = perf_event__process_attr;
4203 trace->tool.tracing_data = perf_event__process_tracing_data;
4204 trace->tool.build_id = perf_event__process_build_id;
4205 trace->tool.namespaces = perf_event__process_namespaces;
4207 trace->tool.ordered_events = true;
4208 trace->tool.ordering_requires_timestamps = true;
4211 trace->multiple_threads = true;
4213 session = perf_session__new(&data, false, &trace->tool);
4217 if (trace->opts.target.pid)
4218 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4220 if (trace->opts.target.tid)
4221 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4226 trace->host = &session->machines.host;
4272 else if (trace->summary)
4273 trace__fprintf_thread_summary(trace, trace->output);
4305 struct trace *trace, FILE *fp) argument
4333 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4339 if (trace->errno_summary && stats->nr_failures) {
4340 const char *arch_name = perf_env__arch(trace->host->env);
4357 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) argument
4366 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4375 if (trace->sched)
4380 printed += thread__dump_stats(ttrace, trace, fp);
4397 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) argument
4404 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4412 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4422 struct trace *trace = opt->value; local
4424 trace->duration_filter = atof(str);
4433 struct trace *trace = opt->value; local
4443 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4444 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4446 if (trace->filter_pids.entries == NULL)
4449 trace->filter_pids.entries[0] = getpid();
4451 for (i = 1; i < trace->filter_pids.nr; ++i)
4452 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4460 static int trace__open_output(struct trace *trace, const char *filename) argument
4472 trace->output = fopen(filename, "w");
4474 return trace->output == NULL ? -errno : 0;
4562 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4570 struct trace *trace = (struct trace *)opt->value; local
4583 trace->not_ev_qualifier = true;
4591 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4592 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4628 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4629 if (trace->ev_qualifier == NULL) {
4630 fputs("Not enough memory to parse event qualifier", trace->output);
4634 if (trace__validate_ev_qualifier(trace))
4636 trace->trace_syscalls = true;
4643 .value = &trace->evlist,
4656 struct trace *trace = opt->value; local
4658 if (!list_empty(&trace->evlist->core.entries)) {
4660 .value = &trace->evlist,
4664 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4671 struct trace *trace = arg; local
4674 if (!strcmp(var, "trace.add_events")) {
4675 trace->perfconfig_events = strdup(value);
4676 if (trace->perfconfig_events == NULL) {
4677 pr_err("Not enough memory for %s\n", "trace.add_events");
4680 } else if (!strcmp(var, "trace.show_timestamp")) {
4681 trace->show_tstamp = perf_config_bool(var, value);
4682 } else if (!strcmp(var, "trace.show_duration")) {
4683 trace->show_duration = perf_config_bool(var, value);
4684 } else if (!strcmp(var, "trace.show_arg_names")) {
4685 trace->show_arg_names = perf_config_bool(var, value);
4686 if (!trace->show_arg_names)
4687 trace->show_zeros = true;
4688 } else if (!strcmp(var, "trace.show_zeros")) {
4690 if (!trace->show_arg_names && !new_show_zeros) {
4691 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4694 trace->show_zeros = new_show_zeros;
4695 } else if (!strcmp(var, "trace.show_prefix")) {
4696 trace->show_string_prefix = perf_config_bool(var, value);
4697 } else if (!strcmp(var, "trace.no_inherit")) {
4698 trace->opts.no_inherit = perf_config_bool(var, value);
4699 } else if (!strcmp(var, "trace.args_alignment")) {
4702 trace->args_alignment = args_alignment;
4703 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4705 trace->libtraceevent_print = true;
4707 trace->libtraceevent_print = false;
4716 "perf trace [<options>] [<command>]",
4717 "perf trace [<options>] -- <command> [<options>]",
4718 "perf trace record [<options>] [<command>]",
4719 "perf trace record [<options>] -- <command> [<options>]",
4722 struct trace trace = { local
4747 OPT_CALLBACK('e', "event", &trace, "event",
4750 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4752 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4754 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4755 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4759 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4760 "trace events on existing process id"),
4761 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4762 "trace events on existing thread id"),
4763 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4765 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4767 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4769 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4771 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4774 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4776 OPT_CALLBACK(0, "duration", &trace, "float",
4782 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4784 OPT_BOOLEAN('T', "time", &trace.full_time,
4786 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4788 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4790 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4792 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4794 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4795 "Trace pagefaults", parse_pagefaults, "maj"),
4796 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4797 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4798 OPT_CALLBACK(0, "call-graph", &trace.opts,
4801 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4803 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4805 OPT_ULONG(0, "max-events", &trace.max_events,
4807 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4810 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4814 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4816 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4820 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4822 OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
4825 OPTS_EVSWITCH(&trace.evswitch),
4838 trace.evlist = evlist__new();
4839 trace.sctbl = syscalltbl__new();
4841 if (trace.evlist == NULL || trace.sctbl == NULL) {
4851 * global setting. If it fails we'll get something in 'perf trace -v'
4856 err = perf_config(trace__config, &trace);
4872 * .perfconfig trace.add_events, and filter those out.
4874 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4875 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4876 trace.trace_syscalls = true;
4884 if (trace.perfconfig_events != NULL) {
4888 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4890 parse_events_print_error(&parse_err, trace.perfconfig_events);
4895 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4900 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4902 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4903 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4908 trace.syscalls.events.augmented = evsel;
4910 evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4921 trace.bpf_obj = evsel->bpf_obj;
4928 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
4929 trace.trace_syscalls = true;
4936 * This is more to fix the current .perfconfig trace.add_events
4941 * to trace.add_events in the form of
4942 * trace.bpf_augmented_syscalls, that will be only parsed if we
4945 * .perfconfig trace.add_events is still useful if we want, for
4947 * 'perf trace --config determinism.profile' mode, where for some
4955 if (!trace.trace_syscalls) {
4956 trace__delete_augmented_syscalls(&trace);
4958 trace__set_bpf_map_filtered_pids(&trace);
4959 trace__set_bpf_map_syscalls(&trace);
4960 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmen…
4964 err = bpf__setup_stdout(trace.evlist);
4966 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4974 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4975 if (trace.dump.map == NULL) {
4981 if (trace.trace_pgfaults) {
4982 trace.opts.sample_address = true;
4983 trace.opts.sample_time = true;
4986 if (trace.opts.mmap_pages == UINT_MAX)
4989 if (trace.max_stack == UINT_MAX) {
4990 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4995 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4996 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5002 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5007 if (trace.evlist->core.nr_entries > 0) {
5008 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5009 if (evlist__set_syscall_tp_fields(trace.evlist)) {
5015 if (trace.sort_events) {
5016 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5017 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5031 if (trace.syscalls.events.augmented) {
5032 evlist__for_each_entry(trace.evlist, evsel) {
5036 trace.raw_augmented_syscalls = true;
5040 if (trace.syscalls.events.augmented->priv == NULL &&
5042 struct evsel *augmented = trace.syscalls.events.augmented;
5089 if (trace.raw_augmented_syscalls)
5090 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5098 return trace__record(&trace, argc-1, &argv[1]);
5101 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5102 trace.summary_only = true;
5105 if (trace.summary_only)
5106 trace.summary = trace.summary_only;
5109 err = trace__open_output(&trace, output_name);
5116 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5120 err = target__validate(&trace.opts.target);
5122 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5123 fprintf(trace.output, "%s", bf);
5127 err = target__parse_uid(&trace.opts.target);
5129 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5130 fprintf(trace.output, "%s", bf);
5134 if (!argc && target__none(&trace.opts.target))
5135 trace.opts.target.system_wide = true;
5138 err = trace__replay(&trace);
5140 err = trace__run(&trace, argc, argv);
5144 fclose(trace.output);
5146 zfree(&trace.perfconfig_events);
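
The trace/beauty/*.c includes and the syscall_arg__scnprintf_*() hits above all follow one pattern: each argument "beautifier" is a small scnprintf-style formatter that turns the raw register value of a single syscall argument into a human-readable string, optionally keeping or dropping the common flag prefix (mirroring trace->show_string_prefix). The following is a minimal, self-contained sketch of that pattern only; the struct syscall_arg fields, the EX_* flag values, and scnprintf_open_flags() are simplified stand-ins for illustration, not the real definitions from builtin-trace.c.

/* Minimal sketch of a syscall-argument "beautifier"; types and flag
 * values are hypothetical stand-ins, not the perf-internal ones. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct syscall_arg {
	unsigned long val;       /* raw register value of the argument */
	bool show_string_prefix; /* keep the common prefix, e.g. "EX_" */
};

/* Hypothetical flag bits standing in for real open(2) O_* flags. */
#define EX_RDONLY 0x0
#define EX_WRONLY 0x1
#define EX_CREAT  0x40

static size_t scnprintf_open_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	const char *prefix = arg->show_string_prefix ? "EX_" : "";
	unsigned long flags = arg->val;
	size_t printed = 0;

	if (flags == EX_RDONLY)
		return snprintf(bf, size, "%sRDONLY", prefix);
	if (flags & EX_WRONLY)
		printed += snprintf(bf + printed, size - printed, "%sWRONLY", prefix);
	if (flags & EX_CREAT)
		printed += snprintf(bf + printed, size - printed, "%s%sCREAT",
				    printed ? "|" : "", prefix);
	return printed;
}

int main(void)
{
	char bf[64];
	struct syscall_arg arg = { .val = EX_WRONLY | EX_CREAT, .show_string_prefix = false };

	scnprintf_open_flags(bf, sizeof(bf), &arg);
	printf("open flags: %s\n", bf); /* prints "open flags: WRONLY|CREAT" */
	return 0;
}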