Lines matching full:trace in tools/perf/builtin-trace.c
2 * builtin-trace.c
4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
52 #include "trace/beauty/beauty.h"
53 #include "trace-event.h"
117 struct trace { struct
289 * The evsel->priv as used by 'perf trace'
727 #include "trace/beauty/generated/fsconfig_arrays.c"
897 #include "trace/beauty/arch_errno_names.c"
898 #include "trace/beauty/eventfd.c"
899 #include "trace/beauty/futex_op.c"
900 #include "trace/beauty/futex_val3.c"
901 #include "trace/beauty/mmap.c"
902 #include "trace/beauty/mode_t.c"
903 #include "trace/beauty/msg_flags.c"
904 #include "trace/beauty/open_flags.c"
905 #include "trace/beauty/perf_event_open.c"
906 #include "trace/beauty/pid.c"
907 #include "trace/beauty/sched_policy.c"
908 #include "trace/beauty/seccomp.c"
909 #include "trace/beauty/signum.c"
910 #include "trace/beauty/socket_type.c"
911 #include "trace/beauty/waitid_options.c"
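
Each of the "beauty" includes above compiles a pretty-printer for one argument type straight into builtin-trace.c. They all follow the same scnprintf shape; a minimal self-contained sketch of that shape (the struct and name here are illustrative stand-ins, not perf's exact types):

        #include <stdio.h>
        #include <fcntl.h>

        /* Stand-in for perf's struct syscall_arg: the raw register value. */
        struct syscall_arg { unsigned long val; };

        /* A beautifier formats one raw value into 'bf' and returns the length,
         * falling back to hex when it cannot decode the value. */
        static size_t scnprintf_open_flags(char *bf, size_t size, struct syscall_arg *arg)
        {
                if (arg->val & O_CREAT)
                        return snprintf(bf, size, "O_CREAT|%#lo", arg->val & ~(unsigned long)O_CREAT);
                return snprintf(bf, size, "%#lx", arg->val);
        }
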
1406 struct trace *trace) in thread__fd_path() argument
1410 if (ttrace == NULL || trace->fd_path_disabled) in thread__fd_path()
1417 if (!trace->live) in thread__fd_path()
1419 ++trace->stats.proc_getname; in thread__fd_path()
1431 const char *path = thread__fd_path(arg->thread, fd, arg->trace); in syscall_arg__scnprintf_fd()
1439 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size) in pid__scnprintf_fd() argument
1442 struct thread *thread = machine__find_thread(trace->host, pid, pid); in pid__scnprintf_fd()
1445 const char *path = thread__fd_path(thread, fd, trace); in pid__scnprintf_fd()
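
thread__fd_path() is what lets fd arguments render as "3</etc/passwd>": it maps a descriptor back to the path it refers to, consulting /proc when the trace is live (the stats.proc_getname counter above tallies those lookups). The procfs resolution step on its own looks roughly like this (a sketch, not perf's cached implementation):

        #include <stdio.h>
        #include <sys/types.h>
        #include <unistd.h>

        /* Sketch: resolve another process' fd to a path via /proc (Linux only). */
        static ssize_t fd_path(pid_t pid, int fd, char *buf, size_t size)
        {
                char link[64];
                ssize_t n;

                snprintf(link, sizeof(link), "/proc/%d/fd/%d", (int)pid, fd);
                n = readlink(link, buf, size - 1);
                if (n >= 0)
                        buf[n] = '\0'; /* readlink() does not NUL-terminate */
                return n;
        }
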
1502 if (!arg->trace->vfs_getname) in syscall_arg__scnprintf_filename()
1509 static bool trace__filter_duration(struct trace *trace, double t) in trace__filter_duration() argument
1511 return t < (trace->duration_filter * NSEC_PER_MSEC); in trace__filter_duration()
1514 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in __trace__fprintf_tstamp() argument
1516 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; in __trace__fprintf_tstamp()
1527 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in trace__fprintf_tstamp() argument
1530 return __trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_tstamp()
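
The timestamp column is plain arithmetic on the line above: ts = (tstamp - trace->base_time) / NSEC_PER_MSEC, where base_time is the first sample's time unless full_time (--time) disables the rebasing. For example, with base_time = 1000000000 ns and tstamp = 1234567890 ns the column shows 234567890 / 1000000 ≈ 234.568 ms.
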
1544 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) in trace__fprintf_comm_tid() argument
1548 if (trace->multiple_threads) { in trace__fprintf_comm_tid()
1549 if (trace->show_comm) in trace__fprintf_comm_tid()
1557 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, in trace__fprintf_entry_head() argument
1562 if (trace->show_tstamp) in trace__fprintf_entry_head()
1563 printed = trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_entry_head()
1564 if (trace->show_duration) in trace__fprintf_entry_head()
1566 return printed + trace__fprintf_comm_tid(trace, thread, fp); in trace__fprintf_entry_head()
1569 static int trace__process_event(struct trace *trace, struct machine *machine, in trace__process_event() argument
1576 color_fprintf(trace->output, PERF_COLOR_RED, in trace__process_event()
1593 struct trace *trace = container_of(tool, struct trace, tool); in trace__tool_process() local
1594 return trace__process_event(trace, machine, event, sample); in trace__tool_process()
1615 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) in trace__symbols_init() argument
1622 trace->host = machine__new_host(); in trace__symbols_init()
1623 if (trace->host == NULL) in trace__symbols_init()
1626 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); in trace__symbols_init()
1630 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, in trace__symbols_init()
1640 static void trace__symbols__exit(struct trace *trace) in trace__symbols__exit() argument
1642 machine__exit(trace->host); in trace__symbols__exit()
1643 trace->host = NULL; in trace__symbols__exit()
1753 static int trace__read_syscall_info(struct trace *trace, int id) in trace__read_syscall_info() argument
1757 const char *name = syscalltbl__name(trace->sctbl, id); in trace__read_syscall_info()
1760 if (trace->syscalls.table == NULL) { in trace__read_syscall_info()
1761 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); in trace__read_syscall_info()
1762 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
1766 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) { in trace__read_syscall_info()
1768 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); in trace__read_syscall_info()
1774 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
1777 …memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof… in trace__read_syscall_info()
1779 trace->syscalls.table = table; in trace__read_syscall_info()
1780 trace->sctbl->syscalls.max_id = id; in trace__read_syscall_info()
1783 sc = trace->syscalls.table + id; in trace__read_syscall_info()
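
trace__read_syscall_info() sizes the table for the syscall table's max_id up front and only grows it when a larger id shows up, zeroing the new tail so unknown slots read as nonexistent. The grow-then-zero idiom in isolation (a sketch under those assumptions):

        #include <stdlib.h>
        #include <string.h>

        struct entry { const char *name; };

        /* Grow *tab from old_max+1 to id+1 slots, zero-filling the new tail. */
        static int table_grow(struct entry **tab, int *old_max, int id)
        {
                struct entry *t = realloc(*tab, (id + 1) * sizeof(*t));

                if (t == NULL)
                        return -1;
                memset(t + *old_max + 1, 0, (id - *old_max) * sizeof(*t));
                *tab = t;
                *old_max = id;
                return 0;
        }
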
1845 static int trace__validate_ev_qualifier(struct trace *trace) in trace__validate_ev_qualifier() argument
1850 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); in trace__validate_ev_qualifier()
1852 trace->ev_qualifier_ids.entries = malloc(nr_allocated * in trace__validate_ev_qualifier()
1853 sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
1855 if (trace->ev_qualifier_ids.entries == NULL) { in trace__validate_ev_qualifier()
1857 trace->output); in trace__validate_ev_qualifier()
1862 strlist__for_each_entry(pos, trace->ev_qualifier) { in trace__validate_ev_qualifier()
1864 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; in trace__validate_ev_qualifier()
1867 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
1882 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
1887 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
1894 entries = realloc(trace->ev_qualifier_ids.entries, in trace__validate_ev_qualifier()
1895 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
1898 fputs("\nError:\t Not enough memory for parsing\n", trace->output); in trace__validate_ev_qualifier()
1901 trace->ev_qualifier_ids.entries = entries; in trace__validate_ev_qualifier()
1903 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
1907 trace->ev_qualifier_ids.nr = nr_used; in trace__validate_ev_qualifier()
1908 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); in trace__validate_ev_qualifier()
1914 zfree(&trace->ev_qualifier_ids.entries); in trace__validate_ev_qualifier()
1915 trace->ev_qualifier_ids.nr = 0; in trace__validate_ev_qualifier()
1919 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) in trace__syscall_enabled() argument
1923 if (trace->ev_qualifier_ids.nr == 0) in trace__syscall_enabled()
1926 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, in trace__syscall_enabled()
1927 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; in trace__syscall_enabled()
1930 return !trace->not_ev_qualifier; in trace__syscall_enabled()
1932 return trace->not_ev_qualifier; in trace__syscall_enabled()
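
The validated -e qualifier ends up as a qsort()ed int array, so trace__syscall_enabled() is a bsearch() plus the not_ev_qualifier flip (a leading '!' in the qualifier traces everything except the listed syscalls). The membership test in isolation, with intcmp written out here for self-containment:

        #include <stdlib.h>

        static int intcmp(const void *a, const void *b)
        {
                return *(const int *)a - *(const int *)b;
        }

        /* ids must already be sorted with the same comparator. */
        static int id_enabled(const int *ids, size_t nr, int id, int negated)
        {
                int found = bsearch(&id, ids, nr, sizeof(*ids), intcmp) != NULL;

                return negated ? !found : found;
        }
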
1964 * in tools/perf/trace/beauty/mount_flags.c
1988 struct trace *trace, struct thread *thread) in syscall__scnprintf_args() argument
2001 .trace = trace, in syscall__scnprintf_args()
2003 .show_string_prefix = trace->show_string_prefix, in syscall__scnprintf_args()
2036 !trace->show_zeros && in syscall__scnprintf_args()
2046 if (trace->show_arg_names) in syscall__scnprintf_args()
2075 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2079 static struct syscall *trace__syscall_info(struct trace *trace, argument
2098 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2107 if (id > trace->sctbl->syscalls.max_id) {
2109 if (id >= trace->sctbl->syscalls.max_id) {
2115 err = trace__read_syscall_info(trace, id);
2121 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2122 (err = trace__read_syscall_info(trace, id)) != 0)
2125 if (trace->syscalls.table[id].name == NULL) {
2126 if (trace->syscalls.table[id].nonexistent)
2131 return &trace->syscalls.table[id];
2136 …fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, s…
2137 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2138 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2139 fputs(" information\n", trace->output);
2206 static int trace__printf_interrupted_entry(struct trace *trace) argument
2212 if (trace->failure_only || trace->current == NULL)
2215 ttrace = thread__priv(trace->current);
2220 …printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->o…
2221 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2223 if (len < trace->args_alignment - 4)
2224 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2226 printed += fprintf(trace->output, " ...\n");
2229 ++trace->nr_events_printed;
2234 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, argument
2239 if (trace->print_sample) {
2242 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2285 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, argument
2296 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2302 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2303 ttrace = thread__trace(thread, trace->output);
2307 trace__fprintf_sample(trace, evsel, sample, thread);
2317 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2318 trace__printf_interrupted_entry(trace);
2329 if (evsel != trace->syscalls.events.sys_enter)
2330 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2336 args, augmented_args, augmented_args_size, trace, thread);
2339 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2342 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2343 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2344 if (trace->args_alignment > printed)
2345 alignment = trace->args_alignment - printed;
2346 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2354 if (trace->current != thread) {
2355 thread__put(trace->current);
2356 trace->current = thread__get(thread);
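
trace->current caches the thread whose syscall entry was printed last, so trace__printf_interrupted_entry() can close that entry with " ..." when another thread's event lands first. The swap above keeps the reference counts balanced; annotated (thread__get()/thread__put() are the refcount helpers seen elsewhere in this listing, thread__put() tolerating NULL):

        if (trace->current != thread) {
                thread__put(trace->current);          /* drop the reference held on the old thread */
                trace->current = thread__get(thread); /* take one on the new thread before caching it */
        }
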
2364 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, argument
2370 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2378 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2379 ttrace = thread__trace(thread, trace->output);
2388 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2389 …syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, th…
2390 fprintf(trace->output, "%s", msg);
2397 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, argument
2404 trace->max_stack;
2407 if (machine__resolve(trace->host, &al, sample) < 0)
2415 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) argument
2422 …intf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2433 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, argument
2442 int alignment = trace->args_alignment;
2443 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2449 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2450 ttrace = thread__trace(thread, trace->output);
2454 trace__fprintf_sample(trace, evsel, sample, thread);
2458 if (trace->summary)
2459 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2461 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2464 ++trace->stats.vfs_getname;
2469 if (trace__filter_duration(trace, duration))
2472 } else if (trace->duration_filter)
2476 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2478 if (callchain_cursor.nr < trace->min_stack)
2484 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2487 …trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace-…
2490 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2492 printed += fprintf(trace->output, " ... [");
2493 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2495 printed += fprintf(trace->output, "]: %s()", sc->name);
2505 fprintf(trace->output, ")%*s= ", alignment, " ");
2511 fprintf(trace->output, "%ld", ret);
2518 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2521 fprintf(trace->output, "0 (Timeout)");
2527 .trace = trace,
2531 fprintf(trace->output, "%s", bf);
2533 fprintf(trace->output, "%#lx", ret);
2535 struct thread *child = machine__find_thread(trace->host, ret, ret);
2538 fprintf(trace->output, "%ld", ret);
2540 fprintf(trace->output, " (%s)", thread__comm_str(child));
2546 fputc('\n', trace->output);
2552 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2556 trace__fprintf_callchain(trace, sample);
2567 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, argument
2571 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2628 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, argument
2634 struct thread *thread = machine__findnew_thread(trace->host,
2637 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2643 trace->runtime_ms += runtime_ms;
2649 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2682 static void bpf_output__fprintf(struct trace *trace, argument
2686 bpf_output__printer, NULL, trace->output);
2687 ++trace->nr_events_printed;
2690 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample… argument
2707 .trace = trace,
2709 .show_string_prefix = trace->show_string_prefix,
2742 !trace->show_zeros &&
2755 if (1 || trace->show_arg_names)
2761 return printed + fprintf(trace->output, "%s", bf);
2764 static int trace__event_handler(struct trace *trace, struct evsel *evsel, argument
2779 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2782 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2784 if (callchain_cursor.nr < trace->min_stack)
2790 trace__printf_interrupted_entry(trace);
2791 trace__fprintf_tstamp(trace, sample->time, trace->output);
2793 if (trace->trace_syscalls && trace->show_duration)
2794 fprintf(trace->output, "( ): ");
2797 trace__fprintf_comm_tid(trace, thread, trace->output);
2799 if (evsel == trace->syscalls.events.augmented) {
2801 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2804 fprintf(trace->output, "%s(", sc->name);
2805 trace__fprintf_sys_enter(trace, evsel, sample);
2806 fputc(')', trace->output);
2817 fprintf(trace->output, "%s(", evsel->name);
2820 bpf_output__fprintf(trace, sample);
2823 trace__fprintf_sys_enter(trace, evsel, sample)) {
2824 if (trace->libtraceevent_print) {
2827 trace->output);
2829 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2835 fprintf(trace->output, ")\n");
2838 trace__fprintf_callchain(trace, sample);
2842 ++trace->nr_events_printed;
2870 static int trace__pgfault(struct trace *trace, argument
2882 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2885 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2887 if (callchain_cursor.nr < trace->min_stack)
2893 ttrace = thread__trace(thread, trace->output);
2902 if (trace->summary_only)
2907 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2909 fprintf(trace->output, "%sfault [",
2913 print_location(trace->output, sample, &al, false, true);
2915 fprintf(trace->output, "] => ");
2928 print_location(trace->output, sample, &al, true, false);
2930 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2933 trace__fprintf_callchain(trace, sample);
2937 ++trace->nr_events_printed;
2945 static void trace__set_base_time(struct trace *trace, argument
2957 if (trace->base_time == 0 && !trace->full_time &&
2959 trace->base_time = sample->time;
2968 struct trace *trace = container_of(tool, struct trace, tool); local
2974 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2978 trace__set_base_time(trace, evsel, sample);
2981 ++trace->nr_events;
2982 handler(trace, evsel, event, sample);
2989 static int trace__record(struct trace *trace, int argc, const char **argv) argument
3021 if (trace->trace_syscalls) {
3039 if (trace->trace_pgfaults & TRACE_PFMAJ)
3043 if (trace->trace_pgfaults & TRACE_PFMIN)
3057 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3129 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *s… argument
3135 trace__process_event(trace, trace->host, event, sample);
3139 evsel = evlist__id2evsel(trace->evlist, sample->id);
3141 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3145 if (evswitch__discard(&trace->evswitch, evsel))
3148 trace__set_base_time(trace, evsel, sample);
3152 …fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3157 handler(trace, evsel, event, sample);
3160 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3164 static int trace__add_syscall_newtp(struct trace *trace) argument
3167 struct evlist *evlist = trace->evlist;
3184 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3185 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3190 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3199 trace->syscalls.events.sys_enter = sys_enter;
3200 trace->syscalls.events.sys_exit = sys_exit;
3213 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) argument
3217 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3218 trace->ev_qualifier_ids.nr,
3219 trace->ev_qualifier_ids.entries);
3224 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3225 sys_exit = trace->syscalls.events.sys_exit;
3238 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name) argument
3240 if (trace->bpf_obj == NULL)
3243 return bpf_object__find_map_by_name(trace->bpf_obj, name);
3246 static void trace__set_bpf_map_filtered_pids(struct trace *trace) argument
3248 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
3251 static void trace__set_bpf_map_syscalls(struct trace *trace) argument
3253 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
3254 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
3255 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
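
With BPF support built in, these maps are located by name inside the loaded bpf_object; bpf_object__find_map_by_name() is the stock libbpf lookup. Getting from a name to a usable fd (a sketch assuming the object is already opened and loaded):

        #include <bpf/libbpf.h>

        /* Sketch: name -> map fd inside an already-loaded bpf_object. */
        static int map_fd_by_name(struct bpf_object *obj, const char *name)
        {
                struct bpf_map *map = bpf_object__find_map_by_name(obj, name);

                return map ? bpf_map__fd(map) : -1;
        }
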
3258 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) argument
3260 if (trace->bpf_obj == NULL)
3263 return bpf_object__find_program_by_title(trace->bpf_obj, name);
3266 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, argument
3274 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3279 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3286 prog = trace__find_bpf_program_by_title(trace, prog_name);
3296 return trace->syscalls.unaugmented_prog;
3299 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) argument
3301 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3306 …sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3307 …sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3310 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) argument
3312 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3313 …return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_…
3316 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) argument
3318 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3319 …return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_p…
3322 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_en… argument
3324 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3341 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace) argument
3343 int fd = bpf_map__fd(trace->syscalls.map);
3345 .enabled = !trace->not_ev_qualifier,
3350 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
3351 int key = trace->ev_qualifier_ids.entries[i];
3354 trace__init_bpf_map_syscall_args(trace, key, &value);
3355 trace__init_syscall_bpf_progs(trace, key);
3366 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled) argument
3368 int fd = bpf_map__fd(trace->syscalls.map);
3374 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3376 trace__init_bpf_map_syscall_args(trace, key, &value);
3386 static int trace__init_syscalls_bpf_map(struct trace *trace) argument
3390 if (trace->ev_qualifier_ids.nr)
3391 enabled = trace->not_ev_qualifier;
3393 return __trace__init_syscalls_bpf_map(trace, enabled);
3396 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *s… argument
3412 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3413 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3418 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3470 …pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_ent…
3471 if (pair_prog == trace->syscalls.unaugmented_prog)
3484 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) argument
3486 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3487 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3490 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3493 if (!trace__syscall_enabled(trace, key))
3496 trace__init_syscall_bpf_progs(trace, key);
3499 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3503 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3537 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3538 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3549 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3556 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
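
syscalls_sys_enter/syscalls_sys_exit are tail-call program arrays keyed by syscall id: each slot holds the fd of the BPF program that augments that syscall, with the "unaugmented" program as filler, and compatible syscalls can share a pair's program. Filling one slot follows the standard prog-array pattern (a sketch; the map and program are assumed already loaded):

        #include <bpf/bpf.h>
        #include <bpf/libbpf.h>

        /* Sketch: make tail calls through slot 'id' land in 'prog'. */
        static int set_prog_array_slot(int map_fd, int id, struct bpf_program *prog)
        {
                int prog_fd = bpf_program__fd(prog);

                return bpf_map_update_elem(map_fd, &id, &prog_fd, BPF_ANY);
        }
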
3576 static void trace__delete_augmented_syscalls(struct trace *trace) argument
3580 evlist__remove(trace->evlist, trace->syscalls.events.augmented);
3581 evsel__delete(trace->syscalls.events.augmented);
3582 trace->syscalls.events.augmented = NULL;
3584 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
3585 if (evsel->bpf_obj == trace->bpf_obj) {
3586 evlist__remove(trace->evlist, evsel);
3592 bpf_object__close(trace->bpf_obj);
3593 trace->bpf_obj = NULL;
3596 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3602 static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
3606 static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
3610 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3615 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3620 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3626 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3631 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
3636 static bool trace__only_augmented_syscalls_evsels(struct trace *trace) argument
3640 evlist__for_each_entry(trace->evlist, evsel) {
3641 if (evsel == trace->syscalls.events.augmented ||
3642 evsel->bpf_obj == trace->bpf_obj)
3651 static int trace__set_ev_qualifier_filter(struct trace *trace) argument
3653 if (trace->syscalls.map)
3654 return trace__set_ev_qualifier_bpf_filter(trace);
3655 if (trace->syscalls.events.sys_enter)
3656 return trace__set_ev_qualifier_tp_filter(trace);
3678 static int trace__set_filter_loop_pids(struct trace *trace) argument
3684 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3687 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3700 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3701 if (!err && trace->filter_pids.map)
3702 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3707 static int trace__set_filter_pids(struct trace *trace) argument
3716 if (trace->filter_pids.nr > 0) {
3717 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3718 trace->filter_pids.entries);
3719 if (!err && trace->filter_pids.map) {
3720 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3721 trace->filter_pids.entries);
3723 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3724 err = trace__set_filter_loop_pids(trace);
3730 static int __trace__deliver_event(struct trace *trace, union perf_event *event) argument
3732 struct evlist *evlist = trace->evlist;
3737 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3739 trace__handle_event(trace, event, &sample);
3744 static int __trace__flush_events(struct trace *trace) argument
3746 u64 first = ordered_events__first_time(&trace->oe.data);
3747 u64 flush = trace->oe.last - NSEC_PER_SEC;
3751 return ordered_events__flush_time(&trace->oe.data, flush);
3756 static int trace__flush_events(struct trace *trace) argument
3758 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3761 static int trace__deliver_event(struct trace *trace, union perf_event *event) argument
3765 if (!trace->sort_events)
3766 return __trace__deliver_event(trace, event);
3768 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3772 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3776 return trace__flush_events(trace);
3782 struct trace *trace = container_of(oe, struct trace, oe.data); local
3784 return __trace__deliver_event(trace, event->event);
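
With --sort-events, nothing is printed straight off the ring buffers: trace__deliver_event() queues each event under its parsed timestamp, and __trace__flush_events() only releases events at least NSEC_PER_SEC older than the newest timestamp seen (oe.last), so samples arriving out of order across CPUs still print in time order. For example, once oe.last reaches 5.2s, everything stamped up to 4.2s is flushed and delivered.
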
3802 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3900 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) argument
3902 struct evlist *evlist = trace->evlist;
3909 if (trace__expand_filter(trace, evsel)) {
3918 static int trace__run(struct trace *trace, int argc, const char **argv) argument
3920 struct evlist *evlist = trace->evlist;
3927 trace->live = true;
3929 if (!trace->raw_augmented_syscalls) {
3930 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3933 if (trace->trace_syscalls)
3934 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3937 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3941 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3945 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3949 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3953 if (trace->sched &&
3960 * trace -G A -e sched:*switch
3965 * trace -e sched:*switch -G A
3973 * trace -G A -e sched:*switch -G B
3981 if (trace->cgroup)
3982 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3984 err = evlist__create_maps(evlist, &trace->opts.target);
3986 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3990 err = trace__symbols_init(trace, evlist);
3992 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3996 evlist__config(evlist, &trace->opts, &callchain_param);
3999 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
4001 fprintf(trace->output, "Couldn't run the workload!\n");
4020 err = trace__set_filter_pids(trace);
4024 if (trace->syscalls.map)
4025 trace__init_syscalls_bpf_map(trace);
4027 if (trace->syscalls.prog_array.sys_enter)
4028 trace__init_syscalls_bpf_prog_array_maps(trace);
4030 if (trace->ev_qualifier_ids.nr > 0) {
4031 err = trace__set_ev_qualifier_filter(trace);
4035 if (trace->syscalls.events.sys_exit) {
4037 trace->syscalls.events.sys_exit->filter);
4052 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
4054 err = trace__expand_filters(trace, &evsel);
4061 if (trace->dump.map)
4062 bpf_map__fprintf(trace->dump.map, trace->output);
4064 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4068 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
4074 if (trace->opts.initial_delay) {
4075 usleep(trace->opts.initial_delay * 1000);
4079 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4092 evsel->core.attr.sample_max_stack = trace->max_stack;
4095 before = trace->nr_events;
4106 ++trace->nr_events;
4108 err = trace__deliver_event(trace, event);
4125 if (trace->nr_events == before) {
4134 if (trace__flush_events(trace))
4142 thread__zput(trace->current);
4146 if (trace->sort_events)
4147 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4150 if (trace->summary)
4151 trace__fprintf_thread_summary(trace, trace->output);
4153 if (trace->show_tool_stats) {
4154 fprintf(trace->output, "Stats:\n "
4157 trace->stats.vfs_getname,
4158 trace->stats.proc_getname);
4163 trace__symbols__exit(trace);
4166 cgroup__put(trace->cgroup);
4167 trace->evlist = NULL;
4168 trace->live = false;
4189 fprintf(trace->output, "%s\n", errbuf);
4193 fprintf(trace->output,
4200 fprintf(trace->output, "Not enough memory to run!\n");
4204 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4208 static int trace__replay(struct trace *trace) argument
4216 .force = trace->force,
4222 trace->tool.sample = trace__process_sample;
4223 trace->tool.mmap = perf_event__process_mmap;
4224 trace->tool.mmap2 = perf_event__process_mmap2;
4225 trace->tool.comm = perf_event__process_comm;
4226 trace->tool.exit = perf_event__process_exit;
4227 trace->tool.fork = perf_event__process_fork;
4228 trace->tool.attr = perf_event__process_attr;
4229 trace->tool.tracing_data = perf_event__process_tracing_data;
4230 trace->tool.build_id = perf_event__process_build_id;
4231 trace->tool.namespaces = perf_event__process_namespaces;
4233 trace->tool.ordered_events = true;
4234 trace->tool.ordering_requires_timestamps = true;
4237 trace->multiple_threads = true;
4239 session = perf_session__new(&data, &trace->tool);
4243 if (trace->opts.target.pid)
4244 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4246 if (trace->opts.target.tid)
4247 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4252 trace->host = &session->machines.host;
4294 else if (trace->summary)
4295 trace__fprintf_thread_summary(trace, trace->output);
4327 struct trace *trace, FILE *fp) argument
4355 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4361 if (trace->errno_summary && stats->nr_failures) {
4362 const char *arch_name = perf_env__arch(trace->host->env);
4379 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) argument
4388 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4397 if (trace->sched)
4402 printed += thread__dump_stats(ttrace, trace, fp);
4419 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) argument
4426 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4434 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4444 struct trace *trace = opt->value; local
4446 trace->duration_filter = atof(str);
4455 struct trace *trace = opt->value; local
4465 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4466 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4468 if (trace->filter_pids.entries == NULL)
4471 trace->filter_pids.entries[0] = getpid();
4473 for (i = 1; i < trace->filter_pids.nr; ++i)
4474 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4482 static int trace__open_output(struct trace *trace, const char *filename) argument
4494 trace->output = fopen(filename, "w");
4496 return trace->output == NULL ? -errno : 0;
4584 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4592 struct trace *trace = (struct trace *)opt->value; local
4605 trace->not_ev_qualifier = true;
4613 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4614 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4650 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4651 if (trace->ev_qualifier == NULL) {
4652 fputs("Not enough memory to parse event qualifier", trace->output);
4656 if (trace__validate_ev_qualifier(trace))
4658 trace->trace_syscalls = true;
4665 .value = &trace->evlist,
4681 struct trace *trace = opt->value; local
4683 if (!list_empty(&trace->evlist->core.entries)) {
4685 .value = &trace->evlist,
4689 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4696 struct trace *trace = arg; local
4699 if (!strcmp(var, "trace.add_events")) {
4700 trace->perfconfig_events = strdup(value);
4701 if (trace->perfconfig_events == NULL) {
4702 pr_err("Not enough memory for %s\n", "trace.add_events");
4705 } else if (!strcmp(var, "trace.show_timestamp")) {
4706 trace->show_tstamp = perf_config_bool(var, value);
4707 } else if (!strcmp(var, "trace.show_duration")) {
4708 trace->show_duration = perf_config_bool(var, value);
4709 } else if (!strcmp(var, "trace.show_arg_names")) {
4710 trace->show_arg_names = perf_config_bool(var, value);
4711 if (!trace->show_arg_names)
4712 trace->show_zeros = true;
4713 } else if (!strcmp(var, "trace.show_zeros")) {
4715 if (!trace->show_arg_names && !new_show_zeros) {
4716 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4719 trace->show_zeros = new_show_zeros;
4720 } else if (!strcmp(var, "trace.show_prefix")) {
4721 trace->show_string_prefix = perf_config_bool(var, value);
4722 } else if (!strcmp(var, "trace.no_inherit")) {
4723 trace->opts.no_inherit = perf_config_bool(var, value);
4724 } else if (!strcmp(var, "trace.args_alignment")) {
4727 trace->args_alignment = args_alignment;
4728 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4730 trace->libtraceevent_print = true;
4732 trace->libtraceevent_print = false;
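
trace__config() maps these keys onto ~/.perfconfig. A sample [trace] section using only the keys parsed above (values are illustrative):

        [trace]
                add_events = probe:vfs_getname
                show_timestamp = yes
                show_duration = yes
                show_arg_names = yes
                show_zeros = no
                show_prefix = no
                no_inherit = false
                args_alignment = 70
                tracepoint_beautifiers = libtraceevent
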
4738 static void trace__exit(struct trace *trace) argument
4742 strlist__delete(trace->ev_qualifier);
4743 free(trace->ev_qualifier_ids.entries);
4744 if (trace->syscalls.table) {
4745 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4746 syscall__exit(&trace->syscalls.table[i]);
4747 free(trace->syscalls.table);
4749 syscalltbl__delete(trace->sctbl);
4750 zfree(&trace->perfconfig_events);
4756 "perf trace [<options>] [<command>]",
4757 "perf trace [<options>] -- <command> [<options>]",
4758 "perf trace record [<options>] [<command>]",
4759 "perf trace record [<options>] -- <command> [<options>]",
4762 struct trace trace = { local
4787 OPT_CALLBACK('e', "event", &trace, "event",
4790 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4792 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4794 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4795 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4799 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4800 "trace events on existing process id"),
4801 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4802 "trace events on existing thread id"),
4803 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4805 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4807 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4809 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4811 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4813 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4815 OPT_CALLBACK(0, "duration", &trace, "float",
4821 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4823 OPT_BOOLEAN('T', "time", &trace.full_time,
4825 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4827 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4829 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4831 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4833 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4834 "Trace pagefaults", parse_pagefaults, "maj"),
4835 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4836 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4837 OPT_CALLBACK(0, "call-graph", &trace.opts,
4840 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4842 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4844 OPT_ULONG(0, "max-events", &trace.max_events,
4846 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4849 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4853 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4855 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4859 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4861 OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
4864 OPTS_EVSWITCH(&trace.evswitch),
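
Put together, a few illustrative invocations that exercise only options from this table:

        perf trace -e 'open*,close' -p 1234 --max-events 64
        perf trace -s -- sleep 1
        perf trace --failure --errno-summary -a -- sleep 5
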
4879 trace.evlist = evlist__new();
4880 trace.sctbl = syscalltbl__new();
4882 if (trace.evlist == NULL || trace.sctbl == NULL) {
4892 * global setting. If it fails we'll get something in 'perf trace -v'
4897 err = perf_config(trace__config, &trace);
4913 * .perfconfig trace.add_events, and filter those out.
4915 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4916 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4917 trace.trace_syscalls = true;
4925 if (trace.perfconfig_events != NULL) {
4929 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4931 parse_events_print_error(&parse_err, trace.perfconfig_events);
4936 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4941 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4943 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4944 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4949 trace.syscalls.events.augmented = evsel;
4951 evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4962 trace.bpf_obj = evsel->bpf_obj;
4969 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
4970 trace.trace_syscalls = true;
4977 * This is more to fix the current .perfconfig trace.add_events
4982 * to trace.add_events in the form of
4983 * trace.bpf_augmented_syscalls, that will be only parsed if we
4986 * .perfconfig trace.add_events is still useful if we want, for
4988 * 'perf trace --config determinism.profile' mode, where for some
4996 if (!trace.trace_syscalls) {
4997 trace__delete_augmented_syscalls(&trace);
4999 trace__set_bpf_map_filtered_pids(&trace);
5000 trace__set_bpf_map_syscalls(&trace);
5001 …trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmen…
5005 err = bpf__setup_stdout(trace.evlist);
5007 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
5015 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
5016 if (trace.dump.map == NULL) {
5022 if (trace.trace_pgfaults) {
5023 trace.opts.sample_address = true;
5024 trace.opts.sample_time = true;
5027 if (trace.opts.mmap_pages == UINT_MAX)
5030 if (trace.max_stack == UINT_MAX) {
5031 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5036 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5037 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5043 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5048 if (trace.evlist->core.nr_entries > 0) {
5049 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5050 if (evlist__set_syscall_tp_fields(trace.evlist)) {
5056 if (trace.sort_events) {
5057 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5058 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5072 if (trace.syscalls.events.augmented) {
5073 evlist__for_each_entry(trace.evlist, evsel) {
5077 trace.raw_augmented_syscalls = true;
5081 if (trace.syscalls.events.augmented->priv == NULL &&
5083 struct evsel *augmented = trace.syscalls.events.augmented;
5130 if (trace.raw_augmented_syscalls)
5131 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
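
The sizing arithmetic here reflects the raw_syscalls:sys_enter layout: the payload carries the syscall id plus six unsigned-long arguments after the tracepoint's common fields, hence (6 + 1) * sizeof(long) + sc->id.offset. On 64-bit, with the usual 8-byte run of common fields before the id, that works out to 8 + 7 * 8 = 64 bytes.
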
5139 return trace__record(&trace, argc-1, &argv[1]);
5142 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5143 trace.summary_only = true;
5146 if (trace.summary_only)
5147 trace.summary = trace.summary_only;
5150 err = trace__open_output(&trace, output_name);
5157 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5161 err = target__validate(&trace.opts.target);
5163 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5164 fprintf(trace.output, "%s", bf);
5168 err = target__parse_uid(&trace.opts.target);
5170 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5171 fprintf(trace.output, "%s", bf);
5175 if (!argc && target__none(&trace.opts.target))
5176 trace.opts.target.system_wide = true;
5179 err = trace__replay(&trace);
5181 err = trace__run(&trace, argc, argv);
5185 fclose(trace.output);
5187 trace__exit(&trace);