Lines matching full:trace in tools/perf/builtin-trace.c

2  * builtin-trace.c
4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
52 #include "trace/beauty/beauty.h"
53 #include "trace-event.h"
118 struct trace {
290 * The evsel->priv as used by 'perf trace'
725 #include "trace/beauty/generated/fsconfig_arrays.c"
895 #include "trace/beauty/arch_errno_names.c"
896 #include "trace/beauty/eventfd.c"
897 #include "trace/beauty/futex_op.c"
898 #include "trace/beauty/futex_val3.c"
899 #include "trace/beauty/mmap.c"
900 #include "trace/beauty/mode_t.c"
901 #include "trace/beauty/msg_flags.c"
902 #include "trace/beauty/open_flags.c"
903 #include "trace/beauty/perf_event_open.c"
904 #include "trace/beauty/pid.c"
905 #include "trace/beauty/sched_policy.c"
906 #include "trace/beauty/seccomp.c"
907 #include "trace/beauty/signum.c"
908 #include "trace/beauty/socket_type.c"
909 #include "trace/beauty/waitid_options.c"
1408 struct trace *trace) /* thread__fd_path() */
1412 if (ttrace == NULL || trace->fd_path_disabled)
1419 if (!trace->live)
1421 ++trace->stats.proc_getname;
1433 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1441 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1444 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1447 const char *path = thread__fd_path(thread, fd, trace);
1504 if (!arg->trace->vfs_getname)
1511 static bool trace__filter_duration(struct trace *trace, double t)
1513 return t < (trace->duration_filter * NSEC_PER_MSEC);
1516 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1518 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1529 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1532 return __trace__fprintf_tstamp(trace, tstamp, fp);
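
For context: both the duration filter (1513) and the timestamp column (1518) do their arithmetic in milliseconds derived from nanosecond counters. A minimal standalone sketch of the same math, with illustrative names rather than perf's internals:

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_MSEC 1000000ULL

    /* Timestamp column: a raw ns timestamp rendered relative to a base time, in ms. */
    static double tstamp_ms(uint64_t tstamp, uint64_t base_time)
    {
            return (double)(tstamp - base_time) / NSEC_PER_MSEC;
    }

    /* Duration filter: a threshold given in ms, compared against a ns duration.
     * Returns nonzero when the event is long enough to be shown. */
    static int duration_passes(double duration_ns, double filter_ms)
    {
            return duration_ns >= filter_ms * NSEC_PER_MSEC;
    }

    int main(void)
    {
            printf("%10.3f ms\n", tstamp_ms(2500000, 1000000)); /* prints 1.500 */
            return duration_passes(2500000, 1.0) ? 0 : 1;
    }
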
1553 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1557 if (trace->multiple_threads) {
1558 if (trace->show_comm)
1566 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1571 if (trace->show_tstamp)
1572 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1573 if (trace->show_duration)
1575 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1578 static int trace__process_event(struct trace *trace, struct machine *machine,
1585 color_fprintf(trace->output, PERF_COLOR_RED,
1602 struct trace *trace = container_of(tool, struct trace, tool);
1603 return trace__process_event(trace, machine, event, sample);
1624 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1631 trace->host = machine__new_host();
1632 if (trace->host == NULL)
1635 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1639 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1649 static void trace__symbols__exit(struct trace *trace)
1651 machine__exit(trace->host);
1652 trace->host = NULL;
1762 static int trace__read_syscall_info(struct trace *trace, int id)
1766 const char *name = syscalltbl__name(trace->sctbl, id);
1769 if (trace->syscalls.table == NULL) {
1770 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1771 if (trace->syscalls.table == NULL)
1775 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1777 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1783 if (trace->syscalls.table == NULL)
1786 …memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof…
1788 trace->syscalls.table = table;
1789 trace->sctbl->syscalls.max_id = id;
1792 sc = trace->syscalls.table + id;
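
The syscall table above is grown on demand: calloc() sizes it to the syscall table's known max_id up front (1770), and when an id beyond that shows up it is realloc()ed and only the new tail is zeroed (1777, 1786). A minimal sketch of that pattern, with a hypothetical entry type standing in for struct syscall:

    #include <stdlib.h>
    #include <string.h>

    struct entry { const char *name; };

    static struct entry *table;
    static int max_id = -1;

    /* Grow 'table' so that 'id' is a valid index, zeroing only the newly
     * added slots, mirroring the realloc+memset dance above. realloc(NULL, n)
     * behaves as malloc(n), so the first call also handles allocation. */
    static int table__grow(int id)
    {
            if (id > max_id) {
                    struct entry *t = realloc(table, (id + 1) * sizeof(*t));
                    if (t == NULL)
                            return -1;
                    memset(t + max_id + 1, 0, (id - max_id) * sizeof(*t));
                    table = t;
                    max_id = id;
            }
            return 0;
    }
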
1854 static int trace__validate_ev_qualifier(struct trace *trace)
1859 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1861 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1862 sizeof(trace->ev_qualifier_ids.entries[0]));
1864 if (trace->ev_qualifier_ids.entries == NULL) {
1866 trace->output);
1871 strlist__for_each_entry(pos, trace->ev_qualifier) {
1873 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1876 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1891 trace->ev_qualifier_ids.entries[nr_used++] = id;
1896 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1903 entries = realloc(trace->ev_qualifier_ids.entries,
1904 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1907 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1910 trace->ev_qualifier_ids.entries = entries;
1912 trace->ev_qualifier_ids.entries[nr_used++] = id;
1916 trace->ev_qualifier_ids.nr = nr_used;
1917 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1923 zfree(&trace->ev_qualifier_ids.entries);
1924 trace->ev_qualifier_ids.nr = 0;
1928 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1932 if (trace->ev_qualifier_ids.nr == 0)
1935 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1936 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1939 return !trace->not_ev_qualifier;
1941 return trace->not_ev_qualifier;
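
The qualifier machinery sorts the collected ids once with qsort() (1917) and then tests each syscall with bsearch() using the same comparator (1935), with not_ev_qualifier flipping the sense for negated lists. A self-contained sketch, where intcmp is a stand-in for perf's helper:

    #include <stdlib.h>
    #include <stdbool.h>

    static int intcmp(const void *a, const void *b)
    {
            return *(const int *)a - *(const int *)b;
    }

    static bool id_enabled(int id, const int *ids, size_t nr, bool negated)
    {
            bool found;

            if (nr == 0)
                    return true;    /* no qualifier: trace everything */

            found = bsearch(&id, ids, nr, sizeof(int), intcmp) != NULL;
            return negated ? !found : found;
    }

    int main(void)
    {
            int ids[] = { 59, 0, 2 };

            qsort(ids, 3, sizeof(int), intcmp);
            return id_enabled(59, ids, 3, false) ? 0 : 1;
    }
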
1973 * in tools/perf/trace/beauty/mount_flags.c
1997 struct trace *trace, struct thread *thread) /* syscall__scnprintf_args() */
2010 .trace = trace,
2012 .show_string_prefix = trace->show_string_prefix,
2045 !trace->show_zeros &&
2055 if (trace->show_arg_names)
2084 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2088 static struct syscall *trace__syscall_info(struct trace *trace,
2107 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2116 if (id > trace->sctbl->syscalls.max_id) {
2118 if (id >= trace->sctbl->syscalls.max_id) {
2124 err = trace__read_syscall_info(trace, id);
2130 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2131 (err = trace__read_syscall_info(trace, id)) != 0)
2134 if (trace->syscalls.table[id].name == NULL) {
2135 if (trace->syscalls.table[id].nonexistent)
2140 return &trace->syscalls.table[id];
2145 …fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, s…
2146 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2147 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2148 fputs(" information\n", trace->output);
2212 static int trace__printf_interrupted_entry(struct trace *trace)
2218 if (trace->failure_only || trace->current == NULL)
2221 ttrace = thread__priv(trace->current);
2226 …printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->o…
2227 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2229 if (len < trace->args_alignment - 4)
2230 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2232 printed += fprintf(trace->output, " ...\n");
2235 ++trace->nr_events_printed;
2240 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2245 if (trace->print_sample) {
2248 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2291 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2302 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2308 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2309 ttrace = thread__trace(thread, trace->output);
2313 trace__fprintf_sample(trace, evsel, sample, thread);
2323 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2324 trace__printf_interrupted_entry(trace);
2335 if (evsel != trace->syscalls.events.sys_enter)
2336 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2342 args, augmented_args, augmented_args_size, trace, thread);
2345 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2348 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2349 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2350 if (trace->args_alignment > printed)
2351 alignment = trace->args_alignment - printed;
2352 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2360 if (trace->current != thread) {
2361 thread__put(trace->current);
2362 trace->current = thread__get(thread);
2370 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2376 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2384 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2385 ttrace = thread__trace(thread, trace->output);
2394 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2395 …syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, th…
2396 fprintf(trace->output, "%s", msg);
2403 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2410 trace->max_stack;
2413 if (machine__resolve(trace->host, &al, sample) < 0)
2421 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2428 …intf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2439 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2448 int alignment = trace->args_alignment;
2449 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2455 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2456 ttrace = thread__trace(thread, trace->output);
2460 trace__fprintf_sample(trace, evsel, sample, thread);
2464 if (trace->summary)
2465 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2467 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2470 ++trace->stats.vfs_getname;
2475 if (trace__filter_duration(trace, duration))
2478 } else if (trace->duration_filter)
2482 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2484 if (callchain_cursor.nr < trace->min_stack)
2490 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2493 …trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace-…
2496 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2498 printed += fprintf(trace->output, " ... [");
2499 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2501 printed += fprintf(trace->output, "]: %s()", sc->name);
2511 fprintf(trace->output, ")%*s= ", alignment, " ");
2517 fprintf(trace->output, "%ld", ret);
2524 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2527 fprintf(trace->output, "0 (Timeout)");
2533 .trace = trace,
2537 fprintf(trace->output, "%s", bf);
2539 fprintf(trace->output, "%#lx", ret);
2541 struct thread *child = machine__find_thread(trace->host, ret, ret);
2544 fprintf(trace->output, "%ld", ret);
2546 fprintf(trace->output, " (%s)", thread__comm_str(child));
2552 fputc('\n', trace->output);
2558 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2562 trace__fprintf_callchain(trace, sample);
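
On the sys_exit path above (2511-2527), the return value is rendered as a plain number, a hex value, or "-1 ENAME (message)" for failures, with the errno-to-name mapping coming from the generated arch_errno_names.c included at line 895. A rough sketch of that rendering, hard-coding one errno name since the arch tables are out of scope here:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Render a syscall return the way the sys_exit handler does for failures:
     * "-1 ENOENT (No such file or directory)". The name lookup is illustrative;
     * perf generates per-arch errno name tables for this. */
    static void print_ret(FILE *fp, long ret)
    {
            if (ret < 0) {
                    int err = -ret;
                    const char *name = (err == ENOENT) ? "ENOENT" : "E???";

                    fprintf(fp, "-1 %s (%s)", name, strerror(err));
            } else {
                    fprintf(fp, "%ld", ret);
            }
    }

    int main(void)
    {
            print_ret(stdout, -ENOENT);
            putchar('\n');
            return 0;
    }
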
2573 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2577 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2634 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2640 struct thread *thread = machine__findnew_thread(trace->host,
2643 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2649 trace->runtime_ms += runtime_ms;
2655 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2688 static void bpf_output__fprintf(struct trace *trace,
2692 bpf_output__printer, NULL, trace->output);
2693 ++trace->nr_events_printed;
2696 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample…
2713 .trace = trace,
2715 .show_string_prefix = trace->show_string_prefix,
2750 !trace->show_zeros &&
2759 if (trace->show_arg_names)
2765 return printed + fprintf(trace->output, "%s", bf);
2768 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2783 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2786 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2788 if (callchain_cursor.nr < trace->min_stack)
2794 trace__printf_interrupted_entry(trace);
2795 trace__fprintf_tstamp(trace, sample->time, trace->output);
2797 if (trace->trace_syscalls && trace->show_duration)
2798 fprintf(trace->output, "( ): ");
2801 trace__fprintf_comm_tid(trace, thread, trace->output);
2803 if (evsel == trace->syscalls.events.augmented) {
2805 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2808 fprintf(trace->output, "%s(", sc->name);
2809 trace__fprintf_sys_enter(trace, evsel, sample);
2810 fputc(')', trace->output);
2821 fprintf(trace->output, "%s(", evsel->name);
2824 bpf_output__fprintf(trace, sample);
2827 trace__fprintf_sys_enter(trace, evsel, sample)) {
2828 if (trace->libtraceevent_print) {
2831 trace->output);
2833 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2839 fprintf(trace->output, ")\n");
2842 trace__fprintf_callchain(trace, sample);
2846 ++trace->nr_events_printed;
2874 static int trace__pgfault(struct trace *trace,
2886 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2889 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2891 if (callchain_cursor.nr < trace->min_stack)
2897 ttrace = thread__trace(thread, trace->output);
2906 if (trace->summary_only)
2911 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2913 fprintf(trace->output, "%sfault [",
2917 print_location(trace->output, sample, &al, false, true);
2919 fprintf(trace->output, "] => ");
2932 print_location(trace->output, sample, &al, true, false);
2934 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2937 trace__fprintf_callchain(trace, sample);
2941 ++trace->nr_events_printed;
2949 static void trace__set_base_time(struct trace *trace,
2961 if (trace->base_time == 0 && !trace->full_time &&
2963 trace->base_time = sample->time;
2972 struct trace *trace = container_of(tool, struct trace, tool);
2978 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2982 trace__set_base_time(trace, evsel, sample);
2985 ++trace->nr_events;
2986 handler(trace, evsel, event, sample);
2993 static int trace__record(struct trace *trace, int argc, const char **argv)
3025 if (trace->trace_syscalls) {
3043 if (trace->trace_pgfaults & TRACE_PFMAJ)
3047 if (trace->trace_pgfaults & TRACE_PFMIN)
3061 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3129 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *s…
3135 trace__process_event(trace, trace->host, event, sample);
3139 evsel = evlist__id2evsel(trace->evlist, sample->id);
3141 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3145 if (evswitch__discard(&trace->evswitch, evsel))
3148 trace__set_base_time(trace, evsel, sample);
3152 …fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3157 handler(trace, evsel, event, sample);
3160 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3164 static int trace__add_syscall_newtp(struct trace *trace)
3167 struct evlist *evlist = trace->evlist;
3184 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3185 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3190 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3199 trace->syscalls.events.sys_enter = sys_enter;
3200 trace->syscalls.events.sys_exit = sys_exit;
3213 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3217 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3218 trace->ev_qualifier_ids.nr,
3219 trace->ev_qualifier_ids.entries);
3224 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3225 sys_exit = trace->syscalls.events.sys_exit;
3238 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
3240 if (trace->bpf_obj == NULL)
3243 return bpf_object__find_map_by_name(trace->bpf_obj, name);
3246 static void trace__set_bpf_map_filtered_pids(struct trace *trace)
3248 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
3251 static void trace__set_bpf_map_syscalls(struct trace *trace)
3253 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
3254 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
3255 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
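
All of these lookups funnel through bpf_object__find_map_by_name(), which returns NULL when the loaded BPF object does not define the map, so every caller above has to tolerate a missing map. A minimal sketch of the same guard (assuming libbpf):

    #include <bpf/libbpf.h>

    /* Find a map by name in an already-loaded BPF object, tolerating objects
     * that do not define it, as trace__find_bpf_map_by_name() does. */
    static struct bpf_map *find_map(struct bpf_object *obj, const char *name)
    {
            if (obj == NULL)
                    return NULL;
            return bpf_object__find_map_by_name(obj, name);
    }
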
3258 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3263 if (trace->bpf_obj == NULL)
3266 bpf_object__for_each_program(pos, trace->bpf_obj) {
3277 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3285 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3290 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3297 prog = trace__find_bpf_program_by_title(trace, prog_name);
3307 return trace->syscalls.unaugmented_prog;
3310 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3312 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3317 …sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3318 …sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3321 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3323 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3324 …return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_…
3327 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3329 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3330 …return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_p…
3333 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_en…
3335 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3352 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
3354 int fd = bpf_map__fd(trace->syscalls.map);
3356 .enabled = !trace->not_ev_qualifier,
3361 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
3362 int key = trace->ev_qualifier_ids.entries[i];
3365 trace__init_bpf_map_syscall_args(trace, key, &value);
3366 trace__init_syscall_bpf_progs(trace, key);
3377 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
3379 int fd = bpf_map__fd(trace->syscalls.map);
3385 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3387 trace__init_bpf_map_syscall_args(trace, key, &value);
3397 static int trace__init_syscalls_bpf_map(struct trace *trace)
3401 if (trace->ev_qualifier_ids.nr)
3402 enabled = trace->not_ev_qualifier;
3404 return __trace__init_syscalls_bpf_map(trace, enabled);
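
__trace__init_syscalls_bpf_map() walks every syscall id and writes one entry per key with bpf_map_update_elem() against the "syscalls" map. A sketch of that update loop; the value layout here is a stand-in, since perf's struct bpf_map_syscall_entry carries more than the enabled flag:

    #include <stdbool.h>
    #include <bpf/bpf.h>

    struct syscall_entry {
            bool enabled;
            /* perf's real value type also carries arg descriptions */
    };

    /* Seed keys [0, nr) of an array map with the same enabled flag. */
    static int init_syscalls_map(int map_fd, int nr, bool enabled)
    {
            struct syscall_entry value = { .enabled = enabled };
            int key, err;

            for (key = 0; key < nr; ++key) {
                    err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
                    if (err)
                            return err;
            }
            return 0;
    }
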
3407 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *s…
3423 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3424 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3429 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3481 …pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_ent…
3482 if (pair_prog == trace->syscalls.unaugmented_prog)
3495 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3497 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3498 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3501 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3504 if (!trace__syscall_enabled(trace, key))
3507 trace__init_syscall_bpf_progs(trace, key);
3510 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3514 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3548 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3549 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3560 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3567 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3587 static void trace__delete_augmented_syscalls(struct trace *trace)
3591 evlist__remove(trace->evlist, trace->syscalls.events.augmented);
3592 evsel__delete(trace->syscalls.events.augmented);
3593 trace->syscalls.events.augmented = NULL;
3595 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
3596 if (evsel->bpf_obj == trace->bpf_obj) {
3597 evlist__remove(trace->evlist, evsel);
3603 bpf_object__close(trace->bpf_obj);
3604 trace->bpf_obj = NULL;
3607 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3613 static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
3617 static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
3621 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3626 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3631 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3637 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3642 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
3647 static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
3651 evlist__for_each_entry(trace->evlist, evsel) {
3652 if (evsel == trace->syscalls.events.augmented ||
3653 evsel->bpf_obj == trace->bpf_obj)
3662 static int trace__set_ev_qualifier_filter(struct trace *trace)
3664 if (trace->syscalls.map)
3665 return trace__set_ev_qualifier_bpf_filter(trace);
3666 if (trace->syscalls.events.sys_enter)
3667 return trace__set_ev_qualifier_tp_filter(trace);
3689 static int trace__set_filter_loop_pids(struct trace *trace)
3695 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3698 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3711 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3712 if (!err && trace->filter_pids.map)
3713 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3718 static int trace__set_filter_pids(struct trace *trace)
3727 if (trace->filter_pids.nr > 0) {
3728 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3729 trace->filter_pids.entries);
3730 if (!err && trace->filter_pids.map) {
3731 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3732 trace->filter_pids.entries);
3734 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3735 err = trace__set_filter_loop_pids(trace);
3741 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3743 struct evlist *evlist = trace->evlist;
3748 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3750 trace__handle_event(trace, event, &sample);
3755 static int __trace__flush_events(struct trace *trace)
3757 u64 first = ordered_events__first_time(&trace->oe.data);
3758 u64 flush = trace->oe.last - NSEC_PER_SEC;
3762 return ordered_events__flush_time(&trace->oe.data, flush);
3767 static int trace__flush_events(struct trace *trace)
3769 return !trace->sort_events ? 0 : __trace__flush_events(trace);
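
With --sort-events, incoming events are queued into trace->oe.data and flushed with a one-second grace window: only events older than the last seen timestamp minus NSEC_PER_SEC get delivered, leaving room for out-of-order arrivals. The guard between lines 3758 and 3762 is elided above, so this cutoff sketch is a hedged reconstruction:

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Pick the flush cutoff: everything at or before the returned timestamp is
     * assumed settled. Returns 0 (flush nothing) while the buffered span is
     * still shorter than the 1s window. Assumes last >= NSEC_PER_SEC. */
    static uint64_t flush_cutoff(uint64_t first, uint64_t last)
    {
            uint64_t flush = last - NSEC_PER_SEC;

            return flush > first ? flush : 0;
    }
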
3772 static int trace__deliver_event(struct trace *trace, union perf_event *event)
3776 if (!trace->sort_events)
3777 return __trace__deliver_event(trace, event);
3779 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3783 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
3787 return trace__flush_events(trace);
3793 struct trace *trace = container_of(oe, struct trace, oe.data);
3795 return __trace__deliver_event(trace, event->event);
3813 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3911 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3913 struct evlist *evlist = trace->evlist;
3920 if (trace__expand_filter(trace, evsel)) {
3929 static int trace__run(struct trace *trace, int argc, const char **argv)
3931 struct evlist *evlist = trace->evlist;
3938 trace->live = true;
3940 if (!trace->raw_augmented_syscalls) {
3941 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3944 if (trace->trace_syscalls)
3945 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3948 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3952 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3956 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3960 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3965 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
3967 if (trace->sched &&
3974 * trace -G A -e sched:*switch
3979 * trace -e sched:*switch -G A
3987 * trace -G A -e sched:*switch -G B
3995 if (trace->cgroup)
3996 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3998 err = evlist__create_maps(evlist, &trace->opts.target);
4000 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
4004 err = trace__symbols_init(trace, evlist);
4006 fprintf(trace->output, "Problems initializing symbol libraries!\n");
4010 evlist__config(evlist, &trace->opts, &callchain_param);
4013 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
4015 fprintf(trace->output, "Couldn't run the workload!\n");
4035 err = trace__set_filter_pids(trace);
4039 if (trace->syscalls.map)
4040 trace__init_syscalls_bpf_map(trace);
4042 if (trace->syscalls.prog_array.sys_enter)
4043 trace__init_syscalls_bpf_prog_array_maps(trace);
4045 if (trace->ev_qualifier_ids.nr > 0) {
4046 err = trace__set_ev_qualifier_filter(trace);
4050 if (trace->syscalls.events.sys_exit) {
4052 trace->syscalls.events.sys_exit->filter);
4067 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
4069 err = trace__expand_filters(trace, &evsel);
4076 if (trace->dump.map)
4077 bpf_map__fprintf(trace->dump.map, trace->output);
4079 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4083 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
4089 if (trace->opts.initial_delay) {
4090 usleep(trace->opts.initial_delay * 1000);
4094 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4107 evsel->core.attr.sample_max_stack = trace->max_stack;
4110 before = trace->nr_events;
4121 ++trace->nr_events;
4123 err = trace__deliver_event(trace, event);
4140 if (trace->nr_events == before) {
4149 if (trace__flush_events(trace))
4157 thread__zput(trace->current);
4161 if (trace->sort_events)
4162 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4165 if (trace->summary)
4166 trace__fprintf_thread_summary(trace, trace->output);
4168 if (trace->show_tool_stats) {
4169 fprintf(trace->output, "Stats:\n "
4172 trace->stats.vfs_getname,
4173 trace->stats.proc_getname);
4178 trace__symbols__exit(trace);
4181 cgroup__put(trace->cgroup);
4182 trace->evlist = NULL;
4183 trace->live = false;
4204 fprintf(trace->output, "%s\n", errbuf);
4208 fprintf(trace->output,
4215 fprintf(trace->output, "Not enough memory to run!\n");
4219 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4223 static int trace__replay(struct trace *trace)
4231 .force = trace->force,
4237 trace->tool.sample = trace__process_sample;
4238 trace->tool.mmap = perf_event__process_mmap;
4239 trace->tool.mmap2 = perf_event__process_mmap2;
4240 trace->tool.comm = perf_event__process_comm;
4241 trace->tool.exit = perf_event__process_exit;
4242 trace->tool.fork = perf_event__process_fork;
4243 trace->tool.attr = perf_event__process_attr;
4244 trace->tool.tracing_data = perf_event__process_tracing_data;
4245 trace->tool.build_id = perf_event__process_build_id;
4246 trace->tool.namespaces = perf_event__process_namespaces;
4248 trace->tool.ordered_events = true;
4249 trace->tool.ordering_requires_timestamps = true;
4252 trace->multiple_threads = true;
4254 session = perf_session__new(&data, &trace->tool);
4258 if (trace->opts.target.pid)
4259 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4261 if (trace->opts.target.tid)
4262 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4267 trace->host = &session->machines.host;
4274 trace->syscalls.events.sys_enter = evsel;
4287 trace->syscalls.events.sys_exit = evsel;
4311 else if (trace->summary)
4312 trace__fprintf_thread_summary(trace, trace->output);
4344 struct trace *trace, FILE *fp)
4372 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4378 if (trace->errno_summary && stats->nr_failures) {
4379 const char *arch_name = perf_env__arch(trace->host->env);
4396 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4405 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4414 if (trace->sched)
4419 printed += thread__dump_stats(ttrace, trace, fp);
4436 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4443 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4451 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4461 struct trace *trace = opt->value;
4463 trace->duration_filter = atof(str);
4472 struct trace *trace = opt->value;
4482 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4483 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4485 if (trace->filter_pids.entries == NULL)
4488 trace->filter_pids.entries[0] = getpid();
4490 for (i = 1; i < trace->filter_pids.nr; ++i)
4491 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4499 static int trace__open_output(struct trace *trace, const char *filename)
4511 trace->output = fopen(filename, "w");
4513 return trace->output == NULL ? -errno : 0;
4601 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4609 struct trace *trace = (struct trace *)opt->value;
4622 trace->not_ev_qualifier = true;
4630 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4631 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4667 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4668 if (trace->ev_qualifier == NULL) {
4669 fputs("Not enough memory to parse event qualifier", trace->output);
4673 if (trace__validate_ev_qualifier(trace))
4675 trace->trace_syscalls = true;
4682 .value = &trace->evlist,
4698 struct trace *trace = opt->value;
4700 if (!list_empty(&trace->evlist->core.entries)) {
4702 .value = &trace->evlist,
4706 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4713 struct trace *trace = arg;
4716 if (!strcmp(var, "trace.add_events")) {
4717 trace->perfconfig_events = strdup(value);
4718 if (trace->perfconfig_events == NULL) {
4719 pr_err("Not enough memory for %s\n", "trace.add_events");
4722 } else if (!strcmp(var, "trace.show_timestamp")) {
4723 trace->show_tstamp = perf_config_bool(var, value);
4724 } else if (!strcmp(var, "trace.show_duration")) {
4725 trace->show_duration = perf_config_bool(var, value);
4726 } else if (!strcmp(var, "trace.show_arg_names")) {
4727 trace->show_arg_names = perf_config_bool(var, value);
4728 if (!trace->show_arg_names)
4729 trace->show_zeros = true;
4730 } else if (!strcmp(var, "trace.show_zeros")) {
4732 if (!trace->show_arg_names && !new_show_zeros) {
4733 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4736 trace->show_zeros = new_show_zeros;
4737 } else if (!strcmp(var, "trace.show_prefix")) {
4738 trace->show_string_prefix = perf_config_bool(var, value);
4739 } else if (!strcmp(var, "trace.no_inherit")) {
4740 trace->opts.no_inherit = perf_config_bool(var, value);
4741 } else if (!strcmp(var, "trace.args_alignment")) {
4744 trace->args_alignment = args_alignment;
4745 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4747 trace->libtraceevent_print = true;
4749 trace->libtraceevent_print = false;
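
Everything trace__config() accepts lives under the [trace] section of ~/.perfconfig, in git-config syntax. An illustrative example covering the variables parsed above (values are examples, not defaults):

    [trace]
            add_events = probe:vfs_getname*
            show_timestamp = no
            show_duration = no
            show_arg_names = no     # forces show_zeros on, per 4728-4729
            show_zeros = yes
            show_prefix = yes
            no_inherit = yes
            args_alignment = 70
            tracepoint_beautifiers = libtraceevent
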
4755 static void trace__exit(struct trace *trace)
4759 strlist__delete(trace->ev_qualifier);
4760 free(trace->ev_qualifier_ids.entries);
4761 if (trace->syscalls.table) {
4762 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4763 syscall__exit(&trace->syscalls.table[i]);
4764 free(trace->syscalls.table);
4766 syscalltbl__delete(trace->sctbl);
4767 zfree(&trace->perfconfig_events);
4773 "perf trace [<options>] [<command>]",
4774 "perf trace [<options>] -- <command> [<options>]",
4775 "perf trace record [<options>] [<command>]",
4776 "perf trace record [<options>] -- <command> [<options>]",
4779 struct trace trace = {
4804 OPT_CALLBACK('e', "event", &trace, "event",
4807 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4809 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4811 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4812 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4816 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4817 "trace events on existing process id"),
4818 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4819 "trace events on existing thread id"),
4820 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4822 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4824 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4826 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4828 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4830 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4832 OPT_CALLBACK(0, "duration", &trace, "float",
4838 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4840 OPT_BOOLEAN('T', "time", &trace.full_time,
4842 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4844 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4846 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4848 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4850 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4851 "Trace pagefaults", parse_pagefaults, "maj"),
4852 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4853 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4854 OPT_CALLBACK(0, "call-graph", &trace.opts,
4857 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4859 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4861 OPT_ULONG(0, "max-events", &trace.max_events,
4863 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4866 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4870 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4872 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4876 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4878 OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
4881 OPTS_EVSWITCH(&trace.evswitch),
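
Taken together, the options above support invocations like these (PIDs and event names are illustrative):

    # all syscalls of an existing process, with a summary at the end
    perf trace -S -p 1234

    # only failing openat calls, system-wide, stop after 16 events
    perf trace -a --failure -e openat --max-events 16

    # page faults plus scheduler switches for a fresh workload
    perf trace -F all -e sched:sched_switch -- sleep 1
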
4901 trace.evlist = evlist__new();
4902 trace.sctbl = syscalltbl__new();
4904 if (trace.evlist == NULL || trace.sctbl == NULL) {
4914 * global setting. If it fails we'll get something in 'perf trace -v'
4919 err = perf_config(trace__config, &trace);
4935 * .perfconfig trace.add_events, and filter those out.
4937 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4938 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4939 trace.trace_syscalls = true;
4947 if (trace.perfconfig_events != NULL) {
4951 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4953 parse_events_error__print(&parse_err, trace.perfconfig_events);
4959 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4964 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4966 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4967 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4972 trace.syscalls.events.augmented = evsel;
4974 evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4985 trace.bpf_obj = evsel->bpf_obj;
4992 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
4993 trace.trace_syscalls = true;
5000 * This is more to fix the current .perfconfig trace.add_events
5005 * to trace.add_events in the form of
5006 * trace.bpf_augmented_syscalls, that will be only parsed if we
5009 * .perfconfig trace.add_events is still useful if we want, for
5011 * 'perf trace --config determinism.profile' mode, where for some
5019 if (!trace.trace_syscalls) {
5020 trace__delete_augmented_syscalls(&trace);
5022 trace__set_bpf_map_filtered_pids(&trace);
5023 trace__set_bpf_map_syscalls(&trace);
5024trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmen…
5028 err = bpf__setup_stdout(trace.evlist);
5030 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
5038 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
5039 if (trace.dump.map == NULL) {
5045 if (trace.trace_pgfaults) {
5046 trace.opts.sample_address = true;
5047 trace.opts.sample_time = true;
5050 if (trace.opts.mmap_pages == UINT_MAX)
5053 if (trace.max_stack == UINT_MAX) {
5054 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5059 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5060 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5066 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5071 if (trace.evlist->core.nr_entries > 0) {
5072 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5073 if (evlist__set_syscall_tp_fields(trace.evlist)) {
5079 if (trace.sort_events) {
5080 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5081 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5095 if (trace.syscalls.events.augmented) {
5096 evlist__for_each_entry(trace.evlist, evsel) {
5100 trace.raw_augmented_syscalls = true;
5104 if (trace.syscalls.events.augmented->priv == NULL &&
5106 struct evsel *augmented = trace.syscalls.events.augmented;
5153 if (trace.raw_augmented_syscalls)
5154 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
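
The size computed at 5154 is the raw_syscalls:sys_enter payload: the offset of the tracepoint's id field plus seven longs, the syscall id and its six arguments. A worked check of that arithmetic under LP64 assumptions (the offset value here is illustrative):

    #include <assert.h>

    int main(void)
    {
            int id_offset = 8; /* typical offset of 'id' after the common_* header */
            int size = (6 + 1) * sizeof(long) + id_offset;

            assert(size == 64); /* LP64: 7 * 8 + 8 */
            return 0;
    }
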
5162 return trace__record(&trace, argc-1, &argv[1]);
5165 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5166 trace.summary_only = true;
5169 if (trace.summary_only)
5170 trace.summary = trace.summary_only;
5173 err = trace__open_output(&trace, output_name);
5180 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5184 err = target__validate(&trace.opts.target);
5186 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5187 fprintf(trace.output, "%s", bf);
5191 err = target__parse_uid(&trace.opts.target);
5193 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5194 fprintf(trace.output, "%s", bf);
5198 if (!argc && target__none(&trace.opts.target))
5199 trace.opts.target.system_wide = true;
5202 err = trace__replay(&trace);
5204 err = trace__run(&trace, argc, argv);
5208 fclose(trace.output);
5210 trace__exit(&trace);