Lines Matching +full:set +full:- +full:top

1 // SPDX-License-Identifier: GPL-2.0-only
3 * builtin-top.c
5 * Builtin top command: Display a continuously updated profile of
24 #include "util/bpf-event.h"
39 #include "util/synthetic-events.h"
40 #include "util/top.h"
43 #include <subcmd/parse-options.h>
44 #include "util/parse-events.h"
51 #include "util/parse-branch-options.h"
56 #include "util/ordered-events.h"
95 static void perf_top__update_print_entries(struct perf_top *top) in perf_top__update_print_entries() argument
97 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR; in perf_top__update_print_entries()
105 static void perf_top__resize(struct perf_top *top) in perf_top__resize() argument
107 get_term_dimensions(&top->winsize); in perf_top__resize()
108 perf_top__update_print_entries(top); in perf_top__resize()
111 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) in perf_top__parse_source() argument
118 int err = -1; in perf_top__parse_source()
120 if (!he || !he->ms.sym) in perf_top__parse_source()
121 return -1; in perf_top__parse_source()
123 evsel = hists_to_evsel(he->hists); in perf_top__parse_source()
125 sym = he->ms.sym; in perf_top__parse_source()
126 map = he->ms.map; in perf_top__parse_source()
132 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) { in perf_top__parse_source()
134 "path\n", sym->name); in perf_top__parse_source()
136 return -1; in perf_top__parse_source()
142 if (!symbol__hists(sym, top->evlist->core.nr_entries)) { in perf_top__parse_source()
145 sym->name); in perf_top__parse_source()
150 err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL); in perf_top__parse_source()
152 top->sym_filter_entry = he; in perf_top__parse_source()
155 symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg)); in perf_top__parse_source()
156 pr_err("Couldn't annotate %s: %s\n", sym->name, msg); in perf_top__parse_source()
165 struct symbol *sym = he->ms.sym; in __zero_source_counters()
178 "Map: %" PRIx64 "-%" PRIx64 "\n" in ui__warn_map_erange()
179 "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n" in ui__warn_map_erange()
184 "Please report to linux-kernel@vger.kernel.org\n", in ui__warn_map_erange()
185 ip, dso->long_name, dso__symtab_origin(dso), in ui__warn_map_erange()
186 map__start(map), map__end(map), sym->start, sym->end, in ui__warn_map_erange()
187 sym->binding == STB_GLOBAL ? 'g' : in ui__warn_map_erange()
188 sym->binding == STB_LOCAL ? 'l' : 'w', sym->name, in ui__warn_map_erange()
197 static void perf_top__record_precise_ip(struct perf_top *top, in perf_top__record_precise_ip() argument
201 EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock) in perf_top__record_precise_ip()
204 struct symbol *sym = he->ms.sym; in perf_top__record_precise_ip()
208 (top->sym_filter_entry == NULL || in perf_top__record_precise_ip()
209 top->sym_filter_entry->ms.sym != sym))) in perf_top__record_precise_ip()
223 * This function is now called with he->hists->lock held. in perf_top__record_precise_ip()
226 mutex_unlock(&he->hists->lock); in perf_top__record_precise_ip()
228 if (err == -ERANGE && !map__erange_warned(he->ms.map)) in perf_top__record_precise_ip()
229 ui__warn_map_erange(he->ms.map, sym, ip); in perf_top__record_precise_ip()
230 else if (err == -ENOMEM) { in perf_top__record_precise_ip()
232 sym->name); in perf_top__record_precise_ip()
236 mutex_lock(&he->hists->lock); in perf_top__record_precise_ip()
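The unlock/relock pair above follows the usual shed-the-lock-around-slow-work idiom: the caller enters with he->hists->lock held (see the EXCLUSIVE_LOCKS_REQUIRED annotation), but the ERANGE/ENOMEM warnings can block on the UI, so the lock is dropped before warning and retaken before returning. A generic sketch of the shape, with hypothetical names unrelated to the perf sources:

    #include <pthread.h>

    static pthread_mutex_t hist_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds hist_lock on entry and expects it held on return;
     * slow_warn() may block, so the lock is shed around it. */
    static void warn_without_lock(int err, void (*slow_warn)(int))
    {
            pthread_mutex_unlock(&hist_lock);
            if (err)
                    slow_warn(err);
            pthread_mutex_lock(&hist_lock);
    }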
240 static void perf_top__show_details(struct perf_top *top) in perf_top__show_details() argument
242 struct hist_entry *he = top->sym_filter_entry; in perf_top__show_details()
251 evsel = hists_to_evsel(he->hists); in perf_top__show_details()
253 symbol = he->ms.sym; in perf_top__show_details()
260 if (notes->src == NULL) in perf_top__show_details()
263 printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name); in perf_top__show_details()
264 printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt); in perf_top__show_details()
266 more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts); in perf_top__show_details()
268 if (top->evlist->enabled) { in perf_top__show_details()
269 if (top->zero) in perf_top__show_details()
270 symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx); in perf_top__show_details()
272 symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx); in perf_top__show_details()
282 struct evlist *evlist = t->evlist; in perf_top__resort_hists()
294 if (evlist->enabled) { in perf_top__resort_hists()
295 if (t->zero) { in perf_top__resort_hists()
298 hists__decay_entries(hists, t->hide_user_symbols, in perf_top__resort_hists()
299 t->hide_kernel_symbols); in perf_top__resort_hists()
305 /* Non-group events are considered as leaders */ in perf_top__resort_hists()
319 static void perf_top__print_sym_table(struct perf_top *top) in perf_top__print_sym_table() argument
323 const int win_width = top->winsize.ws_col - 1; in perf_top__print_sym_table()
324 struct evsel *evsel = top->sym_evsel; in perf_top__print_sym_table()
329 perf_top__header_snprintf(top, bf, sizeof(bf)); in perf_top__print_sym_table()
332 printf("%-*.*s\n", win_width, win_width, graph_dotted_line); in perf_top__print_sym_table()
334 if (!top->record_opts.overwrite && in perf_top__print_sym_table()
335 (top->evlist->stats.nr_lost_warned != in perf_top__print_sym_table()
336 top->evlist->stats.nr_events[PERF_RECORD_LOST])) { in perf_top__print_sym_table()
337 top->evlist->stats.nr_lost_warned = in perf_top__print_sym_table()
338 top->evlist->stats.nr_events[PERF_RECORD_LOST]; in perf_top__print_sym_table()
341 top->evlist->stats.nr_lost_warned); in perf_top__print_sym_table()
345 if (top->sym_filter_entry) { in perf_top__print_sym_table()
346 perf_top__show_details(top); in perf_top__print_sym_table()
350 perf_top__resort_hists(top); in perf_top__print_sym_table()
352 hists__output_recalc_col_len(hists, top->print_entries - printed); in perf_top__print_sym_table()
354 hists__fprintf(hists, false, top->print_entries - printed, win_width, in perf_top__print_sym_table()
355 top->min_percent, stdout, !symbol_conf.use_callchain); in perf_top__print_sym_table()
393 static void perf_top__prompt_symbol(struct perf_top *top, const char *msg) in perf_top__prompt_symbol() argument
396 struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL; in perf_top__prompt_symbol()
397 struct hists *hists = evsel__hists(top->sym_evsel); in perf_top__prompt_symbol()
404 top->sym_filter_entry = NULL; in perf_top__prompt_symbol()
415 next = rb_first_cached(&hists->entries); in perf_top__prompt_symbol()
418 if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) { in perf_top__prompt_symbol()
422 next = rb_next(&n->rb_node); in perf_top__prompt_symbol()
429 perf_top__parse_source(top, found); in perf_top__prompt_symbol()
435 static void perf_top__print_mapped_keys(struct perf_top *top) in perf_top__print_mapped_keys() argument
439 if (top->sym_filter_entry) { in perf_top__print_mapped_keys()
440 struct symbol *sym = top->sym_filter_entry->ms.sym; in perf_top__print_mapped_keys()
441 name = sym->name; in perf_top__print_mapped_keys()
445 fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs); in perf_top__print_mapped_keys()
446 fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries); in perf_top__print_mapped_keys()
448 if (top->evlist->core.nr_entries > 1) in perf_top__print_mapped_keys()
449 fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel)); in perf_top__print_mapped_keys()
451 fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter); in perf_top__print_mapped_keys()
453 fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt); in perf_top__print_mapped_keys()
459 top->hide_kernel_symbols ? "yes" : "no"); in perf_top__print_mapped_keys()
462 top->hide_user_symbols ? "yes" : "no"); in perf_top__print_mapped_keys()
463 fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0); in perf_top__print_mapped_keys()
467 static int perf_top__key_mapped(struct perf_top *top, int c) in perf_top__key_mapped() argument
483 return top->evlist->core.nr_entries > 1 ? 1 : 0; in perf_top__key_mapped()
491 static bool perf_top__handle_keypress(struct perf_top *top, int c) in perf_top__handle_keypress() argument
495 if (!perf_top__key_mapped(top, c)) { in perf_top__handle_keypress()
499 perf_top__print_mapped_keys(top); in perf_top__handle_keypress()
505 poll(&stdin_poll, 1, -1); in perf_top__handle_keypress()
509 if (!perf_top__key_mapped(top, c)) in perf_top__handle_keypress()
515 prompt_integer(&top->delay_secs, "Enter display delay"); in perf_top__handle_keypress()
516 if (top->delay_secs < 1) in perf_top__handle_keypress()
517 top->delay_secs = 1; in perf_top__handle_keypress()
520 prompt_integer(&top->print_entries, "Enter display entries (lines)"); in perf_top__handle_keypress()
521 if (top->print_entries == 0) { in perf_top__handle_keypress()
522 perf_top__resize(top); in perf_top__handle_keypress()
529 if (top->evlist->core.nr_entries > 1) { in perf_top__handle_keypress()
535 evlist__for_each_entry(top->evlist, top->sym_evsel) in perf_top__handle_keypress()
536 fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel)); in perf_top__handle_keypress()
540 if (counter >= top->evlist->core.nr_entries) { in perf_top__handle_keypress()
541 top->sym_evsel = evlist__first(top->evlist); in perf_top__handle_keypress()
542 fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel)); in perf_top__handle_keypress()
546 evlist__for_each_entry(top->evlist, top->sym_evsel) in perf_top__handle_keypress()
547 if (top->sym_evsel->core.idx == counter) in perf_top__handle_keypress()
550 top->sym_evsel = evlist__first(top->evlist); in perf_top__handle_keypress()
553 prompt_integer(&top->count_filter, "Enter display event count filter"); in perf_top__handle_keypress()
556 prompt_percent(&top->annotation_opts.min_pcnt, in perf_top__handle_keypress()
560 top->hide_kernel_symbols = !top->hide_kernel_symbols; in perf_top__handle_keypress()
565 if (top->dump_symtab) in perf_top__handle_keypress()
566 perf_session__fprintf_dsos(top->session, stderr); in perf_top__handle_keypress()
570 perf_top__prompt_symbol(top, "Enter details symbol"); in perf_top__handle_keypress()
573 if (!top->sym_filter_entry) in perf_top__handle_keypress()
576 struct hist_entry *syme = top->sym_filter_entry; in perf_top__handle_keypress()
578 top->sym_filter_entry = NULL; in perf_top__handle_keypress()
583 top->hide_user_symbols = !top->hide_user_symbols; in perf_top__handle_keypress()
586 top->zero = !top->zero; in perf_top__handle_keypress()
599 if (t->evlist->selected != NULL) in perf_top__sort_new_samples()
600 t->sym_evsel = t->evlist->selected; in perf_top__sort_new_samples()
604 if (t->lost || t->drop) in perf_top__sort_new_samples()
605 pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n"); in perf_top__sort_new_samples()
617 struct perf_top *top = arg; in display_thread_tui() local
618 const char *help = "For a higher level overview, try: perf top --sort comm,dso"; in display_thread_tui()
621 .arg = top, in display_thread_tui()
622 .refresh = top->delay_secs, in display_thread_tui()
633 prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0); in display_thread_tui()
636 perf_top__sort_new_samples(top); in display_thread_tui()
641 * via --uid. in display_thread_tui()
643 evlist__for_each_entry(top->evlist, pos) { in display_thread_tui()
645 hists->uid_filter_str = top->record_opts.target.uid_str; in display_thread_tui()
648 ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent, in display_thread_tui()
649 &top->session->header.env, !top->record_opts.overwrite, in display_thread_tui()
650 &top->annotation_opts); in display_thread_tui()
652 top->zero = true; in display_thread_tui()
678 struct perf_top *top = arg; in display_thread() local
688 prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0); in display_thread()
693 delay_msecs = top->delay_secs * MSEC_PER_SEC; in display_thread()
701 perf_top__print_sym_table(top); in display_thread()
709 case -1: in display_thread()
717 if (perf_top__handle_keypress(top, c)) in display_thread()
730 EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock) in hist_iter__top_callback()
732 struct perf_top *top = arg; in hist_iter__top_callback() local
733 struct evsel *evsel = iter->evsel; in hist_iter__top_callback()
736 perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr); in hist_iter__top_callback()
738 hist__account_cycles(iter->sample->branch_stack, al, iter->sample, in hist_iter__top_callback()
739 !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY), in hist_iter__top_callback()
750 struct perf_top *top = container_of(tool, struct perf_top, tool); in perf_event__process_sample() local
759 if (!intlist__has_entry(seen, sample->pid)) { in perf_event__process_sample()
761 sample->pid); in perf_event__process_sample()
762 intlist__add(seen, sample->pid); in perf_event__process_sample()
769 top->session->evlist->stats.nr_unprocessable_samples++); in perf_event__process_sample()
773 if (event->header.misc & PERF_RECORD_MISC_EXACT_IP) in perf_event__process_sample()
774 top->exact_samples++; in perf_event__process_sample()
780 if (top->stitch_lbr) in perf_event__process_sample()
783 if (!machine->kptr_restrict_warned && in perf_event__process_sample()
786 if (!evlist__exclude_kernel(top->session->evlist)) { in perf_event__process_sample()
796 machine->kptr_restrict_warned = true; in perf_event__process_sample()
808 * We may never get here, for instance, if we use -K/ in perf_event__process_sample()
809 * --hide-kernel-symbols, even if the user specifies an in perf_event__process_sample()
810 * invalid --vmlinux ;-) in perf_event__process_sample()
812 if (!machine->kptr_restrict_warned && !top->vmlinux_warned && in perf_event__process_sample()
827 top->vmlinux_warned = true; in perf_event__process_sample()
831 if (al.sym == NULL || !al.sym->idle) { in perf_event__process_sample()
844 mutex_lock(&hists->lock); in perf_event__process_sample()
846 if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0) in perf_event__process_sample()
849 mutex_unlock(&hists->lock); in perf_event__process_sample()
857 perf_top__process_lost(struct perf_top *top, union perf_event *event, in perf_top__process_lost() argument
860 top->lost += event->lost.lost; in perf_top__process_lost()
861 top->lost_total += event->lost.lost; in perf_top__process_lost()
862 evsel->evlist->stats.total_lost += event->lost.lost; in perf_top__process_lost()
866 perf_top__process_lost_samples(struct perf_top *top, in perf_top__process_lost_samples() argument
870 top->lost += event->lost_samples.lost; in perf_top__process_lost_samples()
871 top->lost_total += event->lost_samples.lost; in perf_top__process_lost_samples()
872 evsel->evlist->stats.total_lost_samples += event->lost_samples.lost; in perf_top__process_lost_samples()
877 static void perf_top__mmap_read_idx(struct perf_top *top, int idx) in perf_top__mmap_read_idx() argument
879 struct record_opts *opts = &top->record_opts; in perf_top__mmap_read_idx()
880 struct evlist *evlist = top->evlist; in perf_top__mmap_read_idx()
884 md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx]; in perf_top__mmap_read_idx()
885 if (perf_mmap__read_init(&md->core) < 0) in perf_top__mmap_read_idx()
888 while ((event = perf_mmap__read_event(&md->core)) != NULL) { in perf_top__mmap_read_idx()
892 if (ret && ret != -1) in perf_top__mmap_read_idx()
895 ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0, NULL); in perf_top__mmap_read_idx()
899 perf_mmap__consume(&md->core); in perf_top__mmap_read_idx()
901 if (top->qe.rotate) { in perf_top__mmap_read_idx()
902 mutex_lock(&top->qe.mutex); in perf_top__mmap_read_idx()
903 top->qe.rotate = false; in perf_top__mmap_read_idx()
904 cond_signal(&top->qe.cond); in perf_top__mmap_read_idx()
905 mutex_unlock(&top->qe.mutex); in perf_top__mmap_read_idx()
909 perf_mmap__read_done(&md->core); in perf_top__mmap_read_idx()
912 static void perf_top__mmap_read(struct perf_top *top) in perf_top__mmap_read() argument
914 bool overwrite = top->record_opts.overwrite; in perf_top__mmap_read()
915 struct evlist *evlist = top->evlist; in perf_top__mmap_read()
921 for (i = 0; i < top->evlist->core.nr_mmaps; i++) in perf_top__mmap_read()
922 perf_top__mmap_read_idx(top, i); in perf_top__mmap_read()
931 * Check per-event overwrite term.
932 * perf top should use a consistent term for all events.
933 * - All events don't have per-event term
934 * E.g. "cpu/cpu-cycles/,cpu/instructions/"
936 * - All events have same per-event term
937 * E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
938 * Using the per-event setting to replace the opts->overwrite if
940 * - Events have different per-event term
941 * E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
942 * Return -1
943 * - Some of the events set a per-event term, but some do not.
944 * E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
945 * Return -1
947 static int perf_top__overwrite_check(struct perf_top *top) in perf_top__overwrite_check() argument
949 struct record_opts *opts = &top->record_opts; in perf_top__overwrite_check()
950 struct evlist *evlist = top->evlist; in perf_top__overwrite_check()
954 int set, overwrite = -1; in perf_top__overwrite_check() local
957 set = -1; in perf_top__overwrite_check()
958 config_terms = &evsel->config_terms; in perf_top__overwrite_check()
960 if (term->type == EVSEL__CONFIG_TERM_OVERWRITE) in perf_top__overwrite_check()
961 set = term->val.overwrite ? 1 : 0; in perf_top__overwrite_check()
965 if ((overwrite < 0) && (set < 0)) in perf_top__overwrite_check()
969 if ((overwrite >= 0) && (set >= 0) && (overwrite != set)) in perf_top__overwrite_check()
970 return -1; in perf_top__overwrite_check()
973 if ((overwrite >= 0) && (set < 0)) in perf_top__overwrite_check()
974 return -1; in perf_top__overwrite_check()
977 if ((overwrite < 0) && (set >= 0)) { in perf_top__overwrite_check()
978 /* if it's the first event, set overwrite */ in perf_top__overwrite_check()
980 overwrite = set; in perf_top__overwrite_check()
982 return -1; in perf_top__overwrite_check()
986 if ((overwrite >= 0) && (opts->overwrite != overwrite)) in perf_top__overwrite_check()
987 opts->overwrite = overwrite; in perf_top__overwrite_check()
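The rules in the comment block above amount to a three-state fold over the event list: -1 means no term seen yet, and any disagreement between an event's explicit term and the accumulated state fails the check. A standalone sketch of that fold, with hypothetical names and a tiny driver covering the four documented cases (an illustration mirroring the checks visible in perf_top__overwrite_check(), not the perf code path itself):

    #include <stdio.h>

    /* terms[i]: -1 = event i has no per-event term, 0/1 = explicit
     * no-overwrite/overwrite term. Returns 0 and stores the agreed
     * term (or -1 if no event set one) in *result; returns -1 when
     * the terms are inconsistent. */
    static int fold_overwrite_terms(const int *terms, int nr, int *result)
    {
            int overwrite = -1, i;

            for (i = 0; i < nr; i++) {
                    int set = terms[i];

                    if (overwrite < 0 && set < 0)
                            continue;            /* nothing decided yet */
                    if (overwrite >= 0 && set >= 0 && overwrite != set)
                            return -1;           /* conflicting terms */
                    if (overwrite >= 0 && set < 0)
                            return -1;           /* some set, some not */
                    if (overwrite < 0 && set >= 0) {
                            if (i != 0)
                                    return -1;   /* earlier events had no term */
                            overwrite = set;     /* first event decides */
                    }
            }
            *result = overwrite;
            return 0;
    }

    int main(void)
    {
            int none[]     = { -1, -1 };  /* no terms at all       -> ok */
            int same[]     = {  0,  0 };  /* both no-overwrite     -> ok */
            int conflict[] = {  1,  0 };  /* overwrite vs no-over. -> -1 */
            int partial[]  = { -1,  0 };  /* only one event set it -> -1 */
            int r;

            printf("%d %d %d %d\n",
                   fold_overwrite_terms(none, 2, &r),
                   fold_overwrite_terms(same, 2, &r),
                   fold_overwrite_terms(conflict, 2, &r),
                   fold_overwrite_terms(partial, 2, &r));  /* 0 0 -1 -1 */
            return 0;
    }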
992 static int perf_top_overwrite_fallback(struct perf_top *top, in perf_top_overwrite_fallback() argument
995 struct record_opts *opts = &top->record_opts; in perf_top_overwrite_fallback()
996 struct evlist *evlist = top->evlist; in perf_top_overwrite_fallback()
999 if (!opts->overwrite) in perf_top_overwrite_fallback()
1007 counter->core.attr.write_backward = false; in perf_top_overwrite_fallback()
1008 opts->overwrite = false; in perf_top_overwrite_fallback()
1009 pr_debug2("fall back to non-overwrite mode\n"); in perf_top_overwrite_fallback()
1013 static int perf_top__start_counters(struct perf_top *top) in perf_top__start_counters() argument
1017 struct evlist *evlist = top->evlist; in perf_top__start_counters()
1018 struct record_opts *opts = &top->record_opts; in perf_top__start_counters()
1020 if (perf_top__overwrite_check(top)) { in perf_top__start_counters()
1021 ui__error("perf top only support consistent per-event " in perf_top__start_counters()
1030 if (evsel__open(counter, top->evlist->core.user_requested_cpus, in perf_top__start_counters()
1031 top->evlist->core.threads) < 0) { in perf_top__start_counters()
1035 * Because perf top is the only tool which has in perf_top__start_counters()
1037 * both overwrite and non-overwrite mode, and in perf_top__start_counters()
1044 perf_top_overwrite_fallback(top, counter)) in perf_top__start_counters()
1053 evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg)); in perf_top__start_counters()
1059 if (evlist__mmap(evlist, opts->mmap_pages) < 0) { in perf_top__start_counters()
1068 return -1; in perf_top__start_counters()
1073 if (callchain->mode != CHAIN_NONE) { in callchain_param__setup_sample_type()
1076 return -EINVAL; in callchain_param__setup_sample_type()
1083 static struct ordered_events *rotate_queues(struct perf_top *top) in rotate_queues() argument
1085 struct ordered_events *in = top->qe.in; in rotate_queues()
1087 if (top->qe.in == &top->qe.data[1]) in rotate_queues()
1088 top->qe.in = &top->qe.data[0]; in rotate_queues()
1090 top->qe.in = &top->qe.data[1]; in rotate_queues()
1097 struct perf_top *top = arg; in process_thread() local
1100 struct ordered_events *out, *in = top->qe.in; in process_thread()
1102 if (!in->nr_events) { in process_thread()
1107 out = rotate_queues(top); in process_thread()
1109 mutex_lock(&top->qe.mutex); in process_thread()
1110 top->qe.rotate = true; in process_thread()
1111 cond_wait(&top->qe.cond, &top->qe.mutex); in process_thread()
1112 mutex_unlock(&top->qe.mutex); in process_thread()
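Taken together, rotate_queues() and process_thread() form a two-queue ping-pong with the reader thread: perf_top__mmap_read_idx() queues events into top->qe.in while this thread sorts and flushes the other queue, and the qe.rotate flag plus condition variable hand ownership back (the acknowledging half is visible in perf_top__mmap_read_idx() above). A minimal pthread sketch of the same handshake, with hypothetical names independent of the perf sources:

    #include <pthread.h>
    #include <stdbool.h>

    struct pingpong {
            int data[2];           /* stand-ins for the two event queues */
            int *in;               /* queue the reader currently fills */
            bool rotate;           /* processor asked for a swap */
            pthread_mutex_t mutex;
            pthread_cond_t cond;
    };

    /* Reader side: after draining the ring buffer, acknowledge a swap. */
    static void reader_checkpoint(struct pingpong *pp)
    {
            if (pp->rotate) {
                    pthread_mutex_lock(&pp->mutex);
                    pp->rotate = false;
                    pthread_cond_signal(&pp->cond);  /* swap acknowledged */
                    pthread_mutex_unlock(&pp->mutex);
            }
    }

    /* Processor side: swap queues, then wait for the reader's ack
     * before flushing the queue it just took over. */
    static int *take_full_queue(struct pingpong *pp)
    {
            int *out = pp->in;

            pp->in = (pp->in == &pp->data[1]) ? &pp->data[0] : &pp->data[1];

            pthread_mutex_lock(&pp->mutex);
            pp->rotate = true;
            pthread_cond_wait(&pp->cond, &pp->mutex);
            pthread_mutex_unlock(&pp->mutex);
            return out;            /* reader now fills the other queue */
    }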
1122 * Allow only 'top->delay_secs' seconds behind samples.
1124 static int should_drop(struct ordered_event *qevent, struct perf_top *top) in should_drop() argument
1126 union perf_event *event = qevent->event; in should_drop()
1129 if (event->header.type != PERF_RECORD_SAMPLE) in should_drop()
1132 delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC; in should_drop()
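The listing stops before should_drop()'s final comparison, but the comment above pins down the rule: keep everything that is not a sample, and drop a sample whose timestamp, even with delay_secs of slack added, still trails the newest timestamp read from the ring buffer (presumably last_timestamp, the value passed to ordered_events__queue() in perf_top__mmap_read_idx(); the listing omits the comparison itself). A restatement of just that predicate under those assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Sketch of the drop rule: a sample is too far behind when even
     * delay_secs of grace leaves it older than the newest event seen. */
    static bool too_far_behind(uint64_t sample_ns, uint64_t newest_ns,
                               unsigned int delay_secs)
    {
            return sample_ns + delay_secs * NSEC_PER_SEC < newest_ns;
    }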
1139 struct perf_top *top = qe->data; in deliver_event() local
1140 struct evlist *evlist = top->evlist; in deliver_event()
1141 struct perf_session *session = top->session; in deliver_event()
1142 union perf_event *event = qevent->event; in deliver_event()
1146 int ret = -1; in deliver_event()
1148 if (should_drop(qevent, top)) { in deliver_event()
1149 top->drop++; in deliver_event()
1150 top->drop_total++; in deliver_event()
1160 evsel = evlist__id2evsel(session->evlist, sample.id); in deliver_event()
1163 if (event->header.type == PERF_RECORD_SAMPLE) { in deliver_event()
1164 if (evswitch__discard(&top->evswitch, evsel)) in deliver_event()
1166 ++top->samples; in deliver_event()
1171 ++top->us_samples; in deliver_event()
1172 if (top->hide_user_symbols) in deliver_event()
1174 machine = &session->machines.host; in deliver_event()
1177 ++top->kernel_samples; in deliver_event()
1178 if (top->hide_kernel_symbols) in deliver_event()
1180 machine = &session->machines.host; in deliver_event()
1183 ++top->guest_kernel_samples; in deliver_event()
1188 ++top->guest_us_samples; in deliver_event()
1195 if (event->header.type == PERF_RECORD_SAMPLE) in deliver_event()
1197 machine = &session->machines.host; in deliver_event()
1201 if (event->header.type == PERF_RECORD_SAMPLE) { in deliver_event()
1202 perf_event__process_sample(&top->tool, event, evsel, in deliver_event()
1204 } else if (event->header.type == PERF_RECORD_LOST) { in deliver_event()
1205 perf_top__process_lost(top, event, evsel); in deliver_event()
1206 } else if (event->header.type == PERF_RECORD_LOST_SAMPLES) { in deliver_event()
1207 perf_top__process_lost_samples(top, event, evsel); in deliver_event()
1208 } else if (event->header.type < PERF_RECORD_MAX) { in deliver_event()
1209 events_stats__inc(&session->evlist->stats, event->header.type); in deliver_event()
1212 ++session->evlist->stats.nr_unknown_events; in deliver_event()
1219 static void init_process_thread(struct perf_top *top) in init_process_thread() argument
1221 ordered_events__init(&top->qe.data[0], deliver_event, top); in init_process_thread()
1222 ordered_events__init(&top->qe.data[1], deliver_event, top); in init_process_thread()
1223 ordered_events__set_copy_on_queue(&top->qe.data[0], true); in init_process_thread()
1224 ordered_events__set_copy_on_queue(&top->qe.data[1], true); in init_process_thread()
1225 top->qe.in = &top->qe.data[0]; in init_process_thread()
1226 mutex_init(&top->qe.mutex); in init_process_thread()
1227 cond_init(&top->qe.cond); in init_process_thread()
1230 static void exit_process_thread(struct perf_top *top) in exit_process_thread() argument
1232 ordered_events__free(&top->qe.data[0]); in exit_process_thread()
1233 ordered_events__free(&top->qe.data[1]); in exit_process_thread()
1234 mutex_destroy(&top->qe.mutex); in exit_process_thread()
1235 cond_destroy(&top->qe.cond); in exit_process_thread()
1238 static int __cmd_top(struct perf_top *top) in __cmd_top() argument
1240 struct record_opts *opts = &top->record_opts; in __cmd_top()
1244 if (!top->annotation_opts.objdump_path) { in __cmd_top()
1245 ret = perf_env__lookup_objdump(&top->session->header.env, in __cmd_top()
1246 &top->annotation_opts.objdump_path); in __cmd_top()
1255 if (perf_session__register_idle_thread(top->session) < 0) in __cmd_top()
1258 if (top->nr_threads_synthesize > 1) in __cmd_top()
1261 init_process_thread(top); in __cmd_top()
1263 if (opts->record_namespaces) in __cmd_top()
1264 top->tool.namespace_events = true; in __cmd_top()
1265 if (opts->record_cgroup) { in __cmd_top()
1267 top->tool.cgroup_events = true; in __cmd_top()
1270 return -1; in __cmd_top()
1274 ret = perf_event__synthesize_bpf_events(top->session, perf_event__process, in __cmd_top()
1275 &top->session->machines.host, in __cmd_top()
1276 &top->record_opts); in __cmd_top()
1278 …pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n… in __cmd_top()
1280 ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process, in __cmd_top()
1281 &top->session->machines.host); in __cmd_top()
1285 machine__synthesize_threads(&top->session->machines.host, &opts->target, in __cmd_top()
1286 top->evlist->core.threads, true, false, in __cmd_top()
1287 top->nr_threads_synthesize); in __cmd_top()
1295 const char *err = str_error_r(-ret, errbuf, sizeof(errbuf)); in __cmd_top()
1302 ret = perf_top__start_counters(top); in __cmd_top()
1306 top->session->evlist = top->evlist; in __cmd_top()
1307 perf_session__set_id_hdr_size(top->session); in __cmd_top()
1311 * group members) have enable_on_exec=1 set, so don't spoil it by in __cmd_top()
1314 * XXX 'top' still doesn't start workloads like record, trace, but should, in __cmd_top()
1317 if (!target__none(&opts->target)) in __cmd_top()
1318 evlist__enable(top->evlist); in __cmd_top()
1320 ret = -1; in __cmd_top()
1321 if (pthread_create(&thread_process, NULL, process_thread, top)) { in __cmd_top()
1327 display_thread), top)) { in __cmd_top()
1332 if (top->realtime_prio) { in __cmd_top()
1335 param.sched_priority = top->realtime_prio; in __cmd_top()
1337 ui__error("Could not set realtime priority.\n"); in __cmd_top()
1342 /* Wait for a minimal set of events before starting the snapshot */ in __cmd_top()
1343 evlist__poll(top->evlist, 100); in __cmd_top()
1345 perf_top__mmap_read(top); in __cmd_top()
1348 u64 hits = top->samples; in __cmd_top()
1350 perf_top__mmap_read(top); in __cmd_top()
1352 if (opts->overwrite || (hits == top->samples)) in __cmd_top()
1353 ret = evlist__poll(top->evlist, 100); in __cmd_top()
1356 perf_top__resize(top); in __cmd_top()
1365 cond_signal(&top->qe.cond); in __cmd_top()
1368 exit_process_thread(top); in __cmd_top()
1382 struct callchain_param *callchain = opt->value; in parse_callchain_opt()
1384 callchain->enabled = !unset; in parse_callchain_opt()
1385 callchain->record_mode = CALLCHAIN_FP; in parse_callchain_opt()
1388 * --no-call-graph in parse_callchain_opt()
1392 callchain->record_mode = CALLCHAIN_NONE; in parse_callchain_opt()
1401 if (!strcmp(var, "top.call-graph")) { in perf_top_config()
1402 var = "call-graph.record-mode"; in perf_top_config()
1405 if (!strcmp(var, "top.children")) { in perf_top_config()
1417 struct perf_top *top = opt->value; in parse_percent_limit() local
1419 top->min_percent = strtof(arg, NULL); in parse_percent_limit()
1429 struct perf_top top = { in cmd_top() local
1443 * separate evlist with a dummy event, i.e. a non-overwrite in cmd_top()
1445 * stays in overwrite mode. -acme in cmd_top()
1455 .evlistp = &top.evlist, in cmd_top()
1458 struct record_opts *opts = &top.record_opts; in cmd_top()
1459 struct target *target = &opts->target; in cmd_top()
1465 OPT_U64('c', "count", &opts->user_interval, "event period to sample"), in cmd_top()
1466 OPT_STRING('p', "pid", &target->pid, "pid", in cmd_top()
1468 OPT_STRING('t', "tid", &target->tid, "tid", in cmd_top()
1470 OPT_BOOLEAN('a', "all-cpus", &target->system_wide, in cmd_top()
1471 "system-wide collection from all CPUs"), in cmd_top()
1472 OPT_STRING('C', "cpu", &target->cpu_list, "cpu", in cmd_top()
1476 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux, in cmd_top()
1480 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, in cmd_top()
1482 OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages", in cmd_top()
1484 OPT_INTEGER('r', "realtime", &top.realtime_prio, in cmd_top()
1486 OPT_INTEGER('d', "delay", &top.delay_secs, in cmd_top()
1488 OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab, in cmd_top()
1490 OPT_INTEGER('f', "count-filter", &top.count_filter, in cmd_top()
1492 OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit, in cmd_top()
1494 OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name", in cmd_top()
1496 OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"), in cmd_top()
1497 OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'", in cmd_top()
1500 OPT_INTEGER('E', "entries", &top.print_entries, in cmd_top()
1502 OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols, in cmd_top()
1505 OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"), in cmd_top()
1507 OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"), in cmd_top()
1515 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, in cmd_top()
1518 NULL, "enables call-graph recording and display", in cmd_top()
1520 OPT_CALLBACK(0, "call-graph", &callchain_param, in cmd_top()
1525 OPT_INTEGER(0, "max-stack", &top.max_stack, in cmd_top()
1526 "Set the maximum stack depth when parsing the callchain. " in cmd_top()
1528 OPT_CALLBACK(0, "ignore-callees", NULL, "regex", in cmd_top()
1531 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, in cmd_top()
1539 OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src, in cmd_top()
1541 OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw, in cmd_top()
1543 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, in cmd_top()
1545 OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"), in cmd_top()
1550 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", in cmd_top()
1551 "Specify disassembler style (e.g. -M intel for intel syntax)"), in cmd_top()
1552 OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix", in cmd_top()
1553 "Add prefix to source file path names in programs (with --prefix-strip)"), in cmd_top()
1554 OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N", in cmd_top()
1555 "Strip first N entries of source file path name in programs (with --prefix)"), in cmd_top()
1556 OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"), in cmd_top()
1557 OPT_CALLBACK(0, "percent-limit", &top, "percent", in cmd_top()
1561 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, in cmd_top()
1564 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, in cmd_top()
1566 OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack, in cmd_top()
1569 OPT_CALLBACK('j', "branch-filter", &opts->branch_stack, in cmd_top()
1572 OPT_BOOLEAN(0, "branch-history", &branch_call_mode, in cmd_top()
1574 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace, in cmd_top()
1578 OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite, in cmd_top()
1581 OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize, in cmd_top()
1583 OPT_CALLBACK('G', "cgroup", &top.evlist, "name", in cmd_top()
1585 OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces, in cmd_top()
1587 OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup, in cmd_top()
1589 OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx, in cmd_top()
1593 OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr, in cmd_top()
1596 OPT_CALLBACK(0, "pfm-events", &top.evlist, "event", in cmd_top()
1600 OPTS_EVSWITCH(&top.evswitch), in cmd_top()
1604 "perf top [<options>]", in cmd_top()
1612 annotation_options__init(&top.annotation_opts); in cmd_top()
1614 top.annotation_opts.min_pcnt = 5; in cmd_top()
1615 top.annotation_opts.context = 4; in cmd_top()
1617 top.evlist = evlist__new(); in cmd_top()
1618 if (top.evlist == NULL) in cmd_top()
1619 return -ENOMEM; in cmd_top()
1621 status = perf_config(perf_top_config, &top); in cmd_top()
1638 top.evlist->env = &perf_env; in cmd_top()
1645 top.annotation_opts.disassembler_style = strdup(disassembler_style); in cmd_top()
1646 if (!top.annotation_opts.disassembler_style) in cmd_top()
1647 return -ENOMEM; in cmd_top()
1650 top.annotation_opts.objdump_path = strdup(objdump_path); in cmd_top()
1651 if (!top.annotation_opts.objdump_path) in cmd_top()
1652 return -ENOMEM; in cmd_top()
1657 return -ENOMEM; in cmd_top()
1664 if (annotate_check_args(&top.annotation_opts) < 0) in cmd_top()
1667 if (!top.evlist->core.nr_entries) { in cmd_top()
1669 int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu"); in cmd_top()
1675 status = evswitch__init(&top.evswitch, top.evlist, stderr); in cmd_top()
1685 pr_err("Error: --hierarchy and --fields options cannot be used together\n"); in cmd_top()
1692 if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) { in cmd_top()
1693 pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n"); in cmd_top()
1697 if (nr_cgroups > 0 && opts->record_cgroup) { in cmd_top()
1698 pr_err("--cgroup and --all-cgroups cannot be used together\n"); in cmd_top()
1703 if (!opts->branch_stack) in cmd_top()
1704 opts->branch_stack = PERF_SAMPLE_BRANCH_ANY; in cmd_top()
1716 if (opts->branch_stack && callchain_param.enabled) in cmd_top()
1723 if (top.use_stdio) in cmd_top()
1726 else if (top.use_tui) in cmd_top()
1732 if (setup_sorting(top.evlist) < 0) { in cmd_top()
1754 status = -saved_errno; in cmd_top()
1759 target->system_wide = true; in cmd_top()
1761 if (evlist__create_maps(top.evlist, target) < 0) { in cmd_top()
1764 status = -errno; in cmd_top()
1768 if (top.delay_secs < 1) in cmd_top()
1769 top.delay_secs = 1; in cmd_top()
1772 status = -EINVAL; in cmd_top()
1776 top.sym_evsel = evlist__first(top.evlist); in cmd_top()
1790 annotation_config__init(&top.annotation_opts); in cmd_top()
1799 get_term_dimensions(&top.winsize); in cmd_top()
1800 if (top.print_entries == 0) { in cmd_top()
1801 perf_top__update_print_entries(&top); in cmd_top()
1805 top.session = perf_session__new(NULL, NULL); in cmd_top()
1806 if (IS_ERR(top.session)) { in cmd_top()
1807 status = PTR_ERR(top.session); in cmd_top()
1808 top.session = NULL; in cmd_top()
1813 if (!top.record_opts.no_bpf_event) { in cmd_top()
1814 top.sb_evlist = evlist__new(); in cmd_top()
1816 if (top.sb_evlist == NULL) { in cmd_top()
1818 status = -EINVAL; in cmd_top()
1822 if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) { in cmd_top()
1824 status = -EINVAL; in cmd_top()
1830 if (evlist__start_sb_thread(top.sb_evlist, target)) { in cmd_top()
1832 opts->no_bpf_event = true; in cmd_top()
1835 status = __cmd_top(&top); in cmd_top()
1837 if (!opts->no_bpf_event) in cmd_top()
1838 evlist__stop_sb_thread(top.sb_evlist); in cmd_top()
1841 evlist__delete(top.evlist); in cmd_top()
1842 perf_session__delete(top.session); in cmd_top()
1843 annotation_options__exit(&top.annotation_opts); in cmd_top()