Lines matching "monitor-interval-ms" (tokens: monitor, interval, ms) in builtin-stat.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * builtin-stat.c
16 1708.761321 task-clock # 11.037 CPUs utilized
17 41,190 context-switches # 0.024 M/sec
18 6,735 CPU-migrations # 0.004 M/sec
19 17,318 page-faults # 0.010 M/sec
21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
26 6,388,934 branch-misses # 1.32% of all branches
31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
46 #include <subcmd/parse-options.h>
47 #include "util/parse-events.h"
51 #include "util/evlist-hybrid.h"
65 #include "util/synthetic-events.h"
67 #include "util/time-utils.h"
73 #include "util/pmu-hybrid.h"
102 /* Default events used for perf stat -T */
104 "task-clock,"
108 "cpu/cycles-t/,"
109 "cpu/tx-start/,"
110 "cpu/el-start/,"
111 "cpu/cycles-ct/"
117 "task-clock,"
121 "cpu/cycles-t/,"
122 "cpu/tx-start/"
127 "topdown-total-slots",
128 "topdown-slots-retired",
129 "topdown-recovery-bubbles",
130 "topdown-fetch-bubbles",
131 "topdown-slots-issued",
137 "topdown-retiring",
138 "topdown-bad-spec",
139 "topdown-fe-bound",
140 "topdown-be-bound",
146 "topdown-retiring",
147 "topdown-bad-spec",
148 "topdown-fe-bound",
149 "topdown-be-bound",
150 "topdown-heavy-ops",
151 "topdown-br-mispredict",
152 "topdown-fetch-lat",
153 "topdown-mem-bound",
176 static volatile pid_t child_pid = -1;
182 static int big_num_opt = -1;
220 .ctl_fd = -1,
221 .ctl_fd_ack = -1,
227 if (!a->core.cpus && !b->core.cpus) in cpus_map_matched()
230 if (!a->core.cpus || !b->core.cpus) in cpus_map_matched()
233 if (a->core.cpus->nr != b->core.cpus->nr) in cpus_map_matched()
236 for (int i = 0; i < a->core.cpus->nr; i++) { in cpus_map_matched()
237 if (a->core.cpus->map[i] != b->core.cpus->map[i]) in cpus_map_matched()
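Joined up, lines 227-237 are an element-wise map equality check: both maps NULL counts as a match; one NULL, a size mismatch, or any differing entry does not. A self-contained sketch of the same logic (the struct below is a simplified stand-in, not perf's real perf_cpu_map):

    #include <stdbool.h>

    /* Simplified stand-in for perf's cpu map: nr entries in map[]. */
    struct cpu_map { int nr; int map[]; };

    static bool cpus_map_matched(const struct cpu_map *a, const struct cpu_map *b)
    {
        if (!a && !b)
            return true;          /* both absent: matched */
        if (!a || !b)
            return false;         /* only one absent: mismatch */
        if (a->nr != b->nr)
            return false;
        for (int i = 0; i < a->nr; i++)
            if (a->map[i] != b->map[i])
                return false;
        return true;
    }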
267 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); in evlist__check_cpu_maps()
268 pr_warning(" %s: %s\n", leader->name, buf); in evlist__check_cpu_maps()
269 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); in evlist__check_cpu_maps()
270 pr_warning(" %s: %s\n", evsel->name, buf); in evlist__check_cpu_maps()
275 pos->core.nr_members = 0; in evlist__check_cpu_maps()
277 evsel->core.leader->nr_members = 0; in evlist__check_cpu_maps()
284 r->tv_sec = a->tv_sec - b->tv_sec; in diff_timespec()
285 if (a->tv_nsec < b->tv_nsec) { in diff_timespec()
286 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; in diff_timespec()
287 r->tv_sec--; in diff_timespec()
289 r->tv_nsec = a->tv_nsec - b->tv_nsec; in diff_timespec()
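Lines 284-289 implement timespec subtraction with a nanosecond borrow: when the minuend's nanosecond field is smaller, one second is borrowed. Reconstructed from the fragments above as one runnable unit (NSEC_PER_SEC defined locally here; in perf it comes from the kernel headers):

    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* r = a - b, borrowing one second when tv_nsec underflows. */
    static void diff_timespec(struct timespec *r, const struct timespec *a,
                              const struct timespec *b)
    {
        r->tv_sec = a->tv_sec - b->tv_sec;
        if (a->tv_nsec < b->tv_nsec) {
            r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
            r->tv_sec--;
        } else {
            r->tv_nsec = a->tv_nsec - b->tv_nsec;
        }
    }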
309 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { in process_synthesized_event()
311 return -1; in process_synthesized_event()
314 perf_stat.bytes_written += event->header.size; in process_synthesized_event()
325 #define WRITE_STAT_ROUND_EVENT(time, interval) \ argument
326 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
328 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
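The WRITE_STAT_ROUND_EVENT macro on lines 325-326 token-pastes its second argument onto the PERF_STAT_ROUND_TYPE__ prefix, so call sites name the round type symbolically; for example:

    /* WRITE_STAT_ROUND_EVENT(ts, INTERVAL) expands to */
    write_stat_round_event(ts, PERF_STAT_ROUND_TYPE__INTERVAL);
    /* and WRITE_STAT_ROUND_EVENT(ts, FINAL) expands to */
    write_stat_round_event(ts, PERF_STAT_ROUND_TYPE__FINAL);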
335 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, in evsel__write_stat_event()
342 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { in read_single_counter()
343 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; in read_single_counter()
345 perf_counts(counter->counts, cpu, thread); in read_single_counter()
346 count->ena = count->run = val; in read_single_counter()
347 count->val = val; in read_single_counter()
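For the duration_time tool event (lines 342-347), nothing is read from the kernel: the count is synthesized from the wall-clock timespec, and the same nanosecond value is stored in val, ena and run so that later scaling math is a no-op. The conversion in isolation (hypothetical helper name, not perf's):

    #include <time.h>

    /* Hypothetical helper: timespec -> u64 nanoseconds, as stored
     * for duration_time. */
    static unsigned long long timespec_to_ns(const struct timespec *rs)
    {
        return rs->tv_nsec + rs->tv_sec * 1000000000ULL;
    }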
355 * do not aggregate counts across CPUs in system-wide mode
359 int nthreads = perf_thread_map__nr(evsel_list->core.threads); in read_counter_cpu()
362 if (!counter->supported) in read_counter_cpu()
363 return -ENOENT; in read_counter_cpu()
365 if (counter->core.system_wide) in read_counter_cpu()
371 count = perf_counts(counter->counts, cpu, thread); in read_counter_cpu()
375 * (via evsel__read_counter()) and sets their count->loaded. in read_counter_cpu()
377 if (!perf_counts__is_loaded(counter->counts, cpu, thread) && in read_counter_cpu()
379 counter->counts->scaled = -1; in read_counter_cpu()
380 perf_counts(counter->counts, cpu, thread)->ena = 0; in read_counter_cpu()
381 perf_counts(counter->counts, cpu, thread)->run = 0; in read_counter_cpu()
382 return -1; in read_counter_cpu()
385 perf_counts__set_loaded(counter->counts, cpu, thread, false); in read_counter_cpu()
390 return -1; in read_counter_cpu()
399 count->val, count->ena, count->run); in read_counter_cpu()
416 return -1; in read_affinity_counters()
418 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); in read_affinity_counters()
431 if (!counter->err) { in read_affinity_counters()
432 counter->err = read_counter_cpu(counter, rs, in read_affinity_counters()
433 counter->cpu_iter - 1); in read_affinity_counters()
468 if (counter->err) in read_counters()
469 pr_debug("failed to read counter %s\n", counter->name); in read_counters()
470 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) in read_counters()
471 pr_warning("failed to process counter %s\n", counter->name); in read_counters()
472 counter->err = 0; in read_counters()
480 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); in runtime_stat_new()
481 if (!config->stats) in runtime_stat_new()
482 return -1; in runtime_stat_new()
484 config->stats_num = nthreads; in runtime_stat_new()
487 runtime_stat__init(&config->stats[i]); in runtime_stat_new()
496 if (!config->stats) in runtime_stat_delete()
499 for (i = 0; i < config->stats_num; i++) in runtime_stat_delete()
500 runtime_stat__exit(&config->stats[i]); in runtime_stat_delete()
502 zfree(&config->stats); in runtime_stat_delete()
509 if (!config->stats) in runtime_stat_reset()
512 for (i = 0; i < config->stats_num; i++) in runtime_stat_reset()
513 perf_stat__reset_shadow_per_stat(&config->stats[i]); in runtime_stat_reset()
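Fragments 480-513 outline the lifecycle of the per-thread runtime_stat array: calloc() one slot per thread and init each on setup, exit each and zfree() the array on teardown, and reset the shadow stats between intervals. The allocation half, reconstructed from the fragments above:

    static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
    {
        int i;

        config->stats = calloc(nthreads, sizeof(struct runtime_stat));
        if (!config->stats)
            return -1;

        config->stats_num = nthreads;
        for (i = 0; i < nthreads; i++)
            runtime_stat__init(&config->stats[i]);
        return 0;
    }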
528 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) in process_interval()
533 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); in process_interval()
537 static bool handle_interval(unsigned int interval, int *times) in handle_interval() argument
539 if (interval) { in handle_interval()
541 if (interval_count && !(--(*times))) in handle_interval()
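Line 541 is the --interval-count stop condition: each interval tick pre-decrements the remaining-rounds counter and reports completion when it reaches zero. A sketch with the surrounding control flow filled in (interval_count is passed explicitly here; in perf it is a file-scope flag):

    /* Returns true once the requested number of intervals has printed. */
    static bool handle_interval(unsigned int interval, int *times,
                                bool interval_count)
    {
        if (interval) {
            process_interval();               /* print this round's counts */
            if (interval_count && !(--(*times)))
                return true;                  /* last requested round done */
        }
        return false;
    }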
573 * - we don't have tracee (attaching to task or cpu) in enable_counters()
574 * - we have initial delay configured in enable_counters()
612 workload_exec_errno = info->si_value.sival_int; in workload_exec_failed_signal()
617 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; in evsel__should_store_id()
629 for (i = 0; i < threads->nr; i++) { in is_target_alive()
633 threads->map[i].pid); in is_target_alive()
642 static void process_evlist(struct evlist *evlist, unsigned int interval) in process_evlist() argument
649 if (interval) in process_evlist()
653 if (interval) in process_evlist()
676 tts -= time_diff.tv_sec * MSEC_PER_SEC + in compute_tts()
685 static int dispatch_events(bool forks, int timeout, int interval, int *times) in dispatch_events() argument
691 if (interval) in dispatch_events()
692 sleep_time = interval; in dispatch_events()
704 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; in dispatch_events()
711 if (timeout || handle_interval(interval, times)) in dispatch_events()
715 process_evlist(evsel_list, interval); in dispatch_events()
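dispatch_events() (fragments 685-715) is the wait loop tying these pieces together: pick a tick from the -I interval or the timeout, poll, and on each expiry either stop (workload exited, timeout hit, interval count exhausted) or print another round; compute_tts() on line 676 shortens the next sleep by however long control-fd processing took. A condensed, hedged sketch of that shape, omitting the compute_tts adjustment:

    /* Sketch only: helper names follow the fragments above. */
    static int dispatch_events(bool forks, int timeout, int interval, int *times)
    {
        int child_exited = 0, status = 0;
        int sleep_time;

        if (interval)
            sleep_time = interval;
        else if (timeout)
            sleep_time = timeout;
        else
            sleep_time = 1000;    /* default 1s tick (assumed) */

        while (!done) {
            if (forks)
                child_exited = waitpid(child_pid, &status, WNOHANG);
            else
                child_exited = !is_target_alive(&target, evsel_list->core.threads);
            if (child_exited)
                break;

            if (evlist__poll(evsel_list, sleep_time) <= 0) {
                /* tick expired: stop on timeout or final interval */
                if (timeout || handle_interval(interval, times))
                    break;
            } else {
                /* control-fd activity */
                process_evlist(evsel_list, interval);
            }
        }
        return status;
    }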
743 counter->supported = false; in stat_handle_error()
748 counter->errored = true; in stat_handle_error()
751 !(counter->core.leader->nr_members > 1)) in stat_handle_error()
758 evsel_list->core.threads && in stat_handle_error()
759 evsel_list->core.threads->err_thread != -1) { in stat_handle_error()
761 * For global --per-thread case, skip current in stat_handle_error()
764 if (!thread_map__remove(evsel_list->core.threads, in stat_handle_error()
765 evsel_list->core.threads->err_thread)) { in stat_handle_error()
766 evsel_list->core.threads->err_thread = -1; in stat_handle_error()
774 if (child_pid != -1) in stat_handle_error()
781 int interval = stat_config.interval; in __run_perf_stat() local
798 return -1; in __run_perf_stat()
800 child_pid = evsel_list->workload.pid; in __run_perf_stat()
807 return -1; in __run_perf_stat()
811 return -1; in __run_perf_stat()
828 if (counter->reset_group || counter->errored) in __run_perf_stat()
834 counter->cpu_iter - 1) < 0) { in __run_perf_stat()
845 counter->weak_group) { in __run_perf_stat()
847 assert(counter->reset_group); in __run_perf_stat()
854 return -1; in __run_perf_stat()
864 counter->supported = true; in __run_perf_stat()
878 if (!counter->reset_group && !counter->errored) in __run_perf_stat()
882 perf_evsel__close_cpu(&counter->core, counter->cpu_iter); in __run_perf_stat()
886 if (!counter->reset_group && !counter->errored) in __run_perf_stat()
890 if (!counter->reset_group) in __run_perf_stat()
895 counter->cpu_iter - 1) < 0) { in __run_perf_stat()
899 return -1; in __run_perf_stat()
908 counter->supported = true; in __run_perf_stat()
915 if (!counter->supported) { in __run_perf_stat()
916 perf_evsel__free_fd(&counter->core); in __run_perf_stat()
920 l = strlen(counter->unit); in __run_perf_stat()
926 return -1; in __run_perf_stat()
931 counter->filter, evsel__name(counter), errno, in __run_perf_stat()
933 return -1; in __run_perf_stat()
962 return -1; in __run_perf_stat()
967 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) in __run_perf_stat()
968 status = dispatch_events(forks, timeout, interval, &times); in __run_perf_stat()
969 if (child_pid != -1) { in __run_perf_stat()
978 return -1; in __run_perf_stat()
986 return -1; in __run_perf_stat()
991 status = dispatch_events(forks, timeout, interval, &times); in __run_perf_stat()
999 stat_config.walltime_run[run_idx] = t1 - t0; in __run_perf_stat()
1001 if (interval && stat_config.summary) { in __run_perf_stat()
1002 stat_config.interval = 0; in __run_perf_stat()
1005 update_stats(&walltime_nsecs_stats, t1 - t0); in __run_perf_stat()
1015 update_stats(&walltime_nsecs_stats, t1 - t0); in __run_perf_stat()
1023 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); in __run_perf_stat()
1072 static volatile int signr = -1;
1076 if ((child_pid == -1) || stat_config.interval) in skip_signal()
1086 child_pid = -1; in skip_signal()
1103 if (child_pid != -1) in sig_atexit()
1108 if (signr == -1) in sig_atexit()
1155 struct perf_stat_config *config = opt->value; in parse_control_option()
1157 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); in parse_control_option()
1164 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); in parse_stat_cgroups()
1165 return -1; in parse_stat_cgroups()
1179 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
1186 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
1188 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
1190 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
1193 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1194 "system-wide collection from all CPUs"),
1198 "Use --no-scale to disable counter scaling for multiplexing"),
1204 "display details about each run (only with -r option)"),
1206 "null run - dont start any counters"),
1208 "detailed run - start a lot of events"),
1211 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1215 "list of cpus to monitor in system-wide"),
1216 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
1218 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
1219 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
1222 "monitor event in cgroup name only", parse_stat_cgroups),
1223 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
1227 OPT_INTEGER(0, "log-fd", &output_fd,
1233 OPT_UINTEGER('I', "interval-print", &stat_config.interval,
1234 "print counts at regular interval in ms "
1235 "(overhead is possible for values <= 100ms)"),
1236 OPT_INTEGER(0, "interval-count", &stat_config.times,
1238 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
1239 "clear screen in between new interval"),
1241 "stop workload and print counts after a timeout period in ms (>= 10ms)"),
1242 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
1244 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
1246 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
1248 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
1250 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
1253 … "ms to wait before starting measurement after program start (-1: start with events disabled)"),
1254 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
1256 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
1258 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
1261 "measure top-down statistics"),
1262 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
1263 "Set the metrics level for the top-down statistics (0: max level)"),
1264 OPT_BOOLEAN(0, "smi-cost", &smi_cost,
1267 "monitor specified metrics or metric groups (separated by ,)",
1269 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
1272 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
1275 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
1280 "print summary for interval mode"),
1281 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
1286 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
1290 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
1291 …"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable…
1292 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
1293 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
1331 if (idx >= map->nr) in perf_stat__get_aggr()
1334 cpu = map->map[idx]; in perf_stat__get_aggr()
1336 if (cpu_map__aggr_cpu_id_is_empty(config->cpus_aggr_map->map[cpu])) in perf_stat__get_aggr()
1337 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); in perf_stat__get_aggr()
1339 id = config->cpus_aggr_map->map[cpu]; in perf_stat__get_aggr()
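Lines 1331-1339 cache the aggregation id per CPU: an empty cpus_aggr_map slot triggers one real get_id() call, after which the cached id is returned. The memoization pattern by itself (simplified to plain ints, hypothetical name):

    /* Cache aggregation ids so get_id() runs at most once per CPU;
     * -1 marks "not computed yet". */
    static int cached_get_aggr_id(int cpu, int *cache, int cache_len,
                                  int (*get_id)(int cpu))
    {
        if (cpu < 0 || cpu >= cache_len)
            return -1;
        if (cache[cpu] == -1)
            cache[cpu] = get_id(cpu);
        return cache[cpu];
    }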
1372 if (counter->percore) in term_percore_set()
1385 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode()
1387 return -1; in perf_stat_init_aggr_mode()
1392 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode()
1394 return -1; in perf_stat_init_aggr_mode()
1399 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode()
1401 return -1; in perf_stat_init_aggr_mode()
1406 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode()
1408 return -1; in perf_stat_init_aggr_mode()
1414 if (cpu_map__build_core_map(evsel_list->core.cpus, in perf_stat_init_aggr_mode()
1417 return -1; in perf_stat_init_aggr_mode()
1430 * The evsel_list->cpus is the base we operate on, in perf_stat_init_aggr_mode()
1434 nr = perf_cpu_map__max(evsel_list->core.cpus); in perf_stat_init_aggr_mode()
1436 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; in perf_stat_init_aggr_mode()
1442 WARN_ONCE(refcount_read(&map->refcnt) != 0, in cpu_aggr_map__delete()
1450 if (map && refcount_dec_and_test(&map->refcnt)) in cpu_aggr_map__put()
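cpu_aggr_map__put() (line 1450) is the standard release half of the refcount pattern: the WARN_ONCE in __delete (line 1442) flags freeing a map that still holds references, and __put frees only when the count drops to zero. Its likely body, pieced together from the two fragments:

    void cpu_aggr_map__put(struct cpu_aggr_map *map)
    {
        /* NULL-tolerant, like free(): drop a reference, free on the last. */
        if (map && refcount_dec_and_test(&map->refcnt))
            cpu_aggr_map__delete(map);
    }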
1466 if (idx > map->nr) in perf_env__get_cpu()
1467 return -1; in perf_env__get_cpu()
1469 cpu = map->map[idx]; in perf_env__get_cpu()
1471 if (cpu >= env->nr_cpus_avail) in perf_env__get_cpu()
1472 return -1; in perf_env__get_cpu()
1483 if (cpu != -1) in perf_env__get_socket()
1484 id.socket = env->cpu[cpu].socket_id; in perf_env__get_socket()
1495 if (cpu != -1) { in perf_env__get_die()
1501 id.socket = env->cpu[cpu].socket_id; in perf_env__get_die()
1502 id.die = env->cpu[cpu].die_id; in perf_env__get_die()
1514 if (cpu != -1) { in perf_env__get_core()
1520 id.socket = env->cpu[cpu].socket_id; in perf_env__get_core()
1521 id.die = env->cpu[cpu].die_id; in perf_env__get_core()
1522 id.core = env->cpu[cpu].core_id; in perf_env__get_core()
1564 return perf_env__get_socket(map, idx, &perf_stat.session->header.env); in perf_stat__get_socket_file()
1569 return perf_env__get_die(map, idx, &perf_stat.session->header.env); in perf_stat__get_die_file()
1575 return perf_env__get_core(map, idx, &perf_stat.session->header.env); in perf_stat__get_core_file()
1581 return perf_env__get_node(map, idx, &perf_stat.session->header.env); in perf_stat__get_node_file()
1586 struct perf_env *env = &st->session->header.env; in perf_stat_init_aggr_mode_file()
1590 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode_file()
1592 return -1; in perf_stat_init_aggr_mode_file()
1597 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode_file()
1599 return -1; in perf_stat_init_aggr_mode_file()
1604 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode_file()
1606 return -1; in perf_stat_init_aggr_mode_file()
1611 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode_file()
1613 return -1; in perf_stat_init_aggr_mode_file()
1630 * if -d/--detailed, -d -d or -d -d -d is used:
1664 * Detailed stats (-d), covering the L1 and last level data caches: in add_default_attributes()
1694 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: in add_default_attributes()
1737 * Very, very detailed stats (-d -d -d), adding prefetch events: in add_default_attributes()
1761 /* Handle -T as -M transaction. Once platform specific metrics in add_default_attributes()
1775 if (pmu_have_event("cpu", "cycles-ct") && in add_default_attributes()
1776 pmu_have_event("cpu", "el-start")) in add_default_attributes()
1786 return -1; in add_default_attributes()
1796 return -1; in add_default_attributes()
1802 return -1; in add_default_attributes()
1816 return -1; in add_default_attributes()
1821 return -1; in add_default_attributes()
1841 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); in add_default_attributes()
1842 return -1; in add_default_attributes()
1848 return -1; in add_default_attributes()
1851 if (!stat_config.interval && !stat_config.metric_only) { in add_default_attributes()
1854 "Please print the result regularly, e.g. -I1000\n"); in add_default_attributes()
1863 pr_err("top down event configuration requires --per-core mode\n"); in add_default_attributes()
1864 return -1; in add_default_attributes()
1868 pr_err("top down event configuration requires system-wide mode (-a)\n"); in add_default_attributes()
1869 return -1; in add_default_attributes()
1875 return -1; in add_default_attributes()
1888 return -1; in add_default_attributes()
1892 return -1; in add_default_attributes()
1897 if (!evsel_list->core.nr_entries) { in add_default_attributes()
1899 const char *hybrid_str = "cycles,instructions,branches,branch-misses"; in add_default_attributes()
1906 return -1; in add_default_attributes()
1915 return -1; in add_default_attributes()
1924 return -1; in add_default_attributes()
1925 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { in add_default_attributes()
1927 return -1; in add_default_attributes()
1929 if (pmu_have_event("cpu", "stalled-cycles-backend")) { in add_default_attributes()
1931 return -1; in add_default_attributes()
1934 return -1; in add_default_attributes()
1938 return -1; in add_default_attributes()
1948 return -1; in add_default_attributes()
1955 return -1; in add_default_attributes()
1974 perf_header__set_feat(&session->header, feat); in init_features()
1976 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); in init_features()
1977 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); in init_features()
1978 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); in init_features()
1979 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); in init_features()
1980 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); in init_features()
1992 data->path = output_name; in __cmd_record()
1995 pr_err("Cannot use -r option with perf stat record.\n"); in __cmd_record()
1996 return -1; in __cmd_record()
2007 session->evlist = evsel_list; in __cmd_record()
2016 struct perf_record_stat_round *stat_round = &event->stat_round; in process_stat_round_event()
2019 const char **argv = session->header.env.cmdline_argv; in process_stat_round_event()
2020 int argc = session->header.env.nr_cmdline; in process_stat_round_event()
2025 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) in process_stat_round_event()
2026 update_stats(&walltime_nsecs_stats, stat_round->time); in process_stat_round_event()
2028 if (stat_config.interval && stat_round->time) { in process_stat_round_event()
2029 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; in process_stat_round_event()
2030 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; in process_stat_round_event()
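Lines 2029-2030 recover a struct timespec from the u64 nanosecond round timestamp with one division and one modulo by NSEC_PER_SEC. In isolation (hypothetical helper name):

    #include <time.h>

    /* Hypothetical helper: split a nanosecond timestamp into a timespec. */
    static struct timespec ns_to_timespec(unsigned long long ns)
    {
        struct timespec ts = {
            .tv_sec  = ns / 1000000000ULL,   /* NSEC_PER_SEC */
            .tv_nsec = ns % 1000000000ULL,
        };
        return ts;
    }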
2042 struct perf_tool *tool = session->tool; in process_stat_config_event()
2045 perf_event__read_stat_config(&stat_config, &event->stat_config); in process_stat_config_event()
2047 if (perf_cpu_map__empty(st->cpus)) { in process_stat_config_event()
2048 if (st->aggr_mode != AGGR_UNSET) in process_stat_config_event()
2053 if (st->aggr_mode != AGGR_UNSET) in process_stat_config_event()
2054 stat_config.aggr_mode = st->aggr_mode; in process_stat_config_event()
2066 if (!st->cpus || !st->threads) in set_maps()
2069 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) in set_maps()
2070 return -EINVAL; in set_maps()
2072 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); in set_maps()
2075 return -ENOMEM; in set_maps()
2077 st->maps_allocated = true; in set_maps()
2085 struct perf_tool *tool = session->tool; in process_thread_map_event()
2088 if (st->threads) { in process_thread_map_event()
2093 st->threads = thread_map__new_event(&event->thread_map); in process_thread_map_event()
2094 if (!st->threads) in process_thread_map_event()
2095 return -ENOMEM; in process_thread_map_event()
2104 struct perf_tool *tool = session->tool; in process_cpu_map_event()
2108 if (st->cpus) { in process_cpu_map_event()
2113 cpus = cpu_map__new_data(&event->cpu_map.data); in process_cpu_map_event()
2115 return -ENOMEM; in process_cpu_map_event()
2117 st->cpus = cpus; in process_cpu_map_event()
2144 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, in __cmd_report()
2146 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, in __cmd_report()
2148 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, in __cmd_report()
2150 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, in __cmd_report()
2152 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, in __cmd_report()
2163 input_name = "-"; in __cmd_report()
2177 evsel_list = session->evlist; in __cmd_report()
2190 * Make system wide (-a) the default target if in setup_system_wide()
2194 * - there's no workload specified in setup_system_wide()
2195 * - there is workload specified but all requested in setup_system_wide()
2207 if (!counter->core.system_wide && in setup_system_wide()
2208 strcmp(counter->name, "duration_time")) { in setup_system_wide()
2213 if (evsel_list->core.nr_entries) in setup_system_wide()
2224 int status = -EINVAL, run_idx, err; in cmd_stat()
2227 unsigned int interval, timeout; in cmd_stat() local
2235 return -ENOMEM; in cmd_stat()
2239 /* String-parsing callback-based options would segfault when negated */ in cmd_stat()
2260 return -1; in cmd_stat()
2264 interval = stat_config.interval; in cmd_stat()
2268 * For record command the -o is already taken care of. in cmd_stat()
2270 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) in cmd_stat()
2274 fprintf(stderr, "cannot use both --output and --log-fd\n"); in cmd_stat()
2276 parse_options_usage(NULL, stat_options, "log-fd", 0); in cmd_stat()
2281 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); in cmd_stat()
2286 fprintf(stderr, "--metric-only is not supported with -r\n"); in cmd_stat()
2291 fprintf(stderr, "--table is only supported with -r\n"); in cmd_stat()
2298 fprintf(stderr, "argument to --log-fd must be a > 0\n"); in cmd_stat()
2299 parse_options_usage(stat_usage, stat_options, "log-fd", 0); in cmd_stat()
2310 return -1; in cmd_stat()
2319 return -errno; in cmd_stat()
2326 * let the spreadsheet do the pretty-printing in cmd_stat()
2329 /* User explicitly passed -B? */ in cmd_stat()
2331 fprintf(stderr, "-B option not supported with -x\n"); in cmd_stat()
2337 } else if (big_num_opt == 0) /* User passed --no-big-num */ in cmd_stat()
2367 pr_err("failed to setup -r option"); in cmd_stat()
2375 fprintf(stderr, "The --per-thread option is only " in cmd_stat()
2376 "available when monitoring via -p -t -a " in cmd_stat()
2377 "options or only --per-thread.\n"); in cmd_stat()
2385 * no_aggr, cgroup are for system-wide only in cmd_stat()
2386 * --per-thread is aggregated per thread, we don't mix it with cpu mode in cmd_stat()
2392 fprintf(stderr, "both cgroup and no-aggregation " in cmd_stat()
2393 "modes only available in system-wide mode\n"); in cmd_stat()
2398 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); in cmd_stat()
2420 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); in cmd_stat()
2422 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); in cmd_stat()
2429 "for-each-cgroup", 0); in cmd_stat()
2445 pr_err("Problems finding threads of monitor\n"); in cmd_stat()
2463 thread_map__read_comms(evsel_list->core.threads); in cmd_stat()
2466 perf_thread_map__nr(evsel_list->core.threads))) { in cmd_stat()
2475 if (stat_config.times && interval) in cmd_stat()
2477 else if (stat_config.times && !interval) { in cmd_stat()
2478 pr_err("interval-count option should be used together with " in cmd_stat()
2479 "interval-print.\n"); in cmd_stat()
2480 parse_options_usage(stat_usage, stat_options, "interval-count", 0); in cmd_stat()
2487 pr_err("timeout must be >= 10ms.\n"); in cmd_stat()
2491 pr_warning("timeout < 100ms. " in cmd_stat()
2495 if (timeout && interval) { in cmd_stat()
2496 pr_err("timeout option is not supported with interval-print.\n"); in cmd_stat()
2502 if (evlist__alloc_stats(evsel_list, interval)) in cmd_stat()
2514 * by attr->sample_type != 0, and we can't run it on in cmd_stat()
2520 * We don't want to block the signals - that would cause in cmd_stat()
2521 * child tasks to inherit that and Ctrl-C would not work. in cmd_stat()
2522 * What we want is for Ctrl-C to work in the exec()-ed in cmd_stat()
2545 if (forever && status != -1 && !interval) { in cmd_stat()
2551 if (!forever && status != -1 && (!interval || stat_config.summary)) in cmd_stat()
2567 * tools remain -acme in cmd_stat()
2573 &perf_stat.session->machines.host); in cmd_stat()
2579 if (!interval) { in cmd_stat()
2585 perf_stat.session->header.data_size += perf_stat.bytes_written; in cmd_stat()