Lines Matching +full:monitor +full:- +full:interval +full:- +full:ms

1 // SPDX-License-Identifier: GPL-2.0-only
3 * builtin-stat.c
16     1708.761321 task-clock                #   11.037 CPUs utilized
17          41,190 context-switches          #    0.024 M/sec
18           6,735 CPU-migrations            #    0.004 M/sec
19          17,318 page-faults               #    0.010 M/sec
21   3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
22   1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
26       6,388,934 branch-misses             #    1.32% of all branches
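
(Cross-check on the percentages above: they are ratios against lines this search skipped. The file's full header comment lists 5,205,202,243 cycles and 484,357,498 branches, so 3,856,436,920 / 5,205,202,243 ≈ 74.09% frontend cycles idle, 1,600,790,871 / 5,205,202,243 ≈ 30.75% backend cycles idle, and 6,388,934 / 484,357,498 ≈ 1.32% of all branches.)
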
31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
45 #include <subcmd/parse-options.h>
46 #include "util/parse-events.h"
64 #include "util/synthetic-events.h"
66 #include "util/time-utils.h"
115 static volatile sig_atomic_t child_pid = -1;
121 static int big_num_opt = -1;
162 .ctl_fd = -1,
163 .ctl_fd_ack = -1,
169 if (!a->core.cpus && !b->core.cpus) in cpus_map_matched()
172 if (!a->core.cpus || !b->core.cpus) in cpus_map_matched()
175 if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus)) in cpus_map_matched()
178 for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) { in cpus_map_matched()
179 if (perf_cpu_map__cpu(a->core.cpus, i).cpu != in cpus_map_matched()
180 perf_cpu_map__cpu(b->core.cpus, i).cpu) in cpus_map_matched()
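
A minimal, self-contained sketch of the comparison logic matched above, with plain int arrays standing in for perf_cpu_map and an invented name: both maps absent is a match, one absent is not, sizes must agree, then CPUs are compared element-wise.

    #include <stdbool.h>

    /* Illustration of cpus_map_matched(): two CPU lists match if both are
     * absent, or both are present with identical contents. */
    static bool cpu_lists_matched(const int *a, int a_nr, const int *b, int b_nr)
    {
            if (!a && !b)
                    return true;    /* neither evsel has a CPU map */
            if (!a || !b)
                    return false;   /* only one side has a map */
            if (a_nr != b_nr)
                    return false;   /* different sizes cannot match */
            for (int i = 0; i < a_nr; i++)
                    if (a[i] != b[i])
                            return false;
            return true;
    }
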
214 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); in evlist__check_cpu_maps()
215 pr_warning(" %s: %s\n", leader->name, buf); in evlist__check_cpu_maps()
216 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); in evlist__check_cpu_maps()
217 pr_warning(" %s: %s\n", evsel->name, buf); in evlist__check_cpu_maps()
227 r->tv_sec = a->tv_sec - b->tv_sec; in diff_timespec()
228 if (a->tv_nsec < b->tv_nsec) { in diff_timespec()
229 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; in diff_timespec()
230 r->tv_sec--; in diff_timespec()
232 r->tv_nsec = a->tv_nsec - b->tv_nsec; in diff_timespec()
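
Pieced together, diff_timespec() computes r = a - b with a borrow: when a's nanosecond field is smaller than b's, one second is borrowed and NSEC_PER_SEC (10^9) is added to the nanosecond difference. A self-contained rendering of the fragment (renamed here, assuming a >= b):

    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* r = a - b; borrow one second when tv_nsec would underflow. */
    static void timespec_sub(struct timespec *r, const struct timespec *a,
                             const struct timespec *b)
    {
            r->tv_sec = a->tv_sec - b->tv_sec;
            if (a->tv_nsec < b->tv_nsec) {
                    r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
                    r->tv_sec--;    /* the borrow */
            } else {
                    r->tv_nsec = a->tv_nsec - b->tv_nsec;
            }
    }
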
247 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { in process_synthesized_event()
249 return -1; in process_synthesized_event()
252 perf_stat.bytes_written += event->header.size; in process_synthesized_event()
263 #define WRITE_STAT_ROUND_EVENT(time, interval) \ argument
264 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
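
The ## in WRITE_STAT_ROUND_EVENT pastes its second argument onto the PERF_STAT_ROUND_TYPE__ prefix at expansion time, so call sites can say INTERVAL or FINAL. A compilable toy of just the pasting mechanism; the enum and stub function below are stand-ins, not perf's real definitions:

    #include <stdio.h>

    enum round_type { PERF_STAT_ROUND_TYPE__INTERVAL, PERF_STAT_ROUND_TYPE__FINAL };

    static int write_stat_round_event(unsigned long long time, enum round_type type)
    {
            return printf("round type=%d time=%llu\n", (int)type, time) < 0;
    }

    /* '##' token-pastes the argument onto the enum prefix. */
    #define WRITE_STAT_ROUND_EVENT(time, interval) \
            write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

    int main(void)
    {
            /* expands to write_stat_round_event(123ULL, PERF_STAT_ROUND_TYPE__INTERVAL) */
            return WRITE_STAT_ROUND_EVENT(123ULL, INTERVAL);
    }
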
266 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
274 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, in evsel__write_stat_event()
281 switch(counter->tool_event) { in read_single_counter()
283 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; in read_single_counter()
285 perf_counts(counter->counts, cpu_map_idx, thread); in read_single_counter()
286 count->ena = count->run = val; in read_single_counter()
287 count->val = val; in read_single_counter()
294 perf_counts(counter->counts, cpu_map_idx, thread); in read_single_counter()
295 if (counter->tool_event == PERF_TOOL_USER_TIME) in read_single_counter()
299 count->ena = count->run = val; in read_single_counter()
300 count->val = val; in read_single_counter()
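
For perf's software "tool" events (duration_time, user_time, system_time), read_single_counter() synthesizes the value itself rather than reading a kernel counter: the elapsed timespec is folded into nanoseconds, and ena and run are set equal to val so the enabled/running scaling factor comes out as exactly 1 (no multiplexing correction). The conversion as a standalone sketch:

    #include <stdint.h>
    #include <time.h>

    /* Fold an elapsed timespec into nanoseconds, as the duration_time
     * branch above does; the result becomes val, ena and run alike. */
    static uint64_t timespec_to_ns(const struct timespec *rs)
    {
            return rs->tv_nsec + rs->tv_sec * 1000000000ULL;
    }
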
314 * do not aggregate counts across CPUs in system-wide mode
318 int nthreads = perf_thread_map__nr(evsel_list->core.threads); in read_counter_cpu()
321 if (!counter->supported) in read_counter_cpu()
322 return -ENOENT; in read_counter_cpu()
327 count = perf_counts(counter->counts, cpu_map_idx, thread); in read_counter_cpu()
331 * (via evsel__read_counter()) and sets their count->loaded. in read_counter_cpu()
333 if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && in read_counter_cpu()
335 counter->counts->scaled = -1; in read_counter_cpu()
336 perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; in read_counter_cpu()
337 perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; in read_counter_cpu()
338 return -1; in read_counter_cpu()
341 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); in read_counter_cpu()
346 return -1; in read_counter_cpu()
356 count->val, count->ena, count->run); in read_counter_cpu()
374 return -1; in read_affinity_counters()
384 if (!counter->err) { in read_affinity_counters()
385 counter->err = read_counter_cpu(counter, rs, in read_affinity_counters()
416 return -1; in read_counters()
426 if (counter->err) in process_counters()
427 pr_debug("failed to read counter %s\n", counter->name); in process_counters()
428 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) in process_counters()
429 pr_warning("failed to process counter %s\n", counter->name); in process_counters()
430 counter->err = 0; in process_counters()
450 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) in process_interval()
455 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); in process_interval()
459 static bool handle_interval(unsigned int interval, int *times) in handle_interval() argument
461 if (interval) { in handle_interval()
463 if (interval_count && !(--(*times))) in handle_interval()
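
handle_interval() is where --interval-count takes effect: each interval tick decrements the remaining-prints counter, and when it reaches zero the run is done. A minimal sketch of that countdown; names are invented, and in the real code interval_count is a separate flag rather than a zero sentinel:

    #include <stdbool.h>

    /* Returns true once the requested number of interval prints has been
     * produced. interval == 0 disables interval mode; *times_left == 0
     * means "no limit" in this simplification. */
    static bool interval_done(unsigned int interval, int *times_left)
    {
            if (interval) {
                    /* an interval elapsed: counts would be printed here */
                    if (*times_left && !(--(*times_left)))
                            return true;
            }
            return false;
    }
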
517 workload_exec_errno = info->si_value.sival_int; in workload_exec_failed_signal()
522 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; in evsel__should_store_id()
534 for (i = 0; i < threads->nr; i++) { in is_target_alive()
538 threads->map[i].pid); in is_target_alive()
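
is_target_alive() lets perf stat stop once every attached target has exited. A common liveness probe on Linux, and plausibly what the elided lines here do with threads->map[i].pid, is to stat() the target's /proc/<pid> entry; a sketch under that assumption:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /* A pid is considered alive iff its /proc entry still resolves. */
    static bool pid_alive(pid_t pid)
    {
            char path[64];
            struct stat st;

            snprintf(path, sizeof(path), "/proc/%d", (int)pid);
            return stat(path, &st) == 0;
    }
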
547 static void process_evlist(struct evlist *evlist, unsigned int interval) in process_evlist() argument
556 if (interval) in process_evlist()
579 tts -= time_diff.tv_sec * MSEC_PER_SEC + in compute_tts()
588 static int dispatch_events(bool forks, int timeout, int interval, int *times) in dispatch_events() argument
594 if (interval) in dispatch_events()
595 sleep_time = interval; in dispatch_events()
607 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; in dispatch_events()
614 if (timeout || handle_interval(interval, times)) in dispatch_events()
618 process_evlist(evsel_list, interval); in dispatch_events()
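
dispatch_events() is the main wait loop: it sleeps roughly one interval (or one timeout) per pass, checks that the forked child or attach targets are still alive, fires the interval handler, and services the control fd. A condensed skeleton of that shape; names and the fallback 1000 ms slice are illustrative:

    #include <stdbool.h>
    #include <time.h>

    /* Sleep one slice per pass, then decide whether to stop. The real
     * loop also re-checks targets, reaps the child and polls the ctl fd. */
    static void wait_loop(int timeout_ms, int interval_ms, int *times_left)
    {
            int sleep_ms = interval_ms ? interval_ms :
                           timeout_ms  ? timeout_ms  : 1000;

            for (;;) {
                    struct timespec ts = {
                            .tv_sec  = sleep_ms / 1000,
                            .tv_nsec = (sleep_ms % 1000) * 1000000L,
                    };

                    nanosleep(&ts, NULL);
                    if (timeout_ms)
                            break;  /* one-shot timeout elapsed */
                    if (interval_ms && *times_left && !(--(*times_left)))
                            break;  /* interval budget used up */
            }
    }
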
646 counter->supported = false; in stat_handle_error()
651 counter->errored = true; in stat_handle_error()
654 !(counter->core.leader->nr_members > 1)) in stat_handle_error()
661 evsel_list->core.threads && in stat_handle_error()
662 evsel_list->core.threads->err_thread != -1) { in stat_handle_error()
664 * For global --per-thread case, skip current in stat_handle_error()
667 if (!thread_map__remove(evsel_list->core.threads, in stat_handle_error()
668 evsel_list->core.threads->err_thread)) { in stat_handle_error()
669 evsel_list->core.threads->err_thread = -1; in stat_handle_error()
672 } else if (counter->skippable) { in stat_handle_error()
676 counter->supported = false; in stat_handle_error()
677 counter->errored = true; in stat_handle_error()
684 if (child_pid != -1) in stat_handle_error()
691 int interval = stat_config.interval; in __run_perf_stat() local
709 return -1; in __run_perf_stat()
711 child_pid = evsel_list->workload.pid; in __run_perf_stat()
714 if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { in __run_perf_stat()
716 return -1; in __run_perf_stat()
721 counter->reset_group = false; in __run_perf_stat()
723 return -1; in __run_perf_stat()
740 if (counter->reset_group || counter->errored) in __run_perf_stat()
757 counter->weak_group) { in __run_perf_stat()
759 assert(counter->reset_group); in __run_perf_stat()
766 return -1; in __run_perf_stat()
776 counter->supported = true; in __run_perf_stat()
789 if (!counter->reset_group && !counter->errored) in __run_perf_stat()
792 perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); in __run_perf_stat()
798 if (!counter->reset_group) in __run_perf_stat()
807 return -1; in __run_perf_stat()
816 counter->supported = true; in __run_perf_stat()
822 if (!counter->supported) { in __run_perf_stat()
823 perf_evsel__free_fd(&counter->core); in __run_perf_stat()
827 l = strlen(counter->unit); in __run_perf_stat()
833 return -1; in __run_perf_stat()
838 counter->filter, evsel__name(counter), errno, in __run_perf_stat()
840 return -1; in __run_perf_stat()
867 return -1; in __run_perf_stat()
878 return -1; in __run_perf_stat()
887 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) in __run_perf_stat()
888 status = dispatch_events(forks, timeout, interval, &times); in __run_perf_stat()
889 if (child_pid != -1) { in __run_perf_stat()
898 return -1; in __run_perf_stat()
904 status = dispatch_events(forks, timeout, interval, &times); in __run_perf_stat()
912 stat_config.walltime_run[run_idx] = t1 - t0; in __run_perf_stat()
914 if (interval && stat_config.summary) { in __run_perf_stat()
915 stat_config.interval = 0; in __run_perf_stat()
918 update_stats(&walltime_nsecs_stats, t1 - t0); in __run_perf_stat()
924 update_stats(&walltime_nsecs_stats, t1 - t0); in __run_perf_stat()
934 if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0) in __run_perf_stat()
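
The t1 - t0 wall time is fed through update_stats() on walltime_nsecs_stats so that -r/--repeat can report a mean and spread across runs. The standard way to keep a running mean and variance online is Welford's algorithm; a sketch, on the assumption that perf's util/stats.c follows this classic form:

    /* Welford's online mean/variance update. */
    struct run_stats {
            double n, mean, M2;
    };

    static void stats_update(struct run_stats *s, double val)
    {
            double delta = val - s->mean;

            s->n += 1.0;
            s->mean += delta / s->n;            /* running mean */
            s->M2 += delta * (val - s->mean);   /* sum of squared deviations */
    }
    /* variance = M2 / (n - 1) for n > 1; stddev is its square root. */
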
984 static volatile sig_atomic_t signr = -1;
988 if ((child_pid == -1) || stat_config.interval) in skip_signal()
998 child_pid = -1; in skip_signal()
1015 if (child_pid != -1) in sig_atexit()
1020 if (signr == -1) in sig_atexit()
1061 return -ENOMEM; in append_metric_groups()
1067 return -ENOMEM; in append_metric_groups()
1076 struct perf_stat_config *config = opt->value; in parse_control_option()
1078 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); in parse_control_option()
1085 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); in parse_stat_cgroups()
1086 return -1; in parse_stat_cgroups()
1097 struct evlist *evlist = *(struct evlist **)opt->value; in parse_cputype()
1099 if (!list_empty(&evlist->core.entries)) { in parse_cputype()
1101 return -1; in parse_cputype()
1106 fprintf(stderr, "--cputype %s is not supported!\n", str); in parse_cputype()
1107 return -1; in parse_cputype()
1109 parse_events_option_args.pmu_filter = pmu->name; in parse_cputype()
1119 u32 *aggr_mode = (u32 *)opt->value; in parse_cache_level()
1120 u32 *aggr_level = (u32 *)opt->data; in parse_cache_level()
1138 pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", in parse_cache_level()
1141 return -EINVAL; in parse_cache_level()
1146 pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", in parse_cache_level()
1149 return -EINVAL; in parse_cache_level()
1155 return -EINVAL; in parse_cache_level()
1171 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
1178 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
1180 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
1182 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
1185 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1186 "system-wide collection from all CPUs"),
1188 "Use --no-scale to disable counter scaling for multiplexing"),
1194 "display details about each run (only with -r option)"),
1196 "null run - dont start any counters"),
1198 "detailed run - start a lot of events"),
1201 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1205 "list of cpus to monitor in system-wide"),
1206 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
1208 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
1209 OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
1211 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
1213 OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
1216 "monitor event in cgroup name only", parse_stat_cgroups),
1217 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
1221 OPT_INTEGER(0, "log-fd", &output_fd,
1227 OPT_UINTEGER('I', "interval-print", &stat_config.interval,
1228 "print counts at regular interval in ms "
1229 "(overhead is possible for values <= 100ms)"),
1230 OPT_INTEGER(0, "interval-count", &stat_config.times,
1232 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
1233 "clear screen in between new interval"),
1235 "stop workload and print counts after a timeout period in ms (>= 10ms)"),
1236 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
1238 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
1240 OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level,
1243 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
1245 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
1247 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
1250 … "ms to wait before starting measurement after program start (-1: start with events disabled)"),
1251 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
1253 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
1255 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
1257 OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
1260 "measure top-down statistics"),
1261 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
1262 "Set the metrics level for the top-down statistics (0: max level)"),
1263 OPT_BOOLEAN(0, "smi-cost", &smi_cost,
1266 "monitor specified metrics or metric groups (separated by ,)",
1268 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
1271 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
1274 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
1279 "print summary for interval mode"),
1280 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
1289 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
1293 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
1294 …"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable…
1295 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
1296 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
1331 * cpu__get_cache_id - Returns 0 if successful in populating the
1344 cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; in cpu__get_cache_details()
1345 cache->cache = -1; in cpu__get_cache_details()
1361 return -1; in cpu__get_cache_details()
1375 cache->cache_lvl = caches[max_level_index].level; in cpu__get_cache_details()
1376 cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map); in cpu__get_cache_details()
1385 cache->cache_lvl = cache_level; in cpu__get_cache_details()
1386 cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); in cpu__get_cache_details()
1403 * aggr_cpu_id__cache - Create an aggr_cpu_id with cache instance ID, cache
1486 /* per-process mode - should use global aggr mode */ in perf_stat__get_aggr()
1487 if (cpu.cpu == -1) in perf_stat__get_aggr()
1490 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) in perf_stat__get_aggr()
1491 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); in perf_stat__get_aggr()
1493 id = config->cpus_aggr_map->map[cpu.cpu]; in perf_stat__get_aggr()
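
perf_stat__get_aggr() memoizes the CPU-to-aggregate-ID mapping: an ID is computed once via the mode-specific getter and cached in cpus_aggr_map, and later lookups are served from the cache, while cpu == -1 (per-process mode) falls back to the global ID. The pattern reduced to a generic sketch with invented types:

    /* Memoized per-CPU lookup: compute once, then serve from the cache.
     * EMPTY_ID, int IDs and the getter signature simplify perf's
     * aggr_cpu_id machinery. */
    #define EMPTY_ID (-1)

    static int get_aggr_id_cached(int *cache, int cpu, int (*get_id)(int))
    {
            if (cpu < 0)
                    return EMPTY_ID;  /* per-process; real code uses the global ID */
            if (cache[cpu] == EMPTY_ID)
                    cache[cpu] = get_id(cpu);
            return cache[cpu];
    }
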
1596 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, in perf_stat_init_aggr_mode()
1600 return -1; in perf_stat_init_aggr_mode()
1606 nr = perf_thread_map__nr(evsel_list->core.threads); in perf_stat_init_aggr_mode()
1609 return -ENOMEM; in perf_stat_init_aggr_mode()
1615 stat_config.aggr_map->map[s] = id; in perf_stat_init_aggr_mode()
1621 * The evsel_list->cpus is the base we operate on, in perf_stat_init_aggr_mode()
1625 if (evsel_list->core.user_requested_cpus) in perf_stat_init_aggr_mode()
1626 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; in perf_stat_init_aggr_mode()
1630 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; in perf_stat_init_aggr_mode()
1636 WARN_ONCE(refcount_read(&map->refcnt) != 0, in cpu_aggr_map__delete()
1644 if (map && refcount_dec_and_test(&map->refcnt)) in cpu_aggr_map__put()
1661 if (cpu.cpu != -1) in perf_env__get_socket_aggr_by_cpu()
1662 id.socket = env->cpu[cpu.cpu].socket_id; in perf_env__get_socket_aggr_by_cpu()
1672 if (cpu.cpu != -1) { in perf_env__get_die_aggr_by_cpu()
1678 id.socket = env->cpu[cpu.cpu].socket_id; in perf_env__get_die_aggr_by_cpu()
1679 id.die = env->cpu[cpu.cpu].die_id; in perf_env__get_die_aggr_by_cpu()
1689 int caches_cnt = env->caches_cnt; in perf_env__get_cache_id_for_cpu()
1690 struct cpu_cache_level *caches = env->caches; in perf_env__get_cache_id_for_cpu()
1692 id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; in perf_env__get_cache_id_for_cpu()
1693 id->cache = -1; in perf_env__get_cache_id_for_cpu()
1698 for (i = caches_cnt - 1; i > -1; --i) { in perf_env__get_cache_id_for_cpu()
1714 if (map_contains_cpu != -1) { in perf_env__get_cache_id_for_cpu()
1715 id->cache_lvl = caches[i].level; in perf_env__get_cache_id_for_cpu()
1716 id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); in perf_env__get_cache_id_for_cpu()
1728 if (cpu.cpu != -1) { in perf_env__get_cache_aggr_by_cpu()
1731 id.socket = env->cpu[cpu.cpu].socket_id; in perf_env__get_cache_aggr_by_cpu()
1732 id.die = env->cpu[cpu.cpu].die_id; in perf_env__get_cache_aggr_by_cpu()
1744 if (cpu.cpu != -1) { in perf_env__get_core_aggr_by_cpu()
1750 id.socket = env->cpu[cpu.cpu].socket_id; in perf_env__get_core_aggr_by_cpu()
1751 id.die = env->cpu[cpu.cpu].die_id; in perf_env__get_core_aggr_by_cpu()
1752 id.core = env->cpu[cpu.cpu].core_id; in perf_env__get_core_aggr_by_cpu()
1763 if (cpu.cpu != -1) { in perf_env__get_cpu_aggr_by_cpu()
1769 id.socket = env->cpu[cpu.cpu].socket_id; in perf_env__get_cpu_aggr_by_cpu()
1770 id.die = env->cpu[cpu.cpu].die_id; in perf_env__get_cpu_aggr_by_cpu()
1771 id.core = env->cpu[cpu.cpu].core_id; in perf_env__get_cpu_aggr_by_cpu()
1799 return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); in perf_stat__get_socket_file()
1804 return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); in perf_stat__get_die_file()
1810 return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env); in perf_stat__get_cache_file()
1816 return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); in perf_stat__get_core_file()
1822 return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env); in perf_stat__get_cpu_file()
1828 return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); in perf_stat__get_node_file()
1834 return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env); in perf_stat__get_global_file()
1889 struct perf_env *env = &st->session->header.env; in perf_stat_init_aggr_mode_file()
1894 int nr = perf_thread_map__nr(evsel_list->core.threads); in perf_stat_init_aggr_mode_file()
1898 return -ENOMEM; in perf_stat_init_aggr_mode_file()
1904 stat_config.aggr_map->map[s] = id; in perf_stat_init_aggr_mode_file()
1912 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, in perf_stat_init_aggr_mode_file()
1916 return -1; in perf_stat_init_aggr_mode_file()
1924 * if -d/--detailed, -d -d or -d -d -d is used:
1951 * Detailed stats (-d), covering the L1 and last level data caches: in add_default_attributes()
1981 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: in add_default_attributes()
2024 * Very, very detailed stats (-d -d -d), adding prefetch events: in add_default_attributes()
2049 /* Handle -T as -M transaction. Once platform specific metrics in add_default_attributes()
2056 return -1; in add_default_attributes()
2072 return -1; in add_default_attributes()
2078 return -1; in add_default_attributes()
2085 return -1; in add_default_attributes()
2110 return -1; in add_default_attributes()
2113 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); in add_default_attributes()
2114 return -1; in add_default_attributes()
2118 if (!stat_config.interval && !stat_config.metric_only) { in add_default_attributes()
2121 "Please print the result regularly, e.g. -I1000\n"); in add_default_attributes()
2132 return -1; in add_default_attributes()
2138 if (!evsel_list->core.nr_entries) { in add_default_attributes()
2144 return -1; in add_default_attributes()
2145 if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) { in add_default_attributes()
2147 return -1; in add_default_attributes()
2149 if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) { in add_default_attributes()
2151 return -1; in add_default_attributes()
2154 return -1; in add_default_attributes()
2164 return -1; in add_default_attributes()
2173 return -1; in add_default_attributes()
2176 metric_evsel->skippable = true; in add_default_attributes()
2177 metric_evsel->default_metricgroup = true; in add_default_attributes()
2179 evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries); in add_default_attributes()
2185 return -1; in add_default_attributes()
2195 return -1; in add_default_attributes()
2202 return -1; in add_default_attributes()
2221 perf_header__set_feat(&session->header, feat); in init_features()
2223 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); in init_features()
2224 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); in init_features()
2225 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); in init_features()
2226 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); in init_features()
2227 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); in init_features()
2239 data->path = output_name; in __cmd_record()
2242 pr_err("Cannot use -r option with perf stat record.\n"); in __cmd_record()
2243 return -1; in __cmd_record()
2254 session->evlist = evsel_list; in __cmd_record()
2263 struct perf_record_stat_round *stat_round = &event->stat_round; in process_stat_round_event()
2265 const char **argv = session->header.env.cmdline_argv; in process_stat_round_event()
2266 int argc = session->header.env.nr_cmdline; in process_stat_round_event()
2270 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) in process_stat_round_event()
2271 update_stats(&walltime_nsecs_stats, stat_round->time); in process_stat_round_event()
2273 if (stat_config.interval && stat_round->time) { in process_stat_round_event()
2274 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; in process_stat_round_event()
2275 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; in process_stat_round_event()
2287 struct perf_tool *tool = session->tool; in process_stat_config_event()
2290 perf_event__read_stat_config(&stat_config, &event->stat_config); in process_stat_config_event()
2292 if (perf_cpu_map__empty(st->cpus)) { in process_stat_config_event()
2293 if (st->aggr_mode != AGGR_UNSET) in process_stat_config_event()
2295 } else if (st->aggr_mode != AGGR_UNSET) { in process_stat_config_event()
2296 stat_config.aggr_mode = st->aggr_mode; in process_stat_config_event()
2305 int nr_aggr = stat_config.aggr_map->nr; in process_stat_config_event()
2307 if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) { in process_stat_config_event()
2309 return -1; in process_stat_config_event()
2317 if (!st->cpus || !st->threads) in set_maps()
2320 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) in set_maps()
2321 return -EINVAL; in set_maps()
2323 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); in set_maps()
2326 return -ENOMEM; in set_maps()
2328 st->maps_allocated = true; in set_maps()
2336 struct perf_tool *tool = session->tool; in process_thread_map_event()
2339 if (st->threads) { in process_thread_map_event()
2344 st->threads = thread_map__new_event(&event->thread_map); in process_thread_map_event()
2345 if (!st->threads) in process_thread_map_event()
2346 return -ENOMEM; in process_thread_map_event()
2355 struct perf_tool *tool = session->tool; in process_cpu_map_event()
2359 if (st->cpus) { in process_cpu_map_event()
2364 cpus = cpu_map__new_data(&event->cpu_map.data); in process_cpu_map_event()
2366 return -ENOMEM; in process_cpu_map_event()
2368 st->cpus = cpus; in process_cpu_map_event()
2396 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, in __cmd_report()
2398 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, in __cmd_report()
2400 OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level, in __cmd_report()
2404 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, in __cmd_report()
2406 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, in __cmd_report()
2408 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, in __cmd_report()
2419 input_name = "-"; in __cmd_report()
2434 evsel_list = session->evlist; in __cmd_report()
2447 * Make system wide (-a) the default target if in setup_system_wide()
2451 * - there's no workload specified in setup_system_wide()
2452 * - there is workload specified but all requested in setup_system_wide()
2464 if (!counter->core.requires_cpu && in setup_system_wide()
2470 if (evsel_list->core.nr_entries) in setup_system_wide()
2481 int status = -EINVAL, run_idx, err; in cmd_stat()
2484 unsigned int interval, timeout; in cmd_stat() local
2492 return -ENOMEM; in cmd_stat()
2496 /* String-parsing callback-based options would segfault when negated */ in cmd_stat()
2515 return -1; in cmd_stat()
2519 interval = stat_config.interval; in cmd_stat()
2523 * For record command the -o is already taken care of. in cmd_stat()
2525 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) in cmd_stat()
2529 fprintf(stderr, "cannot use both --output and --log-fd\n"); in cmd_stat()
2531 parse_options_usage(NULL, stat_options, "log-fd", 0); in cmd_stat()
2536 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); in cmd_stat()
2541 fprintf(stderr, "--metric-only is not supported with -r\n"); in cmd_stat()
2546 fprintf(stderr, "--table is only supported with -r\n"); in cmd_stat()
2553 fprintf(stderr, "argument to --log-fd must be > 0\n"); in cmd_stat()
2554 parse_options_usage(stat_usage, stat_options, "log-fd", 0); in cmd_stat()
2565 return -1; in cmd_stat()
2576 return -errno; in cmd_stat()
2581 fprintf(stderr, "--interval-clear does not work with output\n"); in cmd_stat()
2583 parse_options_usage(NULL, stat_options, "log-fd", 0); in cmd_stat()
2584 parse_options_usage(NULL, stat_options, "interval-clear", 0); in cmd_stat()
2585 return -1; in cmd_stat()
2591 * let the spreadsheet do the pretty-printing in cmd_stat()
2594 /* User explicitly passed -B? */ in cmd_stat()
2596 fprintf(stderr, "-B option not supported with -x\n"); in cmd_stat()
2602 } else if (big_num_opt == 0) /* User passed --no-big-num */ in cmd_stat()
2632 pr_err("failed to setup -r option"); in cmd_stat()
2640 fprintf(stderr, "The --per-thread option is only " in cmd_stat()
2641 "available when monitoring via -p -t -a " in cmd_stat()
2642 "options or only --per-thread.\n"); in cmd_stat()
2650 * no_aggr, cgroup are for system-wide only in cmd_stat()
2651 * --per-thread is aggregated per thread; we don't mix it with cpu mode in cmd_stat()
2657 fprintf(stderr, "both cgroup and no-aggregation " in cmd_stat()
2658 "modes only available in system-wide mode\n"); in cmd_stat()
2663 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); in cmd_stat()
2687 status = -ENOMEM; in cmd_stat()
2694 * knowing the target is system-wide. in cmd_stat()
2714 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); in cmd_stat()
2716 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); in cmd_stat()
2723 "for-each-cgroup", 0); in cmd_stat()
2732 pr_err("Problems finding threads of monitor\n"); in cmd_stat()
2750 thread_map__read_comms(evsel_list->core.threads); in cmd_stat()
2756 if (stat_config.times && interval) in cmd_stat()
2758 else if (stat_config.times && !interval) { in cmd_stat()
2759 pr_err("interval-count option should be used together with " in cmd_stat()
2760 "interval-print.\n"); in cmd_stat()
2761 parse_options_usage(stat_usage, stat_options, "interval-count", 0); in cmd_stat()
2768 pr_err("timeout must be >= 10ms.\n"); in cmd_stat()
2772 pr_warning("timeout < 100ms. " in cmd_stat()
2776 if (timeout && interval) { in cmd_stat()
2777 pr_err("timeout option is not supported with interval-print.\n"); in cmd_stat()
2786 if (evlist__alloc_stats(&stat_config, evsel_list, interval)) in cmd_stat()
2795 * by attr->sample_type != 0, and we can't run it on in cmd_stat()
2801 * We don't want to block the signals - that would cause in cmd_stat()
2802 * child tasks to inherit them and Ctrl-C would not work. in cmd_stat()
2803 * What we want is for Ctrl-C to work in the exec()-ed in cmd_stat()
2816 /* Enable ignoring missing threads when -p option is defined. */ in cmd_stat()
2817 evlist__first(evsel_list)->ignore_missing_thread = target.pid; in cmd_stat()
2828 if (forever && status != -1 && !interval) { in cmd_stat()
2834 if (!forever && status != -1 && (!interval || stat_config.summary)) { in cmd_stat()
2853 * tools remain -acme in cmd_stat()
2859 &perf_stat.session->machines.host); in cmd_stat()
2865 if (!interval) { in cmd_stat()
2871 perf_stat.session->header.data_size += perf_stat.bytes_written; in cmd_stat()