Lines Matching refs:evsel_list

137 static struct evlist	*evsel_list;  variable
207 perf_evlist__reset_stats(evsel_list); in perf_stat__reset_stats()
270 int nthreads = perf_thread_map__nr(evsel_list->core.threads); in read_counter()
329 evlist__for_each_entry(evsel_list, counter) { in read_counters()
369 evlist__enable(evsel_list); in enable_counters()
380 evlist__disable(evsel_list); in disable_counters()
449 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe, in __run_perf_stat()
454 child_pid = evsel_list->workload.pid; in __run_perf_stat()
458 perf_evlist__set_leader(evsel_list); in __run_perf_stat()
460 evlist__for_each_entry(evsel_list, counter) { in __run_perf_stat()
468 counter = perf_evlist__reset_weak_group(evsel_list, counter); in __run_perf_stat()
492 evsel_list->core.threads && in __run_perf_stat()
493 evsel_list->core.threads->err_thread != -1) { in __run_perf_stat()
498 if (!thread_map__remove(evsel_list->core.threads, in __run_perf_stat()
499 evsel_list->core.threads->err_thread)) { in __run_perf_stat()
500 evsel_list->core.threads->err_thread = -1; in __run_perf_stat()
521 perf_evsel__store_ids(counter, evsel_list)) in __run_perf_stat()
525 if (perf_evlist__apply_filters(evsel_list, &counter)) { in __run_perf_stat()
538 err = perf_session__write_header(perf_stat.session, evsel_list, in __run_perf_stat()
545 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, in __run_perf_stat()
558 perf_evlist__start_workload(evsel_list); in __run_perf_stat()
586 if (!is_target_alive(&target, evsel_list->core.threads)) in __run_perf_stat()
620 evlist__close(evsel_list); in __run_perf_stat()
657 perf_evlist__print_counters(evsel_list, &stat_config, &target, in print_counters()
729 OPT_CALLBACK('e', "event", &evsel_list, "event",
732 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
768 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
803 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
865 evlist__for_each_entry(evsel_list, counter) { in term_percore_set()
879 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode()
886 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode()
893 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode()
901 if (cpu_map__build_core_map(evsel_list->core.cpus, in perf_stat_init_aggr_mode()
921 nr = perf_cpu_map__max(evsel_list->core.cpus); in perf_stat_init_aggr_mode()
1052 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode_file()
1059 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode_file()
1066 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { in perf_stat_init_aggr_mode_file()
1264 struct option opt = { .value = &evsel_list }; in add_default_attributes()
1272 err = parse_events(evsel_list, transaction_attrs, in add_default_attributes()
1275 err = parse_events(evsel_list, in add_default_attributes()
1306 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); in add_default_attributes()
1345 err = parse_events(evsel_list, str, &errinfo); in add_default_attributes()
1361 if (!evsel_list->core.nr_entries) { in add_default_attributes()
1365 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0) in add_default_attributes()
1368 if (perf_evlist__add_default_attrs(evsel_list, in add_default_attributes()
1373 if (perf_evlist__add_default_attrs(evsel_list, in add_default_attributes()
1377 if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0) in add_default_attributes()
1387 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) in add_default_attributes()
1394 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) in add_default_attributes()
1401 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); in add_default_attributes()
1447 session->evlist = evsel_list; in __cmd_record()
1462 evlist__for_each_entry(evsel_list, counter) in process_stat_round_event()
1512 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); in set_maps()
1514 if (perf_evlist__alloc_stats(evsel_list, true)) in set_maps()
1644 evsel_list = session->evlist; in __cmd_report()
1673 evlist__for_each_entry(evsel_list, counter) { in setup_system_wide()
1678 if (evsel_list->core.nr_entries) in setup_system_wide()
1697 evsel_list = evlist__new(); in cmd_stat()
1698 if (evsel_list == NULL) in cmd_stat()
1711 perf_stat__collect_metric_expr(evsel_list); in cmd_stat()
1866 if (perf_evlist__create_maps(evsel_list, &target) < 0) { in cmd_stat()
1884 thread_map__read_comms(evsel_list->core.threads); in cmd_stat()
1887 perf_thread_map__nr(evsel_list->core.threads))) { in cmd_stat()
1920 if (perf_evlist__alloc_stats(evsel_list, interval)) in cmd_stat()
1957 perf_evlist__reset_prev_raw_counts(evsel_list); in cmd_stat()
1998 perf_session__write_header(perf_stat.session, evsel_list, fd, true); in cmd_stat()
2001 evlist__close(evsel_list); in cmd_stat()
2006 perf_evlist__free_stats(evsel_list); in cmd_stat()
2013 evlist__delete(evsel_list); in cmd_stat()
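
The matches above span the whole lifetime of the file-scope evsel_list in perf's builtin-stat.c: allocation in cmd_stat() (1697), event selection via the -e/--event, --filter, -G and -M options (729-803) or the default attribute sets in add_default_attributes() (1361-1401), map and stats allocation (1866, 1920), the group/open/enable/read/disable sequence in __run_perf_stat() (369-620), and teardown (2006, 2013). The fragment below is only a condensed, illustrative sketch of that ordering, not code from the file: the wrapper function name is invented, it assumes perf's internal headers (util/evlist.h, util/stat.h, util/target.h), it elides error reporting, option parsing, STAT_RECORD and aggregation-mode setup, and it will not build outside the perf source tree. Bracketed numbers refer to the builtin-stat.c lines listed above.

#include <errno.h>
#include <stdbool.h>
#include "util/evlist.h"   /* evlist__new(), evlist__delete(), ... (perf-internal) */
#include "util/stat.h"     /* perf_evlist__alloc_stats(), perf_evlist__free_stats() */
#include "util/target.h"   /* struct target */

static struct evlist *evsel_list;                       /* [137] file-scope counter list */

/* Hypothetical wrapper, for illustration only. */
static int evsel_list_lifecycle_sketch(struct target *target, bool interval)
{
	int err = -ENOMEM;

	evsel_list = evlist__new();                     /* [1697] */
	if (evsel_list == NULL)                         /* [1698] */
		return err;

	/*
	 * Events normally arrive via -e/--event, --filter, -G, -M [729-803]
	 * or the default attribute sets in add_default_attributes() [1361-1401].
	 */

	err = perf_evlist__create_maps(evsel_list, target);    /* [1866] */
	if (err < 0)
		goto out;

	err = perf_evlist__alloc_stats(evsel_list, interval);  /* [1920] */
	if (err)
		goto out;

	/* __run_perf_stat(): set group leaders, open, run, read, close. */
	perf_evlist__set_leader(evsel_list);            /* [458] */
	evlist__enable(evsel_list);                     /* enable_counters()  [369] */
	/* ... workload runs; read_counters() iterates the list [329] ... */
	evlist__disable(evsel_list);                    /* disable_counters() [380] */
	evlist__close(evsel_list);                      /* [620] */

	perf_evlist__free_stats(evsel_list);            /* [2006] */
out:
	evlist__delete(evsel_list);                     /* [2013] */
	return err;
}

Keeping evsel_list as a single file-scope variable is what lets the record and report paths share it: __cmd_record() publishes it through session->evlist (1447), while __cmd_report() replaces it with the list reconstructed from the perf.data session (1644).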