Lines matching refs:evlist — each entry gives the source line number, the matching line, and the enclosing function, with "argument" or "local" noting whether evlist is a parameter or a local variable on that line.

40 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,  in perf_evlist__init()  argument
46 INIT_HLIST_HEAD(&evlist->heads[i]); in perf_evlist__init()
47 INIT_LIST_HEAD(&evlist->entries); in perf_evlist__init()
48 perf_evlist__set_maps(evlist, cpus, threads); in perf_evlist__init()
49 fdarray__init(&evlist->pollfd, 64); in perf_evlist__init()
50 evlist->workload.pid = -1; in perf_evlist__init()
51 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; in perf_evlist__init()
56 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); in perf_evlist__new() local
58 if (evlist != NULL) in perf_evlist__new()
59 perf_evlist__init(evlist, NULL, NULL); in perf_evlist__new()
61 return evlist; in perf_evlist__new()
66 struct perf_evlist *evlist = perf_evlist__new(); in perf_evlist__new_default() local
68 if (evlist && perf_evlist__add_default(evlist)) { in perf_evlist__new_default()
69 perf_evlist__delete(evlist); in perf_evlist__new_default()
70 evlist = NULL; in perf_evlist__new_default()
73 return evlist; in perf_evlist__new_default()
78 struct perf_evlist *evlist = perf_evlist__new(); in perf_evlist__new_dummy() local
80 if (evlist && perf_evlist__add_dummy(evlist)) { in perf_evlist__new_dummy()
81 perf_evlist__delete(evlist); in perf_evlist__new_dummy()
82 evlist = NULL; in perf_evlist__new_dummy()
85 return evlist; in perf_evlist__new_dummy()
95 void perf_evlist__set_id_pos(struct perf_evlist *evlist) in perf_evlist__set_id_pos() argument
97 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__set_id_pos()
99 evlist->id_pos = first->id_pos; in perf_evlist__set_id_pos()
100 evlist->is_pos = first->is_pos; in perf_evlist__set_id_pos()
103 static void perf_evlist__update_id_pos(struct perf_evlist *evlist) in perf_evlist__update_id_pos() argument
107 evlist__for_each_entry(evlist, evsel) in perf_evlist__update_id_pos()
110 perf_evlist__set_id_pos(evlist); in perf_evlist__update_id_pos()
113 static void perf_evlist__purge(struct perf_evlist *evlist) in perf_evlist__purge() argument
117 evlist__for_each_entry_safe(evlist, n, pos) { in perf_evlist__purge()
119 pos->evlist = NULL; in perf_evlist__purge()
123 evlist->nr_entries = 0; in perf_evlist__purge()
126 void perf_evlist__exit(struct perf_evlist *evlist) in perf_evlist__exit() argument
128 zfree(&evlist->mmap); in perf_evlist__exit()
129 zfree(&evlist->overwrite_mmap); in perf_evlist__exit()
130 fdarray__exit(&evlist->pollfd); in perf_evlist__exit()
133 void perf_evlist__delete(struct perf_evlist *evlist) in perf_evlist__delete() argument
135 if (evlist == NULL) in perf_evlist__delete()
138 perf_evlist__munmap(evlist); in perf_evlist__delete()
139 perf_evlist__close(evlist); in perf_evlist__delete()
140 cpu_map__put(evlist->cpus); in perf_evlist__delete()
141 thread_map__put(evlist->threads); in perf_evlist__delete()
142 evlist->cpus = NULL; in perf_evlist__delete()
143 evlist->threads = NULL; in perf_evlist__delete()
144 perf_evlist__purge(evlist); in perf_evlist__delete()
145 perf_evlist__exit(evlist); in perf_evlist__delete()
146 free(evlist); in perf_evlist__delete()
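
Taken together, the entries above describe a simple lifecycle: perf_evlist__new() zallocs and initializes an empty list, perf_evlist__add_default() populates it with the default event, and perf_evlist__delete() unwinds everything (munmap, close, put the cpu/thread maps, purge the entries, exit, free). A minimal sketch of that contract, essentially what perf_evlist__new_default() at line 66 does, assuming it is compiled inside the perf tool tree against util/evlist.h:

#include "evlist.h"	/* tools/perf/util/evlist.h */

static struct perf_evlist *new_default_evlist(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return NULL;

	/* add the default event; on failure release the list again */
	if (perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		return NULL;
	}

	return evlist;	/* the caller eventually calls perf_evlist__delete() */
}
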
149 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, in __perf_evlist__propagate_maps() argument
156 if (!evsel->own_cpus || evlist->has_user_cpus) { in __perf_evlist__propagate_maps()
158 evsel->cpus = cpu_map__get(evlist->cpus); in __perf_evlist__propagate_maps()
165 evsel->threads = thread_map__get(evlist->threads); in __perf_evlist__propagate_maps()
168 static void perf_evlist__propagate_maps(struct perf_evlist *evlist) in perf_evlist__propagate_maps() argument
172 evlist__for_each_entry(evlist, evsel) in perf_evlist__propagate_maps()
173 __perf_evlist__propagate_maps(evlist, evsel); in perf_evlist__propagate_maps()
176 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) in perf_evlist__add() argument
178 entry->evlist = evlist; in perf_evlist__add()
179 list_add_tail(&entry->node, &evlist->entries); in perf_evlist__add()
180 entry->idx = evlist->nr_entries; in perf_evlist__add()
183 if (!evlist->nr_entries++) in perf_evlist__add()
184 perf_evlist__set_id_pos(evlist); in perf_evlist__add()
186 __perf_evlist__propagate_maps(evlist, entry); in perf_evlist__add()
189 void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel) in perf_evlist__remove() argument
191 evsel->evlist = NULL; in perf_evlist__remove()
193 evlist->nr_entries -= 1; in perf_evlist__remove()
196 void perf_evlist__splice_list_tail(struct perf_evlist *evlist, in perf_evlist__splice_list_tail() argument
203 perf_evlist__add(evlist, evsel); in perf_evlist__splice_list_tail()
221 void perf_evlist__set_leader(struct perf_evlist *evlist) in perf_evlist__set_leader() argument
223 if (evlist->nr_entries) { in perf_evlist__set_leader()
224 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0; in perf_evlist__set_leader()
225 __perf_evlist__set_leader(&evlist->entries); in perf_evlist__set_leader()
243 int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise) in __perf_evlist__add_default() argument
250 perf_evlist__add(evlist, evsel); in __perf_evlist__add_default()
254 int perf_evlist__add_dummy(struct perf_evlist *evlist) in perf_evlist__add_dummy() argument
261 struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries); in perf_evlist__add_dummy()
266 perf_evlist__add(evlist, evsel); in perf_evlist__add_dummy()
270 static int perf_evlist__add_attrs(struct perf_evlist *evlist, in perf_evlist__add_attrs() argument
278 evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); in perf_evlist__add_attrs()
284 perf_evlist__splice_list_tail(evlist, &head); in perf_evlist__add_attrs()
294 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, in __perf_evlist__add_default_attrs() argument
302 return perf_evlist__add_attrs(evlist, attrs, nr_attrs); in __perf_evlist__add_default_attrs()
306 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) in perf_evlist__find_tracepoint_by_id() argument
310 evlist__for_each_entry(evlist, evsel) { in perf_evlist__find_tracepoint_by_id()
320 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, in perf_evlist__find_tracepoint_by_name() argument
325 evlist__for_each_entry(evlist, evsel) { in perf_evlist__find_tracepoint_by_name()
334 int perf_evlist__add_newtp(struct perf_evlist *evlist, in perf_evlist__add_newtp() argument
343 perf_evlist__add(evlist, evsel); in perf_evlist__add_newtp()
347 static int perf_evlist__nr_threads(struct perf_evlist *evlist, in perf_evlist__nr_threads() argument
353 return thread_map__nr(evlist->threads); in perf_evlist__nr_threads()
356 void perf_evlist__disable(struct perf_evlist *evlist) in perf_evlist__disable() argument
360 evlist__for_each_entry(evlist, pos) { in perf_evlist__disable()
366 evlist->enabled = false; in perf_evlist__disable()
369 void perf_evlist__enable(struct perf_evlist *evlist) in perf_evlist__enable() argument
373 evlist__for_each_entry(evlist, pos) { in perf_evlist__enable()
379 evlist->enabled = true; in perf_evlist__enable()
382 void perf_evlist__toggle_enable(struct perf_evlist *evlist) in perf_evlist__toggle_enable() argument
384 (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist); in perf_evlist__toggle_enable()
387 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, in perf_evlist__enable_event_cpu() argument
391 int nr_threads = perf_evlist__nr_threads(evlist, evsel); in perf_evlist__enable_event_cpu()
404 static int perf_evlist__enable_event_thread(struct perf_evlist *evlist, in perf_evlist__enable_event_thread() argument
409 int nr_cpus = cpu_map__nr(evlist->cpus); in perf_evlist__enable_event_thread()
422 int perf_evlist__enable_event_idx(struct perf_evlist *evlist, in perf_evlist__enable_event_idx() argument
425 bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus); in perf_evlist__enable_event_idx()
428 return perf_evlist__enable_event_cpu(evlist, evsel, idx); in perf_evlist__enable_event_idx()
430 return perf_evlist__enable_event_thread(evlist, evsel, idx); in perf_evlist__enable_event_idx()
433 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) in perf_evlist__alloc_pollfd() argument
435 int nr_cpus = cpu_map__nr(evlist->cpus); in perf_evlist__alloc_pollfd()
436 int nr_threads = thread_map__nr(evlist->threads); in perf_evlist__alloc_pollfd()
440 evlist__for_each_entry(evlist, evsel) { in perf_evlist__alloc_pollfd()
447 if (fdarray__available_entries(&evlist->pollfd) < nfds && in perf_evlist__alloc_pollfd()
448 fdarray__grow(&evlist->pollfd, nfds) < 0) in perf_evlist__alloc_pollfd()
454 static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, in __perf_evlist__add_pollfd() argument
457 int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP); in __perf_evlist__add_pollfd()
463 evlist->pollfd.priv[pos].ptr = map; in __perf_evlist__add_pollfd()
471 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) in perf_evlist__add_pollfd() argument
473 return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN); in perf_evlist__add_pollfd()
485 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) in perf_evlist__filter_pollfd() argument
487 return fdarray__filter(&evlist->pollfd, revents_and_mask, in perf_evlist__filter_pollfd()
491 int perf_evlist__poll(struct perf_evlist *evlist, int timeout) in perf_evlist__poll() argument
493 return fdarray__poll(&evlist->pollfd, timeout); in perf_evlist__poll()
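
The pollfd entries form one unit: perf_evlist__alloc_pollfd() sizes the fdarray from the cpu and thread maps, __perf_evlist__add_pollfd() registers each fd with POLLERR and POLLHUP always ORed into the requested events, perf_evlist__poll() waits on the array, and perf_evlist__filter_pollfd() prunes entries whose revents match the given mask. A hedged sketch of the wait-then-prune pattern; the 100ms timeout and the helper name are illustrative, not from the listing:

#include <poll.h>
#include "evlist.h"

/* Wait briefly for ring-buffer data, then drop fds whose other end
 * hung up; returns how many fds are still being polled. */
static int wait_and_prune(struct perf_evlist *evlist)
{
	if (perf_evlist__poll(evlist, 100) > 0) {
		/* at least one fd is readable: consume the mmaps here */
	}

	return perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP);
}
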
496 static void perf_evlist__id_hash(struct perf_evlist *evlist, in perf_evlist__id_hash() argument
506 hlist_add_head(&sid->node, &evlist->heads[hash]); in perf_evlist__id_hash()
509 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, in perf_evlist__id_add() argument
512 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); in perf_evlist__id_add()
516 int perf_evlist__id_add_fd(struct perf_evlist *evlist, in perf_evlist__id_add_fd() argument
538 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) in perf_evlist__id_add_fd()
553 perf_evlist__id_add(evlist, evsel, cpu, thread, id); in perf_evlist__id_add_fd()
557 static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, in perf_evlist__set_sid_idx() argument
563 if (evlist->cpus && cpu >= 0) in perf_evlist__set_sid_idx()
564 sid->cpu = evlist->cpus->map[cpu]; in perf_evlist__set_sid_idx()
567 if (!evsel->system_wide && evlist->threads && thread >= 0) in perf_evlist__set_sid_idx()
568 sid->tid = thread_map__pid(evlist->threads, thread); in perf_evlist__set_sid_idx()
573 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) in perf_evlist__id2sid() argument
580 head = &evlist->heads[hash]; in perf_evlist__id2sid()
589 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) in perf_evlist__id2evsel() argument
593 if (evlist->nr_entries == 1 || !id) in perf_evlist__id2evsel()
594 return perf_evlist__first(evlist); in perf_evlist__id2evsel()
596 sid = perf_evlist__id2sid(evlist, id); in perf_evlist__id2evsel()
600 if (!perf_evlist__sample_id_all(evlist)) in perf_evlist__id2evsel()
601 return perf_evlist__first(evlist); in perf_evlist__id2evsel()
606 struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, in perf_evlist__id2evsel_strict() argument
614 sid = perf_evlist__id2sid(evlist, id); in perf_evlist__id2evsel_strict()
621 static int perf_evlist__event2id(struct perf_evlist *evlist, in perf_evlist__event2id() argument
630 if (evlist->id_pos >= n) in perf_evlist__event2id()
632 *id = array[evlist->id_pos]; in perf_evlist__event2id()
634 if (evlist->is_pos > n) in perf_evlist__event2id()
636 n -= evlist->is_pos; in perf_evlist__event2id()
642 struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, in perf_evlist__event2evsel() argument
645 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__event2evsel()
651 if (evlist->nr_entries == 1) in perf_evlist__event2evsel()
658 if (perf_evlist__event2id(evlist, event, &id)) in perf_evlist__event2evsel()
666 head = &evlist->heads[hash]; in perf_evlist__event2evsel()
675 static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value) in perf_evlist__set_paused() argument
679 if (!evlist->overwrite_mmap) in perf_evlist__set_paused()
682 for (i = 0; i < evlist->nr_mmaps; i++) { in perf_evlist__set_paused()
683 int fd = evlist->overwrite_mmap[i].fd; in perf_evlist__set_paused()
695 static int perf_evlist__pause(struct perf_evlist *evlist) in perf_evlist__pause() argument
697 return perf_evlist__set_paused(evlist, true); in perf_evlist__pause()
700 static int perf_evlist__resume(struct perf_evlist *evlist) in perf_evlist__resume() argument
702 return perf_evlist__set_paused(evlist, false); in perf_evlist__resume()
705 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) in perf_evlist__munmap_nofree() argument
709 if (evlist->mmap) in perf_evlist__munmap_nofree()
710 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap_nofree()
711 perf_mmap__munmap(&evlist->mmap[i]); in perf_evlist__munmap_nofree()
713 if (evlist->overwrite_mmap) in perf_evlist__munmap_nofree()
714 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap_nofree()
715 perf_mmap__munmap(&evlist->overwrite_mmap[i]); in perf_evlist__munmap_nofree()
718 void perf_evlist__munmap(struct perf_evlist *evlist) in perf_evlist__munmap() argument
720 perf_evlist__munmap_nofree(evlist); in perf_evlist__munmap()
721 zfree(&evlist->mmap); in perf_evlist__munmap()
722 zfree(&evlist->overwrite_mmap); in perf_evlist__munmap()
725 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist, in perf_evlist__alloc_mmap() argument
731 evlist->nr_mmaps = cpu_map__nr(evlist->cpus); in perf_evlist__alloc_mmap()
732 if (cpu_map__empty(evlist->cpus)) in perf_evlist__alloc_mmap()
733 evlist->nr_mmaps = thread_map__nr(evlist->threads); in perf_evlist__alloc_mmap()
734 map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); in perf_evlist__alloc_mmap()
738 for (i = 0; i < evlist->nr_mmaps; i++) { in perf_evlist__alloc_mmap()
756 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, in perf_evlist__should_poll()
764 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, in perf_evlist__mmap_per_evsel() argument
770 int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx); in perf_evlist__mmap_per_evsel()
772 evlist__for_each_entry(evlist, evsel) { in perf_evlist__mmap_per_evsel()
773 struct perf_mmap *maps = evlist->mmap; in perf_evlist__mmap_per_evsel()
781 maps = evlist->overwrite_mmap; in perf_evlist__mmap_per_evsel()
784 maps = perf_evlist__alloc_mmap(evlist, true); in perf_evlist__mmap_per_evsel()
787 evlist->overwrite_mmap = maps; in perf_evlist__mmap_per_evsel()
788 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) in perf_evlist__mmap_per_evsel()
789 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); in perf_evlist__mmap_per_evsel()
815 revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0; in perf_evlist__mmap_per_evsel()
825 __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) { in perf_evlist__mmap_per_evsel()
831 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, in perf_evlist__mmap_per_evsel()
834 perf_evlist__set_sid_idx(evlist, evsel, idx, cpu, in perf_evlist__mmap_per_evsel()
842 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, in perf_evlist__mmap_per_cpu() argument
846 int nr_cpus = cpu_map__nr(evlist->cpus); in perf_evlist__mmap_per_cpu()
847 int nr_threads = thread_map__nr(evlist->threads); in perf_evlist__mmap_per_cpu()
854 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, in perf_evlist__mmap_per_cpu()
858 if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, in perf_evlist__mmap_per_cpu()
867 perf_evlist__munmap_nofree(evlist); in perf_evlist__mmap_per_cpu()
871 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, in perf_evlist__mmap_per_thread() argument
875 int nr_threads = thread_map__nr(evlist->threads); in perf_evlist__mmap_per_thread()
882 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, in perf_evlist__mmap_per_thread()
885 if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, in perf_evlist__mmap_per_thread()
893 perf_evlist__munmap_nofree(evlist); in perf_evlist__mmap_per_thread()
1019 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, in perf_evlist__mmap_ex() argument
1024 const struct cpu_map *cpus = evlist->cpus; in perf_evlist__mmap_ex()
1025 const struct thread_map *threads = evlist->threads; in perf_evlist__mmap_ex()
1033 if (!evlist->mmap) in perf_evlist__mmap_ex()
1034 evlist->mmap = perf_evlist__alloc_mmap(evlist, false); in perf_evlist__mmap_ex()
1035 if (!evlist->mmap) in perf_evlist__mmap_ex()
1038 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) in perf_evlist__mmap_ex()
1041 evlist->mmap_len = perf_evlist__mmap_size(pages); in perf_evlist__mmap_ex()
1042 pr_debug("mmap size %zuB\n", evlist->mmap_len); in perf_evlist__mmap_ex()
1043 mp.mask = evlist->mmap_len - page_size - 1; in perf_evlist__mmap_ex()
1045 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len, in perf_evlist__mmap_ex()
1048 evlist__for_each_entry(evlist, evsel) { in perf_evlist__mmap_ex()
1056 return perf_evlist__mmap_per_thread(evlist, &mp); in perf_evlist__mmap_ex()
1058 return perf_evlist__mmap_per_cpu(evlist, &mp); in perf_evlist__mmap_ex()
1061 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) in perf_evlist__mmap() argument
1063 return perf_evlist__mmap_ex(evlist, pages, 0, false); in perf_evlist__mmap()
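
perf_evlist__mmap_ex() sizes each ring buffer with perf_evlist__mmap_size(pages), allocates one perf_mmap per cpu (or per thread when the cpu map is empty) and hooks every event fd into the pollfd array; perf_evlist__mmap() is the plain wrapper around it. A sketch of mapping after a successful open, with the page count (128 data pages) and the helper name chosen for illustration:

#include <errno.h>
#include <stdio.h>
#include "evlist.h"
#include "debug.h"	/* pr_err() */

/* Map the ring buffers once perf_evlist__open() has succeeded. */
static int map_ring_buffers(struct perf_evlist *evlist)
{
	char errbuf[BUFSIZ];

	if (perf_evlist__mmap(evlist, 128) < 0) {
		perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
		pr_err("Failed to mmap the events: %s\n", errbuf);
		return -1;
	}

	return 0;	/* perf_evlist__munmap() later undoes the mapping */
}
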
1066 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) in perf_evlist__create_maps() argument
1104 evlist->has_user_cpus = !!target->cpu_list; in perf_evlist__create_maps()
1106 perf_evlist__set_maps(evlist, cpus, threads); in perf_evlist__create_maps()
1115 void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, in perf_evlist__set_maps() argument
1125 if (cpus != evlist->cpus) { in perf_evlist__set_maps()
1126 cpu_map__put(evlist->cpus); in perf_evlist__set_maps()
1127 evlist->cpus = cpu_map__get(cpus); in perf_evlist__set_maps()
1130 if (threads != evlist->threads) { in perf_evlist__set_maps()
1131 thread_map__put(evlist->threads); in perf_evlist__set_maps()
1132 evlist->threads = thread_map__get(threads); in perf_evlist__set_maps()
1135 perf_evlist__propagate_maps(evlist); in perf_evlist__set_maps()
1138 void __perf_evlist__set_sample_bit(struct perf_evlist *evlist, in __perf_evlist__set_sample_bit() argument
1143 evlist__for_each_entry(evlist, evsel) in __perf_evlist__set_sample_bit()
1147 void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist, in __perf_evlist__reset_sample_bit() argument
1152 evlist__for_each_entry(evlist, evsel) in __perf_evlist__reset_sample_bit()
1156 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) in perf_evlist__apply_filters() argument
1161 evlist__for_each_entry(evlist, evsel) { in perf_evlist__apply_filters()
1179 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) in perf_evlist__set_filter() argument
1184 evlist__for_each_entry(evlist, evsel) { in perf_evlist__set_filter()
1196 int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) in perf_evlist__set_filter_pids() argument
1217 ret = perf_evlist__set_filter(evlist, filter); in perf_evlist__set_filter_pids()
1223 int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) in perf_evlist__set_filter_pid() argument
1225 return perf_evlist__set_filter_pids(evlist, 1, &pid); in perf_evlist__set_filter_pid()
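
perf_evlist__set_filter() stores one filter string on every tracepoint event in the list, the _pid/_pids variants build a "common_pid != ..."-style filter from the given pids, and perf_evlist__apply_filters() later pushes the stored strings to the opened events, reporting the offending event through err_evsel on failure. A small illustrative helper; the function name and the use of getpid() are mine:

#include <unistd.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"

/* Keep the current process out of the trace data, then apply all
 * stored filters to the already-opened events. */
static int setup_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *bad = NULL;

	if (perf_evlist__set_filter_pid(evlist, getpid()) < 0)
		return -1;

	if (perf_evlist__apply_filters(evlist, &bad) < 0) {
		pr_err("failed to apply the event filters\n");
		return -1;
	}

	return 0;
}
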
1228 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) in perf_evlist__valid_sample_type() argument
1232 if (evlist->nr_entries == 1) in perf_evlist__valid_sample_type()
1235 if (evlist->id_pos < 0 || evlist->is_pos < 0) in perf_evlist__valid_sample_type()
1238 evlist__for_each_entry(evlist, pos) { in perf_evlist__valid_sample_type()
1239 if (pos->id_pos != evlist->id_pos || in perf_evlist__valid_sample_type()
1240 pos->is_pos != evlist->is_pos) in perf_evlist__valid_sample_type()
1247 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) in __perf_evlist__combined_sample_type() argument
1251 if (evlist->combined_sample_type) in __perf_evlist__combined_sample_type()
1252 return evlist->combined_sample_type; in __perf_evlist__combined_sample_type()
1254 evlist__for_each_entry(evlist, evsel) in __perf_evlist__combined_sample_type()
1255 evlist->combined_sample_type |= evsel->attr.sample_type; in __perf_evlist__combined_sample_type()
1257 return evlist->combined_sample_type; in __perf_evlist__combined_sample_type()
1260 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) in perf_evlist__combined_sample_type() argument
1262 evlist->combined_sample_type = 0; in perf_evlist__combined_sample_type()
1263 return __perf_evlist__combined_sample_type(evlist); in perf_evlist__combined_sample_type()
1266 u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist) in perf_evlist__combined_branch_type() argument
1271 evlist__for_each_entry(evlist, evsel) in perf_evlist__combined_branch_type()
1276 bool perf_evlist__valid_read_format(struct perf_evlist *evlist) in perf_evlist__valid_read_format() argument
1278 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; in perf_evlist__valid_read_format()
1282 evlist__for_each_entry(evlist, pos) { in perf_evlist__valid_read_format()
1296 u64 perf_evlist__read_format(struct perf_evlist *evlist) in perf_evlist__read_format() argument
1298 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__read_format()
1302 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) in perf_evlist__id_hdr_size() argument
1304 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__id_hdr_size()
1335 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) in perf_evlist__valid_sample_id_all() argument
1337 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; in perf_evlist__valid_sample_id_all()
1339 evlist__for_each_entry_continue(evlist, pos) { in perf_evlist__valid_sample_id_all()
1347 bool perf_evlist__sample_id_all(struct perf_evlist *evlist) in perf_evlist__sample_id_all() argument
1349 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__sample_id_all()
1353 void perf_evlist__set_selected(struct perf_evlist *evlist, in perf_evlist__set_selected() argument
1356 evlist->selected = evsel; in perf_evlist__set_selected()
1359 void perf_evlist__close(struct perf_evlist *evlist) in perf_evlist__close() argument
1363 evlist__for_each_entry_reverse(evlist, evsel) in perf_evlist__close()
1367 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) in perf_evlist__create_syswide_maps() argument
1390 perf_evlist__set_maps(evlist, cpus, threads); in perf_evlist__create_syswide_maps()
1398 int perf_evlist__open(struct perf_evlist *evlist) in perf_evlist__open() argument
1407 if (evlist->threads == NULL && evlist->cpus == NULL) { in perf_evlist__open()
1408 err = perf_evlist__create_syswide_maps(evlist); in perf_evlist__open()
1413 perf_evlist__update_id_pos(evlist); in perf_evlist__open()
1415 evlist__for_each_entry(evlist, evsel) { in perf_evlist__open()
1423 perf_evlist__close(evlist); in perf_evlist__open()
1428 int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target, in perf_evlist__prepare_workload() argument
1445 evlist->workload.pid = fork(); in perf_evlist__prepare_workload()
1446 if (evlist->workload.pid < 0) { in perf_evlist__prepare_workload()
1451 if (!evlist->workload.pid) { in perf_evlist__prepare_workload()
1510 if (evlist->threads == NULL) { in perf_evlist__prepare_workload()
1515 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid); in perf_evlist__prepare_workload()
1529 evlist->workload.cork_fd = go_pipe[1]; in perf_evlist__prepare_workload()
1542 int perf_evlist__start_workload(struct perf_evlist *evlist) in perf_evlist__start_workload() argument
1544 if (evlist->workload.cork_fd > 0) { in perf_evlist__start_workload()
1550 ret = write(evlist->workload.cork_fd, &bf, 1); in perf_evlist__start_workload()
1554 close(evlist->workload.cork_fd); in perf_evlist__start_workload()
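
perf_evlist__prepare_workload() forks the command to be measured and parks the child on a pipe before it execs, recording the child pid in evlist->workload.pid (and, when no thread map exists yet, installing it as thread 0); perf_evlist__start_workload() later writes one byte to workload.cork_fd to let the child run. Only the first two prepare_workload() parameters are visible in the listing; the argv, pipe_output and exec-error-callback arguments in the sketch below are assumptions based on the perf sources:

#include "evlist.h"
#include "target.h"	/* struct target */

/* Fork "sleep 1" as the measured workload and release it once the
 * events are ready (assumed prototype, see the note above). */
static int run_sleep_workload(struct perf_evlist *evlist, struct target *target)
{
	const char *argv[] = { "sleep", "1", NULL };

	if (perf_evlist__prepare_workload(evlist, target, argv, false, NULL) < 0)
		return -1;

	/* ... create maps, open, mmap and enable the events here ... */

	return perf_evlist__start_workload(evlist);
}
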
1561 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, in perf_evlist__parse_sample() argument
1564 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); in perf_evlist__parse_sample()
1571 int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist, in perf_evlist__parse_sample_timestamp() argument
1575 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); in perf_evlist__parse_sample_timestamp()
1582 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) in perf_evlist__fprintf() argument
1587 evlist__for_each_entry(evlist, evsel) { in perf_evlist__fprintf()
1595 int perf_evlist__strerror_open(struct perf_evlist *evlist, in perf_evlist__strerror_open() argument
1624 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__strerror_open()
1649 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size) in perf_evlist__strerror_mmap() argument
1652 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; in perf_evlist__strerror_mmap()
1680 void perf_evlist__to_front(struct perf_evlist *evlist, in perf_evlist__to_front() argument
1686 if (move_evsel == perf_evlist__first(evlist)) in perf_evlist__to_front()
1689 evlist__for_each_entry_safe(evlist, n, evsel) { in perf_evlist__to_front()
1694 list_splice(&move, &evlist->entries); in perf_evlist__to_front()
1697 void perf_evlist__set_tracking_event(struct perf_evlist *evlist, in perf_evlist__set_tracking_event() argument
1705 evlist__for_each_entry(evlist, evsel) { in perf_evlist__set_tracking_event()
1714 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, in perf_evlist__find_evsel_by_str() argument
1719 evlist__for_each_entry(evlist, evsel) { in perf_evlist__find_evsel_by_str()
1729 void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, in perf_evlist__toggle_bkw_mmap() argument
1732 enum bkw_mmap_state old_state = evlist->bkw_mmap_state; in perf_evlist__toggle_bkw_mmap()
1739 if (!evlist->overwrite_mmap) in perf_evlist__toggle_bkw_mmap()
1769 evlist->bkw_mmap_state = state; in perf_evlist__toggle_bkw_mmap()
1773 perf_evlist__pause(evlist); in perf_evlist__toggle_bkw_mmap()
1776 perf_evlist__resume(evlist); in perf_evlist__toggle_bkw_mmap()
1787 bool perf_evlist__exclude_kernel(struct perf_evlist *evlist) in perf_evlist__exclude_kernel() argument
1791 evlist__for_each_entry(evlist, evsel) { in perf_evlist__exclude_kernel()
1804 void perf_evlist__force_leader(struct perf_evlist *evlist) in perf_evlist__force_leader() argument
1806 if (!evlist->nr_groups) { in perf_evlist__force_leader()
1807 struct perf_evsel *leader = perf_evlist__first(evlist); in perf_evlist__force_leader()
1809 perf_evlist__set_leader(evlist); in perf_evlist__force_leader()
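
Putting the listed pieces together, a typical caller builds the cpu and thread maps from a struct target (perf_evlist__create_maps() propagates them to every event), opens and enables the events, and tears everything down in reverse order. The sketch below follows that flow; the helper name is invented, and perf_evlist__strerror_open() is assumed to take the same (err, buf, size) trailing arguments as perf_evlist__strerror_mmap() shown above:

#include <errno.h>
#include <stdio.h>
#include "evlist.h"
#include "target.h"	/* struct target */
#include "debug.h"	/* pr_err() */

static int run_counters(struct perf_evlist *evlist, struct target *target)
{
	char errbuf[BUFSIZ];

	/* resolve the cpu/thread maps the target describes and
	 * propagate them to every event in the list */
	if (perf_evlist__create_maps(evlist, target) < 0)
		return -1;

	if (perf_evlist__open(evlist) < 0) {
		perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
		pr_err("Failed to open the events: %s\n", errbuf);
		return -1;
	}

	perf_evlist__enable(evlist);
	/* ... mmap and drain the ring buffers as in the earlier sketches ... */
	perf_evlist__disable(evlist);

	perf_evlist__close(evlist);
	return 0;	/* the caller finishes with perf_evlist__delete() */
}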