Lines Matching "+full:cpu +full:-core" (tools/perf/util/evsel.h)
1 /* SPDX-License-Identifier: GPL-2.0 */
28 /** struct evsel - event selector
30 * @evlist - evlist this evsel is in, if it is in one.
31 * @core - libperf evsel object
32 * @name - Can be set to retain the original event name passed by the user,
39 * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
40 * is used there is an id sample appended to non-sample events
44 	struct perf_evsel	core;
53 * These fields can be set in the parse-events code or similar.
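struct evsel wraps the libperf object: tool code passes &evsel->core to libperf and keeps tool-only state such as @name and the parse-events fields alongside it. A minimal access sketch, assuming the perf-internal headers are available as they are when building inside tools/perf (the helper name is made up):

#include <stdio.h>
#include "util/evsel.h"

static void show_event(struct evsel *evsel)
{
	/* the perf_event_attr lives in the embedded libperf evsel */
	struct perf_event_attr *attr = &evsel->core.attr;

	fprintf(stderr, "%s: type=%u config=%llu\n",
		evsel->name ? evsel->name : "(unnamed)",
		attr->type, (unsigned long long)attr->config);
}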
158 	return perf_evsel__cpus(&evsel->core);
163 	return evsel__cpus(evsel)->nr;
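Both helpers just expose the CPU map owned by the embedded libperf evsel; the integer "cpu" arguments taken by the per-CPU functions below are indices into that map rather than system CPU numbers (an assumption drawn from how the file descriptors are indexed in evsel.c). A small sketch of a per-index loop:

#include "util/evsel.h"

/* run a callback for every index in the evsel's CPU map */
static void for_each_evsel_cpu_idx(struct evsel *evsel,
				   void (*cb)(struct evsel *evsel, int idx))
{
	int idx;

	for (idx = 0; idx < evsel__nr_cpus(evsel); idx++)
		cb(evsel, idx);
}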
169 void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
244 int evsel__enable_cpu(struct evsel *evsel, int cpu);
247 int evsel__disable_cpu(struct evsel *evsel, int cpu);
249 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu);
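These open and toggle a single entry of the CPU map instead of the whole map. A sketch assuming the integer argument is an index into @cpus and that -1 would mean "all indices"; both points are assumptions about the perf internals, so verify against evsel.c before relying on them:

#include "util/evsel.h"
#include <perf/cpumap.h>

/* open the event on one CPU-map index, count briefly, then stop it */
static int open_enable_disable(struct evsel *evsel, struct perf_cpu_map *cpus, int idx)
{
	if (evsel__open_per_cpu(evsel, cpus, idx) < 0)
		return -1;

	if (evsel__enable_cpu(evsel, idx) < 0)
		return -1;

	/* ... workload runs here ... */

	return evsel__disable_cpu(evsel, idx);
}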
272 (evsel->core.attr.type == PERF_TYPE_##t && \
273 evsel->core.attr.config == PERF_COUNT_##c)
277 	return (e1->core.attr.type == e2->core.attr.type) &&
278 	       (e1->core.attr.config == e2->core.attr.config);
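Lines 272-273 are the body of the evsel__match() macro, so callers pass the suffixes of the perf_event_attr constants and the PERF_TYPE_/PERF_COUNT_ prefixes are pasted on; evsel__match2() compares two events structurally instead. A usage sketch:

#include <stdbool.h>
#include <linux/perf_event.h>
#include "util/evsel.h"

static bool is_cycles_or_instructions(struct evsel *evsel)
{
	/* suffixes expand to PERF_TYPE_HARDWARE / PERF_COUNT_HW_* */
	return evsel__match(evsel, HARDWARE, HW_CPU_CYCLES) ||
	       evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS);
}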
281 int evsel__read_counter(struct evsel *evsel, int cpu, int thread);
283 int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale);
286 * evsel__read_on_cpu - Read out the results on a CPU and thread
288 * @evsel - event selector to read value
289 * @cpu - CPU of interest
290 * @thread - thread of interest
292 static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread)
294 	return __evsel__read_on_cpu(evsel, cpu, thread, false);
298 * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
300 * @evsel - event selector to read value
301 * @cpu - CPU of interest
302 * @thread - thread of interest
304 static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu, int thread)
306 	return __evsel__read_on_cpu(evsel, cpu, thread, true);
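Both wrappers return 0 on success and a negative errno on failure; the value read ends up in evsel->counts, which perf_counts() from util/counts.h indexes by (cpu, thread). That storage detail is not visible in the lines above and is taken from the perf tree, so treat this as a sketch:

#include <stdio.h>
#include "util/evsel.h"
#include "util/counts.h"

static int read_one(struct evsel *evsel, int cpu, int thread)
{
	struct perf_counts_values *v;

	/* the scaled variant compensates for time the counter was multiplexed out */
	if (evsel__read_on_cpu_scaled(evsel, cpu, thread))
		return -1;

	v = perf_counts(evsel->counts, cpu, thread);
	fprintf(stderr, "val=%llu enabled=%llu running=%llu\n",
		(unsigned long long)v->val,
		(unsigned long long)v->ena,
		(unsigned long long)v->run);
	return 0;
}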
317 	return list_entry(evsel->core.node.next, struct evsel, core.node);
322 	return list_entry(evsel->core.node.prev, struct evsel, core.node);
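evsel__next()/evsel__prev() step through the evlist's linked list with no bounds check, so the caller has to know a neighbour exists. A sketch comparing an event with the one after it, assuming @evsel is not the last entry in its evlist:

#include <stdbool.h>
#include "util/evsel.h"

/* true when the event following @evsel programs the same type/config */
static bool next_is_duplicate(struct evsel *evsel)
{
	struct evsel *next = evsel__next(evsel);

	return evsel__match2(evsel, next);
}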
326 * evsel__is_group_leader - Return whether given evsel is a leader event
328 * @evsel - evsel selector to be tested
330 * Return %true if @evsel is a group leader or a stand-alone event
334 	return evsel->leader == evsel;
338 * evsel__is_group_event - Return whether given evsel is a group event
340 * @evsel - evsel selector to be tested
350 	return evsel__is_group_leader(evsel) && evsel->core.nr_members > 1;
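A leader points at itself, and a group event is a leader with more than one member. A sketch that reports the grouping of a counter, using only the two predicates and the libperf nr_members field shown above:

#include <stdio.h>
#include "util/evsel.h"

static void describe_grouping(struct evsel *evsel)
{
	if (!evsel__is_group_leader(evsel))
		return;		/* member events are reported via their leader */

	if (evsel__is_group_event(evsel))
		fprintf(stderr, "group leader with %d members\n",
			evsel->core.nr_members);
	else
		fprintf(stderr, "stand-alone event\n");
}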
372 	return evsel->idx - evsel->leader->idx;
377 for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
378 (_evsel) && (_evsel)->leader == (_leader); \
379 (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
384 (_evsel) && (_evsel)->leader == (_leader); \
385 (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
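The first macro (lines 377-379, for_each_group_member in the perf tree) visits the members that follow a leader; the second (lines 384-385, for_each_group_evsel) also includes the leader itself. A sketch listing a group, with evsel__group_idx() from line 372 giving the leader index 0:

#include <stdio.h>
#include "util/evsel.h"

static void list_group(struct evsel *leader)
{
	struct evsel *pos;

	if (!evsel__is_group_leader(leader))
		return;

	/* members only; the leader itself is not visited by this macro */
	for_each_group_member(pos, leader)
		fprintf(stderr, "  member %d: type=%u config=%llu\n",
			evsel__group_idx(pos),
			pos->core.attr.type,
			(unsigned long long)pos->core.attr.config);
}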
389 	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
394 	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
403 	return evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN ||
404 	       evsel->synth_sample_type & PERF_SAMPLE_CALLCHAIN;
413 	return evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK ||
414 	       evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
419 	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
420 	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
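The remaining helpers are feature tests on the attr and sample configuration; the last one matches the software dummy event perf opens to carry side-band records. A small sketch combining them, purely illustrative:

#include <stdbool.h>
#include "util/evsel.h"

/* events that can contribute stacks to a report, ignoring the side-band dummy */
static bool provides_stacks(struct evsel *evsel)
{
	if (evsel__is_dummy_event(evsel))
		return false;

	return evsel__has_callchain(evsel) || evsel__has_br_stack(evsel);
}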