// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	if (evsel->system_wide) {
		/* System wide: set the cpu map of the evsel to all online CPUs. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new(NULL);
	} else if (evlist->has_user_cpus && evsel->is_pmu_core) {
		/*
		 * The user requested CPUs on a core PMU; ensure the requested
		 * CPUs are valid by intersecting them with those of the PMU.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		   (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
		/*
		 * Use the user requested CPUs rather than the PMU ones when:
		 * the PMU didn't specify a default cpu map, the user
		 * explicitly requested CPUs, or the event doesn't require a
		 * CPU and the requested CPUs include the "any CPU" (aka
		 * dummy) value.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		/*
		 * No user requested cpu map but the PMU cpu map doesn't match
		 * the evsel's. Reset it back to the PMU cpu map.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}
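
/*
 * Illustration of the propagation rules above (hypothetical values, not
 * part of the original file): with user_requested_cpus = 0-1 and a core
 * PMU whose own_cpus is 0-7, the evsel ends up with cpus = 0-1 (the
 * intersection); a system_wide evsel always gets all online CPUs and a
 * dummy thread map.
 */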

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist->needs_map_propagation = true;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}
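
/*
 * Typical lifecycle (a minimal usage sketch of the libperf API, not part
 * of the original file; assumes cpus/threads maps created beforehand):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	... count or sample ...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);	// also deletes the evsels it owns
 */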

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is noticed here, so no check is needed on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
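
/*
 * Example (a sketch mirroring the libperf documentation examples, not
 * part of the original file): count on all online CPUs, no thread map:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);	// all online
 *
 *	perf_evlist__set_maps(evlist, cpus, NULL);
 *	...
 *	perf_evlist__delete(evlist);
 *	perf_cpu_map__put(cpus);	// drop the caller's reference last
 */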

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

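	/*
	 * Without PERF_FORMAT_GROUP the legacy read() buffer is laid out
	 * as [ value, time_enabled?, time_running?, id ], so id_idx now
	 * points at the PERF_FORMAT_ID entry.
	 */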
	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

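	/*
	 * E.g. a per-thread evlist (cpu map holding only the dummy -1 CPU)
	 * with 4 threads ends up with 1 + 4 - 1 = 4 mmaps, while a
	 * system-wide evlist on 8 CPUs gets 8.
	 */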
	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}
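
/*
 * Reading the mmaps (a sketch following the libperf sampling example,
 * not part of the original file; assumes the evlist was opened with
 * sampling evsels):
 *
 *	union perf_event *event;
 *	struct perf_mmap *map;
 *
 *	perf_evlist__mmap(evlist, 4);	// 4 data pages per ring buffer
 *
 *	for (map = perf_evlist__next_mmap(evlist, NULL, false); map;
 *	     map = perf_evlist__next_mmap(evlist, map, false)) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			// process event->header.type ...
 *			perf_mmap__consume(map);
 *		}
 *		perf_mmap__read_done(map);
 *	}
 */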

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *evsel;
	int n = 0;

	__perf_evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
		n++;
	}
	leader->nr_members = n;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						      struct perf_evsel, node);

		__perf_evlist__set_leader(&evlist->entries, first);
	}
}

int perf_evlist__nr_groups(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int nr_groups = 0;

	perf_evlist__for_each_evsel(evlist, evsel) {
		/*
		 * evsels by default have a nr_members of 1, and they are their
		 * own leader. If the nr_members is >1 then this is an
		 * indication of a group.
		 */
		if (evsel->leader == evsel && evsel->nr_members > 1)
			nr_groups++;
	}
	return nr_groups;
}