Lines Matching +full:cpu +full:-map (tools/lib/perf/evlist.c, Linux kernel libperf)
1 // SPDX-License-Identifier: GPL-2.0
30 INIT_LIST_HEAD(&evlist->entries); in perf_evlist__init()
31 evlist->nr_entries = 0; in perf_evlist__init()
32 fdarray__init(&evlist->pollfd, 64); in perf_evlist__init()
41 * keep it, if there's no target cpu list defined. in __perf_evlist__propagate_maps()
43 if (evsel->system_wide) { in __perf_evlist__propagate_maps()
44 perf_cpu_map__put(evsel->cpus); in __perf_evlist__propagate_maps()
45 evsel->cpus = perf_cpu_map__new(NULL); in __perf_evlist__propagate_maps()
46 } else if (!evsel->own_cpus || evlist->has_user_cpus || in __perf_evlist__propagate_maps()
47 (!evsel->requires_cpu && perf_cpu_map__empty(evlist->user_requested_cpus))) { in __perf_evlist__propagate_maps()
48 perf_cpu_map__put(evsel->cpus); in __perf_evlist__propagate_maps()
49 evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus); in __perf_evlist__propagate_maps()
50 } else if (evsel->cpus != evsel->own_cpus) { in __perf_evlist__propagate_maps()
51 perf_cpu_map__put(evsel->cpus); in __perf_evlist__propagate_maps()
52 evsel->cpus = perf_cpu_map__get(evsel->own_cpus); in __perf_evlist__propagate_maps()
55 if (evsel->system_wide) { in __perf_evlist__propagate_maps()
56 perf_thread_map__put(evsel->threads); in __perf_evlist__propagate_maps()
57 evsel->threads = perf_thread_map__new_dummy(); in __perf_evlist__propagate_maps()
59 perf_thread_map__put(evsel->threads); in __perf_evlist__propagate_maps()
60 evsel->threads = perf_thread_map__get(evlist->threads); in __perf_evlist__propagate_maps()
63 evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus); in __perf_evlist__propagate_maps()
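
Taken together, the branches above pick each evsel's effective cpu map: all online CPUs for a system-wide event, the user's list when one was given (or the PMU supplied none), otherwise the PMU's own cpus. The merge on line 63 then accumulates the union of every evsel's cpus into evlist->all_cpus. A minimal sketch of that union behaviour, with illustrative cpu-list strings:

	struct perf_cpu_map *user = perf_cpu_map__new("0-1");
	struct perf_cpu_map *pmu  = perf_cpu_map__new("1-2");
	struct perf_cpu_map *all;

	/*
	 * The result holds the union {0,1,2}. merge() takes over the
	 * reference held on its first argument, which is what makes the
	 * "all_cpus = merge(all_cpus, ...)" pattern above leak-free.
	 */
	all = perf_cpu_map__merge(user, pmu);

	perf_cpu_map__put(pmu);
	perf_cpu_map__put(all);
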
70 evlist->needs_map_propagation = true; in perf_evlist__propagate_maps()
79 evsel->idx = evlist->nr_entries; in perf_evlist__add()
80 list_add_tail(&evsel->node, &evlist->entries); in perf_evlist__add()
81 evlist->nr_entries += 1; in perf_evlist__add()
83 if (evlist->needs_map_propagation) in perf_evlist__add()
90 list_del_init(&evsel->node); in perf_evlist__remove()
91 evlist->nr_entries -= 1; in perf_evlist__remove()
110 next = list_first_entry(&evlist->entries, in perf_evlist__next()
118 if (&next->node == &evlist->entries) in perf_evlist__next()
129 list_del_init(&pos->node); in perf_evlist__purge()
133 evlist->nr_entries = 0; in perf_evlist__purge()
138 perf_cpu_map__put(evlist->user_requested_cpus); in perf_evlist__exit()
139 perf_cpu_map__put(evlist->all_cpus); in perf_evlist__exit()
140 perf_thread_map__put(evlist->threads); in perf_evlist__exit()
141 evlist->user_requested_cpus = NULL; in perf_evlist__exit()
142 evlist->all_cpus = NULL; in perf_evlist__exit()
143 evlist->threads = NULL; in perf_evlist__exit()
144 fdarray__exit(&evlist->pollfd); in perf_evlist__exit()
170 if (cpus != evlist->user_requested_cpus) { in perf_evlist__set_maps()
171 perf_cpu_map__put(evlist->user_requested_cpus); in perf_evlist__set_maps()
172 evlist->user_requested_cpus = perf_cpu_map__get(cpus); in perf_evlist__set_maps()
175 if (threads != evlist->threads) { in perf_evlist__set_maps()
176 perf_thread_map__put(evlist->threads); in perf_evlist__set_maps()
177 evlist->threads = perf_thread_map__get(threads); in perf_evlist__set_maps()
189 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads); in perf_evlist__open()
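
Lines 170-189 are the normal setup path: set_maps() takes references on the caller's maps and (re)propagates them to every evsel, and open() then creates one perf fd per cpu/thread pair. A minimal counting-style sketch of the call order, loosely modelled on the libperf documentation example (error handling trimmed):

	#include <linux/perf_event.h>
	#include <perf/cpumap.h>
	#include <perf/threadmap.h>
	#include <perf/evlist.h>
	#include <perf/evsel.h>

	int main(void)
	{
		struct perf_event_attr attr = {
			.type   = PERF_TYPE_SOFTWARE,
			.config = PERF_COUNT_SW_CPU_CLOCK,
		};
		struct perf_thread_map *threads = perf_thread_map__new_dummy();
		struct perf_evlist *evlist = perf_evlist__new();
		struct perf_evsel *evsel = perf_evsel__new(&attr);

		perf_thread_map__set_pid(threads, 0, 0);	/* pid 0: measure self */

		perf_evlist__add(evlist, evsel);
		perf_evlist__set_maps(evlist, NULL, threads);	/* NULL cpus: per-thread */

		if (!perf_evlist__open(evlist)) {		/* one perf_event_open() per fd */
			perf_evlist__enable(evlist);
			/* ... workload ... */
			perf_evlist__disable(evlist);
			perf_evlist__close(evlist);
		}

		perf_evlist__delete(evlist);
		perf_thread_map__put(threads);
		return 0;
	}
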
229 return first->attr.read_format; in perf_evlist__read_format()
232 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
236 int cpu, int thread, u64 id) in perf_evlist__id_hash() argument
239 struct perf_sample_id *sid = SID(evsel, cpu, thread); in perf_evlist__id_hash()
241 sid->id = id; in perf_evlist__id_hash()
242 sid->evsel = evsel; in perf_evlist__id_hash()
243 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); in perf_evlist__id_hash()
244 hlist_add_head(&sid->node, &evlist->heads[hash]); in perf_evlist__id_hash()
252 INIT_HLIST_HEAD(&evlist->heads[i]); in perf_evlist__reset_id_hash()
257 int cpu, int thread, u64 id) in perf_evlist__id_add() argument
259 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); in perf_evlist__id_add()
260 evsel->id[evsel->ids++] = id; in perf_evlist__id_add()
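
Each id is hashed into one of the PERF_EVLIST__HLIST_BITS-sized buckets so a sample's PERF_SAMPLE_ID can be resolved back to the evsel that produced it. A lookup walking the same buckets would look roughly like this (hypothetical helper, not part of this file; perf proper carries an equivalent under tools/perf):

	static struct perf_evsel *id_to_evsel(struct perf_evlist *evlist, u64 id)
	{
		int hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
		struct perf_sample_id *sid;

		hlist_for_each_entry(sid, &evlist->heads[hash], node) {
			if (sid->id == id)
				return sid->evsel;
		}
		return NULL;
	}
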
265 int cpu, int thread, int fd) in perf_evlist__id_add_fd() argument
277 return -1; in perf_evlist__id_add_fd()
286 return -1; in perf_evlist__id_add_fd()
288 if (!(evsel->attr.read_format & PERF_FORMAT_ID) || in perf_evlist__id_add_fd()
289 read(fd, &read_data, sizeof(read_data)) == -1) in perf_evlist__id_add_fd()
290 return -1; in perf_evlist__id_add_fd()
292 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in perf_evlist__id_add_fd()
294 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in perf_evlist__id_add_fd()
300 perf_evlist__id_add(evlist, evsel, cpu, thread, id); in perf_evlist__id_add_fd()
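
On the legacy fallback (kernels without PERF_EVENT_IOC_ID) the id is fished out of a plain read() of the counter, with id_idx starting at 1 and bumped past each optional timing field, hence the two checks above. The buffer layout for a non-group read, as documented in perf_event_open(2):

	/*
	 * u64 read_data[4]:
	 *   [0]      counter value
	 *   [1]      time_enabled  (if PERF_FORMAT_TOTAL_TIME_ENABLED)
	 *   [2]      time_running  (if PERF_FORMAT_TOTAL_TIME_RUNNING)
	 *   [id_idx] event id      (PERF_FORMAT_ID); with both timing
	 *            fields enabled, id = read_data[3]
	 */
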
306 int nr_cpus = perf_cpu_map__nr(evlist->all_cpus); in perf_evlist__alloc_pollfd()
307 int nr_threads = perf_thread_map__nr(evlist->threads); in perf_evlist__alloc_pollfd()
312 if (evsel->system_wide) in perf_evlist__alloc_pollfd()
318 if (fdarray__available_entries(&evlist->pollfd) < nfds && in perf_evlist__alloc_pollfd()
319 fdarray__grow(&evlist->pollfd, nfds) < 0) in perf_evlist__alloc_pollfd()
320 return -ENOMEM; in perf_evlist__alloc_pollfd()
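
The branches elided between lines 312 and 318 size the array: a system-wide evsel contributes nr_cpus descriptors, every other evsel nr_cpus * nr_threads. For example, with 4 CPUs, 3 threads, one ordinary and one system-wide evsel, nfds = 4 * 3 + 4 = 16 entries are reserved before the fdarray is grown.
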
328 int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags); in perf_evlist__add_pollfd()
331 evlist->pollfd.priv[pos].ptr = ptr; in perf_evlist__add_pollfd()
341 struct perf_mmap *map = fda->priv[fd].ptr; in perf_evlist__munmap_filtered() local
343 if (map) in perf_evlist__munmap_filtered()
344 perf_mmap__put(map); in perf_evlist__munmap_filtered()
349 return fdarray__filter(&evlist->pollfd, revents_and_mask, in perf_evlist__filter_pollfd()
355 return fdarray__poll(&evlist->pollfd, timeout); in perf_evlist__poll()
361 struct perf_mmap *map; in perf_evlist__alloc_mmap() local
363 map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); in perf_evlist__alloc_mmap()
364 if (!map) in perf_evlist__alloc_mmap()
367 for (i = 0; i < evlist->nr_mmaps; i++) { in perf_evlist__alloc_mmap()
368 struct perf_mmap *prev = i ? &map[i - 1] : NULL; in perf_evlist__alloc_mmap()
379 perf_mmap__init(&map[i], prev, overwrite, NULL); in perf_evlist__alloc_mmap()
382 return map; in perf_evlist__alloc_mmap()
385 static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread) in perf_evsel__set_sid_idx() argument
387 struct perf_sample_id *sid = SID(evsel, cpu, thread); in perf_evsel__set_sid_idx()
389 sid->idx = idx; in perf_evsel__set_sid_idx()
390 sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu); in perf_evsel__set_sid_idx()
391 sid->tid = perf_thread_map__pid(evsel->threads, thread); in perf_evsel__set_sid_idx()
399 maps = overwrite ? evlist->mmap_ovw : evlist->mmap; in perf_evlist__mmap_cb_get()
407 evlist->mmap_ovw = maps; in perf_evlist__mmap_cb_get()
409 evlist->mmap = maps; in perf_evlist__mmap_cb_get()
415 #define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
418 perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp, in perf_evlist__mmap_cb_mmap() argument
419 int output, struct perf_cpu cpu) in perf_evlist__mmap_cb_mmap() argument
421 return perf_mmap__mmap(map, mp, output, cpu); in perf_evlist__mmap_cb_mmap()
424 static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map, in perf_evlist__set_mmap_first() argument
428 evlist->mmap_ovw_first = map; in perf_evlist__set_mmap_first()
430 evlist->mmap_first = map; in perf_evlist__set_mmap_first()
438 struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx); in mmap_per_evsel()
443 bool overwrite = evsel->attr.write_backward; in mmap_per_evsel()
445 struct perf_mmap *map; in mmap_per_evsel() local
446 int *output, fd, cpu; in mmap_per_evsel() local
448 if (evsel->system_wide && thread) in mmap_per_evsel()
451 cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu); in mmap_per_evsel()
452 if (cpu == -1) in mmap_per_evsel()
455 map = ops->get(evlist, overwrite, idx); in mmap_per_evsel()
456 if (map == NULL) in mmap_per_evsel()
457 return -ENOMEM; in mmap_per_evsel()
460 mp->prot = PROT_READ; in mmap_per_evsel()
463 mp->prot = PROT_READ | PROT_WRITE; in mmap_per_evsel()
467 fd = FD(evsel, cpu, thread); in mmap_per_evsel()
469 if (*output == -1) { in mmap_per_evsel()
485 refcount_set(&map->refcnt, 2); in mmap_per_evsel()
487 if (ops->idx) in mmap_per_evsel()
488 ops->idx(evlist, evsel, mp, idx); in mmap_per_evsel()
492 if (ops->mmap(map, mp, *output, evlist_cpu) < 0) in mmap_per_evsel()
493 return -1; in mmap_per_evsel()
498 perf_evlist__set_mmap_first(evlist, map, overwrite); in mmap_per_evsel()
501 pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output); in mmap_per_evsel()
503 return -1; in mmap_per_evsel()
505 perf_mmap__get(map); in mmap_per_evsel()
510 flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default; in mmap_per_evsel()
511 if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) { in mmap_per_evsel()
512 perf_mmap__put(map); in mmap_per_evsel()
513 return -1; in mmap_per_evsel()
516 if (evsel->attr.read_format & PERF_FORMAT_ID) { in mmap_per_evsel()
517 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, in mmap_per_evsel()
519 return -1; in mmap_per_evsel()
520 perf_evsel__set_sid_idx(evsel, idx, cpu, thread); in mmap_per_evsel()
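
The branch elided around lines 469-505 is the heart of ring sharing: only the first fd on a given (cpu, thread) actually mmaps a ring, and its refcnt starts at 2 because both the evlist and that fd hold the map; every later fd is redirected into the existing ring, which is what the "set output fd" debug line at 501 reports. In outline, following the in-tree logic:

	if (*output == -1) {
		*output = fd;			/* first event: create the ring */
		if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
			return -1;
	} else {
		/* later events: route their samples into the mapped ring */
		if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
			return -1;
		perf_mmap__get(map);		/* ring gains one more user */
	}
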
531 int nr_threads = perf_thread_map__nr(evlist->threads); in mmap_per_thread()
532 int nr_cpus = perf_cpu_map__nr(evlist->all_cpus); in mmap_per_thread()
533 int cpu, thread, idx = 0; in mmap_per_thread() local
536 pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n", in mmap_per_thread()
539 /* per-thread mmaps */ in mmap_per_thread()
541 int output = -1; in mmap_per_thread()
542 int output_overwrite = -1; in mmap_per_thread()
549 /* system-wide mmaps i.e. per-cpu */ in mmap_per_thread()
550 for (cpu = 1; cpu < nr_cpus; cpu++, idx++) { in mmap_per_thread()
551 int output = -1; in mmap_per_thread()
552 int output_overwrite = -1; in mmap_per_thread()
554 if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output, in mmap_per_thread()
559 if (nr_mmaps != evlist->nr_mmaps) in mmap_per_thread()
560 pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps); in mmap_per_thread()
566 return -1; in mmap_per_thread()
573 int nr_threads = perf_thread_map__nr(evlist->threads); in mmap_per_cpu()
574 int nr_cpus = perf_cpu_map__nr(evlist->all_cpus); in mmap_per_cpu()
576 int cpu, thread; in mmap_per_cpu() local
578 pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads); in mmap_per_cpu()
580 for (cpu = 0; cpu < nr_cpus; cpu++) { in mmap_per_cpu()
581 int output = -1; in mmap_per_cpu()
582 int output_overwrite = -1; in mmap_per_cpu()
585 if (mmap_per_evsel(evlist, ops, cpu, mp, cpu, in mmap_per_cpu()
591 if (nr_mmaps != evlist->nr_mmaps) in mmap_per_cpu()
592 pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps); in mmap_per_cpu()
598 return -1; in mmap_per_cpu()
605 /* One for each CPU */ in perf_evlist__nr_mmaps()
606 nr_mmaps = perf_cpu_map__nr(evlist->all_cpus); in perf_evlist__nr_mmaps()
607 if (perf_cpu_map__empty(evlist->all_cpus)) { in perf_evlist__nr_mmaps()
609 nr_mmaps += perf_thread_map__nr(evlist->threads); in perf_evlist__nr_mmaps()
610 /* Minus the per-thread CPU (-1) */ in perf_evlist__nr_mmaps()
611 nr_mmaps -= 1; in perf_evlist__nr_mmaps()
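
Worked through: for a purely per-thread session all_cpus holds only the dummy CPU -1, which perf_cpu_map__empty() reports as empty, so with 4 threads nr_mmaps = 1 + 4 - 1 = 4, one ring per thread. For a cpu-bound session on CPUs 0-7 it is simply 8. The same dummy entry sits at index 0 of all_cpus, which is why the per-cpu loop at line 550 in mmap_per_thread starts at cpu = 1.
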
621 const struct perf_cpu_map *cpus = evlist->all_cpus; in perf_evlist__mmap_ops()
624 if (!ops || !ops->get || !ops->mmap) in perf_evlist__mmap_ops()
625 return -EINVAL; in perf_evlist__mmap_ops()
627 mp->mask = evlist->mmap_len - page_size - 1; in perf_evlist__mmap_ops()
629 evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist); in perf_evlist__mmap_ops()
632 if ((evsel->attr.read_format & PERF_FORMAT_ID) && in perf_evlist__mmap_ops()
633 evsel->sample_id == NULL && in perf_evlist__mmap_ops()
634 perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0) in perf_evlist__mmap_ops()
635 return -ENOMEM; in perf_evlist__mmap_ops()
638 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) in perf_evlist__mmap_ops()
639 return -ENOMEM; in perf_evlist__mmap_ops()
655 evlist->mmap_len = (pages + 1) * page_size; in perf_evlist__mmap()
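
The sizing assumes pages is a power of two: the ring is pages * page_size of data plus one header page (line 655), and the mask on line 627 only wraps correctly for power-of-two data sizes. For example, pages = 8 with 4 KiB pages gives mmap_len = 9 * 4096 = 36864 and mask = 36864 - 4096 - 1 = 32767 (0x7fff) for the 32 KiB data area.
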
664 if (evlist->mmap) { in perf_evlist__munmap()
665 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap()
666 perf_mmap__munmap(&evlist->mmap[i]); in perf_evlist__munmap()
669 if (evlist->mmap_ovw) { in perf_evlist__munmap()
670 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap()
671 perf_mmap__munmap(&evlist->mmap_ovw[i]); in perf_evlist__munmap()
674 zfree(&evlist->mmap); in perf_evlist__munmap()
675 zfree(&evlist->mmap_ovw); in perf_evlist__munmap()
679 perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map, in perf_evlist__next_mmap() argument
682 if (map) in perf_evlist__next_mmap()
683 return map->next; in perf_evlist__next_mmap()
685 return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first; in perf_evlist__next_mmap()
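
next_mmap() walks the list that alloc_mmap linked through each map's prev pointer, and the public perf_evlist__for_each_mmap() iterator in <perf/evlist.h> is a thin wrapper over it. A typical drain loop using the standard libperf mmap calls (sketch, error handling trimmed):

	struct perf_mmap *map;
	union perf_event *event;

	perf_evlist__poll(evlist, 100);		/* wait up to 100 ms for data */

	perf_evlist__for_each_mmap(evlist, map, false) { /* false: non-overwrite rings */
		if (perf_mmap__read_init(map) < 0)
			continue;
		while ((event = perf_mmap__read_event(map)) != NULL) {
			/* dispatch on event->header.type here */
			perf_mmap__consume(map);
		}
		perf_mmap__read_done(map);
	}
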
695 leader->nr_members = last->idx - first->idx + 1; in __perf_evlist__set_leader()
698 evsel->leader = leader; in __perf_evlist__set_leader()
703 if (evlist->nr_entries) { in perf_evlist__set_leader()
704 struct perf_evsel *first = list_entry(evlist->entries.next, in perf_evlist__set_leader()
707 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0; in perf_evlist__set_leader()
708 __perf_evlist__set_leader(&evlist->entries, first); in perf_evlist__set_leader()
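
With entries at idx 0..N-1, line 695 gives the leader nr_members = (N - 1) - 0 + 1 = N, and the loop at 698 points every member's leader at the first evsel. Calling this before open makes libperf pass the leader's fd as group_fd, so the kernel schedules the events as one group. A hedged sketch, where cycles and instructions are evsels the caller created:

	perf_evlist__add(evlist, cycles);
	perf_evlist__add(evlist, instructions);
	perf_evlist__set_leader(evlist);	/* cycles leads, nr_members == 2 */
	perf_evlist__open(evlist);		/* members open with group_fd = leader's fd */
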