Lines Matching +full:cpu +full:- +full:core
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
/* in mmap_cpu_mask__scnprintf(): */
	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	...
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
/* in mmap__mmap_len(): */
	return perf_mmap__mmap_len(&map->core);
/* in perf_mmap__aio_enabled(): */
	return map->aio.nr_cblocks > 0;
/* in perf_mmap__aio_alloc() (NUMA-aware variant): */
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}
/* in perf_mmap__aio_free() (NUMA-aware variant): */
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	...
	data = map->aio.data[idx];
	node_index = cpu__get_node(cpu);
	...
		return -1;
	...
	pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
	       data, data + mmap_len, node_index);
	err = -1;
	...
}
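The binding call itself is not among the matched lines; the error message above only implies it. As a hedged, standalone sketch of the technique: bind an anonymous buffer to a single NUMA node with mbind(2) and MPOL_BIND. bind_to_node() is a hypothetical helper, not perf code; build with -lnuma, and node 0 is assumed to exist.

#include <stdio.h>
#include <sys/mman.h>
#include <numaif.h>	/* mbind(), MPOL_BIND; link with -lnuma */

/* Hypothetical helper: restrict [data, data + len) to the given NUMA node. */
static int bind_to_node(void *data, size_t len, unsigned long node)
{
	unsigned long nodemask = 1UL << node;	/* assumes node < 64 */

	/* maxnode is a bit count; node + 2 keeps the node bit in range */
	if (mbind(data, len, MPOL_BIND, &nodemask, node + 2, 0)) {
		fprintf(stderr, "mbind to node %lu failed: %m\n", node);
		return -1;
	}
	return 0;
}

int main(void)
{
	size_t len = 1 << 20;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED || bind_to_node(buf, len, 0))
		return 1;
	munmap(buf, len);
	return 0;
}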
/* in perf_mmap__aio_alloc() (fallback without libnuma): */
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;
/* in perf_mmap__aio_free() (fallback without libnuma): */
	zfree(&(map->aio.data[idx]));
/* in perf_mmap__aio_bind() (stub when libnuma is not available): */
static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       int cpu __maybe_unused, int affinity __maybe_unused)
/* in perf_mmap__aio_mmap(): */
	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			...
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			...
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			...
			return -1;
		}
		...
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			...
			if (ret == -1) {
				...
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use cblock.aio_fildes value different from -1
			 * ...
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * ...
			 * are kept in separate per-prio queues and adding
			 * a new request will iterate thru shorter per-prio
			 * ...
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}
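Two conventions above are easy to miss: aio_fildes == -1 marks a control block as reusable, and descending aio_reqprio values keep queued requests spread across short per-priority lists. A toy POSIX AIO program illustrating both, independent of perf's record code (file name and sizes are arbitrary; link with -lrt on older glibc):

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static char data[2][4096];
	struct aiocb cb[2];
	long delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
	int fd = open("/tmp/aio-demo.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
	int i;

	if (fd < 0)
		return 1;

	memset(data, 'x', sizeof(data));
	memset(cb, 0, sizeof(cb));
	for (i = 0; i < 2; i++) {
		cb[i].aio_fildes  = fd;
		cb[i].aio_buf     = data[i];
		cb[i].aio_nbytes  = sizeof(data[i]);
		cb[i].aio_offset  = (off_t)i * sizeof(data[i]);
		/* earlier blocks get a larger priority delta, as above */
		cb[i].aio_reqprio = delta_max - i >= 0 ? delta_max - i : 0;
		if (aio_write(&cb[i]))
			return 1;
	}

	for (i = 0; i < 2; i++) {
		while (aio_error(&cb[i]) == EINPROGRESS)
			usleep(1000);
		printf("block %d wrote %zd bytes\n", i, aio_return(&cb[i]));
		cb[i].aio_fildes = -1;	/* same "free for reuse" sentinel */
	}

	close(fd);
	return 0;
}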
/* in perf_mmap__aio_munmap(): */
	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
/* in mmap__munmap(): */
	bitmap_free(map->affinity_mask.bits);
	...
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
/* in build_node_mask(): */
	int c, cpu, nr_cpus;
	...
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu, mask->bits);
/* in perf_mmap__setup_affinity_mask(): */
	map->affinity_mask.nbits = cpu__max_cpu();
	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu, map->affinity_mask.bits);
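Only the mask construction is matched here; the lines that apply it are elsewhere. As a sketch of the presumed consumer, pin the calling thread to one CPU via sched_setaffinity(2), i.e. the PERF_AFFINITY_CPU case in miniature (pin_to_cpu() is a hypothetical name; CPU 0 is assumed online):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Hypothetical helper: restrict the calling thread to a single CPU. */
static int pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0 /* current thread */, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}

int main(void)
{
	return pin_to_cpu(0) ? 1 : 0;
}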
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		...
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		...
		return -1;
	}
	...
	mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;
	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			...
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;
	...
}
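mmap__mmap() delegates the actual ring-buffer mapping to perf_mmap__mmap(). A standalone sketch of the underlying pattern, assuming nothing about that helper: open one software event with perf_event_open(2) and map one metadata page plus a power-of-two number of data pages (the event type and sizes here are arbitrary choices):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long page_size = sysconf(_SC_PAGESIZE);
	size_t mmap_len = (1 + 8) * page_size;	/* 1 metadata page + 8 data pages */
	void *base;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr.disabled = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
		     -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	base = mmap(NULL, mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... read events from the data pages, as perf_mmap__push() does ... */
	munmap(base, mmap_len);
	close(fd);
	return 0;
}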
/* in perf_mmap__push(): */
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	...
	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
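The wrap test above splits the range [start, end) into at most two contiguous chunks because the buffer is a power-of-two ring indexed by ANDing with the mask. A self-contained illustration of that indexing, with hypothetical names (ring_copy, out) and memcpy standing in for the push() callback:

#include <stdio.h>
#include <string.h>

/*
 * Copy [start, end) out of a power-of-two ring buffer of size mask + 1 in at
 * most two contiguous chunks, mirroring the two push() calls above.
 */
static void ring_copy(const unsigned char *data, size_t mask,
		      unsigned long long start, unsigned long long end,
		      unsigned char *out)
{
	size_t size = end - start;

	if ((start & mask) + size != (end & mask)) {
		/* the range wraps: first chunk runs to the end of the buffer */
		size_t first = mask + 1 - (start & mask);

		memcpy(out, &data[start & mask], first);
		out += first;
		start += first;
		size -= first;
	}
	/* remaining (or only) chunk is contiguous */
	memcpy(out, &data[start & mask], size);
}

int main(void)
{
	unsigned char ring[8], out[6];
	int i;

	for (i = 0; i < 8; i++)
		ring[i] = 'a' + i;

	/* bytes at ring positions 6..11 wrap around the 8-byte buffer */
	ring_copy(ring, sizeof(ring) - 1, 6, 12, out);
	fwrite(out, 1, sizeof(out), stdout);	/* prints "ghabcd" */
	putchar('\n');
	return 0;
}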