Lines Matching refs:map

26 size_t perf_mmap__mmap_len(struct mmap *map)  in perf_mmap__mmap_len()  argument
28 return map->core.mask + 1 + page_size; in perf_mmap__mmap_len()
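The mapped length is the power-of-two data area (map->core.mask + 1 bytes) plus one extra page for the perf_event_mmap_page header. A minimal sketch of that arithmetic, assuming nr_pages is the number of data pages (illustrative helper, not code from the listing):

#include <stddef.h>

/* mask plays the role of map->core.mask: data size minus one, so mask + 1
 * restores the data area and the extra page_size covers the header page. */
static size_t example_mmap_len(size_t nr_pages, size_t page_size)
{
	size_t mask = nr_pages * page_size - 1;

	return mask + 1 + page_size;	/* data pages + header page */
}
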
32 static union perf_event *perf_mmap__read(struct mmap *map, in perf_mmap__read() argument
35 unsigned char *data = map->core.base + page_size; in perf_mmap__read()
42 event = (union perf_event *)&data[*startp & map->core.mask]; in perf_mmap__read()
52 if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) { in perf_mmap__read()
55 void *dst = map->core.event_copy; in perf_mmap__read()
58 cpy = min(map->core.mask + 1 - (offset & map->core.mask), len); in perf_mmap__read()
59 memcpy(dst, &data[offset & map->core.mask], cpy); in perf_mmap__read()
65 event = (union perf_event *)map->core.event_copy; in perf_mmap__read()
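When an event straddles the end of the data area, perf_mmap__read() assembles it piecewise into map->core.event_copy. A standalone sketch of that wrap-around copy, with illustrative names and assuming a power-of-two buffer of size mask + 1:

#include <stdint.h>
#include <string.h>

/* Copy len bytes starting at offset out of a circular buffer of size mask + 1,
 * splitting the copy where the buffer wraps (mirrors the loop in the listing). */
static void copy_wrapped(unsigned char *dst, const unsigned char *data,
			 uint64_t offset, size_t len, uint64_t mask)
{
	while (len) {
		size_t cpy = mask + 1 - (offset & mask);

		if (cpy > len)
			cpy = len;	/* min(bytes until wrap, len) */
		memcpy(dst, &data[offset & mask], cpy);
		offset += cpy;
		dst += cpy;
		len -= cpy;
	}
}
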
86 union perf_event *perf_mmap__read_event(struct mmap *map) in perf_mmap__read_event() argument
93 if (!refcount_read(&map->core.refcnt)) in perf_mmap__read_event()
97 if (!map->core.overwrite) in perf_mmap__read_event()
98 map->core.end = perf_mmap__read_head(map); in perf_mmap__read_event()
100 event = perf_mmap__read(map, &map->core.start, map->core.end); in perf_mmap__read_event()
102 if (!map->core.overwrite) in perf_mmap__read_event()
103 map->core.prev = map->core.start; in perf_mmap__read_event()
108 static bool perf_mmap__empty(struct mmap *map) in perf_mmap__empty() argument
110 return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base; in perf_mmap__empty()
113 void perf_mmap__get(struct mmap *map) in perf_mmap__get() argument
115 refcount_inc(&map->core.refcnt); in perf_mmap__get()
118 void perf_mmap__put(struct mmap *map) in perf_mmap__put() argument
120 BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0); in perf_mmap__put()
122 if (refcount_dec_and_test(&map->core.refcnt)) in perf_mmap__put()
123 perf_mmap__munmap(map); in perf_mmap__put()
126 void perf_mmap__consume(struct mmap *map) in perf_mmap__consume() argument
128 if (!map->core.overwrite) { in perf_mmap__consume()
129 u64 old = map->core.prev; in perf_mmap__consume()
131 perf_mmap__write_tail(map, old); in perf_mmap__consume()
134 if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map)) in perf_mmap__consume()
135 perf_mmap__put(map); in perf_mmap__consume()
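perf_mmap__consume() publishes the consumed position back to the kernel by writing it into the data_tail field of the shared header page (the perf_mmap__write_tail() call above), then drops the extra reference once the buffer is empty. A sketch of what that tail write amounts to, assuming the header layout from <linux/perf_event.h>; the tools code itself uses smp_store_release():

#include <stdint.h>
#include <linux/perf_event.h>

/* Illustrative only: a release store is what guarantees the kernel observes
 * the data as consumed before the tail moves. */
static void write_tail_sketch(struct perf_event_mmap_page *pc, uint64_t tail)
{
	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}
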
165 static int perf_mmap__aio_enabled(struct mmap *map) in perf_mmap__aio_enabled() argument
167 return map->aio.nr_cblocks > 0; in perf_mmap__aio_enabled()
171 static int perf_mmap__aio_alloc(struct mmap *map, int idx) in perf_mmap__aio_alloc() argument
173 map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE, in perf_mmap__aio_alloc()
175 if (map->aio.data[idx] == MAP_FAILED) { in perf_mmap__aio_alloc()
176 map->aio.data[idx] = NULL; in perf_mmap__aio_alloc()
183 static void perf_mmap__aio_free(struct mmap *map, int idx) in perf_mmap__aio_free() argument
185 if (map->aio.data[idx]) { in perf_mmap__aio_free()
186 munmap(map->aio.data[idx], perf_mmap__mmap_len(map)); in perf_mmap__aio_free()
187 map->aio.data[idx] = NULL; in perf_mmap__aio_free()
191 static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity) in perf_mmap__aio_bind() argument
198 data = map->aio.data[idx]; in perf_mmap__aio_bind()
199 mmap_len = perf_mmap__mmap_len(map); in perf_mmap__aio_bind()
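With libaio available, the AIO buffers are mmap()ed rather than malloc()ed so that perf_mmap__aio_bind() can pin them to the NUMA node of the mmap's CPU. A hedged sketch of that binding, assuming mbind(2) from libnuma's <numaif.h>:

#include <stddef.h>
#include <numaif.h>

/* Illustrative: bind an anonymous mapping to a single NUMA node. node_mask is
 * a bitmask of allowed nodes; maxnode must cover the highest set bit plus one. */
static int bind_to_node(void *data, size_t mmap_len, int node_index)
{
	unsigned long node_mask = 1UL << node_index;

	return mbind(data, mmap_len, MPOL_BIND, &node_mask,
		     node_index + 1 + 1, 0);
}
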
211 static int perf_mmap__aio_alloc(struct mmap *map, int idx) in perf_mmap__aio_alloc() argument
213 map->aio.data[idx] = malloc(perf_mmap__mmap_len(map)); in perf_mmap__aio_alloc()
214 if (map->aio.data[idx] == NULL) in perf_mmap__aio_alloc()
220 static void perf_mmap__aio_free(struct mmap *map, int idx) in perf_mmap__aio_free() argument
222 zfree(&(map->aio.data[idx])); in perf_mmap__aio_free()
225 static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused, in perf_mmap__aio_bind()
232 static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp) in perf_mmap__aio_mmap() argument
236 map->aio.nr_cblocks = mp->nr_cblocks; in perf_mmap__aio_mmap()
237 if (map->aio.nr_cblocks) { in perf_mmap__aio_mmap()
238 map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *)); in perf_mmap__aio_mmap()
239 if (!map->aio.aiocb) { in perf_mmap__aio_mmap()
243 map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb)); in perf_mmap__aio_mmap()
244 if (!map->aio.cblocks) { in perf_mmap__aio_mmap()
248 map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *)); in perf_mmap__aio_mmap()
249 if (!map->aio.data) { in perf_mmap__aio_mmap()
254 for (i = 0; i < map->aio.nr_cblocks; ++i) { in perf_mmap__aio_mmap()
255 ret = perf_mmap__aio_alloc(map, i); in perf_mmap__aio_mmap()
260 ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity); in perf_mmap__aio_mmap()
269 map->aio.cblocks[i].aio_fildes = -1; in perf_mmap__aio_mmap()
279 map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0; in perf_mmap__aio_mmap()
286 static void perf_mmap__aio_munmap(struct mmap *map) in perf_mmap__aio_munmap() argument
290 for (i = 0; i < map->aio.nr_cblocks; ++i) in perf_mmap__aio_munmap()
291 perf_mmap__aio_free(map, i); in perf_mmap__aio_munmap()
292 if (map->aio.data) in perf_mmap__aio_munmap()
293 zfree(&map->aio.data); in perf_mmap__aio_munmap()
294 zfree(&map->aio.cblocks); in perf_mmap__aio_munmap()
295 zfree(&map->aio.aiocb); in perf_mmap__aio_munmap()
298 static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused) in perf_mmap__aio_enabled()
303 static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused, in perf_mmap__aio_mmap()
309 static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused) in perf_mmap__aio_munmap()
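The aiocb array set up in perf_mmap__aio_mmap() later carries asynchronous trace writes; aio_fildes is initialized to -1 to mark a free slot, and aio_reqprio is clamped to zero when the computed priority goes negative (line 279 above). A minimal sketch of filling one POSIX aiocb for a write (illustrative helper, not the listing's code):

#include <sys/types.h>
#include <aio.h>
#include <string.h>

/* Queue an asynchronous write of len bytes at offset off; the caller later
 * polls completion with aio_error()/aio_return(). */
static int queue_aio_write(struct aiocb *cb, int fd, void *buf,
			   size_t len, off_t off, int prio)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_buf = buf;
	cb->aio_nbytes = len;
	cb->aio_offset = off;
	cb->aio_reqprio = prio;

	return aio_write(cb);
}
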
314 void perf_mmap__munmap(struct mmap *map) in perf_mmap__munmap() argument
316 perf_mmap__aio_munmap(map); in perf_mmap__munmap()
317 if (map->data != NULL) { in perf_mmap__munmap()
318 munmap(map->data, perf_mmap__mmap_len(map)); in perf_mmap__munmap()
319 map->data = NULL; in perf_mmap__munmap()
321 if (map->core.base != NULL) { in perf_mmap__munmap()
322 munmap(map->core.base, perf_mmap__mmap_len(map)); in perf_mmap__munmap()
323 map->core.base = NULL; in perf_mmap__munmap()
324 map->core.fd = -1; in perf_mmap__munmap()
325 refcount_set(&map->core.refcnt, 0); in perf_mmap__munmap()
327 auxtrace_mmap__munmap(&map->auxtrace_mmap); in perf_mmap__munmap()
341 cpu = cpu_map->map[c]; /* map c index to online cpu index */ in build_node_mask()
347 static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp) in perf_mmap__setup_affinity_mask() argument
349 CPU_ZERO(&map->affinity_mask); in perf_mmap__setup_affinity_mask()
351 build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask); in perf_mmap__setup_affinity_mask()
353 CPU_SET(map->core.cpu, &map->affinity_mask); in perf_mmap__setup_affinity_mask()
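perf_mmap__setup_affinity_mask() builds either a per-NUMA-node or a per-CPU cpu_set_t, depending on mp->affinity. A sketch of how such a mask is typically applied to the recording thread, assuming the glibc CPU_* macros and sched_setaffinity(2):

#define _GNU_SOURCE
#include <sched.h>

/* Illustrative: restrict the calling thread to one CPU; pid 0 means "self". */
static int pin_to_cpu(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);

	return sched_setaffinity(0, sizeof(mask), &mask);
}
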
356 int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu) in perf_mmap__mmap() argument
371 refcount_set(&map->core.refcnt, 2); in perf_mmap__mmap()
372 map->core.prev = 0; in perf_mmap__mmap()
373 map->core.mask = mp->mask; in perf_mmap__mmap()
374 map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, in perf_mmap__mmap()
376 if (map->core.base == MAP_FAILED) { in perf_mmap__mmap()
379 map->core.base = NULL; in perf_mmap__mmap()
382 map->core.fd = fd; in perf_mmap__mmap()
383 map->core.cpu = cpu; in perf_mmap__mmap()
385 perf_mmap__setup_affinity_mask(map, mp); in perf_mmap__mmap()
387 map->core.flush = mp->flush; in perf_mmap__mmap()
389 map->comp_level = mp->comp_level; in perf_mmap__mmap()
391 if (map->comp_level && !perf_mmap__aio_enabled(map)) { in perf_mmap__mmap()
392 map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE, in perf_mmap__mmap()
394 if (map->data == MAP_FAILED) { in perf_mmap__mmap()
397 map->data = NULL; in perf_mmap__mmap()
402 if (auxtrace_mmap__mmap(&map->auxtrace_mmap, in perf_mmap__mmap()
403 &mp->auxtrace_mp, map->core.base, fd)) in perf_mmap__mmap()
406 return perf_mmap__aio_mmap(map, mp); in perf_mmap__mmap()
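perf_mmap__mmap() maps the ring buffer over a perf_event_open() file descriptor: one header page followed by a power-of-two number of data pages; refcount_set(..., 2) reserves one reference for the map itself and one for the consume path. The underlying syscall shape, as a hedged sketch:

#include <stddef.h>
#include <sys/mman.h>

/* Illustrative: map nr_pages data pages plus the perf_event_mmap_page header
 * over a file descriptor returned by perf_event_open(2). */
static void *map_ring(int perf_fd, size_t nr_pages, size_t page_size)
{
	size_t len = (nr_pages + 1) * page_size;
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			  perf_fd, 0);

	return base == MAP_FAILED ? NULL : base;
}
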
478 int perf_mmap__read_init(struct mmap *map) in perf_mmap__read_init() argument
483 if (!refcount_read(&map->core.refcnt)) in perf_mmap__read_init()
486 return __perf_mmap__read_init(map); in perf_mmap__read_init()
490 int push(struct mmap *map, void *to, void *buf, size_t size)) in perf_mmap__push() argument
536 void perf_mmap__read_done(struct mmap *map) in perf_mmap__read_done() argument
541 if (!refcount_read(&map->core.refcnt)) in perf_mmap__read_done()
544 map->core.prev = perf_mmap__read_head(map); in perf_mmap__read_done()
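
Taken together, perf_mmap__read_init(), perf_mmap__read_event(), perf_mmap__consume() and perf_mmap__read_done() form the read sequence over one ring buffer. A usage sketch, with error handling trimmed and assuming perf's internal mmap.h/event.h declarations:

/* Illustrative consumer loop over one mmap'ed ring buffer. */
static void drain_one_mmap(struct mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;			/* e.g. -EAGAIN: nothing to read */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		/* ... process the event here ... */
		perf_mmap__consume(map);
	}

	perf_mmap__read_done(map);
}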