Lines matching "non-pc" in tools/lib/perf/mmap.c (libperf mmap ring-buffer helpers)

// SPDX-License-Identifier: GPL-2.0
/* perf_mmap__init() */
map->fd = -1;
map->overwrite = overwrite;
map->unmap_cb = unmap_cb;
refcount_set(&map->refcnt, 0);
if (prev)
	prev->next = map;
/* perf_mmap__mmap_len() */
return map->mask + 1 + page_size;
/* perf_mmap__mmap() */
map->prev = 0;
map->mask = mp->mask;
map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
		 MAP_SHARED, fd, 0);
if (map->base == MAP_FAILED) {
	map->base = NULL;
	return -1;
}
map->fd = fd;
map->cpu = cpu;
return 0;
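/*
 * Context: a perf mmap is one control page (struct perf_event_mmap_page)
 * followed by a power-of-two data area, so the total length is
 * mask + 1 + page_size, and byte positions wrap via `& mask`. A
 * self-contained sketch of that layout math (hypothetical values, not
 * the libperf code):
 */
#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t page_size = 4096;
	size_t data_pages = 8;			/* must be a power of two */
	size_t mask = data_pages * page_size - 1;

	/* what perf_mmap__mmap_len() computes: data area + control page */
	size_t len = mask + 1 + page_size;

	assert(len == 9 * page_size);
	/* a position past the end wraps back into the data area */
	assert(((mask + 1 + 123) & mask) == 123);
	return 0;
}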
/* perf_mmap__munmap() */
if (map && map->base != NULL) {
	munmap(map->base, perf_mmap__mmap_len(map));
	map->base = NULL;
	map->fd = -1;
	refcount_set(&map->refcnt, 0);
}
if (map && map->unmap_cb)
	map->unmap_cb(map);
/* perf_mmap__get() */
refcount_inc(&map->refcnt);
/* perf_mmap__put() */
BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
if (refcount_dec_and_test(&map->refcnt))
	perf_mmap__munmap(map);
/* perf_mmap__write_tail() */
ring_buffer_write_tail(md->base, tail);
/* perf_mmap__read_head() */
return ring_buffer_read_head(map->base);
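/*
 * Context: data_head/data_tail live in the struct perf_event_mmap_page
 * control page, and the two helpers above wrap the barrier-safe
 * accessors from tools/include/linux/ring_buffer.h. A minimal sketch of
 * the same acquire/release pairing, using GCC/Clang __atomic builtins as
 * a stand-in for the kernel's smp_load_acquire()/smp_store_release()
 * (illustrative only, not the libperf implementation):
 */
#include <linux/perf_event.h>
#include <linux/types.h>

static inline __u64 demo_read_head(struct perf_event_mmap_page *pc)
{
	/* acquire: sample bytes the kernel published before moving
	 * head are guaranteed visible after this load */
	return __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
}

static inline void demo_write_tail(struct perf_event_mmap_page *pc, __u64 tail)
{
	/* release: our reads of consumed samples complete before the
	 * kernel can see the space as reusable */
	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}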
/* perf_mmap__empty() */
struct perf_event_mmap_page *pc = map->base;
return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
/* perf_mmap__consume() */
if (!map->overwrite) {
	u64 old = map->prev;
	perf_mmap__write_tail(map, old);
}
if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
	perf_mmap__put(map);
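/*
 * Note: in non-overwrite mode, consuming publishes the last-read
 * position (map->prev) as the new data_tail, telling the kernel that
 * space may be reused; overwrite mode keeps no tail at all. The
 * refcount check then releases the map once the ring is empty and the
 * consumer holds the last reference.
 */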
/* overwrite_rb_find_range() */
if (evt_head - *start >= (unsigned int)size) {
	if (evt_head - *start > (unsigned int)size)
		evt_head -= pheader->size;	/* rewind partial record */
	/* ... */
}
if (pheader->size == 0) {
	/* ... */
}
evt_head += pheader->size;
/* ... */
return -1;
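/*
 * Context: in overwrite mode there is no tail, so the reader must
 * discover the valid byte range itself: starting at head, it walks the
 * perf_event_header.size chain forward until it has covered one full
 * buffer (rewinding a partial last record) or hits a zero-size header
 * in never-written space. A self-contained sketch of that walk over a
 * fake ring; find_range() and struct hdr are hypothetical demo names,
 * not the libperf code:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct hdr { uint32_t type; uint16_t misc; uint16_t size; };

static int find_range(unsigned char *buf, int mask, uint64_t start,
		      uint64_t *end)
{
	uint64_t evt_head = start;
	int size = mask + 1;
	struct hdr *ph = NULL;

	for (;;) {
		if (evt_head - start >= (unsigned int)size) {
			if (evt_head - start > (unsigned int)size)
				evt_head -= ph->size;	/* partial record */
			*end = evt_head;
			return 0;
		}
		ph = (struct hdr *)&buf[evt_head & mask];
		if (ph->size == 0) {	/* ring was never full */
			*end = evt_head;
			return 0;
		}
		evt_head += ph->size;
	}
}

int main(void)
{
	unsigned char ring[64] = {0};
	struct hdr h = { .size = 16 };
	uint64_t end;

	memcpy(&ring[0],  &h, sizeof(h));	/* two 16-byte records */
	memcpy(&ring[16], &h, sizeof(h));
	find_range(ring, sizeof(ring) - 1, 0, &end);
	printf("valid range: [0, %llu)\n", (unsigned long long)end); /* 32 */
	return 0;
}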
/* __perf_mmap__read_init() */
u64 head = perf_mmap__read_head(md);
u64 old = md->prev;
unsigned char *data = md->base + page_size;
unsigned long size;

md->start = md->overwrite ? head : old;
md->end = md->overwrite ? old : head;

if ((md->end - md->start) < md->flush)
	return -EAGAIN;

size = md->end - md->start;
if (size > (unsigned long)(md->mask) + 1) {
	if (!md->overwrite) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
		md->prev = head;
		perf_mmap__consume(md);
		return -EAGAIN;
	}
	/* the backward ring buffer is full; read what we can from it */
	if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
		return -EINVAL;
}
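/*
 * Context: read_init snapshots the window to read. Non-overwrite mode
 * reads forward from the saved tail (old) to head; overwrite mode reads
 * the region behind head. If the window exceeds the buffer size, the
 * writer has lapped the reader. A self-contained sketch of the window
 * arithmetic; read_window() and struct window are hypothetical demo
 * names, not the libperf code:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct window { uint64_t start, end; };

static int read_window(uint64_t head, uint64_t old, uint64_t mask,
		       bool overwrite, struct window *w)
{
	w->start = overwrite ? head : old;
	w->end   = overwrite ? old  : head;
	if (w->end - w->start > mask + 1)
		return -1;	/* lapped: data was overwritten */
	return 0;
}

int main(void)
{
	struct window w;

	/* 64 KiB ring (mask 0xffff); reader at 0x100, writer at 0x900 */
	assert(read_window(0x900, 0x100, 0xffff, false, &w) == 0);
	assert(w.start == 0x100 && w.end == 0x900);

	/* writer lapped the reader by more than one full buffer */
	assert(read_window(0x20000, 0x100, 0xffff, false, &w) == -1);
	return 0;
}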
/* perf_mmap__read_init() */
if (!refcount_read(&map->refcnt))
	return -ENOENT;
return __perf_mmap__read_init(map);
/* perf_mmap__read_done(): the last perf_mmap__read() set the tail to
 * map->core.prev; correct it to head, which is the end of the next read. */
if (!refcount_read(&map->refcnt))
	return;
map->prev = perf_mmap__read_head(map);
/* perf_mmap__read() */
unsigned char *data = map->base + page_size;
union perf_event *event = NULL;
int diff = end - *startp;

if (diff >= (int)sizeof(event->header)) {
	event = (union perf_event *)&data[*startp & map->mask];
	size_t size = event->header.size;

	if (size < sizeof(event->header) || diff < (int)size)
		return NULL;
	/* Event straddles the mmap boundary -- the header should
	 * always be inside the mmap. */
	if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
		unsigned int offset = *startp, len = min(sizeof(*event), size), cpy;
		void *dst = map->event_copy;

		do {
			cpy = min(map->mask + 1 - (offset & map->mask), len);
			memcpy(dst, &data[offset & map->mask], cpy);
			offset += cpy; dst += cpy; len -= cpy;
		} while (len);
		event = (union perf_event *)map->event_copy;
	}
	/* ... */
}
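/*
 * Context: the data area is a power-of-two ring, so a byte position p
 * maps to data[p & mask], and a record whose size crosses the top of
 * the ring must be copied out in chunks before it can be used as one
 * contiguous struct. Self-contained sketch of that chunked copy;
 * copy_wrapped() is a hypothetical demo name, not the libperf code:
 */
#include <assert.h>
#include <string.h>

#define RING_SIZE 16			/* power of two */
#define MASK (RING_SIZE - 1)

static void copy_wrapped(const unsigned char *data, unsigned int offset,
			 unsigned char *dst, unsigned int len)
{
	while (len) {
		/* bytes left before the top of the ring, capped at len */
		unsigned int cpy = RING_SIZE - (offset & MASK);

		if (cpy > len)
			cpy = len;
		memcpy(dst, &data[offset & MASK], cpy);
		offset += cpy;
		dst += cpy;
		len -= cpy;
	}
}

int main(void)
{
	unsigned char ring[RING_SIZE], out[8];

	for (int i = 0; i < RING_SIZE; i++)
		ring[i] = i;
	/* 8-byte record starting at position 12: wraps after 4 bytes */
	copy_wrapped(ring, 12, out, 8);
	assert(out[0] == 12 && out[3] == 15 && out[4] == 0 && out[7] == 3);
	return 0;
}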
/* perf_mmap__read_event() */
if (!refcount_read(&map->refcnt))
	return NULL;

/* non-overwrite doesn't pause the ring buffer */
if (!map->overwrite)
	map->end = perf_mmap__read_head(map);

event = perf_mmap__read(map, &map->start, map->end);

if (!map->overwrite)
	map->prev = map->start;
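/*
 * Context: together these helpers form libperf's documented consumer
 * loop. A sketch of the calling sequence, assuming the evlist was
 * already set up and mmapped with perf_evlist__mmap(); drain() is a
 * hypothetical wrapper name and error handling is trimmed:
 */
#include <perf/evlist.h>
#include <perf/mmap.h>
#include <perf/event.h>

static void drain(struct perf_evlist *evlist)
{
	struct perf_mmap *map;
	union perf_event *event;

	perf_evlist__for_each_mmap(evlist, map, false) {
		if (perf_mmap__read_init(map) < 0)
			continue;	/* nothing new in this ring */
		while ((event = perf_mmap__read_event(map)) != NULL) {
			/* process event->header.type here ... */
			perf_mmap__consume(map);
		}
		perf_mmap__read_done(map);
	}
}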
/* perf_mmap__read_self() */
struct perf_event_mmap_page *pc = map->base;
u32 seq, idx, time_mult = 0, time_shift = 0;
u64 cnt, cyc = 0, time_offset = 0, time_cycles = 0, time_mask = ~0ULL;

if (!pc || !pc->cap_user_rdpmc)
	return -1;

do {
	seq = READ_ONCE(pc->lock);
	barrier();

	count->ena = READ_ONCE(pc->time_enabled);
	count->run = READ_ONCE(pc->time_running);

	if (pc->cap_user_time && count->ena != count->run) {
		cyc = read_timestamp();
		time_mult = READ_ONCE(pc->time_mult);
		time_shift = READ_ONCE(pc->time_shift);
		time_offset = READ_ONCE(pc->time_offset);

		if (pc->cap_user_time_short) {
			time_cycles = READ_ONCE(pc->time_cycles);
			time_mask = READ_ONCE(pc->time_mask);
		}
	}

	idx = READ_ONCE(pc->index);
	cnt = READ_ONCE(pc->offset);
	if (pc->cap_user_rdpmc && idx) {
		s64 evcnt = read_perf_counter(idx - 1);
		u16 width = READ_ONCE(pc->pmc_width);

		/* sign-extend the pmc_width-bit counter value */
		evcnt <<= 64 - width;
		evcnt >>= 64 - width;
		cnt += evcnt;
	} else
		return -1;

	barrier();
} while (READ_ONCE(pc->lock) != seq);

if (count->ena != count->run) {
	u64 delta;

	/* adjust for cap_user_time_short; a nop if not set */
	cyc = time_cycles + ((cyc - time_cycles) & time_mask);
	delta = time_offset + mul_u64_u32_shr(cyc, time_mult, time_shift);

	count->ena += delta;
	if (idx)
		count->run += delta;

	cnt = mul_u64_u64_div64(cnt, count->ena, count->run);
}

count->val = cnt;
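/*
 * Context: two pieces of arithmetic above deserve a worked example.
 * read_perf_counter() returns a pmc_width-bit value; shifting left then
 * right by (64 - width) sign-extends it so wrap-around deltas stay
 * correct. And when the event was multiplexed (ena != run), the raw
 * count is scaled by enabled/running time. Self-contained sketch with
 * hypothetical values; sign_extend() is a demo name, not libperf code:
 */
#include <assert.h>
#include <stdint.h>

static int64_t sign_extend(uint64_t raw, unsigned int width)
{
	/* move the counter's sign bit to bit 63, then shift back;
	 * the right shift on a signed value is arithmetic in practice */
	return (int64_t)(raw << (64 - width)) >> (64 - width);
}

int main(void)
{
	/* 48-bit PMC read: top bit set means a negative delta ... */
	assert(sign_extend(0xFFFFFFFFFFFFull, 48) == -1);
	/* ... while small positive values pass through unchanged */
	assert(sign_extend(0x1234, 48) == 0x1234);

	/* multiplexed event: ran 1 of 4 time units, counted 1000 */
	uint64_t cnt = 1000, ena = 4, run = 1;
	cnt = cnt * ena / run;	/* libperf uses mul_u64_u64_div64() */
	assert(cnt == 4000);
	return 0;
}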