Lines Matching full:rb (search hits for the identifier rb in libbpf's tools/lib/bpf/ringbuf.c; the number before each fragment is its line in that file)

42 static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)  in ringbuf_unmap_ring()  argument
45 munmap(r->consumer_pos, rb->page_size); in ringbuf_unmap_ring()
49 munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1)); in ringbuf_unmap_ring()
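The unmap length at line 49 is the tell: after the producer page, the ring's data area (r->mask + 1 bytes) is mapped twice back to back, so a record that wraps past the end of the ring is still readable as one contiguous byte range, and the single munmap must cover both copies. Below is a minimal stand-alone sketch of that mirroring trick, assuming Linux with memfd_create(); mirror_map and DATA_SZ are illustrative names, not libbpf's.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define DATA_SZ (64 * 1024)	/* stand-in for r->mask + 1; page multiple */

static void *mirror_map(int fd)
{
	/* Reserve one span big enough for two copies of the data... */
	void *base = mmap(NULL, 2 * DATA_SZ, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return NULL;
	/* ...then map the same file pages into both halves. */
	if (mmap(base, DATA_SZ, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED ||
	    mmap((char *)base + DATA_SZ, DATA_SZ, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) {
		munmap(base, 2 * DATA_SZ);
		return NULL;
	}
	return base;
}

int main(void)
{
	int fd = memfd_create("ring-data", 0);
	char *p;

	if (fd < 0 || ftruncate(fd, DATA_SZ) < 0)
		return 1;
	p = mirror_map(fd);
	if (!p)
		return 1;
	/* Bytes written near the end of the first copy reappear at the
	 * same file offset in the second copy, so a "wrapped" record
	 * reads as one contiguous range. */
	strcpy(p + DATA_SZ - 8, "wrapped");
	printf("%s\n", p + 2 * DATA_SZ - 8);	/* prints "wrapped" */
	/* One munmap covering both copies, like line 49 above. */
	munmap(p, 2 * DATA_SZ);
	close(fd);
	return 0;
}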
55 int ring_buffer__add(struct ring_buffer *rb, int map_fd, in ring_buffer__add() argument
81 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); in ring_buffer__add()
84 rb->rings = tmp; in ring_buffer__add()
86 tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events)); in ring_buffer__add()
89 rb->events = tmp; in ring_buffer__add()
91 r = &rb->rings[rb->ring_cnt]; in ring_buffer__add()
100 tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, in ring_buffer__add()
114 tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ, in ring_buffer__add()
115 MAP_SHARED, map_fd, rb->page_size); in ring_buffer__add()
118 ringbuf_unmap_ring(rb, r); in ring_buffer__add()
124 r->data = tmp + rb->page_size; in ring_buffer__add()
126 e = &rb->events[rb->ring_cnt]; in ring_buffer__add()
130 e->data.fd = rb->ring_cnt; in ring_buffer__add()
131 if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) { in ring_buffer__add()
133 ringbuf_unmap_ring(rb, r); in ring_buffer__add()
139 rb->ring_cnt++; in ring_buffer__add()
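ring_buffer__add() grows the rings and events arrays with libbpf_reallocarray(), maps the consumer page read-write and the producer page plus two copies of the data area read-only, then registers the ring with epoll, stashing the ring's index (not the map fd) in e->data.fd at line 130. Because ring_buffer__add() is public libbpf API, one ring_buffer can multiplex several BPF_MAP_TYPE_RINGBUF maps. A hedged usage sketch follows; map_fd1/map_fd2 and the handle_* callbacks stand in for fds and handlers you would get from your own skeleton, while the libbpf calls themselves are real.

#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_a(void *ctx, void *data, size_t size)
{
	printf("ring A: %zu bytes\n", size);
	return 0;	/* a negative return aborts consumption */
}

static int handle_b(void *ctx, void *data, size_t size)
{
	printf("ring B: %zu bytes\n", size);
	return 0;
}

int setup_rings(int map_fd1, int map_fd2, struct ring_buffer **out)
{
	struct ring_buffer *rb;

	rb = ring_buffer__new(map_fd1, handle_a, NULL, NULL);
	if (!rb)
		return -1;
	/* Each added ring gets its own callback and epoll registration. */
	if (ring_buffer__add(rb, map_fd2, handle_b, NULL) < 0) {
		ring_buffer__free(rb);
		return -1;
	}
	*out = rb;
	return 0;
}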
143 void ring_buffer__free(struct ring_buffer *rb) in ring_buffer__free() argument
147 if (!rb) in ring_buffer__free()
150 for (i = 0; i < rb->ring_cnt; ++i) in ring_buffer__free()
151 ringbuf_unmap_ring(rb, &rb->rings[i]); in ring_buffer__free()
152 if (rb->epoll_fd >= 0) in ring_buffer__free()
153 close(rb->epoll_fd); in ring_buffer__free()
155 free(rb->events); in ring_buffer__free()
156 free(rb->rings); in ring_buffer__free()
157 free(rb); in ring_buffer__free()
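Note the NULL check at line 147: ring_buffer__free(NULL) is a safe no-op, which lets callers funnel every error path through a single cleanup call even when construction only partially succeeded. A small sketch of that pattern, in which only the libbpf calls are real:

#include <bpf/libbpf.h>

int start(int map_fd, ring_buffer_sample_fn cb)
{
	struct ring_buffer *rb = NULL;
	int err = 0;

	rb = ring_buffer__new(map_fd, cb, NULL, NULL);
	if (!rb) {
		err = -1;
		goto out;
	}
	/* ... attach probes, run the event loop ... */
out:
	ring_buffer__free(rb);	/* NULL-safe per the check at line 147 */
	return err;
}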
164 struct ring_buffer *rb; in ring_buffer__new() local
170 rb = calloc(1, sizeof(*rb)); in ring_buffer__new()
171 if (!rb) in ring_buffer__new()
174 rb->page_size = getpagesize(); in ring_buffer__new()
176 rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); in ring_buffer__new()
177 if (rb->epoll_fd < 0) { in ring_buffer__new()
183 err = ring_buffer__add(rb, map_fd, sample_cb, ctx); in ring_buffer__new()
187 return rb; in ring_buffer__new()
190 ring_buffer__free(rb); in ring_buffer__new()
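ring_buffer__new() allocates the context, creates the epoll fd, and adds the first ring, and on any failure it releases everything through ring_buffer__free() (line 190). A minimal end-to-end consumer built on these entry points might look like the sketch below; map_fd and handle_event are placeholders for a real map fd and callback, and the error-handling choices are assumptions about a typical caller, not libbpf code.

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	printf("sample: %zu bytes\n", size);
	return 0;
}

int run(int map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	/* Loop until a real error; a production tool would also check
	 * an exit flag here. */
	for (;;) {
		err = ring_buffer__poll(rb, 100 /* timeout, ms */);
		if (err == -EINTR)	/* interrupted, not fatal */
			continue;
		if (err < 0)
			break;
	}

	ring_buffer__free(rb);
	return err;
}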
250 int ring_buffer__consume(struct ring_buffer *rb) in ring_buffer__consume() argument
254 for (i = 0; i < rb->ring_cnt; i++) { in ring_buffer__consume()
255 struct ring *ring = &rb->rings[i]; in ring_buffer__consume()
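ring_buffer__consume() walks every known ring unconditionally, with no epoll wait, which suits busy-polling callers that trade CPU for latency. A hedged sketch of such a loop, assuming a signal-driven exit flag (the exiting variable and busy_poll name are illustrative):

#include <sched.h>
#include <signal.h>
#include <bpf/libbpf.h>

static volatile sig_atomic_t exiting;

static void on_sigint(int sig) { exiting = 1; }

int busy_poll(struct ring_buffer *rb)
{
	int n;

	signal(SIGINT, on_sigint);
	while (!exiting) {
		n = ring_buffer__consume(rb);	/* records consumed, or negative error */
		if (n < 0)
			return n;
		if (n == 0)
			sched_yield();		/* rings empty; yield the CPU */
	}
	return 0;
}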
269 int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms) in ring_buffer__poll() argument
273 cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms); in ring_buffer__poll()
275 __u32 ring_id = rb->events[i].data.fd; in ring_buffer__poll()
276 struct ring *ring = &rb->rings[ring_id]; in ring_buffer__poll()
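ring_buffer__poll() closes the loop on the epoll setup from lines 130-131: epoll_wait() fills rb->events, and each event's data.fd holds the ring index stored at registration time, so a wakeup maps straight back to rb->rings[ring_id]. The same index-in-epoll_data pattern works for any fd set; here is a generic sketch with illustrative names (struct conn, add_conn, dispatch), not taken from libbpf.

#include <stdio.h>
#include <sys/epoll.h>

struct conn { int fd; const char *name; };

/* Register each fd with its array index stashed in data.fd. */
static int add_conn(int epfd, struct conn *conns, int idx)
{
	struct epoll_event ev = {
		.events = EPOLLIN,
		.data.fd = idx,		/* an index, not a descriptor */
	};

	return epoll_ctl(epfd, EPOLL_CTL_ADD, conns[idx].fd, &ev);
}

/* On wakeup, data.fd leads straight back to the right slot. */
static int dispatch(int epfd, struct conn *conns)
{
	struct epoll_event events[8];
	int i, n;

	n = epoll_wait(epfd, events, 8, -1);
	for (i = 0; i < n; i++)
		printf("activity on %s\n", conns[events[i].data.fd].name);
	return n;
}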