Lines matching full:rb in kernel/bpf/ringbuf.c
52 struct bpf_ringbuf *rb; member
69 struct bpf_ringbuf *rb; in bpf_ringbuf_area_alloc() local
106 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
108 if (rb) { in bpf_ringbuf_area_alloc()
110 rb->pages = pages; in bpf_ringbuf_area_alloc()
111 rb->nr_pages = nr_pages; in bpf_ringbuf_area_alloc()
112 return rb; in bpf_ringbuf_area_alloc()
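Lines 106-112 above come from bpf_ringbuf_area_alloc(), which vmap()s the data pages twice in a row (nr_meta_pages + 2 * nr_data_pages), so a record that starts near the end of the ring stays virtually contiguous across the wrap-around. A minimal sketch of why that helps; record_addr() is purely illustrative and not a function from this file:

/*
 * Illustration only: with the data pages mapped twice back to back,
 * data + (pos & mask) is always a contiguous window of up to ring-size
 * bytes, even when (pos & mask) lands near the end of the first copy,
 * so neither producer nor consumer has to split a record at the wrap.
 */
static void *record_addr(void *data, unsigned long pos, unsigned long mask)
{
	return data + (pos & mask);	/* may fall into the second mapping */
}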
124 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify() local
126 wake_up_all(&rb->waitq); in bpf_ringbuf_notify()
131 struct bpf_ringbuf *rb; in bpf_ringbuf_alloc() local
133 rb = bpf_ringbuf_area_alloc(data_sz, numa_node); in bpf_ringbuf_alloc()
134 if (!rb) in bpf_ringbuf_alloc()
137 spin_lock_init(&rb->spinlock); in bpf_ringbuf_alloc()
138 init_waitqueue_head(&rb->waitq); in bpf_ringbuf_alloc()
139 init_irq_work(&rb->work, bpf_ringbuf_notify); in bpf_ringbuf_alloc()
141 rb->mask = data_sz - 1; in bpf_ringbuf_alloc()
142 rb->consumer_pos = 0; in bpf_ringbuf_alloc()
143 rb->producer_pos = 0; in bpf_ringbuf_alloc()
145 return rb; in bpf_ringbuf_alloc()
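bpf_ringbuf_alloc() at lines 131-145 sets rb->mask = data_sz - 1, which only works because the data size must be a power of two (and page-aligned); the size arrives as the map's max_entries, as seen at line 172. A minimal BPF-side map definition sketch, assuming libbpf conventions; the map name rb and the 256 KiB size are arbitrary choices:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Ring buffer map: max_entries is the data area size in bytes and must
 * be a power of two and a multiple of the page size. */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

char LICENSE[] SEC("license") = "GPL";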
172 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
173 if (!rb_map->rb) { in ringbuf_map_alloc()
181 static void bpf_ringbuf_free(struct bpf_ringbuf *rb) in bpf_ringbuf_free() argument
184 * to unmap rb itself with vunmap() below in bpf_ringbuf_free()
186 struct page **pages = rb->pages; in bpf_ringbuf_free()
187 int i, nr_pages = rb->nr_pages; in bpf_ringbuf_free()
189 vunmap(rb); in bpf_ringbuf_free()
200 bpf_ringbuf_free(rb_map->rb); in ringbuf_map_free()
240 return remap_vmalloc_range(vma, rb_map->rb, in ringbuf_map_mmap()
244 static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb) in ringbuf_avail_data_sz() argument
248 cons_pos = smp_load_acquire(&rb->consumer_pos); in ringbuf_avail_data_sz()
249 prod_pos = smp_load_acquire(&rb->producer_pos); in ringbuf_avail_data_sz()
259 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll()
261 if (ringbuf_avail_data_sz(rb_map->rb)) in ringbuf_map_poll()
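ringbuf_map_mmap() (line 240) maps the ring into user space with remap_vmalloc_range(), and ringbuf_map_poll() (lines 259-261) reports the fd readable whenever ringbuf_avail_data_sz() is non-zero. User space normally drives both through libbpf's ring_buffer API rather than raw mmap()/poll(); a minimal consumer sketch, where handle_event() and the 100 ms timeout are arbitrary:

#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t len)
{
	/* data points straight into the mmap'ed data area; it is only
	 * valid for the duration of this callback. */
	printf("got %zu-byte record\n", len);
	return 0;
}

static int consume_ring(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(ringbuf_map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	/* Polls the map fd under the hood; callbacks run inside poll. */
	while ((err = ring_buffer__poll(rb, 100 /* ms */)) >= 0)
		;

	ring_buffer__free(rb);
	return err;
}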
287 static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb, in bpf_ringbuf_rec_pg_off() argument
290 return ((void *)hdr - (void *)rb) >> PAGE_SHIFT; in bpf_ringbuf_rec_pg_off()
305 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size) in __bpf_ringbuf_reserve() argument
315 if (len > rb->mask + 1) in __bpf_ringbuf_reserve()
318 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_ringbuf_reserve()
321 if (!spin_trylock_irqsave(&rb->spinlock, flags)) in __bpf_ringbuf_reserve()
324 spin_lock_irqsave(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
327 prod_pos = rb->producer_pos; in __bpf_ringbuf_reserve()
333 if (new_prod_pos - cons_pos > rb->mask) { in __bpf_ringbuf_reserve()
334 spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
338 hdr = (void *)rb->data + (prod_pos & rb->mask); in __bpf_ringbuf_reserve()
339 pg_off = bpf_ringbuf_rec_pg_off(rb, hdr); in __bpf_ringbuf_reserve()
344 smp_store_release(&rb->producer_pos, new_prod_pos); in __bpf_ringbuf_reserve()
346 spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
359 return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_3()
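__bpf_ringbuf_reserve() (lines 305-346) is the core of the bpf_ringbuf_reserve() helper wrapped by the BPF_CALL_3 at line 359: it bounds-checks the requested size, takes the spinlock (only try-locking it in contexts such as NMI, as the trylock path at line 321 shows), checks free space against rb->mask, and publishes the new producer position with smp_store_release(). On the BPF program side the reserve/commit pairing looks like the sketch below, which reuses the rb map from the earlier sketch; struct event and the tracepoint are arbitrary:

struct event {
	int pid;
	char comm[16];
};

SEC("tp/sched/sched_process_exec")
int handle_exec(void *ctx)
{
	struct event *e;

	/* Reserve space directly in the ring buffer; returns NULL when
	 * the ring is full or the size check in the kernel fails. */
	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(e->comm, sizeof(e->comm));

	/* Commit makes the record visible; BPF_RB_NO_WAKEUP or
	 * BPF_RB_FORCE_WAKEUP here override the adaptive wakeup that
	 * bpf_ringbuf_commit() applies below. */
	bpf_ringbuf_submit(e, 0);
	return 0;
}

bpf_ringbuf_discard() is the counterpart for dropping a reserved record instead of committing it.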
374 struct bpf_ringbuf *rb; in bpf_ringbuf_commit() local
378 rb = bpf_ringbuf_restore_from_rec(hdr); in bpf_ringbuf_commit()
389 rec_pos = (void *)hdr - (void *)rb->data; in bpf_ringbuf_commit()
390 cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask; in bpf_ringbuf_commit()
393 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
395 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
434 rec = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
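The BPF_CALL_4 at line 434 backs bpf_ringbuf_output(), which reserves (as the matched line shows), copies the caller's buffer in, and commits in one step; it avoids the reserve/submit pairing at the cost of an extra copy. A minimal sketch, again reusing the rb map and struct event from the sketches above; the tracepoint is arbitrary:

SEC("tp/sched/sched_process_exit")
int handle_exit(void *ctx)
{
	struct event e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(e.comm, sizeof(e.comm));

	/* Copy-based variant: reserve + copy + commit in a single helper. */
	bpf_ringbuf_output(&rb, &e, sizeof(e), 0);
	return 0;
}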
454 struct bpf_ringbuf *rb; in BPF_CALL_2() local
456 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_2()
460 return ringbuf_avail_data_sz(rb); in BPF_CALL_2()
462 return rb->mask + 1; in BPF_CALL_2()
464 return smp_load_acquire(&rb->consumer_pos); in BPF_CALL_2()
466 return smp_load_acquire(&rb->producer_pos); in BPF_CALL_2()
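The BPF_CALL_2 at lines 454-466 implements bpf_ringbuf_query(): depending on the flag it returns the available data size, the total ring size (rb->mask + 1), or the current consumer/producer positions (BPF_RB_CONS_POS / BPF_RB_PROD_POS). A minimal sketch, reusing the rb map from above; the half-full threshold and the tracepoint are arbitrary, and the returned values are only momentary snapshots:

SEC("tp/sched/sched_switch")
int report_fill(void *ctx)
{
	__u64 avail = bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA);
	__u64 size  = bpf_ringbuf_query(&rb, BPF_RB_RING_SIZE);

	/* Both values may already be stale; treat them as hints only. */
	if (avail > size / 2)
		bpf_printk("ring buffer more than half full");
	return 0;
}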