Lines matching refs:rb (all hits below are in kernel/events/ring_buffer.c)
23 atomic_set(&handle->rb->poll, EPOLLIN); in perf_output_wakeup()
39 struct ring_buffer *rb = handle->rb; in perf_output_get_handle() local
42 local_inc(&rb->nest); in perf_output_get_handle()
43 handle->wakeup = local_read(&rb->wakeup); in perf_output_get_handle()
48 struct ring_buffer *rb = handle->rb; in perf_output_put_handle() local
52 head = local_read(&rb->head); in perf_output_put_handle()
58 if (!local_dec_and_test(&rb->nest)) in perf_output_put_handle()
88 rb->user_page->data_head = head; in perf_output_put_handle()
94 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
95 local_inc(&rb->nest); in perf_output_put_handle()
99 if (handle->wakeup != local_read(&rb->wakeup)) in perf_output_put_handle()
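The get/put pair above implements the nested-writer protocol: an interrupt or NMI can start a new output on the same CPU while another is in flight, so rb->nest counts the nesting depth and only the outermost writer publishes data_head; after publishing it must re-check rb->head and go around again if a nested writer advanced it in the meantime. Below is a minimal userspace model of that publish step, using C11 atomics in place of the kernel's local_t; struct buf and buf_publish() are illustrative names, not kernel APIs.

    #include <stdatomic.h>

    struct buf {
        atomic_ulong head;        /* write cursor (rb->head)             */
        atomic_ulong nest;        /* writer nesting depth (rb->nest)     */
        atomic_ulong data_head;   /* cursor published to the reader      */
    };

    /* Model of perf_output_put_handle(): only the outermost writer publishes. */
    static void buf_publish(struct buf *b)
    {
        unsigned long head;
    again:
        head = atomic_load(&b->head);

        /* A nested writer just drops its nesting level and returns. */
        if (atomic_fetch_sub(&b->nest, 1) != 1)
            return;

        /* Outermost writer: make everything up to head visible. */
        atomic_store(&b->data_head, head);

        /*
         * If head moved while we were publishing, a nested writer ran in
         * between; take the outermost role back and publish once more.
         */
        if (head != atomic_load(&b->head)) {
            atomic_fetch_add(&b->nest, 1);
            goto again;
        }
    }

In the real code the matching perf_output_get_handle() (lines 39-43 above) is what bumps rb->nest and snapshots rb->wakeup before any data is written.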
122 struct ring_buffer *rb; in __perf_output_begin() local
138 rb = rcu_dereference(event->rb); in __perf_output_begin()
139 if (unlikely(!rb)) in __perf_output_begin()
142 if (unlikely(rb->paused)) { in __perf_output_begin()
143 if (rb->nr_pages) in __perf_output_begin()
144 local_inc(&rb->lost); in __perf_output_begin()
148 handle->rb = rb; in __perf_output_begin()
151 have_lost = local_read(&rb->lost); in __perf_output_begin()
161 tail = READ_ONCE(rb->user_page->data_tail); in __perf_output_begin()
162 offset = head = local_read(&rb->head); in __perf_output_begin()
163 if (!rb->overwrite) { in __perf_output_begin()
165 perf_data_size(rb), in __perf_output_begin()
186 } while (local_cmpxchg(&rb->head, offset, head) != offset); in __perf_output_begin()
198 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) in __perf_output_begin()
199 local_add(rb->watermark, &rb->wakeup); in __perf_output_begin()
201 page_shift = PAGE_SHIFT + page_order(rb); in __perf_output_begin()
203 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
205 handle->addr = rb->data_pages[handle->page] + offset; in __perf_output_begin()
215 lost_event.lost = local_xchg(&rb->lost, 0); in __perf_output_begin()
226 local_inc(&rb->lost); in __perf_output_begin()
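__perf_output_begin() reserves room for a record by moving rb->head forward with a cmpxchg loop: read the reader's data_tail, check the free space with CIRC_SPACE() unless the buffer is in overwrite mode, count a lost record and fail if it does not fit, otherwise try to advance head by the record size and retry if another writer won the race. A compact userspace sketch of that reservation, with C11 atomics standing in for local_cmpxchg(); buf_reserve() and BUF_SIZE are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define BUF_SIZE (1UL << 16)                      /* data size, power of two */
    #define CIRC_SPACE(h, t, sz) (((t) - ((h) + 1)) & ((sz) - 1))

    struct buf {
        atomic_ulong head;   /* writer cursor (rb->head)                 */
        atomic_ulong tail;   /* reader cursor (user_page->data_tail)     */
        atomic_ulong lost;   /* records dropped for lack of space        */
        bool overwrite;      /* overwrite buffers never refuse a record  */
    };

    /* Returns the reserved byte offset, or -1 if the record was dropped. */
    static long buf_reserve(struct buf *b, unsigned long size)
    {
        unsigned long offset, head, tail;

        do {
            tail   = atomic_load(&b->tail);
            offset = head = atomic_load(&b->head);
            if (!b->overwrite &&
                CIRC_SPACE(head, tail, BUF_SIZE) < size) {
                atomic_fetch_add(&b->lost, 1);
                return -1;
            }
            head += size;
        } while (!atomic_compare_exchange_strong(&b->head, &offset, head));

        /* The caller masks offset into the buffer, e.g. offset & (BUF_SIZE - 1). */
        return (long)offset;
    }

The kernel then turns the reserved offset into a page index plus in-page offset (lines 201-205 above) and arms a wakeup once head has moved more than rb->watermark past rb->wakeup (lines 198-199).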
273 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) in ring_buffer_init() argument
275 long max_size = perf_data_size(rb); in ring_buffer_init()
278 rb->watermark = min(max_size, watermark); in ring_buffer_init()
280 if (!rb->watermark) in ring_buffer_init()
281 rb->watermark = max_size / 2; in ring_buffer_init()
284 rb->overwrite = 0; in ring_buffer_init()
286 rb->overwrite = 1; in ring_buffer_init()
288 atomic_set(&rb->refcount, 1); in ring_buffer_init()
290 INIT_LIST_HEAD(&rb->event_list); in ring_buffer_init()
291 spin_lock_init(&rb->event_lock); in ring_buffer_init()
297 if (!rb->nr_pages) in ring_buffer_init()
298 rb->paused = 1; in ring_buffer_init()
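ring_buffer_init() clamps the requested watermark to the data size and, when none was requested, defaults it to half the buffer so wakeups happen well before the buffer fills; a buffer with no data pages starts paused (lines 297-298). A tiny sketch of the watermark selection with an illustrative helper name:

    /* Pick the wakeup watermark the way ring_buffer_init() does (sketch). */
    static long pick_watermark(long data_size, long requested)
    {
        long wm = requested < data_size ? requested : data_size;

        if (!wm)                 /* no explicit request: wake at half full */
            wm = data_size / 2;
        return wm;
    }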
333 struct ring_buffer *rb; in perf_aux_output_begin() local
343 rb = ring_buffer_get(output_event); in perf_aux_output_begin()
344 if (!rb) in perf_aux_output_begin()
347 if (!rb_has_aux(rb)) in perf_aux_output_begin()
358 if (!atomic_read(&rb->aux_mmap_count)) in perf_aux_output_begin()
361 if (!atomic_inc_not_zero(&rb->aux_refcount)) in perf_aux_output_begin()
368 if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1))) in perf_aux_output_begin()
371 aux_head = rb->aux_head; in perf_aux_output_begin()
373 handle->rb = rb; in perf_aux_output_begin()
384 if (!rb->aux_overwrite) { in perf_aux_output_begin()
385 aux_tail = READ_ONCE(rb->user_page->aux_tail); in perf_aux_output_begin()
386 handle->wakeup = rb->aux_wakeup + rb->aux_watermark; in perf_aux_output_begin()
387 if (aux_head - aux_tail < perf_aux_size(rb)) in perf_aux_output_begin()
388 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); in perf_aux_output_begin()
398 local_set(&rb->aux_nest, 0); in perf_aux_output_begin()
403 return handle->rb->aux_priv; in perf_aux_output_begin()
407 rb_free_aux(rb); in perf_aux_output_begin()
410 ring_buffer_put(rb); in perf_aux_output_begin()
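perf_aux_output_begin() takes three guards before handing out AUX space: the buffer must still be mmapped (aux_mmap_count), a reference must be taken so the area cannot be freed mid-write (aux_refcount via atomic_inc_not_zero), and aux_nest must flip from 0 to 1 so recursive AUX writers are rejected. A userspace model of that guard sequence with C11 atomics; aux_begin()/aux_end() are illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct aux_buf {
        atomic_int  mmap_count;   /* still mapped by user space?           */
        atomic_int  refcount;     /* holds the buffer alive during a write */
        atomic_int  nest;         /* 0 or 1: rejects recursive AUX writers */
    };

    static bool aux_begin(struct aux_buf *a)
    {
        int ref;

        if (!atomic_load(&a->mmap_count))        /* unmapped: nothing to do */
            return false;

        /* Take a reference only if the buffer is not already being freed. */
        ref = atomic_load(&a->refcount);
        do {
            if (!ref)
                return false;
        } while (!atomic_compare_exchange_weak(&a->refcount, &ref, ref + 1));

        /* Only one AUX writer at a time: nest must go 0 -> 1. */
        if (atomic_exchange(&a->nest, 1)) {
            atomic_fetch_sub(&a->refcount, 1);
            return false;
        }
        return true;
    }

    static void aux_end(struct aux_buf *a)
    {
        atomic_store(&a->nest, 0);
        atomic_fetch_sub(&a->refcount, 1);
    }

Once the guards pass, the available space is computed from aux_head/aux_tail with CIRC_SPACE() exactly as in the normal data path (lines 384-388 above).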
417 static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb) in rb_need_aux_wakeup() argument
419 if (rb->aux_overwrite) in rb_need_aux_wakeup()
422 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { in rb_need_aux_wakeup()
423 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark); in rb_need_aux_wakeup()
443 struct ring_buffer *rb = handle->rb; in perf_aux_output_end() local
447 if (rb->aux_overwrite) { in perf_aux_output_end()
451 rb->aux_head = aux_head; in perf_aux_output_end()
455 aux_head = rb->aux_head; in perf_aux_output_end()
456 rb->aux_head += size; in perf_aux_output_end()
468 rb->user_page->aux_head = rb->aux_head; in perf_aux_output_end()
469 if (rb_need_aux_wakeup(rb)) in perf_aux_output_end()
480 local_set(&rb->aux_nest, 0); in perf_aux_output_end()
482 rb_free_aux(rb); in perf_aux_output_end()
483 ring_buffer_put(rb); in perf_aux_output_end()
493 struct ring_buffer *rb = handle->rb; in perf_aux_output_skip() local
498 rb->aux_head += size; in perf_aux_output_skip()
500 rb->user_page->aux_head = rb->aux_head; in perf_aux_output_skip()
501 if (rb_need_aux_wakeup(rb)) { in perf_aux_output_skip()
503 handle->wakeup = rb->aux_wakeup + rb->aux_watermark; in perf_aux_output_skip()
506 handle->head = rb->aux_head; in perf_aux_output_skip()
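perf_aux_output_end() and perf_aux_output_skip() advance rb->aux_head, publish it to user_page->aux_head, and decide whether the reader should be woken: in non-overwrite mode a wakeup is due once aux_head has moved at least aux_watermark bytes past the last wakeup point, and aux_wakeup is then rounded down to a watermark multiple (rb_need_aux_wakeup(), lines 417-423). A small sketch of that bookkeeping; the struct and aux_advance() are illustrative:

    #include <stdbool.h>

    struct aux_state {
        unsigned long head;        /* rb->aux_head                         */
        unsigned long wakeup;      /* position the last wakeup was armed at */
        unsigned long watermark;   /* rb->aux_watermark                    */
        bool overwrite;            /* overwrite mode never wakes readers   */
    };

    /* Advance the AUX head by size; return true if the reader should be woken. */
    static bool aux_advance(struct aux_state *s, unsigned long size)
    {
        s->head += size;           /* published as user_page->aux_head     */

        if (s->overwrite)
            return false;

        if (s->head - s->wakeup >= s->watermark) {
            /* Re-arm at the last watermark boundary we crossed. */
            s->wakeup = s->head - (s->head % s->watermark);
            return true;
        }
        return false;
    }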
519 return handle->rb->aux_priv; in perf_get_aux()
551 static void rb_free_aux_page(struct ring_buffer *rb, int idx) in rb_free_aux_page() argument
553 struct page *page = virt_to_page(rb->aux_pages[idx]); in rb_free_aux_page()
560 static void __rb_free_aux(struct ring_buffer *rb) in __rb_free_aux() argument
572 if (rb->aux_priv) { in __rb_free_aux()
573 rb->free_aux(rb->aux_priv); in __rb_free_aux()
574 rb->free_aux = NULL; in __rb_free_aux()
575 rb->aux_priv = NULL; in __rb_free_aux()
578 if (rb->aux_nr_pages) { in __rb_free_aux()
579 for (pg = 0; pg < rb->aux_nr_pages; pg++) in __rb_free_aux()
580 rb_free_aux_page(rb, pg); in __rb_free_aux()
582 kfree(rb->aux_pages); in __rb_free_aux()
583 rb->aux_nr_pages = 0; in __rb_free_aux()
587 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, in rb_alloc_aux() argument
617 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, in rb_alloc_aux()
619 if (!rb->aux_pages) in rb_alloc_aux()
622 rb->free_aux = event->pmu->free_aux; in rb_alloc_aux()
623 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
627 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
632 for (last = rb->aux_nr_pages + (1 << page_private(page)); in rb_alloc_aux()
633 last > rb->aux_nr_pages; rb->aux_nr_pages++) in rb_alloc_aux()
634 rb->aux_pages[rb->aux_nr_pages] = page_address(page++); in rb_alloc_aux()
645 struct page *page = virt_to_page(rb->aux_pages[0]); in rb_alloc_aux()
651 rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, in rb_alloc_aux()
653 if (!rb->aux_priv) in rb_alloc_aux()
664 atomic_set(&rb->aux_refcount, 1); in rb_alloc_aux()
666 rb->aux_overwrite = overwrite; in rb_alloc_aux()
667 rb->aux_watermark = watermark; in rb_alloc_aux()
669 if (!rb->aux_watermark && !rb->aux_overwrite) in rb_alloc_aux()
670 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1); in rb_alloc_aux()
674 rb->aux_pgoff = pgoff; in rb_alloc_aux()
676 __rb_free_aux(rb); in rb_alloc_aux()
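rb_alloc_aux() fills the AUX page array using the largest allocation order that still fits what remains (order = min(max_order, ilog2(nr_pages - got))), records each chunk's actual order via page_private() so it can be walked and freed as one unit (lines 632-634 above, and the allocator may hand back a smaller chunk than requested), and finally defaults the wakeup watermark to half the buffer when none was given. The chunking arithmetic can be sketched on its own; fit_order() and aux_plan_chunks() are illustrative helpers, not kernel functions:

    #include <stdio.h>

    /* Largest power-of-two order that covers at most n pages (floor of log2). */
    static int fit_order(int n, int max_order)
    {
        int order = 0;

        while ((2 << order) <= n)
            order++;
        return order < max_order ? order : max_order;
    }

    /* Print the chunk sizes rb_alloc_aux() would request for nr_pages pages. */
    static void aux_plan_chunks(int nr_pages, int max_order)
    {
        int got = 0;

        while (got < nr_pages) {
            int order = fit_order(nr_pages - got, max_order);

            printf("chunk of %d page(s) at index %d\n", 1 << order, got);
            got += 1 << order;      /* kernel: rb->aux_nr_pages advances likewise */
        }
    }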
681 void rb_free_aux(struct ring_buffer *rb) in rb_free_aux() argument
683 if (atomic_dec_and_test(&rb->aux_refcount)) in rb_free_aux()
684 __rb_free_aux(rb); in rb_free_aux()
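rb_free_aux() is the matching drop for the reference taken in perf_aux_output_begin(): whoever brings aux_refcount to zero runs __rb_free_aux(), which releases the PMU's private data before the backing pages. This is the usual atomic_dec_and_test() teardown; a userspace model with illustrative names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct aux_area {
        atomic_int  refcount;
        void       *pmu_priv;     /* rb->aux_priv, freed via rb->free_aux() */
        void      **pages;        /* rb->aux_pages                          */
        int         nr_pages;
    };

    static void aux_destroy(struct aux_area *a)
    {
        /* PMU private state first, then the backing pages, as in __rb_free_aux(). */
        free(a->pmu_priv);
        a->pmu_priv = NULL;

        for (int i = 0; i < a->nr_pages; i++)
            free(a->pages[i]);
        free(a->pages);
        a->nr_pages = 0;
    }

    static void aux_put(struct aux_area *a)
    {
        /* The last reference dropper does the teardown. */
        if (atomic_fetch_sub(&a->refcount, 1) == 1)
            aux_destroy(a);
    }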
694 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
696 if (pgoff > rb->nr_pages) in __perf_mmap_to_page()
700 return virt_to_page(rb->user_page); in __perf_mmap_to_page()
702 return virt_to_page(rb->data_pages[pgoff - 1]); in __perf_mmap_to_page()
720 struct ring_buffer *rb; in rb_alloc() local
727 rb = kzalloc(size, GFP_KERNEL); in rb_alloc()
728 if (!rb) in rb_alloc()
731 rb->user_page = perf_mmap_alloc_page(cpu); in rb_alloc()
732 if (!rb->user_page) in rb_alloc()
736 rb->data_pages[i] = perf_mmap_alloc_page(cpu); in rb_alloc()
737 if (!rb->data_pages[i]) in rb_alloc()
741 rb->nr_pages = nr_pages; in rb_alloc()
743 ring_buffer_init(rb, watermark, flags); in rb_alloc()
745 return rb; in rb_alloc()
749 free_page((unsigned long)rb->data_pages[i]); in rb_alloc()
751 free_page((unsigned long)rb->user_page); in rb_alloc()
754 kfree(rb); in rb_alloc()
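Two rb_alloc() variants appear in this file: the one above allocates the user (meta) page and each data page separately and unwinds everything on the first failure, while the vmalloc-based variant further down (lines 826-855) grabs one contiguous area and treats it as a single high-order "page" recorded in rb->page_order. The error-unwinding shape of the per-page variant is the standard ladder; a userspace sketch with illustrative names (alloc_buffer(), struct buffer, a fixed 4096-byte page):

    #include <stdlib.h>

    struct buffer {
        void *user_page;
        int   nr_pages;
        void *data_pages[];        /* flexible array, like rb->data_pages */
    };

    static struct buffer *alloc_buffer(int nr_pages)
    {
        struct buffer *b;
        int i;

        b = calloc(1, sizeof(*b) + nr_pages * sizeof(void *));
        if (!b)
            return NULL;

        b->user_page = calloc(1, 4096);
        if (!b->user_page)
            goto fail;

        for (i = 0; i < nr_pages; i++) {
            b->data_pages[i] = calloc(1, 4096);
            if (!b->data_pages[i])
                goto fail_pages;
        }
        b->nr_pages = nr_pages;
        return b;

    fail_pages:
        while (i-- > 0)            /* free only what was allocated so far */
            free(b->data_pages[i]);
        free(b->user_page);
    fail:
        free(b);
        return NULL;
    }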
768 void rb_free(struct ring_buffer *rb) in rb_free() argument
772 perf_mmap_free_page((unsigned long)rb->user_page); in rb_free()
773 for (i = 0; i < rb->nr_pages; i++) in rb_free()
774 perf_mmap_free_page((unsigned long)rb->data_pages[i]); in rb_free()
775 kfree(rb); in rb_free()
779 static int data_page_nr(struct ring_buffer *rb) in data_page_nr() argument
781 return rb->nr_pages << page_order(rb); in data_page_nr()
785 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
788 if (pgoff > data_page_nr(rb)) in __perf_mmap_to_page()
791 return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); in __perf_mmap_to_page()
803 struct ring_buffer *rb; in rb_free_work() local
807 rb = container_of(work, struct ring_buffer, work); in rb_free_work()
808 nr = data_page_nr(rb); in rb_free_work()
810 base = rb->user_page; in rb_free_work()
816 kfree(rb); in rb_free_work()
819 void rb_free(struct ring_buffer *rb) in rb_free() argument
821 schedule_work(&rb->work); in rb_free()
826 struct ring_buffer *rb; in rb_alloc() local
833 rb = kzalloc(size, GFP_KERNEL); in rb_alloc()
834 if (!rb) in rb_alloc()
837 INIT_WORK(&rb->work, rb_free_work); in rb_alloc()
843 rb->user_page = all_buf; in rb_alloc()
844 rb->data_pages[0] = all_buf + PAGE_SIZE; in rb_alloc()
846 rb->nr_pages = 1; in rb_alloc()
847 rb->page_order = ilog2(nr_pages); in rb_alloc()
850 ring_buffer_init(rb, watermark, flags); in rb_alloc()
852 return rb; in rb_alloc()
855 kfree(rb); in rb_alloc()
864 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) in perf_mmap_to_page() argument
866 if (rb->aux_nr_pages) { in perf_mmap_to_page()
868 if (pgoff > rb->aux_pgoff + rb->aux_nr_pages) in perf_mmap_to_page()
872 if (pgoff >= rb->aux_pgoff) { in perf_mmap_to_page()
873 int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages); in perf_mmap_to_page()
874 return virt_to_page(rb->aux_pages[aux_pgoff]); in perf_mmap_to_page()
878 return __perf_mmap_to_page(rb, pgoff); in perf_mmap_to_page()
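perf_mmap_to_page() routes a fault's page offset to the right backing page: offsets in the AUX window (at or past aux_pgoff) index rb->aux_pages, with array_index_nospec() clamping the index so a mispredicted bounds check cannot be used to read out of bounds speculatively; everything else falls through to __perf_mmap_to_page(), where offset 0 is the user/meta page and the remaining offsets are data pages (contiguous or vmalloc-backed, depending on the variant above). A condensed userspace sketch of that routing; lookup_page() and struct rbuf are illustrative:

    #include <stddef.h>

    struct rbuf {
        void          *user_page;
        void         **data_pages;
        unsigned long  nr_pages;
        void         **aux_pages;
        unsigned long  aux_pgoff;
        unsigned long  aux_nr_pages;
    };

    static void *lookup_page(struct rbuf *rb, unsigned long pgoff)
    {
        /* AUX window first: offsets at or past aux_pgoff. */
        if (rb->aux_nr_pages && pgoff >= rb->aux_pgoff &&
            pgoff <= rb->aux_pgoff + rb->aux_nr_pages) {
            unsigned long idx = pgoff - rb->aux_pgoff;

            if (idx >= rb->aux_nr_pages)   /* kernel clamps speculation here */
                return NULL;               /* with array_index_nospec()       */
            return rb->aux_pages[idx];
        }

        /* Normal window: page 0 is the meta page, the rest are data pages. */
        if (pgoff > rb->nr_pages)
            return NULL;
        if (pgoff == 0)
            return rb->user_page;
        return rb->data_pages[pgoff - 1];
    }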