Lines Matching full:rb
22 atomic_set(&handle->rb->poll, EPOLLIN); in perf_output_wakeup()
38 struct perf_buffer *rb = handle->rb; in perf_output_get_handle() local
46 (*(volatile unsigned int *)&rb->nest)++; in perf_output_get_handle()
47 handle->wakeup = local_read(&rb->wakeup); in perf_output_get_handle()
52 struct perf_buffer *rb = handle->rb; in perf_output_put_handle() local
58 * @rb->user_page->data_head. in perf_output_put_handle()
60 nest = READ_ONCE(rb->nest); in perf_output_put_handle()
62 WRITE_ONCE(rb->nest, nest - 1); in perf_output_put_handle()
69 * we must ensure the load of @rb->head happens after we've in perf_output_put_handle()
70 * incremented @rb->nest. in perf_output_put_handle()
72 * Otherwise we can observe a @rb->head value before one published in perf_output_put_handle()
76 head = local_read(&rb->head); in perf_output_put_handle()
79 * IRQ/NMI can happen here and advance @rb->head, causing our in perf_output_put_handle()
110 WRITE_ONCE(rb->user_page->data_head, head); in perf_output_put_handle()
118 WRITE_ONCE(rb->nest, 0); in perf_output_put_handle()
121 * Ensure we decrement @rb->nest before we validate the @rb->head. in perf_output_put_handle()
125 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
126 WRITE_ONCE(rb->nest, 1); in perf_output_put_handle()
130 if (handle->wakeup != local_read(&rb->wakeup)) in perf_output_put_handle()
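The perf_output_put_handle() matches above (lines 58-130, apparently from the perf ring-buffer code in kernel/events/ring_buffer.c) all serve one rule: data becomes visible to user space only through the store to rb->user_page->data_head, and only once the outermost nesting level finishes. Below is a minimal user-space sketch of that publish-last pattern using C11 atomics; it is an illustration under simplified assumptions (single producer, no wrap handling, no nesting), not the kernel's barrier code, and the demo_* names are invented here.

#include <stdatomic.h>
#include <stddef.h>
#include <string.h>

#define DEMO_SIZE 4096u                      /* power of two, like the perf data area */

struct demo_rb {
    unsigned char data[DEMO_SIZE];
    _Atomic size_t head;                     /* producer writes, consumer reads */
};

/* Copy a record into the buffer, then publish it by advancing head.
 * The release store keeps the memcpy() ordered before the new head,
 * which is the ordering the comments at lines 58-76 describe. */
static void demo_publish(struct demo_rb *rb, const void *rec, size_t len)
{
    size_t head = atomic_load_explicit(&rb->head, memory_order_relaxed);

    memcpy(&rb->data[head & (DEMO_SIZE - 1)], rec, len);     /* assumes no wrap */

    atomic_store_explicit(&rb->head, head + len, memory_order_release);
}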
154 struct perf_buffer *rb; in __perf_output_begin() local
170 rb = rcu_dereference(event->rb); in __perf_output_begin()
171 if (unlikely(!rb)) in __perf_output_begin()
174 if (unlikely(rb->paused)) { in __perf_output_begin()
175 if (rb->nr_pages) in __perf_output_begin()
176 local_inc(&rb->lost); in __perf_output_begin()
180 handle->rb = rb; in __perf_output_begin()
183 have_lost = local_read(&rb->lost); in __perf_output_begin()
193 tail = READ_ONCE(rb->user_page->data_tail); in __perf_output_begin()
194 offset = head = local_read(&rb->head); in __perf_output_begin()
195 if (!rb->overwrite) { in __perf_output_begin()
197 perf_data_size(rb), in __perf_output_begin()
218 } while (local_cmpxchg(&rb->head, offset, head) != offset); in __perf_output_begin()
230 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) in __perf_output_begin()
231 local_add(rb->watermark, &rb->wakeup); in __perf_output_begin()
233 page_shift = PAGE_SHIFT + page_order(rb); in __perf_output_begin()
235 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
237 handle->addr = rb->data_pages[handle->page] + offset; in __perf_output_begin()
245 lost_event.lost = local_xchg(&rb->lost, 0); in __perf_output_begin()
256 local_inc(&rb->lost); in __perf_output_begin()
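The reservation step at line 218 claims space by advancing rb->head with local_cmpxchg() in a retry loop; an IRQ or NMI that races in just forces a retry. A rough user-space equivalent using C11 atomics might look like the sketch below (demo_reserve is a made-up helper; the real code also handles overwrite mode, lost-record accounting and the wakeup watermark at lines 230-231).

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Claim 'size' bytes by advancing a shared head pointer with a CAS loop. */
static bool demo_reserve(_Atomic size_t *head, size_t tail, size_t bufsz,
                         size_t size, size_t *offset)
{
    size_t old = atomic_load_explicit(head, memory_order_relaxed);
    size_t next;

    do {
        if (bufsz - (old - tail) < size)     /* not enough free space left */
            return false;
        next = old + size;
        /* on failure 'old' is reloaded, so the space check runs again */
    } while (!atomic_compare_exchange_weak_explicit(head, &old, next,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed));

    *offset = old;                           /* start of the region we now own */
    return true;
}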
306 ring_buffer_init(struct perf_buffer *rb, long watermark, int flags) in ring_buffer_init() argument
308 long max_size = perf_data_size(rb); in ring_buffer_init()
311 rb->watermark = min(max_size, watermark); in ring_buffer_init()
313 if (!rb->watermark) in ring_buffer_init()
314 rb->watermark = max_size / 2; in ring_buffer_init()
317 rb->overwrite = 0; in ring_buffer_init()
319 rb->overwrite = 1; in ring_buffer_init()
321 refcount_set(&rb->refcount, 1); in ring_buffer_init()
323 INIT_LIST_HEAD(&rb->event_list); in ring_buffer_init()
324 spin_lock_init(&rb->event_lock); in ring_buffer_init()
327 * perf_output_begin() only checks rb->paused, therefore in ring_buffer_init()
328 * rb->paused must be true if we have no pages for output. in ring_buffer_init()
330 if (!rb->nr_pages) in ring_buffer_init()
331 rb->paused = 1; in ring_buffer_init()
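A quick worked example of the defaults set here: with no explicit watermark, line 314 picks half of the data area, and __perf_output_begin() (lines 230-231) then steps rb->wakeup forward in watermark-sized increments. The sizes below are arbitrary example values.

#include <stdio.h>

int main(void)
{
    long max_size  = 16 * 4096;              /* 16 data pages of 4 KiB */
    long watermark = 0;                      /* caller passed none */

    if (!watermark)
        watermark = max_size / 2;            /* default from line 314 */

    printf("wakeup roughly every %ld bytes\n", watermark);   /* 32768 */
    return 0;
}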
366 struct perf_buffer *rb; in perf_aux_output_begin() local
377 rb = ring_buffer_get(output_event); in perf_aux_output_begin()
378 if (!rb) in perf_aux_output_begin()
381 if (!rb_has_aux(rb)) in perf_aux_output_begin()
388 * Checking rb::aux_mmap_count and rb::refcount has to be done in in perf_aux_output_begin()
392 if (!atomic_read(&rb->aux_mmap_count)) in perf_aux_output_begin()
395 if (!refcount_inc_not_zero(&rb->aux_refcount)) in perf_aux_output_begin()
398 nest = READ_ONCE(rb->aux_nest); in perf_aux_output_begin()
406 WRITE_ONCE(rb->aux_nest, nest + 1); in perf_aux_output_begin()
408 aux_head = rb->aux_head; in perf_aux_output_begin()
410 handle->rb = rb; in perf_aux_output_begin()
421 if (!rb->aux_overwrite) { in perf_aux_output_begin()
422 aux_tail = READ_ONCE(rb->user_page->aux_tail); in perf_aux_output_begin()
423 handle->wakeup = rb->aux_wakeup + rb->aux_watermark; in perf_aux_output_begin()
424 if (aux_head - aux_tail < perf_aux_size(rb)) in perf_aux_output_begin()
425 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); in perf_aux_output_begin()
435 WRITE_ONCE(rb->aux_nest, 0); in perf_aux_output_begin()
440 return handle->rb->aux_priv; in perf_aux_output_begin()
444 rb_free_aux(rb); in perf_aux_output_begin()
447 ring_buffer_put(rb); in perf_aux_output_begin()
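The free-space computation at line 425 uses CIRC_SPACE() from include/linux/circ_buf.h, which relies on the AUX area size being a power of two and always leaves one byte unused so that a full buffer can be told apart from an empty one. The snippet below restates that arithmetic with local DEMO_* macro names purely for illustration.

#include <assert.h>

#define DEMO_CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define DEMO_CIRC_SPACE(head, tail, size) DEMO_CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
    unsigned long size = 1UL << 16;          /* 64 KiB AUX area */

    /* empty buffer: one slot stays free, so writable space is size - 1 */
    assert(DEMO_CIRC_SPACE(0, 0, size) == size - 1);

    /* 4 KiB produced, nothing consumed yet */
    assert(DEMO_CIRC_SPACE(4096, 0, size) == size - 4096 - 1);
    return 0;
}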
454 static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb) in rb_need_aux_wakeup() argument
456 if (rb->aux_overwrite) in rb_need_aux_wakeup()
459 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { in rb_need_aux_wakeup()
460 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark); in rb_need_aux_wakeup()
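rb_need_aux_wakeup() signals a wakeup once a full watermark's worth of AUX data has accumulated, and snaps rb->aux_wakeup down to a watermark boundary so later comparisons stay aligned. A tiny illustration of that arithmetic, with demo_rounddown restating the kernel's rounddown():

#include <assert.h>

#define demo_rounddown(x, y) ((x) - ((x) % (y)))

int main(void)
{
    unsigned long watermark = 4096, wakeup = 0;
    unsigned long head = 9300;               /* AUX bytes produced so far */

    /* mirrors lines 459-460 */
    if (head - wakeup >= watermark)
        wakeup = demo_rounddown(head, watermark);

    assert(wakeup == 8192);                  /* snapped to a watermark multiple */
    return 0;
}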
480 struct perf_buffer *rb = handle->rb; in perf_aux_output_end() local
484 if (rb->aux_overwrite) { in perf_aux_output_end()
488 rb->aux_head = aux_head; in perf_aux_output_end()
492 aux_head = rb->aux_head; in perf_aux_output_end()
493 rb->aux_head += size; in perf_aux_output_end()
512 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); in perf_aux_output_end()
513 if (rb_need_aux_wakeup(rb)) in perf_aux_output_end()
524 WRITE_ONCE(rb->aux_nest, 0); in perf_aux_output_end()
526 rb_free_aux(rb); in perf_aux_output_end()
527 ring_buffer_put(rb); in perf_aux_output_end()
537 struct perf_buffer *rb = handle->rb; in perf_aux_output_skip() local
542 rb->aux_head += size; in perf_aux_output_skip()
544 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); in perf_aux_output_skip()
545 if (rb_need_aux_wakeup(rb)) { in perf_aux_output_skip()
547 handle->wakeup = rb->aux_wakeup + rb->aux_watermark; in perf_aux_output_skip()
550 handle->head = rb->aux_head; in perf_aux_output_skip()
563 return handle->rb->aux_priv; in perf_get_aux()
574 struct perf_buffer *rb = aux_handle->rb; in perf_output_copy_aux() local
578 from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1; in perf_output_copy_aux()
579 to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1; in perf_output_copy_aux()
588 addr = rb->aux_pages[from >> PAGE_SHIFT]; in perf_output_copy_aux()
597 from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1; in perf_output_copy_aux()
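perf_output_copy_aux() wraps its offsets by masking with the AUX area size minus one (lines 578-579 and 597), which only works because the area spans a power-of-two number of pages. A worked example with assumed sizes:

#include <assert.h>

#define DEMO_PAGE_SHIFT 12                   /* assume 4 KiB pages */

int main(void)
{
    unsigned long aux_nr_pages = 8;          /* 32 KiB AUX area */
    unsigned long mask = (aux_nr_pages << DEMO_PAGE_SHIFT) - 1;

    unsigned long from = 33000;              /* offset past the end of the area */
    from &= mask;

    assert(from == 232);                     /* 33000 - 32768: wrapped around */
    return 0;
}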
631 static void rb_free_aux_page(struct perf_buffer *rb, int idx) in rb_free_aux_page() argument
633 struct page *page = virt_to_page(rb->aux_pages[idx]); in rb_free_aux_page()
640 static void __rb_free_aux(struct perf_buffer *rb) in __rb_free_aux() argument
652 if (rb->aux_priv) { in __rb_free_aux()
653 rb->free_aux(rb->aux_priv); in __rb_free_aux()
654 rb->free_aux = NULL; in __rb_free_aux()
655 rb->aux_priv = NULL; in __rb_free_aux()
658 if (rb->aux_nr_pages) { in __rb_free_aux()
659 for (pg = 0; pg < rb->aux_nr_pages; pg++) in __rb_free_aux()
660 rb_free_aux_page(rb, pg); in __rb_free_aux()
662 kfree(rb->aux_pages); in __rb_free_aux()
663 rb->aux_nr_pages = 0; in __rb_free_aux()
667 int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event, in rb_alloc_aux() argument
694 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, in rb_alloc_aux()
696 if (!rb->aux_pages) in rb_alloc_aux()
699 rb->free_aux = event->pmu->free_aux; in rb_alloc_aux()
700 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
704 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
709 for (last = rb->aux_nr_pages + (1 << page_private(page)); in rb_alloc_aux()
710 last > rb->aux_nr_pages; rb->aux_nr_pages++) in rb_alloc_aux()
711 rb->aux_pages[rb->aux_nr_pages] = page_address(page++); in rb_alloc_aux()
722 struct page *page = virt_to_page(rb->aux_pages[0]); in rb_alloc_aux()
728 rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages, in rb_alloc_aux()
730 if (!rb->aux_priv) in rb_alloc_aux()
741 refcount_set(&rb->aux_refcount, 1); in rb_alloc_aux()
743 rb->aux_overwrite = overwrite; in rb_alloc_aux()
744 rb->aux_watermark = watermark; in rb_alloc_aux()
746 if (!rb->aux_watermark && !rb->aux_overwrite) in rb_alloc_aux()
747 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1); in rb_alloc_aux()
751 rb->aux_pgoff = pgoff; in rb_alloc_aux()
753 __rb_free_aux(rb); in rb_alloc_aux()
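The allocation loop at lines 700-711 fills the AUX page array with the largest power-of-two chunks that still fit, capped by max_order (line 704). The sketch below reproduces only the order-selection arithmetic; demo_ilog2 is a stand-in for the kernel's ilog2(), and the real code can also fall back to smaller chunks when high-order allocations fail.

#include <stdio.h>

static int demo_ilog2(unsigned long n)       /* floor(log2(n)), n > 0 */
{
    int log = -1;
    while (n) {
        n >>= 1;
        log++;
    }
    return log;
}

int main(void)
{
    int nr_pages = 13, max_order = 4, done = 0;

    while (done < nr_pages) {
        int order = demo_ilog2(nr_pages - done);
        if (order > max_order)
            order = max_order;
        printf("chunk of order %d (%d pages)\n", order, 1 << order);
        done += 1 << order;
    }
    return 0;                                /* 8 + 4 + 1 pages for nr_pages = 13 */
}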
758 void rb_free_aux(struct perf_buffer *rb) in rb_free_aux() argument
760 if (refcount_dec_and_test(&rb->aux_refcount)) in rb_free_aux()
761 __rb_free_aux(rb); in rb_free_aux()
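rb_free_aux() follows the usual last-reference-frees pattern: each writer grabbing the AUX handle takes a reference (line 395), and the final drop at lines 760-761 tears the area down via __rb_free_aux(). A toy C11 version of that pattern, with invented names:

#include <stdatomic.h>
#include <stdlib.h>

struct demo_aux {
    _Atomic int refcount;
    void *pages;
};

static void demo_aux_put(struct demo_aux *aux)
{
    /* fetch_sub returns the previous value; 1 means this was the last ref */
    if (atomic_fetch_sub_explicit(&aux->refcount, 1, memory_order_acq_rel) == 1) {
        free(aux->pages);
        free(aux);
    }
}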
771 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
773 if (pgoff > rb->nr_pages) in __perf_mmap_to_page()
777 return virt_to_page(rb->user_page); in __perf_mmap_to_page()
779 return virt_to_page(rb->data_pages[pgoff - 1]); in __perf_mmap_to_page()
805 struct perf_buffer *rb; in rb_alloc() local
815 rb = kzalloc(size, GFP_KERNEL); in rb_alloc()
816 if (!rb) in rb_alloc()
819 rb->user_page = perf_mmap_alloc_page(cpu); in rb_alloc()
820 if (!rb->user_page) in rb_alloc()
824 rb->data_pages[i] = perf_mmap_alloc_page(cpu); in rb_alloc()
825 if (!rb->data_pages[i]) in rb_alloc()
829 rb->nr_pages = nr_pages; in rb_alloc()
831 ring_buffer_init(rb, watermark, flags); in rb_alloc()
833 return rb; in rb_alloc()
837 perf_mmap_free_page(rb->data_pages[i]); in rb_alloc()
839 perf_mmap_free_page(rb->user_page); in rb_alloc()
842 kfree(rb); in rb_alloc()
848 void rb_free(struct perf_buffer *rb) in rb_free() argument
852 perf_mmap_free_page(rb->user_page); in rb_free()
853 for (i = 0; i < rb->nr_pages; i++) in rb_free()
854 perf_mmap_free_page(rb->data_pages[i]); in rb_free()
855 kfree(rb); in rb_free()
859 static int data_page_nr(struct perf_buffer *rb) in data_page_nr() argument
861 return rb->nr_pages << page_order(rb); in data_page_nr()
865 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
868 if (pgoff > data_page_nr(rb)) in __perf_mmap_to_page()
871 return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); in __perf_mmap_to_page()
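In the vmalloc-backed variant the whole buffer is one virtually contiguous allocation, so rb->nr_pages stays 1 while rb->page_order records the real size (lines 926-927); data_page_nr() then recovers the page count as nr_pages << page_order (line 861). A quick arithmetic check with an assumed 16-page request:

#include <assert.h>

int main(void)
{
    int requested_pages = 16;
    int rb_nr_pages = 1;                     /* line 926 */
    int rb_page_order = 4;                   /* ilog2(16), line 927 */

    assert((rb_nr_pages << rb_page_order) == requested_pages);   /* line 861 */
    return 0;
}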
883 struct perf_buffer *rb; in rb_free_work() local
887 rb = container_of(work, struct perf_buffer, work); in rb_free_work()
888 nr = data_page_nr(rb); in rb_free_work()
890 base = rb->user_page; in rb_free_work()
896 kfree(rb); in rb_free_work()
899 void rb_free(struct perf_buffer *rb) in rb_free() argument
901 schedule_work(&rb->work); in rb_free()
906 struct perf_buffer *rb; in rb_alloc() local
913 rb = kzalloc(size, GFP_KERNEL); in rb_alloc()
914 if (!rb) in rb_alloc()
917 INIT_WORK(&rb->work, rb_free_work); in rb_alloc()
923 rb->user_page = all_buf; in rb_alloc()
924 rb->data_pages[0] = all_buf + PAGE_SIZE; in rb_alloc()
926 rb->nr_pages = 1; in rb_alloc()
927 rb->page_order = ilog2(nr_pages); in rb_alloc()
930 ring_buffer_init(rb, watermark, flags); in rb_alloc()
932 return rb; in rb_alloc()
935 kfree(rb); in rb_alloc()
944 perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff) in perf_mmap_to_page() argument
946 if (rb->aux_nr_pages) { in perf_mmap_to_page()
948 if (pgoff > rb->aux_pgoff + rb->aux_nr_pages) in perf_mmap_to_page()
952 if (pgoff >= rb->aux_pgoff) { in perf_mmap_to_page()
953 int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages); in perf_mmap_to_page()
954 return virt_to_page(rb->aux_pages[aux_pgoff]); in perf_mmap_to_page()
958 return __perf_mmap_to_page(rb, pgoff); in perf_mmap_to_page()
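perf_mmap_to_page() dispatches a faulting mmap offset either to the AUX page array or to the data-buffer lookup at line 958, with array_index_nospec() at line 953 clamping the AUX index against speculative out-of-bounds access. The layout it assumes, shown with hypothetical example values:

#include <stdio.h>

int main(void)
{
    /* Hypothetical layout of one perf mmap, in 4 KiB page offsets:
     *   0                        control page (rb->user_page)
     *   1 .. nr_pages            data pages
     *   aux_pgoff .. +aux_nr-1   AUX pages (aux_pgoff is picked by user space) */
    unsigned long nr_pages = 16, aux_pgoff = 17, aux_nr_pages = 64;

    printf("data pages: pgoff 1 .. %lu\n", nr_pages);
    printf("aux pages:  pgoff %lu .. %lu\n", aux_pgoff,
           aux_pgoff + aux_nr_pages - 1);
    return 0;
}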