Lines matching refs:cpu_buffer (cross-reference listing; the hits below are references in kernel/trace/ring_buffer.c)
563 struct ring_buffer_per_cpu *cpu_buffer; member
769 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
772 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
773 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
796 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
843 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp() local
850 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
853 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
854 verify_event(cpu_buffer, event); in ring_buffer_event_time_stamp()
860 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
867 if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) in ring_buffer_event_time_stamp()
869 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
919 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit() local
923 nr_pages = cpu_buffer->nr_pages; in full_hit()
960 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters() local
979 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
981 if (!cpu_buffer) in ring_buffer_wake_waiters()
983 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
1005 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
1023 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
1024 work = &cpu_buffer->irq_work; in ring_buffer_wait()
1077 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1078 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
1081 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
1082 cpu_buffer->shortest_full > full) in ring_buffer_wait()
1083 cpu_buffer->shortest_full = full; in ring_buffer_wait()
1084 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1123 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1133 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1134 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1140 if (!cpu_buffer->shortest_full || in ring_buffer_poll_wait()
1141 cpu_buffer->shortest_full > full) in ring_buffer_poll_wait()
1142 cpu_buffer->shortest_full = full; in ring_buffer_poll_wait()
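
The two wait paths above (ring_buffer_wait() and ring_buffer_poll_wait()) update cpu_buffer->shortest_full with the same rule: remember the smallest "percent full" any waiter has asked for. A minimal stand-alone sketch of that rule, using a hypothetical fake_cpu_buffer struct rather than the kernel type (the kernel does the update under reader_lock):

    /* Sketch of the shortest_full rule seen above; 0 means no waiter yet. */
    #include <stdio.h>

    struct fake_cpu_buffer {
        int shortest_full;      /* 0 = no full-waiter registered yet */
    };

    /* Keep the smallest "percent full" requested so one wakeup check can
     * satisfy the most demanding waiter first. */
    static void track_shortest_full(struct fake_cpu_buffer *cb, int full)
    {
        if (!cb->shortest_full || cb->shortest_full > full)
            cb->shortest_full = full;
    }

    int main(void)
    {
        struct fake_cpu_buffer cb = { 0 };

        track_shortest_full(&cb, 75);
        track_shortest_full(&cb, 25);
        track_shortest_full(&cb, 50);
        printf("shortest_full = %d\n", cb.shortest_full);  /* prints 25 */
        return 0;
    }
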
1364 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
1368 head = cpu_buffer->head_page; in rb_head_page_activate()
1389 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
1394 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1396 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1400 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
1423 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
1428 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1432 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
1437 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
1441 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
1446 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
1458 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1465 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1469 list = cpu_buffer->pages; in rb_set_head_page()
1470 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1473 page = head = cpu_buffer->head_page; in rb_set_head_page()
1483 cpu_buffer->head_page = page; in rb_set_head_page()
1490 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
1510 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1529 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1541 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1567 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
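
rb_tail_page_update() above moves the tail page with a cmpxchg so the update only lands if the tail is still the page the writer expects; losing the race just means another writer already advanced it. A rough user-space sketch of that idea with C11 atomics and a hypothetical fake_page type (the kernel variant also updates pages_touched and the page write/entries counters):

    #include <stdatomic.h>

    struct fake_page { struct fake_page *next; };

    static void advance_tail(_Atomic(struct fake_page *) *tail,
                             struct fake_page *expected,
                             struct fake_page *next)
    {
        /* If another writer already moved the tail, the compare fails and
         * their value is kept; the lost race is harmless. */
        atomic_compare_exchange_strong(tail, &expected, next);
    }

    int main(void)
    {
        struct fake_page a = { 0 }, b = { 0 };
        _Atomic(struct fake_page *) tail = &a;

        advance_tail(&tail, &a, &b);    /* succeeds: tail now points at b */
        advance_tail(&tail, &a, &b);    /* stale expectation: harmless no-op */
        return 0;
    }
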
1571 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1576 RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK); in rb_check_bpage()
1586 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1588 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1591 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1595 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1600 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1604 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1610 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages() argument
1651 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1655 rb_check_bpage(cpu_buffer, bpage); in __rb_allocate_pages()
1659 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1684 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1691 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) in rb_allocate_pages()
1699 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1702 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1704 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1712 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1717 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1719 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1722 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1723 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1724 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1725 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1726 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1727 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1728 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1729 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1730 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1731 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1738 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1740 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1747 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1748 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1750 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1754 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1755 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1756 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1758 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1760 return cpu_buffer; in rb_allocate_cpu_buffer()
1763 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1766 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
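
The rb_allocate_cpu_buffer() references above follow the usual allocate-then-unwind shape: allocate the per-CPU structure, then its reader page, then the page list, and free in reverse order on failure. A simplified user-space sketch of that shape, with hypothetical stand-in types and calloc/free instead of the kernel allocators:

    #include <stdlib.h>

    struct fake_page { struct fake_page *next; };

    struct fake_cpu_buffer {
        int cpu;
        struct fake_page *reader_page;
        struct fake_page *pages;        /* head/tail/commit all start here */
    };

    static struct fake_cpu_buffer *alloc_cpu_buffer(int cpu, unsigned long nr_pages)
    {
        struct fake_cpu_buffer *cb = calloc(1, sizeof(*cb));

        if (!cb)
            return NULL;

        cb->cpu = cpu;

        cb->reader_page = calloc(1, sizeof(*cb->reader_page));
        if (!cb->reader_page)
            goto fail_free_buffer;

        cb->pages = calloc(nr_pages, sizeof(*cb->pages));
        if (!cb->pages)
            goto fail_free_reader;

        return cb;

    fail_free_reader:
        free(cb->reader_page);
    fail_free_buffer:
        free(cb);
        return NULL;
    }

    int main(void)
    {
        struct fake_cpu_buffer *cb = alloc_cpu_buffer(0, 8);

        if (cb) {
            free(cb->pages);
            free(cb->reader_page);
            free(cb);
        }
        return 0;
    }
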
1770 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1772 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1775 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
1777 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1780 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1790 kfree(cpu_buffer); in rb_free_cpu_buffer()
1911 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1924 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1935 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1936 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1946 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1952 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1965 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
1980 cpu_buffer->pages = next_page; in rb_remove_pages()
1984 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1988 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1989 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1991 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2013 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2014 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2015 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2027 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
2033 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
2035 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2041 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2062 struct buffer_page *hpage = rb_set_head_page(cpu_buffer); in rb_insert_pages()
2098 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
2099 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2104 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2113 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
2117 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2118 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
2120 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
2121 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2124 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2129 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2131 rb_update_pages(cpu_buffer); in update_pages_handler()
2132 complete(&cpu_buffer->update_done); in update_pages_handler()
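
rb_update_pages() above encodes the resize direction in the sign of nr_pages_to_update: positive means insert that many pages, negative means remove them, and nr_pages is adjusted afterwards. A small stand-alone sketch of that convention, where the insert/remove stubs are assumptions, not the kernel helpers:

    #include <stdio.h>

    struct fake_cpu_buffer {
        long nr_pages;
        long nr_pages_to_update;        /* >0 grow, <0 shrink */
    };

    static int insert_pages(struct fake_cpu_buffer *cb) { (void)cb; return 1; }
    static int remove_pages(struct fake_cpu_buffer *cb, unsigned long nr)
    { (void)cb; (void)nr; return 1; }

    static void update_pages(struct fake_cpu_buffer *cb)
    {
        int success;

        if (cb->nr_pages_to_update > 0)
            success = insert_pages(cb);
        else
            success = remove_pages(cb, -cb->nr_pages_to_update);

        if (success)
            cb->nr_pages += cb->nr_pages_to_update;
    }

    int main(void)
    {
        struct fake_cpu_buffer cb = { .nr_pages = 8, .nr_pages_to_update = -3 };

        update_pages(&cb);
        printf("nr_pages = %ld\n", cb.nr_pages);    /* prints 5 */
        return 0;
    }
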
2148 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
2180 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2181 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2189 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2191 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2192 cpu_buffer->nr_pages; in ring_buffer_resize()
2196 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2202 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2203 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2204 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2220 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2221 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2226 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2227 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2234 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2236 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
2244 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2245 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2249 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2250 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2255 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2257 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2265 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2270 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2271 cpu_buffer->nr_pages; in ring_buffer_resize()
2273 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2274 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2275 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2276 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2285 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2290 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2295 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2296 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2300 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2322 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2323 rb_check_pages(cpu_buffer); in ring_buffer_resize()
2336 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2337 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2339 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2342 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
2372 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
2374 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2375 cpu_buffer->reader_page->read); in rb_reader_event()
2444 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
2446 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2459 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
2467 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2468 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2485 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
2501 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
2522 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2523 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
2524 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
2555 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2576 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2593 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2610 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2617 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2628 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2631 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2640 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2706 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2716 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2722 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2726 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2727 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2741 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2765 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2771 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2775 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2793 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2794 cpu_buffer->tail_page) && in rb_move_tail()
2795 (cpu_buffer->commit_page == in rb_move_tail()
2796 cpu_buffer->reader_page))) { in rb_move_tail()
2797 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2803 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2807 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2810 rb_end_commit(cpu_buffer); in rb_move_tail()
2812 local_inc(&cpu_buffer->committing); in rb_move_tail()
2819 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2854 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp() argument
2864 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2872 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp() argument
2904 rb_check_timestamp(cpu_buffer, info); in rb_add_timestamp()
2925 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2931 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
2934 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2941 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); in rb_update_event()
3004 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
3018 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3022 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) in rb_try_to_discard()
3034 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, in rb_try_to_discard()
3047 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3069 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3078 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
3080 local_inc(&cpu_buffer->committing); in rb_start_commit()
3081 local_inc(&cpu_buffer->commits); in rb_start_commit()
3085 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
3098 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3100 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3101 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3103 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3104 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3110 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3111 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3112 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3116 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
3117 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3121 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3122 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3123 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3124 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3137 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3141 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
3145 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
3146 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3150 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3153 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3154 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
3156 local_dec(&cpu_buffer->committing); in rb_end_commit()
3166 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3167 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3168 local_inc(&cpu_buffer->committing); in rb_end_commit()
3186 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit() argument
3188 local_inc(&cpu_buffer->entries); in rb_commit()
3189 rb_end_commit(cpu_buffer); in rb_commit()
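
The rb_start_commit()/rb_end_commit() references above show the nested-commit convention: every writer bumps the committing counter, but only the outermost writer (committing == 1) pushes the commit pointer forward before dropping the count. A hypothetical single-threaded illustration of that convention (the real code is lockless and also re-checks for commits that raced in from interrupts):

    #include <stdio.h>

    struct fake_cpu_buffer {
        long committing;    /* nesting depth of active writers */
        long commits;       /* total reserve operations */
        long published;     /* stand-in for "commit page caught up" */
    };

    static void start_commit(struct fake_cpu_buffer *cb)
    {
        cb->committing++;
        cb->commits++;
    }

    static void end_commit(struct fake_cpu_buffer *cb)
    {
        if (cb->committing == 1)
            cb->published = cb->commits;    /* only outermost publishes */
        cb->committing--;
    }

    int main(void)
    {
        struct fake_cpu_buffer cb = { 0 };

        start_commit(&cb);      /* normal context */
        start_commit(&cb);      /* e.g. an interrupt nested inside */
        end_commit(&cb);        /* nested level: does not publish */
        end_commit(&cb);        /* outermost level publishes both */
        printf("published = %ld\n", cb.published);  /* prints 2 */
        return 0;
    }
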
3193 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3201 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3202 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3204 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3207 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3210 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3213 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3216 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3218 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3221 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3222 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3224 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3297 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
3299 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3304 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3311 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3317 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3318 cpu_buffer->current_context = val; in trace_recursive_lock()
3324 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
3326 cpu_buffer->current_context &= in trace_recursive_unlock()
3327 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
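
trace_recursive_unlock() above relies on a bit trick: x &= x - (1 << nest) clears the lowest set bit at or above nest, which is the context bit taken most recently, since deeper contexts use lower bit positions. A stand-alone demonstration with made-up context values:

    #include <stdio.h>

    static unsigned int recursive_unlock(unsigned int ctx, unsigned int nest)
    {
        /* Clears the lowest set bit whose position is >= nest. */
        return ctx & (ctx - (1u << nest));
    }

    int main(void)
    {
        /* bits 3 and 1 held (two nested contexts), nest == 0 */
        unsigned int ctx = (1u << 3) | (1u << 1);

        ctx = recursive_unlock(ctx, 0);     /* drops bit 1 -> 0x8 */
        printf("ctx = 0x%x\n", ctx);
        ctx = recursive_unlock(ctx, 0);     /* drops bit 3 -> 0x0 */
        printf("ctx = 0x%x\n", ctx);
        return 0;
    }
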
3348 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
3354 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3356 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3368 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
3373 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3375 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3389 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
3392 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3394 rb_commit(cpu_buffer); in ring_buffer_unlock_commit()
3396 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3398 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
3462 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
3523 RB_WARN_ON(cpu_buffer, 1); in check_buffer()
3533 atomic_inc(&cpu_buffer->record_disabled); in check_buffer()
3537 cpu_buffer->cpu, in check_buffer()
3550 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
3558 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
3568 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3572 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3573 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3575 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3597 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3609 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3610 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3612 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, in __rb_reserve_next()
3615 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); in __rb_reserve_next()
3616 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
3624 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3626 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); in __rb_reserve_next()
3627 RB_WARN_ON(cpu_buffer, !s_ok); in __rb_reserve_next()
3636 check_buffer(cpu_buffer, info, tail); in __rb_reserve_next()
3640 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3641 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3649 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3656 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3658 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3659 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3663 rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3693 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
3705 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3712 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
3720 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
3731 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3732 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3733 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3740 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3760 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
3763 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
3774 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
3796 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
3811 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3813 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3819 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
3822 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
3829 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
3843 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
3847 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3873 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
3898 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
3905 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3912 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3914 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
3915 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
3919 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
3921 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
3945 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3961 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3963 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3969 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
3972 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3980 rb_commit(cpu_buffer); in ring_buffer_write()
3982 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3987 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
3996 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3998 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3999 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
4000 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
4145 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
4150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4151 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4165 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
4170 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4171 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4182 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
4184 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4185 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
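
rb_num_of_entries() above is plain bookkeeping: events still readable equal events written minus the sum of events overwritten and events already consumed. A tiny sketch with ordinary counters standing in for the kernel's local_t fields:

    #include <stdio.h>

    struct fake_cpu_buffer {
        unsigned long entries;  /* total events committed */
        unsigned long overrun;  /* events lost to overwrite */
        unsigned long read;     /* events consumed by readers */
    };

    static unsigned long num_of_entries(const struct fake_cpu_buffer *cb)
    {
        return cb->entries - (cb->overrun + cb->read);
    }

    int main(void)
    {
        struct fake_cpu_buffer cb = { .entries = 100, .overrun = 12, .read = 30 };

        printf("%lu events still pending\n", num_of_entries(&cb));  /* 58 */
        return 0;
    }
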
4196 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
4203 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4204 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4209 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4210 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4212 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
4215 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4228 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
4234 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4235 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4248 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
4253 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4255 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
4267 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
4273 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4274 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4290 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
4296 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4297 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4312 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
4318 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4319 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4333 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
4338 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4339 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4352 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
4358 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4359 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
4375 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
4381 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4382 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4391 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
4394 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4395 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4399 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4400 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
4403 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4404 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4420 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
4426 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4428 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4430 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4440 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
4449 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4450 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4451 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4452 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4466 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4478 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4483 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
4494 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4499 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
4500 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4504 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4508 RB_WARN_ON(cpu_buffer, 1); in rb_update_read_stamp()
4538 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4543 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
4552 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4561 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
4566 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4569 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4573 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
4574 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4579 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4583 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
4589 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4590 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4591 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4592 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4598 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
4601 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4602 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4609 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4612 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
4624 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4637 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4650 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4651 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
4653 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4656 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4657 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4659 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4660 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4661 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4669 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4671 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4691 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) in rb_get_reader_page()
4709 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
4715 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
4718 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
4721 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
4724 cpu_buffer->read++; in rb_advance_reader()
4726 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
4729 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4730 cpu_buffer->read_bytes += length; in rb_advance_reader()
4735 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
4737 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4753 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4762 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
4764 return cpu_buffer->lost_events; in rb_lost_events()
4768 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
4784 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
4787 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
4791 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
4796 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4809 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4816 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4817 cpu_buffer->cpu, ts); in rb_buffer_peek()
4820 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4825 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4826 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4827 cpu_buffer->cpu, ts); in rb_buffer_peek()
4830 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
4834 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4845 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
4852 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4853 buffer = cpu_buffer->buffer; in rb_iter_peek()
4860 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4861 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
4862 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
4879 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
4909 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4910 cpu_buffer->cpu, ts); in rb_iter_peek()
4920 cpu_buffer->cpu, ts); in rb_iter_peek()
4925 RB_WARN_ON(cpu_buffer, 1); in rb_iter_peek()
4932 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
4935 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4948 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4952 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4957 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
4960 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4977 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
4987 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
4988 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
4990 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
4991 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
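
ring_buffer_peek() above brackets the read with rb_reader_lock()/rb_reader_unlock(), which either take reader_lock outright or, when blocking is not allowed, fall back to a trylock and disable recording if even that fails; the unlock side only releases the lock when it was actually taken. A hypothetical pthread sketch of that shape (the names and the may_block parameter are assumptions, not the kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int record_disabled;

    static bool reader_lock_try(bool may_block)
    {
        if (may_block) {
            pthread_mutex_lock(&reader_lock);
            return true;
        }
        if (pthread_mutex_trylock(&reader_lock) == 0)
            return true;

        /* Could not get the lock: keep writers away another way. */
        atomic_fetch_add(&record_disabled, 1);
        return false;
    }

    static void reader_unlock(bool locked)
    {
        /* Only the locked path has anything to release; this sketch
         * deliberately leaves recording disabled on the fallback path. */
        if (locked)
            pthread_mutex_unlock(&reader_lock);
    }

    int main(void)
    {
        bool locked = reader_lock_try(true);

        /* ... peek at the buffer ... */
        reader_unlock(locked);
        return 0;
    }
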
5025 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
5030 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5032 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5055 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
5067 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5069 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
5071 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
5073 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5074 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
5077 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
5114 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
5130 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5132 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5134 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5168 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
5174 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5176 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5177 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5179 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5180 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5194 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
5203 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5204 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
5205 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5207 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5222 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
5225 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5229 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5262 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
5266 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
5268 cpu_buffer->head_page in rb_reset_cpu()
5269 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5270 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5271 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5275 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5276 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5278 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5279 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5280 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5282 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5283 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5284 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5285 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5286 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5287 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5288 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5289 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5290 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5291 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5292 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5293 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5294 cpu_buffer->read = 0; in rb_reset_cpu()
5295 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5297 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5298 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5300 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5302 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5303 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5305 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
5306 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
5310 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer() argument
5314 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5316 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5319 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5321 rb_reset_cpu(cpu_buffer); in reset_disabled_cpu_buffer()
5323 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5326 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5336 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
5344 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5345 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5350 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_cpu()
5352 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5353 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5368 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
5375 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5377 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5378 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5385 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5391 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
5394 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_online_cpus()
5396 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5397 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5409 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
5416 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5418 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5419 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5426 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5428 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset()
5430 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5431 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5444 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
5452 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5454 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
5455 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
5456 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
5474 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
5482 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5484 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
5485 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
5486 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
5596 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
5604 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5606 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5608 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5609 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5610 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5613 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5643 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page() local
5651 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5658 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5660 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5661 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5665 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
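
ring_buffer_alloc_read_page() and ring_buffer_free_read_page() above keep a single spare page in cpu_buffer->free_page: the allocator hands it out before asking for a new page, and the free path parks a page there only if the slot is empty. A user-space sketch of that one-slot cache, with a pthread mutex standing in for the arch spinlock and malloc/free for the page allocator:

    #include <pthread.h>
    #include <stdlib.h>

    #define FAKE_PAGE_SIZE 4096

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *free_page_slot;        /* at most one cached page */

    static void *alloc_read_page(void)
    {
        void *page = NULL;

        pthread_mutex_lock(&cache_lock);
        if (free_page_slot) {
            page = free_page_slot;
            free_page_slot = NULL;
        }
        pthread_mutex_unlock(&cache_lock);

        if (!page)
            page = malloc(FAKE_PAGE_SIZE);
        return page;
    }

    static void free_read_page(void *page)
    {
        pthread_mutex_lock(&cache_lock);
        if (!free_page_slot) {
            free_page_slot = page;
            page = NULL;
        }
        pthread_mutex_unlock(&cache_lock);

        free(page);     /* free(NULL) is a no-op when the slot took it */
    }

    int main(void)
    {
        void *p = alloc_read_page();

        free_read_page(p);          /* cached for the next caller */
        free(alloc_read_page());    /* reuses the cached page */
        return 0;
    }
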
5709 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
5739 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5741 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
5745 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5751 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5761 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5762 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5775 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
5788 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5803 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
5810 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5823 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5824 cpu_buffer->read_bytes += rb_page_commit(reader); in ring_buffer_read_page()
5845 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5871 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()