Lines matching refs: cpu_buffer (kernel/trace/ring_buffer.c)
524 struct ring_buffer_per_cpu *cpu_buffer; member
561 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); in ring_buffer_wait()
578 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
579 work = &cpu_buffer->irq_work; in ring_buffer_wait()
630 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
631 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
632 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
666 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
675 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
676 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
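
The two wait paths above back blocking and poll()-based readers. Below is a minimal caller-side sketch, assuming the three-argument ring_buffer_wait() form (buffer, cpu, full) that matches the full_waiters handling listed here; the wrapper name is illustrative, not part of this file:

#include <linux/ring_buffer.h>

/* Illustrative: block until CPU 'cpu' of 'rb' has data (or a signal arrives). */
static int wait_for_data(struct ring_buffer *rb, int cpu)
{
	/* false: wake on any data rather than waiting for a full page */
	return ring_buffer_wait(rb, cpu, false);
}
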
845 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_is_head_page() argument
875 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_set_list_to_head() argument
888 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
892 head = cpu_buffer->head_page; in rb_head_page_activate()
899 rb_set_list_to_head(cpu_buffer, head->list.prev); in rb_head_page_activate()
913 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
918 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
920 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
924 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
947 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
952 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
956 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
961 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
965 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
970 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
974 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_inc_page() argument
983 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
990 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
994 list = cpu_buffer->pages; in rb_set_head_page()
995 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
998 page = head = cpu_buffer->head_page; in rb_set_head_page()
1007 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { in rb_set_head_page()
1008 cpu_buffer->head_page = page; in rb_set_head_page()
1011 rb_inc_page(cpu_buffer, &page); in rb_set_head_page()
1015 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
1038 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1068 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1094 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
1098 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1103 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) in rb_check_bpage()
1112 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_list() argument
1115 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) in rb_check_list()
1117 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) in rb_check_list()
1129 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1131 struct list_head *head = cpu_buffer->pages; in rb_check_pages()
1135 if (cpu_buffer->head_page) in rb_check_pages()
1136 rb_set_head_page(cpu_buffer); in rb_check_pages()
1138 rb_head_page_deactivate(cpu_buffer); in rb_check_pages()
1140 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
1142 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
1145 if (rb_check_list(cpu_buffer, head)) in rb_check_pages()
1149 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1152 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1155 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
1159 rb_head_page_activate(cpu_buffer); in rb_check_pages()
1235 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1242 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages()
1250 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1253 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1255 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1263 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1268 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1270 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1273 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1274 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1275 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1276 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1277 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1278 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1279 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1280 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1281 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1282 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1289 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1291 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1298 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1299 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1301 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1305 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1306 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1307 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1309 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1311 return cpu_buffer; in rb_allocate_cpu_buffer()
1314 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1317 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
1321 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1323 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1326 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1328 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1339 kfree(cpu_buffer); in rb_free_cpu_buffer()
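
rb_allocate_cpu_buffer()/rb_free_cpu_buffer() above are driven by the public constructor and destructor. A minimal lifecycle sketch follows, assuming the ring_buffer_alloc()/ring_buffer_free() interface; the module scaffolding, names and sizes are illustrative:

#include <linux/module.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *rb;

static int __init rb_example_init(void)
{
	/* ~64 KiB (rounded up to buffer pages) per CPU; overwrite oldest events when full. */
	rb = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
	return rb ? 0 : -ENOMEM;
}

static void __exit rb_example_exit(void)
{
	ring_buffer_free(rb);	/* tears down every per-CPU buffer and its pages */
}

module_init(rb_example_init);
module_exit(rb_example_exit);
MODULE_LICENSE("GPL");
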
1457 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1470 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1481 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1482 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1492 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1498 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1524 cpu_buffer->pages = next_page; in rb_remove_pages()
1528 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1535 cpu_buffer->read = 0; in rb_remove_pages()
1538 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1539 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1541 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1552 rb_inc_page(cpu_buffer, &tmp_iter_page); in rb_remove_pages()
1563 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1564 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_remove_pages()
1576 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
1582 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
1584 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1587 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1609 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
1643 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
1644 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1649 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
1658 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
1662 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
1663 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
1665 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
1666 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
1669 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
1674 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
1676 rb_update_pages(cpu_buffer); in update_pages_handler()
1677 complete(&cpu_buffer->update_done); in update_pages_handler()
1693 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
1730 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1732 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1733 cpu_buffer->nr_pages; in ring_buffer_resize()
1737 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
1743 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1744 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1745 &cpu_buffer->new_pages, cpu)) { in ring_buffer_resize()
1759 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1760 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1765 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1766 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1769 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1775 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1776 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1780 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1781 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1790 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
1792 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
1795 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1796 cpu_buffer->nr_pages; in ring_buffer_resize()
1798 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1799 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
1800 __rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1801 &cpu_buffer->new_pages, cpu_id)) { in ring_buffer_resize()
1810 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1813 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1814 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1817 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1839 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1840 rb_check_pages(cpu_buffer); in ring_buffer_resize()
1852 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1853 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1855 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
1858 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
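
ring_buffer_resize() above adjusts nr_pages either for one CPU or for all of them. A caller-side sketch, assuming the (buffer, size_in_bytes, cpu_id) prototype with RING_BUFFER_ALL_CPUS; the helper name is illustrative:

#include <linux/ring_buffer.h>

/* Illustrative: grow every per-CPU buffer of 'rb' to 1 MiB. */
static int grow_all_cpus(struct ring_buffer *rb)
{
	/* Pass a CPU number instead of RING_BUFFER_ALL_CPUS to resize just that buffer. */
	return ring_buffer_resize(rb, 1024 * 1024, RING_BUFFER_ALL_CPUS);
}
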
1886 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
1888 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
1889 cpu_buffer->reader_page->read); in rb_reader_event()
1910 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
1912 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
1925 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
1933 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
1934 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
1936 rb_inc_page(cpu_buffer, &iter->head_page); in rb_inc_iter()
1950 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
1966 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
1987 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
1988 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_handle_head_page()
2019 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2038 rb_inc_page(cpu_buffer, &new_head); in rb_handle_head_page()
2040 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2057 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2074 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2081 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2092 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2095 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2104 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2131 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2173 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2179 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2183 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2184 struct ring_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2190 rb_inc_page(cpu_buffer, &next_page); in rb_move_tail()
2198 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2216 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { in rb_move_tail()
2222 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2228 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2232 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2250 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2251 cpu_buffer->tail_page) && in rb_move_tail()
2252 (cpu_buffer->commit_page == in rb_move_tail()
2253 cpu_buffer->reader_page))) { in rb_move_tail()
2254 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2260 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2264 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2267 rb_end_commit(cpu_buffer); in rb_move_tail()
2269 local_inc(&cpu_buffer->committing); in rb_move_tail()
2276 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2303 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2318 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2326 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) in rb_update_event()
2334 bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer); in rb_update_event()
2390 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
2403 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
2420 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2429 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
2431 local_inc(&cpu_buffer->committing); in rb_start_commit()
2432 local_inc(&cpu_buffer->commits); in rb_start_commit()
2436 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
2449 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
2451 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
2452 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
2454 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2455 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2457 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2458 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2459 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); in rb_set_commit_to_write()
2461 if (rb_page_write(cpu_buffer->commit_page)) in rb_set_commit_to_write()
2462 cpu_buffer->write_stamp = in rb_set_commit_to_write()
2463 cpu_buffer->commit_page->page->time_stamp; in rb_set_commit_to_write()
2467 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
2468 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
2470 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2471 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2472 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2473 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
2486 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2490 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
2494 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
2495 !local_read(&cpu_buffer->committing))) in rb_end_commit()
2499 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
2502 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
2503 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
2505 local_dec(&cpu_buffer->committing); in rb_end_commit()
2515 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
2516 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
2517 local_inc(&cpu_buffer->committing); in rb_end_commit()
2536 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_event_is_commit() argument
2545 return cpu_buffer->commit_page->page == (void *)addr && in rb_event_is_commit()
2546 rb_commit_index(cpu_buffer) == index; in rb_event_is_commit()
2550 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_write_stamp() argument
2559 if (rb_event_is_commit(cpu_buffer, event)) { in rb_update_write_stamp()
2565 cpu_buffer->write_stamp = in rb_update_write_stamp()
2566 cpu_buffer->commit_page->page->time_stamp; in rb_update_write_stamp()
2569 cpu_buffer->write_stamp += delta; in rb_update_write_stamp()
2572 cpu_buffer->write_stamp = delta; in rb_update_write_stamp()
2574 cpu_buffer->write_stamp += event->time_delta; in rb_update_write_stamp()
2578 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit() argument
2581 local_inc(&cpu_buffer->entries); in rb_commit()
2582 rb_update_write_stamp(cpu_buffer, event); in rb_commit()
2583 rb_end_commit(cpu_buffer); in rb_commit()
2587 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
2597 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
2598 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
2600 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2603 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_wakeups()
2605 if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) { in rb_wakeups()
2606 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
2607 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
2609 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2652 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
2654 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
2664 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) in trace_recursive_lock()
2667 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
2668 cpu_buffer->current_context = val; in trace_recursive_lock()
2674 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
2676 cpu_buffer->current_context &= in trace_recursive_unlock()
2677 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
2698 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
2704 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
2706 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
2718 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
2723 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
2725 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
2741 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
2744 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
2746 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
2748 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
2750 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
2759 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_timestamp() argument
2766 (unsigned long long)cpu_buffer->write_stamp, in rb_handle_timestamp()
2776 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
2792 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
2803 if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer)) in __rb_reserve_next()
2808 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
2813 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
2825 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
2832 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
2840 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
2850 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
2851 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
2852 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
2871 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
2874 info.ts = rb_time_stamp(cpu_buffer->buffer); in rb_reserve_next_event()
2875 diff = info.ts - cpu_buffer->write_stamp; in rb_reserve_next_event()
2882 rb_handle_timestamp(cpu_buffer, &info); in rb_reserve_next_event()
2884 if (likely(info.ts >= cpu_buffer->write_stamp)) { in rb_reserve_next_event()
2887 rb_handle_timestamp(cpu_buffer, &info); in rb_reserve_next_event()
2890 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
2904 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
2926 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
2941 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
2943 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
2949 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
2952 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
2959 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
2973 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
2977 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
2992 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
2999 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
3003 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
3028 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
3035 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3042 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3044 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
3045 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
3052 rb_update_write_stamp(cpu_buffer, event); in ring_buffer_discard_commit()
3054 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
3056 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
3080 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3096 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3098 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3104 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
3107 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3115 rb_commit(cpu_buffer, event); in ring_buffer_write()
3117 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3122 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
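
ring_buffer_lock_reserve()/ring_buffer_unlock_commit() and ring_buffer_write() above are the two producer-side entry points. A sketch of both styles; struct my_entry and the wrapper functions are hypothetical, while the API calls match the prototypes indexed here:

#include <linux/ring_buffer.h>
#include <linux/types.h>

struct my_entry {		/* hypothetical payload */
	u64	ip;
	u32	flags;
};

static void log_entry(struct ring_buffer *rb, u64 ip, u32 flags)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;

	/* Style 1: reserve space, fill it in place, then commit. */
	event = ring_buffer_lock_reserve(rb, sizeof(*entry));
	if (!event)
		return;		/* buffer full in non-overwrite mode, or recursion blocked */
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->flags = flags;
	ring_buffer_unlock_commit(rb, event);
}

static void log_entry_copy(struct ring_buffer *rb, struct my_entry *entry)
{
	/* Style 2: one-shot copy of an already-built payload. */
	ring_buffer_write(rb, sizeof(*entry), entry);
}
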
3131 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3133 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3134 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
3135 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
3260 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
3265 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3266 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
3280 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
3285 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
3286 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
3297 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
3299 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
3300 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
3311 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
3318 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3319 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3324 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
3325 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
3327 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
3330 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3343 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
3349 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3350 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
3363 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
3368 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
3370 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
3382 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
3388 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
3389 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
3405 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
3411 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
3412 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
3427 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
3433 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
3434 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
3448 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
3453 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
3454 return cpu_buffer->read; in ring_buffer_read_events_cpu()
3467 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
3473 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
3474 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
3490 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
3496 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
3497 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
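
The accessors above (entries, overruns, dropped events, read counts, bytes) expose per-CPU statistics. The sketch below uses only functions that appear in this listing; the dump helper itself is illustrative:

#include <linux/printk.h>
#include <linux/ring_buffer.h>

static void dump_cpu_stats(struct ring_buffer *rb, int cpu)
{
	pr_info("cpu%d: entries=%lu overrun=%lu dropped=%lu read=%lu bytes=%lu\n",
		cpu,
		ring_buffer_entries_cpu(rb, cpu),
		ring_buffer_overrun_cpu(rb, cpu),
		ring_buffer_dropped_events_cpu(rb, cpu),
		ring_buffer_read_events_cpu(rb, cpu),
		ring_buffer_bytes_cpu(rb, cpu));
}
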
3506 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
3509 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
3510 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
3513 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
3516 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
3530 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
3536 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
3538 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3540 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3550 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
3556 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
3559 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
3560 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
3561 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
3567 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
3572 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
3583 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
3588 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
3592 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
3632 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
3641 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
3650 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
3655 reader = cpu_buffer->reader_page; in rb_get_reader_page()
3658 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
3662 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
3663 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
3668 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
3672 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
3678 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
3679 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
3680 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
3681 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
3687 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
3690 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
3691 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
3698 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
3701 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); in rb_get_reader_page()
3713 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
3726 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
3739 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
3740 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); in rb_get_reader_page()
3743 cpu_buffer->reader_page = reader; in rb_get_reader_page()
3744 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
3746 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
3747 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
3748 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
3756 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
3758 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
3764 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
3770 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
3773 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
3776 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
3779 cpu_buffer->read++; in rb_advance_reader()
3781 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
3784 cpu_buffer->reader_page->read += length; in rb_advance_reader()
3789 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
3793 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
3800 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
3814 if (RB_WARN_ON(cpu_buffer, in rb_advance_iter()
3815 (iter->head_page == cpu_buffer->commit_page) && in rb_advance_iter()
3816 (iter->head + length > rb_commit_index(cpu_buffer)))) in rb_advance_iter()
3825 (iter->head_page != cpu_buffer->commit_page)) in rb_advance_iter()
3829 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
3831 return cpu_buffer->lost_events; in rb_lost_events()
3835 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
3851 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
3854 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
3858 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
3863 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
3876 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3882 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3883 cpu_buffer->cpu, ts); in rb_buffer_peek()
3886 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3891 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
3892 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3893 cpu_buffer->cpu, ts); in rb_buffer_peek()
3896 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
3911 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
3918 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
3919 buffer = cpu_buffer->buffer; in rb_iter_peek()
3926 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
3927 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
3942 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) in rb_iter_peek()
3945 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
3972 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
3973 cpu_buffer->cpu, ts); in rb_iter_peek()
3983 cpu_buffer->cpu, ts); in rb_iter_peek()
3995 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
3998 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4011 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4015 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4020 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
4023 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4041 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
4051 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
4052 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
4054 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
4055 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
4075 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
4080 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4082 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4105 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
4117 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
4119 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
4121 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
4123 cpu_buffer->lost_events = 0; in ring_buffer_consume()
4124 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
4127 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
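
ring_buffer_consume() above is the destructive (consuming) read used by trace_pipe-style readers. A drain-loop sketch; only the helper name is made up, the calls follow the four-argument prototype listed here:

#include <linux/printk.h>
#include <linux/ring_buffer.h>

static void drain_cpu(struct ring_buffer *rb, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	/* Each successful call removes one event from the per-CPU buffer. */
	while ((event = ring_buffer_consume(rb, cpu, &ts, &lost))) {
		void *data = ring_buffer_event_data(event);
		unsigned int len = ring_buffer_event_length(event);

		if (lost)
			pr_warn("cpu%d: %lu events lost\n", cpu, lost);
		/* process 'data'/'len' with normalized timestamp 'ts' here */
		(void)data;
		(void)len;
	}
}
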
4163 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
4173 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4175 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
4178 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_read_prepare()
4212 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
4218 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
4220 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4221 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
4223 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
4224 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4238 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
4247 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4248 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
4249 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4251 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_read_finish()
4252 atomic_dec(&cpu_buffer->buffer->resize_disabled); in ring_buffer_read_finish()
4268 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read() local
4271 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
4282 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
4308 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
4310 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
4312 cpu_buffer->head_page in rb_reset_cpu()
4313 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
4314 local_set(&cpu_buffer->head_page->write, 0); in rb_reset_cpu()
4315 local_set(&cpu_buffer->head_page->entries, 0); in rb_reset_cpu()
4316 local_set(&cpu_buffer->head_page->page->commit, 0); in rb_reset_cpu()
4318 cpu_buffer->head_page->read = 0; in rb_reset_cpu()
4320 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
4321 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
4323 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
4324 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
4325 local_set(&cpu_buffer->reader_page->write, 0); in rb_reset_cpu()
4326 local_set(&cpu_buffer->reader_page->entries, 0); in rb_reset_cpu()
4327 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_reset_cpu()
4328 cpu_buffer->reader_page->read = 0; in rb_reset_cpu()
4330 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
4331 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
4332 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
4333 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
4334 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
4335 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
4336 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
4337 cpu_buffer->read = 0; in rb_reset_cpu()
4338 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
4340 cpu_buffer->write_stamp = 0; in rb_reset_cpu()
4341 cpu_buffer->read_stamp = 0; in rb_reset_cpu()
4343 cpu_buffer->lost_events = 0; in rb_reset_cpu()
4344 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
4346 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
4356 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
4363 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4368 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4370 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in ring_buffer_reset_cpu()
4373 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4375 rb_reset_cpu(cpu_buffer); in ring_buffer_reset_cpu()
4377 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4380 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4382 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4406 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
4414 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
4416 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
4417 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
4418 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
4436 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
4444 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
4446 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
4447 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
4448 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
4548 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
4556 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
4558 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
4560 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
4561 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
4562 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
4565 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
4595 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page() local
4605 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
4607 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
4608 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
4612 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
4656 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
4686 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
4688 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
4692 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4698 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
4708 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
4709 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
4727 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
4742 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
4749 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4762 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
4763 cpu_buffer->read_bytes += BUF_PAGE_SIZE; in ring_buffer_read_page()
4784 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
4810 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
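
ring_buffer_alloc_read_page(), ring_buffer_read_page() and ring_buffer_free_read_page() above implement the page-swapping reader behind splice (trace_pipe_raw). A sketch of one alloc/read/free cycle, assuming the cpu-aware free_read_page() and the free_page caching shown in this listing; the error handling is illustrative:

#include <linux/err.h>
#include <linux/ring_buffer.h>

static int read_one_page(struct ring_buffer *rb, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(rb, cpu);
	if (IS_ERR_OR_NULL(page))	/* older kernels return NULL, newer ERR_PTR() */
		return -ENOMEM;

	/* full=1: only take a page the writer is no longer filling. */
	ret = ring_buffer_read_page(rb, &page, PAGE_SIZE, cpu, 1);
	/*
	 * On success (ret >= 0) 'page' holds a buffer_data_page: a timestamp,
	 * the committed length, then the raw events (the trace_pipe_raw layout).
	 */
	ring_buffer_free_read_page(rb, cpu, page);

	return ret < 0 ? ret : 0;
}
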