Lines Matching refs:cpu_buffer

505 	struct ring_buffer_per_cpu	*cpu_buffer;  member
577 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); in ring_buffer_wait()
594 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
595 work = &cpu_buffer->irq_work; in ring_buffer_wait()
648 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
649 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
650 nr_pages = cpu_buffer->nr_pages; in ring_buffer_wait()
652 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
653 cpu_buffer->shortest_full < full) in ring_buffer_wait()
654 cpu_buffer->shortest_full = full; in ring_buffer_wait()
655 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
689 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
698 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
699 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
868 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_is_head_page() argument
898 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_set_list_to_head() argument
911 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
915 head = cpu_buffer->head_page; in rb_head_page_activate()
922 rb_set_list_to_head(cpu_buffer, head->list.prev); in rb_head_page_activate()
936 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
941 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
943 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
947 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
970 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
975 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
979 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
984 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
988 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
993 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
997 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_inc_page() argument
1006 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1013 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1017 list = cpu_buffer->pages; in rb_set_head_page()
1018 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1021 page = head = cpu_buffer->head_page; in rb_set_head_page()
1030 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { in rb_set_head_page()
1031 cpu_buffer->head_page = page; in rb_set_head_page()
1034 rb_inc_page(cpu_buffer, &page); in rb_set_head_page()
1038 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
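
The rb_head_page_*() and rb_set_head_page() hits above all revolve around one trick: the head page is marked by storing flag bits in the low bits of the otherwise-aligned list pointers (the RB_FLAG_MASK check in rb_check_bpage() below tests exactly those bits), and rb_list_head() strips them before a pointer is followed. A minimal userspace sketch of that pointer-tagging idea; the flag values and helper names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Low two bits of an aligned pointer are free to carry flags. */
#define PTR_FLAG_MASK	0x3UL

static void *tag_ptr(void *p, unsigned long flag)
{
	return (void *)((uintptr_t)p | (flag & PTR_FLAG_MASK));
}

static void *untag_ptr(void *p)
{
	/* Mirror of what rb_list_head() does: mask the flag bits off. */
	return (void *)((uintptr_t)p & ~PTR_FLAG_MASK);
}

static unsigned long ptr_flag(void *p)
{
	return (uintptr_t)p & PTR_FLAG_MASK;
}

int main(void)
{
	int target = 42;
	void *tagged = tag_ptr(&target, 1);	/* e.g. "this entry is the head page" */

	printf("flag=%lu value=%d\n", ptr_flag(tagged), *(int *)untag_ptr(tagged));
	return 0;
}
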
1061 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1080 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1092 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1118 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
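
rb_tail_page_update() only publishes the new tail page if the tail it read is still current: the cmpxchg quietly does nothing when another writer already advanced it. A small userspace sketch of that compare-and-swap advance using C11 atomics; all names here are made up for illustration, and unlike the kernel's cmpxchg() (which returns the old value), the helper returns whether the swap happened:

#include <stdatomic.h>
#include <stdio.h>

struct bpage { int id; };			/* tiny stand-in for struct buffer_page */

struct tail_state {
	_Atomic(struct bpage *) tail;		/* stand-in for cpu_buffer->tail_page */
};

/*
 * Move the tail from 'seen' to 'next' only if nobody beat us to it,
 * mirroring the cmpxchg(&cpu_buffer->tail_page, tail_page, next_page)
 * shown above.  Returns 1 when this caller performed the update.
 */
static int try_advance_tail(struct tail_state *ts, struct bpage *seen,
			    struct bpage *next)
{
	return atomic_compare_exchange_strong(&ts->tail, &seen, next);
}

int main(void)
{
	struct bpage a = { 0 }, b = { 1 };
	struct tail_state ts = { &a };

	printf("advanced: %d\n", try_advance_tail(&ts, &a, &b));	/* 1: tail was still a */
	printf("advanced: %d\n", try_advance_tail(&ts, &a, &b));	/* 0: tail already moved */
	return 0;
}
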
1122 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1127 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) in rb_check_bpage()
1136 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_list() argument
1139 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) in rb_check_list()
1141 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) in rb_check_list()
1153 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1155 struct list_head *head = cpu_buffer->pages; in rb_check_pages()
1159 if (cpu_buffer->head_page) in rb_check_pages()
1160 rb_set_head_page(cpu_buffer); in rb_check_pages()
1162 rb_head_page_deactivate(cpu_buffer); in rb_check_pages()
1164 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
1166 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
1169 if (rb_check_list(cpu_buffer, head)) in rb_check_pages()
1173 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1176 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1179 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
1183 rb_head_page_activate(cpu_buffer); in rb_check_pages()
1259 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1266 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages()
1274 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1277 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1279 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1287 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1292 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1294 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1297 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1298 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1299 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1300 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1301 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1302 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1303 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1304 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1305 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1306 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1313 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1315 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1322 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1323 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1325 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1329 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1330 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1331 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1333 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1335 return cpu_buffer; in rb_allocate_cpu_buffer()
1338 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1341 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
1345 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1347 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1350 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1352 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1363 kfree(cpu_buffer); in rb_free_cpu_buffer()
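
rb_allocate_cpu_buffer() above follows the usual kernel shape: allocate the per-CPU struct on its CPU's node with kzalloc_node(), then the reader page, then the data pages, and release in reverse order when a later step fails (the free_buffer_page() and kfree() fallbacks at the end of the function). A userspace sketch of that allocate-then-unwind pattern, with made-up types and calloc() standing in for the kernel allocators:

#include <stdlib.h>

/* Made-up stand-ins for the kernel structures, for illustration only. */
struct buffer_page { void *page; };

struct per_cpu_buffer {
	int cpu;
	struct buffer_page *reader_page;
	void *pages;
};

/*
 * Each successful step gains one thing to free; each failure path
 * releases exactly what was gained so far, in reverse order.
 */
static struct per_cpu_buffer *alloc_cpu_buffer(int cpu, size_t nr_pages,
					       size_t page_size)
{
	struct per_cpu_buffer *cpu_buffer;

	cpu_buffer = calloc(1, sizeof(*cpu_buffer));
	if (!cpu_buffer)
		return NULL;
	cpu_buffer->cpu = cpu;

	cpu_buffer->reader_page = calloc(1, sizeof(*cpu_buffer->reader_page));
	if (!cpu_buffer->reader_page)
		goto fail_free_buffer;

	cpu_buffer->pages = calloc(nr_pages, page_size);
	if (!cpu_buffer->pages)
		goto fail_free_reader;

	return cpu_buffer;

fail_free_reader:
	free(cpu_buffer->reader_page);
fail_free_buffer:
	free(cpu_buffer);
	return NULL;
}

static void free_cpu_buffer(struct per_cpu_buffer *cpu_buffer)
{
	free(cpu_buffer->pages);
	free(cpu_buffer->reader_page);
	free(cpu_buffer);
}

int main(void)
{
	struct per_cpu_buffer *cpu_buffer = alloc_cpu_buffer(0, 8, 4096);

	if (cpu_buffer)
		free_cpu_buffer(cpu_buffer);
	return 0;
}
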
1481 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1494 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1505 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1506 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1516 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1522 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1548 cpu_buffer->pages = next_page; in rb_remove_pages()
1552 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1559 cpu_buffer->read = 0; in rb_remove_pages()
1562 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1563 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1565 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1576 rb_inc_page(cpu_buffer, &tmp_iter_page); in rb_remove_pages()
1587 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1588 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_remove_pages()
1600 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
1606 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
1608 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1611 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1633 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
1667 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
1668 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1673 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
1682 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
1686 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
1687 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
1689 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
1690 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
1693 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
1698 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
1700 rb_update_pages(cpu_buffer); in update_pages_handler()
1701 complete(&cpu_buffer->update_done); in update_pages_handler()
1717 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
1754 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1756 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1757 cpu_buffer->nr_pages; in ring_buffer_resize()
1761 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
1767 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1768 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1769 &cpu_buffer->new_pages, cpu)) { in ring_buffer_resize()
1783 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1784 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1789 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1790 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1793 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1799 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1800 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
1804 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1805 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1814 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
1816 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
1819 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1820 cpu_buffer->nr_pages; in ring_buffer_resize()
1822 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
1823 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
1824 __rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
1825 &cpu_buffer->new_pages, cpu_id)) { in ring_buffer_resize()
1834 rb_update_pages(cpu_buffer); in ring_buffer_resize()
1837 &cpu_buffer->update_pages_work); in ring_buffer_resize()
1838 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
1841 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1863 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1864 rb_check_pages(cpu_buffer); in ring_buffer_resize()
1876 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
1877 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
1879 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
1882 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
1910 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
1912 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
1913 cpu_buffer->reader_page->read); in rb_reader_event()
1934 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
1936 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
1949 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
1957 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
1958 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
1960 rb_inc_page(cpu_buffer, &iter->head_page); in rb_inc_iter()
1974 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
1990 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
2011 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2012 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_handle_head_page()
2043 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2062 rb_inc_page(cpu_buffer, &new_head); in rb_handle_head_page()
2064 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2081 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2098 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2105 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2116 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2119 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2128 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2155 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2197 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2203 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2207 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2208 struct ring_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2214 rb_inc_page(cpu_buffer, &next_page); in rb_move_tail()
2222 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2240 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { in rb_move_tail()
2246 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2252 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2256 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2274 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2275 cpu_buffer->tail_page) && in rb_move_tail()
2276 (cpu_buffer->commit_page == in rb_move_tail()
2277 cpu_buffer->reader_page))) { in rb_move_tail()
2278 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2284 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2288 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2291 rb_end_commit(cpu_buffer); in rb_move_tail()
2293 local_inc(&cpu_buffer->committing); in rb_move_tail()
2300 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2327 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2342 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2350 if (unlikely(!rb_event_is_commit(cpu_buffer, event))) in rb_update_event()
2358 bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer); in rb_update_event()
2414 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
2427 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
2444 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2453 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
2455 local_inc(&cpu_buffer->committing); in rb_start_commit()
2456 local_inc(&cpu_buffer->commits); in rb_start_commit()
2460 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
2473 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
2475 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
2476 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
2478 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2479 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2481 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2482 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2483 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); in rb_set_commit_to_write()
2485 if (rb_page_write(cpu_buffer->commit_page)) in rb_set_commit_to_write()
2486 cpu_buffer->write_stamp = in rb_set_commit_to_write()
2487 cpu_buffer->commit_page->page->time_stamp; in rb_set_commit_to_write()
2491 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
2492 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
2494 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2495 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2496 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2497 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
2510 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2514 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
2518 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
2519 !local_read(&cpu_buffer->committing))) in rb_end_commit()
2523 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
2526 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
2527 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
2529 local_dec(&cpu_buffer->committing); in rb_end_commit()
2539 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
2540 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
2541 local_inc(&cpu_buffer->committing); in rb_end_commit()
2560 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_event_is_commit() argument
2569 return cpu_buffer->commit_page->page == (void *)addr && in rb_event_is_commit()
2570 rb_commit_index(cpu_buffer) == index; in rb_event_is_commit()
2574 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_write_stamp() argument
2583 if (rb_event_is_commit(cpu_buffer, event)) { in rb_update_write_stamp()
2589 cpu_buffer->write_stamp = in rb_update_write_stamp()
2590 cpu_buffer->commit_page->page->time_stamp; in rb_update_write_stamp()
2593 cpu_buffer->write_stamp += delta; in rb_update_write_stamp()
2596 cpu_buffer->write_stamp = delta; in rb_update_write_stamp()
2598 cpu_buffer->write_stamp += event->time_delta; in rb_update_write_stamp()
2602 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit() argument
2605 local_inc(&cpu_buffer->entries); in rb_commit()
2606 rb_update_write_stamp(cpu_buffer, event); in rb_commit()
2607 rb_end_commit(cpu_buffer); in rb_commit()
2611 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
2623 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
2624 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
2626 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2629 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
2632 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
2635 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
2638 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
2640 full = cpu_buffer->shortest_full; in rb_wakeups()
2641 nr_pages = cpu_buffer->nr_pages; in rb_wakeups()
2642 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu); in rb_wakeups()
2646 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
2647 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
2649 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
2691 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
2693 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
2703 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) in trace_recursive_lock()
2706 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
2707 cpu_buffer->current_context = val; in trace_recursive_lock()
2713 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
2715 cpu_buffer->current_context &= in trace_recursive_unlock()
2716 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
2737 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
2743 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
2745 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
2757 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
2762 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
2764 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
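
trace_recursive_lock()/unlock() above give each trace context one bit in current_context: finding the bit already set means the same context re-entered the buffer, and unlock clears the lowest bit set at or above the nest offset via the val &= val - (1 << nest) trick. ring_buffer_nest_start()/end() shift that window by NESTED_BITS so an intentional nested write is not flagged as recursion. A simplified userspace model of the bit dance; the context numbering and names here are illustrative:

#include <stdio.h>

struct recursion_state {
	unsigned int current_context;
	unsigned int nest;
};

/* Returns 0 on success, -1 if this context is already inside the buffer. */
static int recursive_lock(struct recursion_state *rs, unsigned int bit)
{
	unsigned int val = rs->current_context;

	if (val & (1u << (bit + rs->nest)))
		return -1;

	rs->current_context = val | (1u << (bit + rs->nest));
	return 0;
}

static void recursive_unlock(struct recursion_state *rs)
{
	/* Clear the lowest bit set at or above 'nest'. */
	rs->current_context &= rs->current_context - (1u << rs->nest);
}

int main(void)
{
	struct recursion_state rs = { 0, 0 };

	printf("first enter:  %d\n", recursive_lock(&rs, 2));	/* 0: ok */
	printf("re-enter:     %d\n", recursive_lock(&rs, 2));	/* -1: recursion detected */
	recursive_unlock(&rs);
	printf("after unlock: %d\n", recursive_lock(&rs, 2));	/* 0: ok again */
	return 0;
}
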
2780 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
2783 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
2785 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
2787 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
2789 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
2798 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_timestamp() argument
2805 (unsigned long long)cpu_buffer->write_stamp, in rb_handle_timestamp()
2815 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
2831 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
2842 if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer)) in __rb_reserve_next()
2847 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
2852 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
2864 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
2871 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
2879 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
2889 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
2890 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
2891 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
2910 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
2913 info.ts = rb_time_stamp(cpu_buffer->buffer); in rb_reserve_next_event()
2914 diff = info.ts - cpu_buffer->write_stamp; in rb_reserve_next_event()
2921 rb_handle_timestamp(cpu_buffer, &info); in rb_reserve_next_event()
2923 if (likely(info.ts >= cpu_buffer->write_stamp)) { in rb_reserve_next_event()
2926 rb_handle_timestamp(cpu_buffer, &info); in rb_reserve_next_event()
2929 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
2943 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
2965 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
2980 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
2982 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
2988 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
2991 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
2998 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
3012 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
3016 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3031 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
3038 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
3042 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
3067 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
3074 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3081 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3083 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
3084 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
3091 rb_update_write_stamp(cpu_buffer, event); in ring_buffer_discard_commit()
3093 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
3095 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
3119 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3135 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3137 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3143 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
3146 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3154 rb_commit(cpu_buffer, event); in ring_buffer_write()
3156 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3161 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
3170 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3172 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3173 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
3174 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
3299 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
3304 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3305 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
3319 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
3324 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
3325 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
3336 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
3338 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
3339 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
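
rb_num_of_entries() above is plain bookkeeping: what is still readable equals everything written, minus what was overwritten before being read (overrun), minus what has already been consumed (read). A worked example of the same arithmetic with illustrative numbers:

#include <stdio.h>

/* Same accounting as rb_num_of_entries(), outside the kernel. */
static unsigned long entries_left(unsigned long written, unsigned long overrun,
				  unsigned long read)
{
	return written - (overrun + read);
}

int main(void)
{
	/* 1000 events written, 120 lost to overwrite, 300 already consumed. */
	printf("readable entries: %lu\n", entries_left(1000, 120, 300));	/* 580 */
	return 0;
}
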
3350 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
3357 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3358 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3363 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
3364 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
3366 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
3369 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3382 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
3388 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3389 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
3402 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
3407 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
3409 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
3421 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
3427 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
3428 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
3444 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
3450 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
3451 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
3466 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
3472 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
3473 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
3487 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
3492 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
3493 return cpu_buffer->read; in ring_buffer_read_events_cpu()
3506 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
3512 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
3513 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
3529 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
3535 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
3536 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
3545 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
3548 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
3549 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
3552 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
3555 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
3569 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
3575 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
3577 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3579 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
3589 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
3595 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
3598 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
3599 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
3600 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
3606 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
3611 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
3622 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
3627 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
3631 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
3671 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
3680 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
3689 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
3694 reader = cpu_buffer->reader_page; in rb_get_reader_page()
3697 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
3701 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
3702 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
3707 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
3711 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
3717 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
3718 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
3719 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
3720 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
3726 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
3729 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
3730 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
3737 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
3740 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); in rb_get_reader_page()
3752 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
3765 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
3778 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
3779 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); in rb_get_reader_page()
3781 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
3784 cpu_buffer->reader_page = reader; in rb_get_reader_page()
3785 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
3787 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
3788 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
3789 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
3797 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
3799 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
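
The rb_get_reader_page() hits above implement the reader's core trick: rather than copying data out under a lock, the reader splices its private spare page into the ring where the head page sat and takes the old head page out for exclusive reading, so writers keep streaming into the ring while the reader owns one full page. The real code performs the swap with a cmpxchg on the flag-tagged head pointer (rb_head_page_replace()); the single-threaded sketch below shows only the splice itself, on a plain circular list, with every name made up for illustration:

#include <stdio.h>

/* Tiny circular doubly-linked list of pages, illustrative only. */
struct bpage {
	struct bpage *next, *prev;
	int id;
};

/*
 * Splice the spare reader page into the ring in place of the head page,
 * hand the old head page to the reader, and advance the ring's head.
 */
static struct bpage *swap_reader_page(struct bpage **head, struct bpage *spare)
{
	struct bpage *reader = *head;

	/* Link the spare page into the ring where the old head sat. */
	spare->next = reader->next;
	spare->prev = reader->prev;
	reader->prev->next = spare;
	reader->next->prev = spare;

	/* The old head is now private to the reader; move the head on. */
	*head = spare->next;
	reader->next = reader->prev = reader;
	return reader;
}

int main(void)
{
	struct bpage a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };
	struct bpage spare = { .id = 99 };
	struct bpage *head = &a;

	/* Build a three-page ring: a -> b -> c -> a. */
	a.next = &b; b.next = &c; c.next = &a;
	a.prev = &c; b.prev = &a; c.prev = &b;

	struct bpage *reader = swap_reader_page(&head, &spare);
	printf("reading page %d, new head is page %d\n", reader->id, head->id);
	return 0;
}
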
3805 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
3811 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
3814 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
3817 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
3820 cpu_buffer->read++; in rb_advance_reader()
3822 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
3825 cpu_buffer->reader_page->read += length; in rb_advance_reader()
3830 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
3834 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
3841 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
3855 if (RB_WARN_ON(cpu_buffer, in rb_advance_iter()
3856 (iter->head_page == cpu_buffer->commit_page) && in rb_advance_iter()
3857 (iter->head + length > rb_commit_index(cpu_buffer)))) in rb_advance_iter()
3866 (iter->head_page != cpu_buffer->commit_page)) in rb_advance_iter()
3870 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
3872 return cpu_buffer->lost_events; in rb_lost_events()
3876 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
3892 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
3895 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
3899 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
3904 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
3917 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3923 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3924 cpu_buffer->cpu, ts); in rb_buffer_peek()
3927 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
3932 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
3933 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
3934 cpu_buffer->cpu, ts); in rb_buffer_peek()
3937 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
3952 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
3959 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
3960 buffer = cpu_buffer->buffer; in rb_iter_peek()
3967 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
3968 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
3983 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) in rb_iter_peek()
3986 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
4013 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4014 cpu_buffer->cpu, ts); in rb_iter_peek()
4024 cpu_buffer->cpu, ts); in rb_iter_peek()
4036 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
4039 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4052 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4056 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4061 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
4064 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4082 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
4092 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
4093 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
4095 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
4096 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
4116 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
4121 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4123 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4146 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
4158 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
4160 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
4162 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
4164 cpu_buffer->lost_events = 0; in ring_buffer_consume()
4165 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
4168 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
4205 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
4215 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4217 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
4220 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_read_prepare()
4254 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
4260 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
4262 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4263 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
4265 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
4266 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4280 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
4289 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4290 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
4291 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4293 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_read_finish()
4294 atomic_dec(&cpu_buffer->buffer->resize_disabled); in ring_buffer_read_finish()
4310 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read() local
4313 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
4324 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read()
4350 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
4352 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
4354 cpu_buffer->head_page in rb_reset_cpu()
4355 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
4356 local_set(&cpu_buffer->head_page->write, 0); in rb_reset_cpu()
4357 local_set(&cpu_buffer->head_page->entries, 0); in rb_reset_cpu()
4358 local_set(&cpu_buffer->head_page->page->commit, 0); in rb_reset_cpu()
4360 cpu_buffer->head_page->read = 0; in rb_reset_cpu()
4362 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
4363 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
4365 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
4366 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
4367 local_set(&cpu_buffer->reader_page->write, 0); in rb_reset_cpu()
4368 local_set(&cpu_buffer->reader_page->entries, 0); in rb_reset_cpu()
4369 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_reset_cpu()
4370 cpu_buffer->reader_page->read = 0; in rb_reset_cpu()
4372 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
4373 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
4374 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
4375 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
4376 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
4377 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
4378 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
4379 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
4380 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
4381 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
4382 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
4383 cpu_buffer->read = 0; in rb_reset_cpu()
4384 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
4386 cpu_buffer->write_stamp = 0; in rb_reset_cpu()
4387 cpu_buffer->read_stamp = 0; in rb_reset_cpu()
4389 cpu_buffer->lost_events = 0; in rb_reset_cpu()
4390 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
4392 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
4402 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
4409 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4414 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4416 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in ring_buffer_reset_cpu()
4419 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4421 rb_reset_cpu(cpu_buffer); in ring_buffer_reset_cpu()
4423 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_reset_cpu()
4426 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_reset_cpu()
4428 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
4452 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
4460 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
4462 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
4463 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
4464 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
4482 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
4490 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
4492 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
4493 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
4494 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
4594 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
4602 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
4604 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
4606 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
4607 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
4608 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
4611 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
4641 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page() local
4651 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
4653 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
4654 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
4658 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
4702 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
4732 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
4734 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
4738 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4744 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
4754 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
4755 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
4773 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
4788 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
4795 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
4808 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
4809 cpu_buffer->read_bytes += BUF_PAGE_SIZE; in ring_buffer_read_page()
4830 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
4856 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()