Lines matching refs: cpu_buffer
550 struct ring_buffer_per_cpu *cpu_buffer; member
753 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
756 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
757 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
780 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
809 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp() local
817 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
818 verify_event(cpu_buffer, event); in ring_buffer_event_time_stamp()
824 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
831 if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) in ring_buffer_event_time_stamp()
833 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
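A hedged sketch of how a caller might use ring_buffer_event_time_stamp() given the lines above; the wrapper name and the trace_buffer handle `tb` are illustrative, not from this file:

#include <linux/ring_buffer.h>

/*
 * Illustrative only: an event's timestamp can be read back while the
 * event is still inside its reserve/commit window, because the
 * committing counter read in ring_buffer_event_time_stamp() selects
 * the event_stamp[] slot for the current nesting level.  Call this
 * between ring_buffer_lock_reserve() and ring_buffer_unlock_commit(),
 * on the CPU that reserved the event.
 */
static u64 example_reserved_event_ts(struct trace_buffer *tb,
				     struct ring_buffer_event *event)
{
	return ring_buffer_event_time_stamp(tb, event);
}
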
902 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
919 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
920 work = &cpu_buffer->irq_work; in ring_buffer_wait()
973 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
974 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
975 nr_pages = cpu_buffer->nr_pages; in ring_buffer_wait()
977 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
978 cpu_buffer->shortest_full < full) in ring_buffer_wait()
979 cpu_buffer->shortest_full = full; in ring_buffer_wait()
980 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1014 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1023 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1024 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1242 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
1246 head = cpu_buffer->head_page; in rb_head_page_activate()
1267 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
1272 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1274 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1278 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
1301 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
1306 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1310 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
1315 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
1319 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
1324 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
1336 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1343 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1347 list = cpu_buffer->pages; in rb_set_head_page()
1348 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1351 page = head = cpu_buffer->head_page; in rb_set_head_page()
1361 cpu_buffer->head_page = page; in rb_set_head_page()
1368 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
1391 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1410 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1422 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1448 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
1452 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1457 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) in rb_check_bpage()
1466 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_list() argument
1469 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) in rb_check_list()
1471 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) in rb_check_list()
1483 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1485 struct list_head *head = cpu_buffer->pages; in rb_check_pages()
1489 if (cpu_buffer->head_page) in rb_check_pages()
1490 rb_set_head_page(cpu_buffer); in rb_check_pages()
1492 rb_head_page_deactivate(cpu_buffer); in rb_check_pages()
1494 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
1496 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
1499 if (rb_check_list(cpu_buffer, head)) in rb_check_pages()
1503 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1506 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1509 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
1513 rb_head_page_activate(cpu_buffer); in rb_check_pages()
1518 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages() argument
1559 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1563 rb_check_bpage(cpu_buffer, bpage); in __rb_allocate_pages()
1567 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1592 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1599 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) in rb_allocate_pages()
1607 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1610 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1612 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1620 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1625 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1627 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1630 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1631 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1632 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1633 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1634 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1635 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1636 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1637 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1638 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1639 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1646 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1648 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1655 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1656 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1658 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1662 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1663 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1664 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1666 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1668 return cpu_buffer; in rb_allocate_cpu_buffer()
1671 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1674 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
1678 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1680 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1683 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1685 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1696 kfree(cpu_buffer); in rb_free_cpu_buffer()
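The rb_allocate_cpu_buffer()/rb_free_cpu_buffer() paths above are driven by the public allocation API. A minimal sketch; the buffer size, flags and function name are example choices, not taken from this file:

#include <linux/errno.h>
#include <linux/ring_buffer.h>

/*
 * Allocate a trace ring buffer and free it again.  ring_buffer_alloc()
 * creates one ring_buffer_per_cpu (rb_allocate_cpu_buffer() above) for
 * each possible CPU; RB_FL_OVERWRITE makes the writer overwrite the
 * oldest pages when the buffer fills instead of dropping new events.
 */
static int example_alloc_free(void)
{
	struct trace_buffer *tb;

	tb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE); /* ~1 MiB per CPU */
	if (!tb)
		return -ENOMEM;

	/* ... record and read events ... */

	ring_buffer_free(tb); /* rb_free_cpu_buffer() runs for each CPU */
	return 0;
}
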
1815 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1828 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1839 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1840 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1850 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1856 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1882 cpu_buffer->pages = next_page; in rb_remove_pages()
1886 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1893 cpu_buffer->read = 0; in rb_remove_pages()
1896 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1897 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1899 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1921 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1922 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_remove_pages()
1934 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
1940 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
1942 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1945 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1967 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
2001 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
2002 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
2007 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2016 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
2020 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2021 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
2023 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
2024 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2027 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2032 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2034 rb_update_pages(cpu_buffer); in update_pages_handler()
2035 complete(&cpu_buffer->update_done); in update_pages_handler()
2051 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
2083 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2084 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2092 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2094 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2095 cpu_buffer->nr_pages; in ring_buffer_resize()
2099 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2105 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2106 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2107 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2121 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2122 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2127 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2128 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2131 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2137 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2138 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2142 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2143 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2148 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2150 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2158 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2163 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2164 cpu_buffer->nr_pages; in ring_buffer_resize()
2166 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2167 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2168 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2169 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2178 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2181 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2182 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2185 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2207 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2208 rb_check_pages(cpu_buffer); in ring_buffer_resize()
2220 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2221 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2223 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2226 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
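ring_buffer_resize() above is the entry point for the page add/remove machinery. An illustrative call, assuming an already-allocated handle `tb`:

#include <linux/ring_buffer.h>

/*
 * Grow or shrink the buffer at runtime.  The size is in bytes and is
 * converted to whole buffer pages; RING_BUFFER_ALL_CPUS takes the
 * "resize all CPUs" branch of ring_buffer_resize() above, while a CPU
 * number takes the single-CPU branch guarded by resize_disabled.
 */
static int example_resize_all(struct trace_buffer *tb, unsigned long bytes)
{
	return ring_buffer_resize(tb, bytes, RING_BUFFER_ALL_CPUS);
}
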
2255 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
2257 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2258 cpu_buffer->reader_page->read); in rb_reader_event()
2327 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
2329 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2342 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
2350 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2351 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2368 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
2384 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
2405 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2406 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_handle_head_page()
2437 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2458 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2475 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2492 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2499 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2510 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2513 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2522 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2549 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2591 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2597 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2601 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2602 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2616 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2640 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2646 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2650 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2668 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2669 cpu_buffer->tail_page) && in rb_move_tail()
2670 (cpu_buffer->commit_page == in rb_move_tail()
2671 cpu_buffer->reader_page))) { in rb_move_tail()
2672 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2678 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2682 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2685 rb_end_commit(cpu_buffer); in rb_move_tail()
2687 local_inc(&cpu_buffer->committing); in rb_move_tail()
2694 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2729 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp() argument
2739 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2747 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp() argument
2772 rb_check_timestamp(cpu_buffer, info); in rb_add_timestamp()
2793 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2799 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
2802 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2809 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); in rb_update_event()
2872 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
2887 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
2891 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) in rb_try_to_discard()
2903 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, in rb_try_to_discard()
2916 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
2937 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2946 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
2948 local_inc(&cpu_buffer->committing); in rb_start_commit()
2949 local_inc(&cpu_buffer->commits); in rb_start_commit()
2953 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
2966 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
2968 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
2969 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
2971 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2972 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2974 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2975 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2976 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
2980 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
2981 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
2983 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2984 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2985 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2986 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
2999 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3003 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
3007 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
3008 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3012 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3015 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3016 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
3018 local_dec(&cpu_buffer->committing); in rb_end_commit()
3028 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3029 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3030 local_inc(&cpu_buffer->committing); in rb_end_commit()
3048 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit() argument
3051 local_inc(&cpu_buffer->entries); in rb_commit()
3052 rb_end_commit(cpu_buffer); in rb_commit()
3056 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3068 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3069 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3071 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3074 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3077 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3080 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3083 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3085 full = cpu_buffer->shortest_full; in rb_wakeups()
3086 nr_pages = cpu_buffer->nr_pages; in rb_wakeups()
3087 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu); in rb_wakeups()
3091 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3092 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3094 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3167 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
3169 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3179 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3186 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3192 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3193 cpu_buffer->current_context = val; in trace_recursive_lock()
3199 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
3201 cpu_buffer->current_context &= in trace_recursive_unlock()
3202 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
3223 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
3229 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3231 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3243 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
3248 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3250 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3266 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
3269 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3271 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
3273 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3275 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
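ring_buffer_unlock_commit() pairs with ring_buffer_lock_reserve() around the rb_reserve_next_event()/rb_commit() path listed above. A hedged usage sketch, assuming this kernel's two-argument ring_buffer_unlock_commit() and a caller-provided handle `tb`:

#include <linux/errno.h>
#include <linux/ring_buffer.h>
#include <linux/string.h>

/*
 * Reserve space on the current CPU's buffer, fill it, then commit.
 * ring_buffer_lock_reserve() keeps preemption disabled until the
 * matching ring_buffer_unlock_commit() (or a discard), which is why
 * the commit path can rely on staying on the same cpu_buffer.
 */
static int example_write_event(struct trace_buffer *tb, const void *msg,
			       unsigned long len)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tb, len);
	if (!event)
		return -EBUSY; /* buffer full, or recording disabled */

	memcpy(ring_buffer_event_data(event), msg, len);

	return ring_buffer_unlock_commit(tb, event);
}
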
3339 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
3400 RB_WARN_ON(cpu_buffer, 1); in check_buffer()
3410 atomic_inc(&cpu_buffer->record_disabled); in check_buffer()
3414 cpu_buffer->cpu, in check_buffer()
3427 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
3435 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
3445 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3449 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3450 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3452 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3474 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3486 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3487 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3489 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, in __rb_reserve_next()
3492 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); in __rb_reserve_next()
3493 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
3501 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3503 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); in __rb_reserve_next()
3504 RB_WARN_ON(cpu_buffer, !s_ok); in __rb_reserve_next()
3513 check_buffer(cpu_buffer, info, tail); in __rb_reserve_next()
3517 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3518 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3526 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3533 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3535 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3536 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3540 rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3570 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
3582 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3589 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
3597 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
3608 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3609 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3610 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3617 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3637 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
3640 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
3651 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
3673 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
3688 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3690 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3696 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
3699 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
3706 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
3720 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
3724 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3750 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
3775 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
3782 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3789 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3791 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
3792 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
3796 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
3798 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
3822 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3838 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3840 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3846 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
3849 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3857 rb_commit(cpu_buffer, event); in ring_buffer_write()
3859 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3864 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
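ring_buffer_write() is the one-shot variant of the reserve/commit pair; it copies the payload and commits internally, ending in the rb_commit()/rb_wakeups() lines above. A small illustrative call with an arbitrary payload:

#include <linux/ring_buffer.h>

/*
 * One-shot write: reserve, copy and commit in a single call.
 */
static int example_write_oneshot(struct trace_buffer *tb)
{
	u64 value = 42; /* arbitrary example payload */

	return ring_buffer_write(tb, sizeof(value), &value);
}
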
3873 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3875 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3876 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
3877 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
4022 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
4027 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4028 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4042 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
4047 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4048 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
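ring_buffer_record_disable_cpu()/ring_buffer_record_enable_cpu() bump and drop the per-CPU record_disabled counter seen above. A sketch of the usual bracketed usage; the surrounding function is hypothetical:

#include <linux/ring_buffer.h>

/*
 * Pause recording on one CPU around a maintenance step.  The
 * record_disabled counter is atomic and nests, so each disable must
 * be balanced by an enable.
 */
static void example_pause_cpu(struct trace_buffer *tb, int cpu)
{
	ring_buffer_record_disable_cpu(tb, cpu);
	/* ... e.g. inspect or reset this CPU's data ... */
	ring_buffer_record_enable_cpu(tb, cpu);
}
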
4059 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
4061 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4062 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4073 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
4080 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4081 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4086 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4087 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4089 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
4092 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4105 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
4111 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4112 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4125 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
4130 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4132 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
4144 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
4150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4151 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4167 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
4173 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4174 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4189 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
4195 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4196 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4210 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
4215 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4216 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4229 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
4235 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4236 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
4252 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
4258 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4259 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4268 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
4271 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4272 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4276 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4279 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4280 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4296 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
4302 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4304 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4306 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4316 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
4325 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4326 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4327 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4328 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4342 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4354 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4359 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
4370 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4375 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4379 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4383 RB_WARN_ON(cpu_buffer, 1); in rb_update_read_stamp()
4413 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4419 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
4428 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4437 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
4442 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4445 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4449 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
4450 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4455 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4459 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
4465 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4466 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4467 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4468 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4474 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
4477 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4478 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4485 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4488 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
4500 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4513 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4526 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4527 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
4529 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4532 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4533 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4535 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4536 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4537 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4545 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4547 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4553 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
4559 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
4562 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
4565 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
4568 cpu_buffer->read++; in rb_advance_reader()
4570 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
4573 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4578 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
4580 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4596 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4605 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
4607 return cpu_buffer->lost_events; in rb_lost_events()
4611 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
4627 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
4630 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
4634 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
4639 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4652 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4658 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4659 cpu_buffer->cpu, ts); in rb_buffer_peek()
4662 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4667 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4668 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4669 cpu_buffer->cpu, ts); in rb_buffer_peek()
4672 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
4676 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4687 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
4694 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4695 buffer = cpu_buffer->buffer; in rb_iter_peek()
4702 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4703 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
4720 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
4749 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4750 cpu_buffer->cpu, ts); in rb_iter_peek()
4760 cpu_buffer->cpu, ts); in rb_iter_peek()
4765 RB_WARN_ON(cpu_buffer, 1); in rb_iter_peek()
4772 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
4775 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4788 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4792 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4797 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
4800 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4818 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
4828 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
4829 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
4831 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
4832 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
4866 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
4871 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4873 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4896 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
4908 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
4910 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
4912 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
4914 cpu_buffer->lost_events = 0; in ring_buffer_consume()
4915 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
4918 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
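ring_buffer_consume() wraps rb_buffer_peek() plus rb_advance_reader() as listed above, so repeated calls drain one CPU's buffer. An illustrative reader loop, assuming a valid handle `tb`:

#include <linux/printk.h>
#include <linux/ring_buffer.h>

/*
 * Drain whatever is currently readable on one CPU.  Each successful
 * ring_buffer_consume() returns the next event and advances the
 * reader, so the loop ends when the per-CPU buffer is empty.
 */
static void example_drain_cpu(struct trace_buffer *tb, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	while ((event = ring_buffer_consume(tb, cpu, &ts, &lost))) {
		pr_info("cpu%d ts=%llu lost=%lu len=%u\n",
			cpu, ts, lost, ring_buffer_event_length(event));
	}
}
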
4955 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
4971 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4973 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
4975 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5009 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
5015 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5017 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5018 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5020 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5021 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5035 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
5044 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5045 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
5046 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5048 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5063 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
5066 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5070 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
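The iterator functions above (read_prepare/read_start/iter_advance/read_finish) support non-consuming reads. A hedged walk-through sketch; the function name is illustrative:

#include <linux/gfp.h>
#include <linux/ring_buffer.h>

/*
 * Non-consuming walk of one CPU's buffer.  read_prepare() bumps
 * resize_disabled (seen above), read_start() resets the iterator under
 * the reader lock, and read_finish() re-checks the page list and drops
 * resize_disabled again.
 */
static void example_iterate_cpu(struct trace_buffer *tb, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(tb, cpu, GFP_KERNEL);
	if (!iter)
		return;

	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts)))
		ring_buffer_iter_advance(iter); /* inspect, then step forward */

	ring_buffer_read_finish(iter);
}
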
5095 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
5097 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
5099 cpu_buffer->head_page in rb_reset_cpu()
5100 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5101 local_set(&cpu_buffer->head_page->write, 0); in rb_reset_cpu()
5102 local_set(&cpu_buffer->head_page->entries, 0); in rb_reset_cpu()
5103 local_set(&cpu_buffer->head_page->page->commit, 0); in rb_reset_cpu()
5105 cpu_buffer->head_page->read = 0; in rb_reset_cpu()
5107 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5108 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5110 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5111 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5112 local_set(&cpu_buffer->reader_page->write, 0); in rb_reset_cpu()
5113 local_set(&cpu_buffer->reader_page->entries, 0); in rb_reset_cpu()
5114 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_reset_cpu()
5115 cpu_buffer->reader_page->read = 0; in rb_reset_cpu()
5117 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5118 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5119 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5120 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5121 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5122 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5123 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5124 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5125 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5126 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5127 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5128 cpu_buffer->read = 0; in rb_reset_cpu()
5129 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5131 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5132 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5134 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5136 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5137 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5139 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
5143 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer() argument
5147 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5149 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5152 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5154 rb_reset_cpu(cpu_buffer); in reset_disabled_cpu_buffer()
5156 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5159 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5169 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
5177 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5178 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5183 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_cpu()
5185 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5186 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5199 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
5206 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5208 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5209 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5218 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_online_cpus()
5220 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5221 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5233 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
5237 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5239 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5240 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5247 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5249 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset()
5251 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5252 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5263 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
5271 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5273 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
5274 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
5275 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
5293 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
5301 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5303 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
5304 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
5305 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
5406 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
5414 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5416 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5418 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5419 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5420 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5423 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5453 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page() local
5463 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5465 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5466 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5470 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5514 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
5544 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5546 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
5550 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5556 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5566 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5567 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5585 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5600 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
5607 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5620 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5621 cpu_buffer->read_bytes += BUF_PAGE_SIZE; in ring_buffer_read_page()
5642 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5668 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
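ring_buffer_read_page() above implements the page-at-a-time (splice-style) read path. A sketch of the alloc/read/free cycle; error handling is kept minimal and the function name is illustrative:

#include <linux/err.h>
#include <linux/ring_buffer.h>

/*
 * Page-at-a-time read.  The page handed out by
 * ring_buffer_alloc_read_page() may be swapped with the per-CPU reader
 * page inside ring_buffer_read_page(), so it must always be returned
 * with ring_buffer_free_read_page().
 */
static void example_read_full_page(struct trace_buffer *tb, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(tb, cpu);
	if (IS_ERR(page))
		return;

	/* full != 0: only hand back a page the writer is done with */
	ret = ring_buffer_read_page(tb, &page, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		/* page now holds a buffer_data_page worth of events */
	}

	ring_buffer_free_read_page(tb, cpu, page);
}
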