Lines Matching +full:partition +full:- +full:art

1 // SPDX-License-Identifier: GPL-2.0
78 * Here's some silly ASCII art.
79 *
80 *   +------+
81 *   |reader|          RING BUFFER
82 *   |page  |
83 *   +------+        +---+   +---+   +---+
84 *                   |   |-->|   |-->|   |
85 *                   +---+   +---+   +---+
86 *                     ^               |
87 *                     |               |
88 *                     +---------------+
89 *
90 *
91 *   +------+
92 *   |reader|          RING BUFFER
93 *   |page  |------------------v
94 *   +------+        +---+   +---+   +---+
95 *                   |   |-->|   |-->|   |
96 *                   +---+   +---+   +---+
97 *                     ^               |
98 *                     |               |
99 *                     +---------------+
100 *
101 *
102 *   +------+
103 *   |reader|          RING BUFFER
104 *   |page  |------------------v
105 *   +------+        +---+   +---+   +---+
106 *      ^            |   |-->|   |-->|   |
107 *      |            +---+   +---+   +---+
108 *      |                              |
109 *      |                              |
110 *      +------------------------------+
111 *
112 *
113 *   +------+
114 *   |buffer|          RING BUFFER
115 *   |page  |------------------v
116 *   +------+        +---+   +---+   +---+
117 *      ^            |   |   |   |-->|   |
118 *      |   New      +---+   +---+   +---+
119 *      |  Reader------^               |
120 *      |   page                       |
121 *      +------------------------------+
164 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
168 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
174 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
175 event->time_delta = 0; in rb_event_set_padding()
183 if (event->type_len) in rb_event_data_length()
184 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
186 length = event->array[0]; in rb_event_data_length()
198 switch (event->type_len) { in rb_event_length()
202 return -1; in rb_event_length()
203 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
238 * ring_buffer_event_length - return the length of the event
255 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
257 length -= RB_EVNT_HDR_SIZE; in ring_buffer_event_length()
258 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
259 length -= sizeof(event->array[0]); in ring_buffer_event_length()
270 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
272 if (event->type_len) in rb_event_data()
273 return (void *)&event->array[0]; in rb_event_data()
275 return (void *)&event->array[1]; in rb_event_data()
279 * ring_buffer_event_data - return the data of the event
289 for_each_cpu(cpu, buffer->cpumask)
292 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
295 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
302 ts = event->array[0]; in rb_event_time_stamp()
304 ts += event->time_delta; in rb_event_time_stamp()
344 * the update partition of the counter is incremented. This will
354 local_set(&bpage->commit, 0); in rb_init_page()
363 free_page((unsigned long)bpage->page); in free_buffer_page()
377 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
379 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
380 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
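To make the sizing above concrete, here is a rough worked example. It assumes 4 KiB pages and a 16-byte buffer_data_page header (a u64 time_stamp plus an 8-byte commit counter on a 64-bit build); BUF_PAGE_HDR_SIZE really comes from the struct layout, so treat these numbers as illustrative only.

/*
 * Illustrative sizing only: the real values come from PAGE_SIZE and
 * offsetof(struct buffer_data_page, data) on the running system.
 */
#define EXAMPLE_PAGE_SIZE	4096
#define EXAMPLE_HDR_SIZE	16	/* u64 time_stamp + 8-byte commit */
#define EXAMPLE_BUF_PAGE_SIZE	(EXAMPLE_PAGE_SIZE - EXAMPLE_HDR_SIZE)		/* 4080 */
#define EXAMPLE_MAX_DATA_SIZE	(EXAMPLE_BUF_PAGE_SIZE - 2 * sizeof(u32))	/* 4072 */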
438 * EXTEND - wants a time extend
439 * ABSOLUTE - the buffer requests all events to have absolute time stamps
440 * FORCE - force a full time stamp.
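These add_timestamp modes are bit flags, so a single event can request more than one of them (the reserve path further down ORs RB_ADD_STAMP_FORCE and RB_ADD_STAMP_EXTEND together). A sketch of the flag definitions as they appear in the source, reproduced from memory and therefore only indicative:

enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),	/* wants a time extend */
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),	/* all events get absolute stamps */
	RB_ADD_STAMP_FORCE	= BIT(3)	/* force a full time stamp */
};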
583 * - Reads may fail if it interrupted a modification of the time stamp.
588 * - Writes always succeed and will overwrite other writes and writes
591 * - A write followed by a read of the same time stamp will always succeed,
594 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
600 * The two most significant bits of each half holds a 2 bit counter (0-3).
606 #define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
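On 32-bit builds a 64-bit stamp cannot be written atomically, so it is stored as separate local_t words carrying RB_TIME_SHIFT (30) payload bits each, with the 2-bit counter in the top bits of every word; a reader compares the counter before and after to detect a torn update. A minimal sketch of how a value would be split under that layout (the helper name is made up):

/* Illustration only: split a u64 the way the 32-bit rb_time_t scheme does. */
static inline void example_rb_time_split(u64 val, unsigned long *top,
					 unsigned long *bottom,
					 unsigned long *msb)
{
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*top    = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*msb    = (unsigned long)(val >> (2 * RB_TIME_SHIFT));	/* bits 60 and up */
}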
636 c = local_read(&t->cnt); in __rb_time_read()
637 top = local_read(&t->top); in __rb_time_read()
638 bottom = local_read(&t->bottom); in __rb_time_read()
639 msb = local_read(&t->msb); in __rb_time_read()
640 } while (c != local_read(&t->cnt)); in __rb_time_read()
687 cnt = local_inc_return(&t->cnt); in rb_time_set()
688 rb_time_val_set(&t->top, top, cnt); in rb_time_set()
689 rb_time_val_set(&t->bottom, bottom, cnt); in rb_time_set()
690 rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt); in rb_time_set()
691 } while (cnt != local_read(&t->cnt)); in rb_time_set()
716 cnt = local_read(&t->cnt); in rb_time_cmpxchg()
730 if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2)) in rb_time_cmpxchg()
732 if (!rb_time_read_cmpxchg(&t->msb, msb, msb2)) in rb_time_cmpxchg()
734 if (!rb_time_read_cmpxchg(&t->top, top, top2)) in rb_time_cmpxchg()
736 if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2)) in rb_time_cmpxchg()
747 *ret = local64_read(&t->time); in rb_time_read()
752 local64_set(&t->time, val); in rb_time_set()
758 val = local64_cmpxchg(&t->time, expect, set); in rb_time_cmpxchg()
774 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
775 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
786 commit = local_read(&page->page->commit); in verify_event()
787 write = local_read(&page->write); in verify_event()
788 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
789 addr < (unsigned long)&page->page->data[write]) in verify_event()
792 next = rb_list_head(page->list.next); in verify_event()
826 * ring_buffer_event_time_stamp - return the event's current time stamp
845 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
850 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { in ring_buffer_event_time_stamp()
852 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
855 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
861 if (likely(--nest < MAX_NEST)) in ring_buffer_event_time_stamp()
862 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
869 if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) in ring_buffer_event_time_stamp()
871 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
877 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
885 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
889 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
901 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
902 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
903 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
908 cnt -= lost; in ring_buffer_nr_dirty_pages()
916 return cnt - read; in ring_buffer_nr_dirty_pages()
921 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
925 nr_pages = cpu_buffer->nr_pages; in full_hit()
935 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
944 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
945 if (rbwork->full_waiters_pending || rbwork->wakeup_full) { in rb_wake_up_waiters()
946 rbwork->wakeup_full = false; in rb_wake_up_waiters()
947 rbwork->full_waiters_pending = false; in rb_wake_up_waiters()
948 wake_up_all(&rbwork->full_waiters); in rb_wake_up_waiters()
953 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
973 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
975 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
980 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
984 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
987 rbwork->wait_index++; in ring_buffer_wake_waiters()
991 rb_wake_up_waiters(&rbwork->work); in ring_buffer_wake_waiters()
995 * ring_buffer_wait - wait for input to the ring buffer
1018 work = &buffer->irq_work; in ring_buffer_wait()
1022 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
1023 return -ENODEV; in ring_buffer_wait()
1024 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
1025 work = &cpu_buffer->irq_work; in ring_buffer_wait()
1028 wait_index = READ_ONCE(work->wait_index); in ring_buffer_wait()
1032 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
1034 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
1057 work->full_waiters_pending = true; in ring_buffer_wait()
1059 work->waiters_pending = true; in ring_buffer_wait()
1062 ret = -EINTR; in ring_buffer_wait()
1078 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1079 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
1082 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
1083 cpu_buffer->shortest_full > full) in ring_buffer_wait()
1084 cpu_buffer->shortest_full = full; in ring_buffer_wait()
1085 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1094 if (wait_index != work->wait_index) in ring_buffer_wait()
1099 finish_wait(&work->full_waiters, &wait); in ring_buffer_wait()
1101 finish_wait(&work->waiters, &wait); in ring_buffer_wait()
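From a caller's point of view the machinery above collapses into a single blocking call. A hedged usage sketch, assuming the signature used by this version of the code (buffer, cpu, and a "percent full" threshold; newer kernels have since extended it):

/* Block until cpu 0's buffer is at least half full, or we are signalled. */
static int example_wait_half_full(struct trace_buffer *buffer)
{
	int ret;

	ret = ring_buffer_wait(buffer, 0, 50);
	if (ret)	/* -EINTR if a signal woke us, -ENODEV if no such CPU */
		return ret;

	return 0;	/* enough data should now be available */
}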
1107 * ring_buffer_poll_wait - poll on buffer input
1128 work = &buffer->irq_work; in ring_buffer_poll_wait()
1131 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1132 return -EINVAL; in ring_buffer_poll_wait()
1134 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1135 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1139 poll_wait(filp, &work->full_waiters, poll_table); in ring_buffer_poll_wait()
1140 work->full_waiters_pending = true; in ring_buffer_poll_wait()
1142 poll_wait(filp, &work->waiters, poll_table); in ring_buffer_poll_wait()
1143 work->waiters_pending = true; in ring_buffer_poll_wait()
1178 atomic_inc(&__b->buffer->record_disabled); \
1180 atomic_inc(&b->record_disabled); \
1193 /* Skip retpolines :-( */ in rb_time_stamp()
1194 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1197 ts = buffer->clock(); in rb_time_stamp()
1239 * ASCII art, the reader sets its old page to point to the next
1255 * head->list->prev->next bit 1 bit 0
1256 * ------- -------
1263 *  +----+       +-----+        +-----+
1264 *  |    |------>|  T  |---X--->|  N  |
1265 *  |    |<------|     |        |     |
1266 *  +----+       +-----+        +-----+
1267 *    ^                           ^ |
1268 *    |          +-----+          | |
1269 *    +----------|  R  |----------+ |
1270 *               |     |<-----------+
1271 *               +-----+
1273 * Key: ---X--> HEAD flag set in pointer
1303 * rb_list_head - remove any bit
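Buffer pages are allocated at aligned addresses, so the two low bits of a list ->next pointer are always zero and can carry the HEAD/UPDATE state instead; rb_list_head() simply masks those bits off before the pointer is used. A sketch of that masking (the mask value is from memory, but the real helper is equivalent):

#define RB_FLAG_MASK	3UL	/* low two bits hold the HEAD/UPDATE state */

/* Strip any flag bits so the result can be dereferenced as a list_head. */
static struct list_head *example_rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}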
1313 * rb_is_head_page - test if the given page is the head page
1325 val = (unsigned long)list->next; in rb_is_head_page()
1327 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1342 struct list_head *list = page->list.prev; in rb_is_reader_page()
1344 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
1348 * rb_set_list_to_head - set a list_head to be pointing to head.
1354 ptr = (unsigned long *)&list->next; in rb_set_list_to_head()
1360 * rb_head_page_activate - sets up head page
1366 head = cpu_buffer->head_page; in rb_head_page_activate()
1373 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1378 unsigned long *ptr = (unsigned long *)&list->next; in rb_list_head_clear()
1384 * rb_head_page_deactivate - clears head page ptr (for free list)
1392 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1394 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1404 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1407 list = &prev->list; in rb_head_page_set()
1411 ret = cmpxchg((unsigned long *)&list->next, in rb_head_page_set()
1450 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1463 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1467 list = cpu_buffer->pages; in rb_set_head_page()
1468 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1471 page = head = cpu_buffer->head_page; in rb_set_head_page()
1480 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
1481 cpu_buffer->head_page = page; in rb_set_head_page()
1496 unsigned long *ptr = (unsigned long *)&old->list.prev->next; in rb_head_page_replace()
1503 ret = cmpxchg(ptr, val, (unsigned long)&new->list); in rb_head_page_replace()
1509 * rb_tail_page_update - move the tail page forward
1527 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); in rb_tail_page_update()
1528 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1530 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1542 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1557 (void)local_cmpxchg(&next_page->write, old_write, val); in rb_tail_page_update()
1558 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1565 local_set(&next_page->page->commit, 0); in rb_tail_page_update()
1568 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
1584 * rb_check_list - make sure a pointer to a list has the last bits zero
1589 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) in rb_check_list()
1591 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) in rb_check_list()
1597 * rb_check_pages - integrity check of buffer pages
1605 struct list_head *head = cpu_buffer->pages; in rb_check_pages()
1609 if (cpu_buffer->head_page) in rb_check_pages()
1614 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
1615 return -1; in rb_check_pages()
1616 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
1617 return -1; in rb_check_pages()
1620 return -1; in rb_check_pages()
1624 bpage->list.next->prev != &bpage->list)) in rb_check_pages()
1625 return -1; in rb_check_pages()
1627 bpage->list.prev->next != &bpage->list)) in rb_check_pages()
1628 return -1; in rb_check_pages()
1629 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
1630 return -1; in rb_check_pages()
1642 bool user_thread = current->mm != NULL; in __rb_allocate_pages()
1655 return -ENOMEM; in __rb_allocate_pages()
1659 * gracefully without invoking oom-killer and the system is not in __rb_allocate_pages()
1679 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1685 list_add(&bpage->list, pages); in __rb_allocate_pages()
1687 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1690 bpage->page = page_address(page); in __rb_allocate_pages()
1691 rb_init_page(bpage->page); in __rb_allocate_pages()
1703 list_del_init(&bpage->list); in __rb_allocate_pages()
1709 return -ENOMEM; in __rb_allocate_pages()
1720 return -ENOMEM; in rb_allocate_pages()
1727 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1730 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1750 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1751 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1752 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1753 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1754 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1755 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1756 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1757 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1758 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1759 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1768 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1772 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
1773 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
1775 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1776 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1782 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1783 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1784 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1791 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1800 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1803 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1809 list_del_init(&bpage->list); in rb_free_cpu_buffer()
1820 * __ring_buffer_alloc - allocate a new ring_buffer
1845 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1849 buffer->flags = flags; in __ring_buffer_alloc()
1850 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1851 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1853 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1854 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1860 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1863 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1865 if (!buffer->buffers) in __ring_buffer_alloc()
1869 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1870 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1871 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1874 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1878 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1884 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1885 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1887 kfree(buffer->buffers); in __ring_buffer_alloc()
1890 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
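__ring_buffer_alloc() is normally reached through the ring_buffer_alloc() wrapper macro, which supplies the lockdep key for it. A hedged usage sketch of creating and destroying a buffer (size and flags chosen arbitrarily; RB_FL_OVERWRITE makes writers overwrite the oldest events instead of dropping new ones):

/* Roughly 1 MiB of trace data per CPU, overwriting old events when full. */
static struct trace_buffer *example_create_buffer(void)
{
	return ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
}

static void example_destroy_buffer(struct trace_buffer *buffer)
{
	ring_buffer_free(buffer);
}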
1899 * ring_buffer_free - free a ring buffer.
1907 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1910 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1912 kfree(buffer->buffers); in ring_buffer_free()
1913 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1922 buffer->clock = clock; in ring_buffer_set_clock()
1927 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1932 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
1939 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
1944 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
1959 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1960 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1970 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1976 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1977 tail_page = rb_list_head(tail_page->next); in rb_remove_pages()
1981 first_page = list_entry(rb_list_head(to_remove->next), in rb_remove_pages()
1985 to_remove = rb_list_head(to_remove)->next; in rb_remove_pages()
1989 next_page = rb_list_head(to_remove)->next; in rb_remove_pages()
1996 tail_page->next = (struct list_head *)((unsigned long)next_page | in rb_remove_pages()
1999 next_page->prev = tail_page; in rb_remove_pages()
2002 cpu_buffer->pages = next_page; in rb_remove_pages()
2006 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
2013 cpu_buffer->read = 0; in rb_remove_pages()
2016 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
2017 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2019 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2041 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2042 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_remove_pages()
2043 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2051 nr_removed--; in rb_remove_pages()
2063 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2066 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
2074 * 2. We cmpxchg the prev_page->next to point from head page to the in rb_insert_pages()
2076 * 3. Finally, we update the head->prev to the end of new list. in rb_insert_pages()
2083 while (retries--) { in rb_insert_pages()
2088 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
2091 prev_page = head_page->prev; in rb_insert_pages()
2093 first_page = pages->next; in rb_insert_pages()
2094 last_page = pages->prev; in rb_insert_pages()
2099 last_page->next = head_page_with_bit; in rb_insert_pages()
2100 first_page->prev = prev_page; in rb_insert_pages()
2102 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); in rb_insert_pages()
2110 head_page->prev = last_page; in rb_insert_pages()
2123 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
2128 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2130 list_del_init(&bpage->list); in rb_insert_pages()
2141 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2145 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2148 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2156 complete(&cpu_buffer->update_done); in update_pages_handler()
2160 * ring_buffer_resize - resize the ring buffer
2177 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2184 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2194 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2204 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2205 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2206 err = -EBUSY; in ring_buffer_resize()
2213 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2215 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2216 cpu_buffer->nr_pages; in ring_buffer_resize()
2220 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2226 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2227 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2228 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2230 err = -ENOMEM; in ring_buffer_resize()
2242 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2243 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2249 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2252 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2258 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2259 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2263 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2264 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2269 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2271 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2279 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2280 err = -EBUSY; in ring_buffer_resize()
2284 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2285 cpu_buffer->nr_pages; in ring_buffer_resize()
2287 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2288 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2289 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2290 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2291 err = -ENOMEM; in ring_buffer_resize()
2302 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2303 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2306 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2318 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2319 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2328 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2331 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2334 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2341 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2342 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2344 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2347 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
2349 list_del_init(&bpage->list); in ring_buffer_resize()
2354 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2361 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2363 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2365 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2366 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2372 return bpage->page->data + index; in __rb_page_index()
2378 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2379 cpu_buffer->reader_page->read); in rb_reader_event()
2384 return local_read(&bpage->page->commit); in rb_page_commit()
2391 struct buffer_page *iter_head_page = iter->head_page; in rb_iter_head_event()
2395 if (iter->head != iter->next_event) in rb_iter_head_event()
2396 return iter->event; in rb_iter_head_event()
2405 event = __rb_page_index(iter_head_page, iter->head); in rb_iter_head_event()
2414 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) in rb_iter_head_event()
2418 memcpy(iter->event, event, length); in rb_iter_head_event()
2426 if (iter->page_stamp != iter_head_page->page->time_stamp || in rb_iter_head_event()
2430 iter->next_event = iter->head + length; in rb_iter_head_event()
2431 return iter->event; in rb_iter_head_event()
2434 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_head_event()
2435 iter->head = 0; in rb_iter_head_event()
2436 iter->next_event = 0; in rb_iter_head_event()
2437 iter->missed_events = 1; in rb_iter_head_event()
2450 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2458 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; in rb_event_index()
2463 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
2471 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2472 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2474 rb_inc_page(&iter->head_page); in rb_inc_iter()
2476 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter()
2477 iter->head = 0; in rb_inc_iter()
2478 iter->next_event = 0; in rb_inc_iter()
2482 * rb_handle_head_page - writer hit the head page
2486 * -1 on error
2510 * NORMAL - an interrupt already moved it for us in rb_handle_head_page()
2511 * HEAD - we are the first to get here. in rb_handle_head_page()
2512 * UPDATE - we are the interrupt interrupting in rb_handle_head_page()
2514 * MOVED - a reader on another CPU moved the next in rb_handle_head_page()
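Those four outcomes correspond to small flag values kept in the low bits of the list pointer (the same bits rb_list_head() strips off). The constants below are reproduced from memory, so treat them as illustrative rather than authoritative:

#define RB_PAGE_NORMAL	0UL	/* plain pointer, nothing special           */
#define RB_PAGE_HEAD	1UL	/* this pointer targets the head page       */
#define RB_PAGE_UPDATE	2UL	/* a writer is in the middle of moving it   */
#define RB_PAGE_MOVED	4UL	/* return value only: another CPU moved it  */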
2526 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2527 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_handle_head_page()
2528 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
2560 return -1; in rb_handle_head_page()
2585 * HEAD - an interrupt came in and already set it. in rb_handle_head_page()
2586 * NORMAL - One of two things: in rb_handle_head_page()
2598 return -1; in rb_handle_head_page()
2614 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2637 return -1; in rb_handle_head_page()
2647 struct buffer_page *tail_page = info->tail_page; in rb_reset_tail()
2649 unsigned long length = info->length; in rb_reset_tail()
2662 tail_page->real_end = 0; in rb_reset_tail()
2664 local_sub(length, &tail_page->write); in rb_reset_tail()
2671 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2678 tail_page->real_end = tail; in rb_reset_tail()
2691 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { in rb_reset_tail()
2701 local_sub(length, &tail_page->write); in rb_reset_tail()
2706 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
2707 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
2709 event->time_delta = 1; in rb_reset_tail()
2711 /* Make sure the padding is visible before the tail_page->write update */ in rb_reset_tail()
2715 length = (tail + length) - BUF_PAGE_SIZE; in rb_reset_tail()
2716 local_sub(length, &tail_page->write); in rb_reset_tail()
2728 struct buffer_page *tail_page = info->tail_page; in rb_move_tail()
2729 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2730 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2744 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2762 if (rb_is_head_page(next_page, &tail_page->list)) { in rb_move_tail()
2768 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2773 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2774 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2796 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2797 cpu_buffer->tail_page) && in rb_move_tail()
2798 (cpu_buffer->commit_page == in rb_move_tail()
2799 cpu_buffer->reader_page))) { in rb_move_tail()
2800 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2815 local_inc(&cpu_buffer->committing); in rb_move_tail()
2818 return ERR_PTR(-EAGAIN); in rb_move_tail()
2832 event->type_len = RINGBUF_TYPE_TIME_STAMP; in rb_add_time_stamp()
2834 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
2838 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
2839 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
2842 event->time_delta = 0; in rb_add_time_stamp()
2843 event->array[0] = 0; in rb_add_time_stamp()
2863 (unsigned long long)info->delta, in rb_check_timestamp()
2864 (unsigned long long)info->ts, in rb_check_timestamp()
2865 (unsigned long long)info->before, in rb_check_timestamp()
2866 (unsigned long long)info->after, in rb_check_timestamp()
2867 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2881 bool abs = info->add_timestamp & in rb_add_timestamp()
2884 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
2889 if (abs && (info->ts & TS_MSB)) { in rb_add_timestamp()
2890 info->delta &= ABS_TS_MASK; in rb_add_timestamp()
2893 } else if (info->before == info->after && info->before > info->ts) { in rb_add_timestamp()
2903 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
2904 info->before, info->ts); in rb_add_timestamp()
2909 info->delta = 0; in rb_add_timestamp()
2911 *event = rb_add_time_stamp(*event, info->delta, abs); in rb_add_timestamp()
2912 *length -= RB_LEN_TIME_EXTEND; in rb_add_timestamp()
2917 * rb_update_event - update event type and data
2932 unsigned length = info->length; in rb_update_event()
2933 u64 delta = info->delta; in rb_update_event()
2934 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
2937 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2943 if (unlikely(info->add_timestamp)) in rb_update_event()
2946 event->time_delta = delta; in rb_update_event()
2947 length -= RB_EVNT_HDR_SIZE; in rb_update_event()
2949 event->type_len = 0; in rb_update_event()
2950 event->array[0] = length; in rb_update_event()
2952 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
2989 switch (event->type_len) { in rb_time_delta()
3000 return event->time_delta; in rb_time_delta()
3022 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3026 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) in rb_try_to_discard()
3032 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
3034 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
3038 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, in rb_try_to_discard()
3039 write_stamp, write_stamp - delta)) in rb_try_to_discard()
3051 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3069 index = local_cmpxchg(&bpage->write, old_index, new_index); in rb_try_to_discard()
3072 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3083 local_inc(&cpu_buffer->committing); in rb_start_commit()
3084 local_inc(&cpu_buffer->commits); in rb_start_commit()
3101 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3103 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3104 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3107 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3109 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3110 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3111 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3116 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3118 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3119 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3121 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3134 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3143 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3147 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3150 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3153 local_dec(&cpu_buffer->committing); in rb_end_commit()
3163 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3164 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3165 local_inc(&cpu_buffer->committing); in rb_end_commit()
3176 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
3177 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
3179 if (!event->time_delta) in rb_event_discard()
3180 event->time_delta = 1; in rb_event_discard()
3186 local_inc(&cpu_buffer->entries); in rb_commit()
3193 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3194 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3196 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3199 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3200 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3202 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3205 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3208 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3211 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3214 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3216 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3219 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3220 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3222 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3259 * 101 - 1 = 100
3262 * 1010 - 1 = 1001
3297 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3300 bit = RB_CTX_NORMAL - bit; in trace_recursive_lock()
3302 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3309 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3315 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3316 cpu_buffer->current_context = val; in trace_recursive_lock()
3324 cpu_buffer->current_context &= in trace_recursive_unlock()
3325 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
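The unlock above is the classic "clear the lowest set bit" trick shifted up by the nest offset: subtracting (1 << nest) borrows through the zero bits until it flips the lowest context bit that is set, and the AND then clears exactly that bit, which is what the 101 and 1010 walk-throughs in the comment above are showing. A standalone illustration:

/* Clear the lowest bit set at or above position 'nest' (illustration only). */
static inline unsigned int example_clear_ctx_bit(unsigned int val, unsigned int nest)
{
	return val & (val - (1U << nest));
}

/*
 * example_clear_ctx_bit(0b101, 0)  == 0b100   (101  - 1 = 100,  101  & 100  = 100)
 * example_clear_ctx_bit(0b1010, 0) == 0b1000  (1010 - 1 = 1001, 1010 & 1001 = 1000)
 */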
3332 * ring_buffer_nest_start - Allow tracing while nested
3352 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3354 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3358 * ring_buffer_nest_end - Allow tracing while nested
3371 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3373 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3378 * ring_buffer_unlock_commit - commit a reserved event
3392 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3418 ts = bpage->time_stamp; in dump_buffer_page()
3423 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
3425 switch (event->type_len) { in dump_buffer_page()
3440 ts += event->time_delta; in dump_buffer_page()
3441 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); in dump_buffer_page()
3445 ts += event->time_delta; in dump_buffer_page()
3446 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); in dump_buffer_page()
3472 bpage = info->tail_page->page; in check_buffer()
3476 tail = local_read(&bpage->commit); in check_buffer()
3477 } else if (info->add_timestamp & in check_buffer()
3487 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
3496 ts = bpage->time_stamp; in check_buffer()
3500 event = (struct ring_buffer_event *)(bpage->data + e); in check_buffer()
3502 switch (event->type_len) { in check_buffer()
3515 if (event->time_delta == 1) in check_buffer()
3519 ts += event->time_delta; in check_buffer()
3526 if ((full && ts > info->ts) || in check_buffer()
3527 (!full && ts + info->delta != info->ts)) { in check_buffer()
3533 atomic_inc(&cpu_buffer->record_disabled); in check_buffer()
3537 cpu_buffer->cpu, in check_buffer()
3538 ts + info->delta, info->ts, info->delta, in check_buffer()
3539 info->before, info->after, in check_buffer()
3543 /* Do not re-enable checking */ in check_buffer()
3567 /* Don't let the compiler play games with cpu_buffer->tail_page */ in __rb_reserve_next()
3568 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3570 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; in __rb_reserve_next()
3572 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3573 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3575 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3577 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { in __rb_reserve_next()
3578 info->delta = info->ts; in __rb_reserve_next()
3585 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { in __rb_reserve_next()
3586 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3587 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3589 info->delta = info->ts - info->after; in __rb_reserve_next()
3590 if (unlikely(test_time_stamp(info->delta))) { in __rb_reserve_next()
3591 info->add_timestamp |= RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3592 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3597 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3599 /*C*/ write = local_add_return(info->length, &tail_page->write); in __rb_reserve_next()
3604 tail = write - info->length; in __rb_reserve_next()
3609 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3610 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3611 if (a_ok && b_ok && info->before != info->after) in __rb_reserve_next()
3612 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, in __rb_reserve_next()
3613 info->before, info->after); in __rb_reserve_next()
3624 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3626 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); in __rb_reserve_next()
3628 if (likely(!(info->add_timestamp & in __rb_reserve_next()
3631 info->delta = info->ts - info->after; in __rb_reserve_next()
3634 info->delta = info->ts; in __rb_reserve_next()
3637 if (unlikely(info->ts != save_before)) { in __rb_reserve_next()
3638 /* SLOW PATH - Interrupted between C and E */ in __rb_reserve_next()
3640 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3644 if (save_before > info->after) { in __rb_reserve_next()
3649 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3650 info->after, save_before); in __rb_reserve_next()
3655 /* SLOW PATH - Interrupted between A and C */ in __rb_reserve_next()
3656 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3659 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3661 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && in __rb_reserve_next()
3662 info->after < ts && in __rb_reserve_next()
3663 rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3664 info->after, ts)) { in __rb_reserve_next()
3666 info->delta = ts - info->after; in __rb_reserve_next()
3676 info->delta = 0; in __rb_reserve_next()
3678 info->ts = ts; in __rb_reserve_next()
3679 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; in __rb_reserve_next()
3686 if (unlikely(!tail && !(info->add_timestamp & in __rb_reserve_next()
3688 info->delta = 0; in __rb_reserve_next()
3695 local_inc(&tail_page->entries); in __rb_reserve_next()
3702 tail_page->page->time_stamp = info->ts; in __rb_reserve_next()
3705 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3731 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3732 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3733 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3740 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3765 if (unlikely(PTR_ERR(event) == -EAGAIN)) { in rb_reserve_next_event()
3767 info.length -= RB_LEN_TIME_EXTEND; in rb_reserve_next_event()
3779 * ring_buffer_lock_reserve - reserve a part of the buffer
3803 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3808 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3811 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3813 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
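Putting the reserve/commit pair together, here is a hedged usage sketch. The payload struct is made up, and note that the commit call's signature differs by kernel version: older kernels take (buffer, event), newer ones take only the buffer.

struct example_payload {
	u32	id;
	u64	value;
};

static int example_emit(struct trace_buffer *buffer, u32 id, u64 value)
{
	struct ring_buffer_event *event;
	struct example_payload *p;

	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
	if (!event)
		return -EBUSY;	/* buffer full (non-overwrite) or recording disabled */

	p = ring_buffer_event_data(event);
	p->id = id;
	p->value = value;

	ring_buffer_unlock_commit(buffer, event);	/* newer kernels: (buffer) only */
	return 0;
}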
3847 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3853 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
3854 local_dec(&bpage->entries); in rb_decrement_entry()
3865 if (bpage->page == (void *)addr) { in rb_decrement_entry()
3866 local_dec(&bpage->entries); in rb_decrement_entry()
3877 * ring_buffer_discard_commit - discard an event that has not been committed
3905 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3912 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3929 * ring_buffer_write - write data to the buffer without reserving
3948 int ret = -EBUSY; in ring_buffer_write()
3953 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3958 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3961 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3963 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3998 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
4000 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
4007 if (reader->read != rb_page_commit(reader)) in rb_per_cpu_empty()
4033 * ring_buffer_record_disable - stop all writes into the buffer
4043 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
4048 * ring_buffer_record_enable - enable writes to the buffer
4056 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
4061 * ring_buffer_record_off - stop all writes into the buffer
4077 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4079 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_off()
4084 * ring_buffer_record_on - restart writes into the buffer
4100 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4102 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_on()
4107 * ring_buffer_record_is_on - return true if the ring buffer can write
4114 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4118 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4130 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
4134 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4147 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4151 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4156 * ring_buffer_record_enable_cpu - enable writes to the buffer
4167 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4170 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4171 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4184 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4185 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4189 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4200 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4203 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4204 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4209 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4210 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4214 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
4215 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4222 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
4231 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4234 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4235 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4242 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4250 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4253 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4260 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4270 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4273 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4274 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4281 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4293 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4296 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4297 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4304 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4315 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4318 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4319 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4326 * ring_buffer_read_events_cpu - get the number of events successfully read
4335 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4338 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4339 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4344 * ring_buffer_entries - get the number of entries in a buffer
4358 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4367 * ring_buffer_overruns - get the number of overruns in buffer
4381 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4382 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4391 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
4394 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4395 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4396 iter->next_event = iter->head; in rb_iter_reset()
4398 iter->cache_reader_page = iter->head_page; in rb_iter_reset()
4399 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4401 if (iter->head) { in rb_iter_reset()
4402 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4403 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4405 iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_reset()
4406 iter->page_stamp = iter->read_stamp; in rb_iter_reset()
4411 * ring_buffer_iter_reset - reset an iterator
4425 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4427 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4429 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4434 * ring_buffer_iter_empty - check if an iterator has no more to read
4448 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4449 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4450 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4451 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4452 commit_ts = commit_page->page->time_stamp; in ring_buffer_iter_empty()
4465 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4466 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); in ring_buffer_iter_empty()
4474 return ((iter->head_page == commit_page && iter->head >= commit) || in ring_buffer_iter_empty()
4475 (iter->head_page == reader && commit_page == head_page && in ring_buffer_iter_empty()
4476 head_page->read == commit && in ring_buffer_iter_empty()
4477 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4487 switch (event->type_len) { in rb_update_read_stamp()
4493 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4498 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
4499 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4503 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4518 switch (event->type_len) { in rb_update_iter_read_stamp()
4524 iter->read_stamp += delta; in rb_update_iter_read_stamp()
4529 delta = rb_fix_abs_ts(delta, iter->read_stamp); in rb_update_iter_read_stamp()
4530 iter->read_stamp = delta; in rb_update_iter_read_stamp()
4534 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
4538 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4553 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4567 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4570 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4575 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4580 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4590 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4591 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4592 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4593 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4602 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4603 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4606 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
4610 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4613 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
4625 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4638 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4651 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4652 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
4654 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4657 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4658 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4660 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4661 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4662 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4669 if (reader && reader->read == 0) in rb_get_reader_page()
4670 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4672 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4719 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
4720 cpu_buffer->read++; in rb_advance_reader()
4725 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4732 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4735 if (iter->head == iter->next_event) { in rb_advance_iter()
4741 iter->head = iter->next_event; in rb_advance_iter()
4746 if (iter->next_event >= rb_page_size(iter->head_page)) { in rb_advance_iter()
4748 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4754 rb_update_iter_read_stamp(iter, iter->event); in rb_advance_iter()
4759 return cpu_buffer->lost_events; in rb_lost_events()
4788 switch (event->type_len) { in rb_buffer_peek()
4810 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); in rb_buffer_peek()
4811 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4812 cpu_buffer->cpu, ts); in rb_buffer_peek()
4820 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4821 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4822 cpu_buffer->cpu, ts); in rb_buffer_peek()
4847 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4848 buffer = cpu_buffer->buffer; in rb_iter_peek()
4855 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4856 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
4876 if (iter->head >= rb_page_size(iter->head_page)) { in rb_iter_peek()
4885 switch (event->type_len) { in rb_iter_peek()
4902 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); in rb_iter_peek()
4903 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4904 cpu_buffer->cpu, ts); in rb_iter_peek()
4912 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
4914 cpu_buffer->cpu, ts); in rb_iter_peek()
4929 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4942 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4946 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4954 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4959 * ring_buffer_peek - peek at the next event to be read
4972 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4977 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4984 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4989 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4995 /** ring_buffer_iter_dropped - report if there are dropped events
5002 bool ret = iter->missed_events != 0; in ring_buffer_iter_dropped()
5004 iter->missed_events = 0; in ring_buffer_iter_dropped()
5010 * ring_buffer_iter_peek - peek at the next event to be read
5020 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5025 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5027 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5029 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
5036 * ring_buffer_consume - return an event and consume it
5059 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5062 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5068 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5078 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
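A hedged sketch of a consuming read loop built on the call above, draining whatever is currently readable on one CPU (example_payload is the made-up struct from the reserve/commit sketch earlier):

static void example_drain_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		struct example_payload *p = ring_buffer_event_data(event);

		pr_info("cpu%d ts=%llu id=%u (lost %lu)\n", cpu, ts, p->id, lost);
	}
}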
5086 * ring_buffer_read_prepare - Prepare for a non-consuming read of the buffer
5112 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5119 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); in ring_buffer_read_prepare()
5120 if (!iter->event) { in ring_buffer_read_prepare()
5125 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5127 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5129 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5136 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5150 * ring_buffer_read_start - start a non-consuming read of the buffer
5169 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5171 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5172 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5174 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5175 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5180 * ring_buffer_read_finish - finish reading the iterator of the buffer
5183 * This re-enables the recording to the buffer, and frees the
5189 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5198 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5200 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5202 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5203 kfree(iter->event); in ring_buffer_read_finish()
5209 * ring_buffer_iter_advance - advance the iterator to the next location
5217 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
5220 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5224 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
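/*
 * Editor's illustrative sketch (not part of the original file): the full
 * non-consuming read sequence for one CPU - prepare, synchronize, start,
 * walk with peek/advance, then finish to re-enable resizing and free the
 * iterator.  Prototypes are the ones from <linux/ring_buffer.h> in this tree.
 */
static int example_iterate_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buf, cpu, GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	/* Let writers that raced with the prepare step settle. */
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		pr_info("cpu%d: event len=%u at ts=%llu\n",
			cpu, ring_buffer_event_length(event), ts);
		ring_buffer_iter_advance(iter);
	}

	if (ring_buffer_iter_dropped(iter))
		pr_warn("cpu%d: writer overran the iterator\n", cpu);

	ring_buffer_read_finish(iter);
	return 0;
}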
5229 * ring_buffer_size - return the size of the ring buffer (in bytes)
5237 * BUF_PAGE_SIZE * buffer->nr_pages in ring_buffer_size()
5241 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5244 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
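/*
 * Editor's illustrative sketch (not part of the original file): since
 * ring_buffer_size() reports a single CPU's worth of
 * BUF_PAGE_SIZE * nr_pages, a whole-buffer total has to be summed per CPU,
 * e.g. over the online mask.
 */
static unsigned long example_total_size(struct trace_buffer *buf)
{
	unsigned long total = 0;
	int cpu;

	for_each_online_cpu(cpu)
		total += ring_buffer_size(buf, cpu);

	return total;
}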
5253 cpu_buffer->head_page in rb_reset_cpu()
5254 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5255 local_set(&cpu_buffer->head_page->write, 0); in rb_reset_cpu()
5256 local_set(&cpu_buffer->head_page->entries, 0); in rb_reset_cpu()
5257 local_set(&cpu_buffer->head_page->page->commit, 0); in rb_reset_cpu()
5259 cpu_buffer->head_page->read = 0; in rb_reset_cpu()
5261 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5262 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5264 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5265 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5266 local_set(&cpu_buffer->reader_page->write, 0); in rb_reset_cpu()
5267 local_set(&cpu_buffer->reader_page->entries, 0); in rb_reset_cpu()
5268 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_reset_cpu()
5269 cpu_buffer->reader_page->read = 0; in rb_reset_cpu()
5271 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5272 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5273 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5274 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5275 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5276 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5277 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5278 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5279 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5280 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5281 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5282 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5283 cpu_buffer->read = 0; in rb_reset_cpu()
5284 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5286 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5287 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5289 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5291 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5292 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5302 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5304 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5307 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5311 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5314 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5318 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5324 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5326 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5330 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5332 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5333 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5340 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5341 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5343 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
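/*
 * Editor's illustrative sketch (not part of the original file): resetting a
 * single CPU's buffer.  The reset path above already bumps record_disabled
 * and resize_disabled for that CPU while it runs, so only the CPU number is
 * required; pausing the whole buffer first, as shown here, is optional and
 * only included for clarity.
 */
static void example_reset_one_cpu(struct trace_buffer *buf, int cpu)
{
	ring_buffer_record_disable(buf);	/* optional: quiesce writers */
	ring_buffer_reset_cpu(buf, cpu);
	ring_buffer_record_enable(buf);
}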
5348 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5358 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5361 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5363 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5364 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5371 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5375 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5376 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5379 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5383 * ring_buffer_reset - reset a ring buffer
5392 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5395 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5397 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5398 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5405 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5409 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5410 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5413 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
5418 * ring_buffer_empty - is the ring buffer empty?
5431 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5447 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5458 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5461 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
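/*
 * Editor's illustrative sketch (not part of the original file):
 * ring_buffer_empty() checks every per-CPU buffer, while
 * ring_buffer_empty_cpu() checks just one, so a reader can use the per-CPU
 * variant to skip CPUs with nothing pending.
 */
static int example_first_nonempty_cpu(struct trace_buffer *buf)
{
	int cpu;

	if (ring_buffer_empty(buf))
		return -1;	/* nothing buffered anywhere */

	for_each_online_cpu(cpu) {
		if (!ring_buffer_empty_cpu(buf, cpu))
			return cpu;
	}
	return -1;
}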
5474 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5489 int ret = -EINVAL; in ring_buffer_swap_cpu()
5491 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
5492 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
5495 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
5496 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
5499 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
5502 ret = -EAGAIN; in ring_buffer_swap_cpu()
5504 if (atomic_read(&buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5507 if (atomic_read(&buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5510 if (atomic_read(&cpu_buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5513 if (atomic_read(&cpu_buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5522 atomic_inc(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5523 atomic_inc(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
5525 ret = -EBUSY; in ring_buffer_swap_cpu()
5526 if (local_read(&cpu_buffer_a->committing)) in ring_buffer_swap_cpu()
5528 if (local_read(&cpu_buffer_b->committing)) in ring_buffer_swap_cpu()
5531 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
5532 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
5534 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5535 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5540 atomic_dec(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5541 atomic_dec(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
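/*
 * Editor's illustrative sketch (not part of the original file): swapping one
 * CPU's buffer between two trace_buffers, much like the tracer's per-CPU
 * snapshot path.  This is only built when CONFIG_RING_BUFFER_ALLOW_SWAP is
 * set; mismatched per-CPU page counts return -EINVAL, and -EAGAIN / -EBUSY
 * mean recording was disabled or a writer was mid-commit, so the swap should
 * be retried or skipped.
 */
static int example_swap_cpu(struct trace_buffer *live,
			    struct trace_buffer *snap, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, snap, cpu);
	if (ret)
		pr_warn("cpu%d: swap failed: %d\n", cpu, ret);
	return ret;
}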
5549 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5571 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5572 return ERR_PTR(-ENODEV); in ring_buffer_alloc_read_page()
5574 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5576 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5578 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5579 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5580 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5583 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5592 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
5604 * ring_buffer_free_read_page - free an allocated read page
5613 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5623 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5625 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5626 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5630 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5639 * ring_buffer_read_page - extract a page from the ring buffer
5674 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5683 int ret = -1; in ring_buffer_read_page()
5685 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5695 len -= BUF_PAGE_HDR_SIZE; in ring_buffer_read_page()
5704 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5712 read = reader->read; in ring_buffer_read_page()
5716 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5725 if (read || (len < (commit - read)) || in ring_buffer_read_page()
5726 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5727 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5739 (!read || (len < (commit - read)) || in ring_buffer_read_page()
5740 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
5743 if (len > (commit - read)) in ring_buffer_read_page()
5744 len = (commit - read); in ring_buffer_read_page()
5753 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5764 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
5766 len -= size; in ring_buffer_read_page()
5769 rpos = reader->read; in ring_buffer_read_page()
5781 local_set(&bpage->commit, pos); in ring_buffer_read_page()
5782 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
5788 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5789 cpu_buffer->read_bytes += BUF_PAGE_SIZE; in ring_buffer_read_page()
5793 bpage = reader->page; in ring_buffer_read_page()
5794 reader->page = *data_page; in ring_buffer_read_page()
5795 local_set(&reader->write, 0); in ring_buffer_read_page()
5796 local_set(&reader->entries, 0); in ring_buffer_read_page()
5797 reader->read = 0; in ring_buffer_read_page()
5805 if (reader->real_end) in ring_buffer_read_page()
5806 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
5810 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5812 commit = local_read(&bpage->commit); in ring_buffer_read_page()
5820 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
5821 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
5823 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
5826 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
5833 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); in ring_buffer_read_page()
5836 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
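/*
 * Editor's illustrative sketch (not part of the original file): the page-based
 * read path used by splice - grab a spare page with
 * ring_buffer_alloc_read_page(), let ring_buffer_read_page() either swap a
 * full page out of the ring or copy partial data into it, then hand the page
 * back with ring_buffer_free_read_page().  "process_page" is a hypothetical
 * consumer of the filled buffer_data_page.
 */
static int example_read_full_page(struct trace_buffer *buf, int cpu,
				  void (*process_page)(void *page))
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buf, cpu);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* full == 1: only extract once the reader page has been filled */
	ret = ring_buffer_read_page(buf, &page, PAGE_SIZE, cpu, 1);
	if (ret >= 0)
		process_page(page);

	ring_buffer_free_read_page(buf, cpu, page);
	return ret < 0 ? ret : 0;
}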
5856 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
5865 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
5866 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
5874 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
5876 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
5879 return -ENOMEM; in trace_rb_cpu_prepare()
5882 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
5950 cnt = data->cnt + (nested ? 27 : 0); in rb_write_something()
5953 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); in rb_write_something()
5961 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
5966 data->bytes_dropped += len; in rb_write_something()
5968 data->bytes_dropped_nested += len; in rb_write_something()
5975 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
5979 item->size = size; in rb_write_something()
5980 memcpy(item->str, rb_string, size); in rb_write_something()
5983 data->bytes_alloc_nested += event_len; in rb_write_something()
5984 data->bytes_written_nested += len; in rb_write_something()
5985 data->events_nested++; in rb_write_something()
5986 if (!data->min_size_nested || len < data->min_size_nested) in rb_write_something()
5987 data->min_size_nested = len; in rb_write_something()
5988 if (len > data->max_size_nested) in rb_write_something()
5989 data->max_size_nested = len; in rb_write_something()
5991 data->bytes_alloc += event_len; in rb_write_something()
5992 data->bytes_written += len; in rb_write_something()
5993 data->events++; in rb_write_something()
5994 if (!data->min_size || len < data->min_size) in rb_write_something()
5995 data->min_size = len; in rb_write_something()
5996 if (len > data->max_size) in rb_write_something()
5997 data->max_size = len; in rb_write_something()
6001 ring_buffer_unlock_commit(data->buffer, event); in rb_write_something()
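/*
 * Editor's illustrative sketch (not part of the original file): the
 * reserve/commit write sequence that the self-test above exercises, using the
 * two-argument ring_buffer_unlock_commit() signature seen in this file.
 * "my_record" is a hypothetical payload type.
 */
struct my_record {
	u64	value;
};

static int example_write(struct trace_buffer *buf, u64 value)
{
	struct ring_buffer_event *event;
	struct my_record *rec;

	event = ring_buffer_lock_reserve(buf, sizeof(*rec));
	if (!event)
		return -EBUSY;		/* buffer full or recording disabled */

	rec = ring_buffer_event_data(event);
	rec->value = value;

	return ring_buffer_unlock_commit(buf, event);
}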
6012 data->cnt++; in rb_test()
6015 /* Now sleep between a min of 100-300us and a max of 1ms */ in rb_test()
6016 usleep_range(((data->cnt % 3) + 1) * 100, 1000); in rb_test()
6134 ret = -1; in test_ringbuffer()
6136 total_events = data->events + data->events_nested; in test_ringbuffer()
6137 total_written = data->bytes_written + data->bytes_written_nested; in test_ringbuffer()
6138 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; in test_ringbuffer()
6139 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; in test_ringbuffer()
6141 big_event_size = data->max_size + data->max_size_nested; in test_ringbuffer()
6142 small_event_size = data->min_size + data->min_size_nested; in test_ringbuffer()
6161 total_size += item->size + sizeof(struct rb_item); in test_ringbuffer()
6162 if (memcmp(&item->str[0], rb_string, item->size) != 0) { in test_ringbuffer()
6164 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
6165 pr_info("expected: %.*s\n", item->size, rb_string); in test_ringbuffer()
6167 ret = -1; in test_ringbuffer()
6175 ret = -1; in test_ringbuffer()