Lines Matching +full:cpu +full:- +full:read
1 // SPDX-License-Identifier: GPL-2.0
27 #include <linux/cpu.h>
58 * allocated for each CPU. A writer may only write to a buffer that is
59 * associated with the CPU it is currently executing on. A reader may read
60 * from any per cpu buffer.
62 * The reader is special. For each per cpu buffer, the reader has its own
63  * reader page. When a reader has read the entire reader page, this reader page is swapped with another page that was allocated for the ring buffer.
 *   (Lines 72-113 of the original file contain ASCII art, only partly
 *    captured by this listing, illustrating the reader page swap in four
 *    stages: the reader page sits outside the ring; the reader page is
 *    pointed at the page following the head page; the ring is relinked so
 *    the reader page takes the head page's place; and the old head page is
 *    detached to become the new reader page, with the head moving on to
 *    the next page.)
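
Editor's illustration, not part of ring_buffer.c: a minimal user-space model of the reader-page swap described in the comment above. Pages form a circular list and the reader owns one spare page that it trades for the current head page, so consuming a page never blocks the writers. All names here (toy_page, swap_reader_page) are invented for the sketch; the kernel does the swap with flagged list pointers and cmpxchg.

/*
 * Toy model (not kernel code): a ring of pages plus a detached reader
 * page. Swapping the reader page with the head page lets the reader own
 * a full page while writers keep cycling through the remaining pages.
 */
#include <stdio.h>

struct toy_page {
        int id;
        struct toy_page *next;
        struct toy_page *prev;
};

/* Link @reader into the ring in place of @victim and return @victim. */
static struct toy_page *swap_reader_page(struct toy_page *reader,
                                         struct toy_page *victim)
{
        reader->next = victim->next;
        reader->prev = victim->prev;
        victim->prev->next = reader;
        victim->next->prev = reader;
        victim->next = victim->prev = NULL;     /* victim is now detached */
        return victim;
}

int main(void)
{
        enum { NR_PAGES = 4 };
        struct toy_page pages[NR_PAGES], spare = { .id = 99 };
        struct toy_page *head, *reader = &spare;
        int i;

        /* Build the circular list of buffer pages. */
        for (i = 0; i < NR_PAGES; i++) {
                pages[i].id = i;
                pages[i].next = &pages[(i + 1) % NR_PAGES];
                pages[i].prev = &pages[(i + NR_PAGES - 1) % NR_PAGES];
        }
        head = &pages[0];

        /* The reader consumes the head page by swapping its spare page in. */
        for (i = 0; i < 3; i++) {
                struct toy_page *next_head = head->next;
                struct toy_page *consumed = swap_reader_page(reader, head);

                printf("consumed page %d, new head is page %d\n",
                       consumed->id, next_head->id);
                reader = consumed;      /* old head becomes the new spare */
                head = next_head;
        }
        return 0;
}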
156 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
160 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
166 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
167 event->time_delta = 0; in rb_event_set_padding()
175 if (event->type_len) in rb_event_data_length()
176 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
178 length = event->array[0]; in rb_event_data_length()
190 switch (event->type_len) { in rb_event_length()
194 return -1; in rb_event_length()
195 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
230 * ring_buffer_event_length - return the length of the event
247 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
249 length -= RB_EVNT_HDR_SIZE; in ring_buffer_event_length()
250 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
251 length -= sizeof(event->array[0]); in ring_buffer_event_length()
262 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
264 if (event->type_len) in rb_event_data()
265 return (void *)&event->array[0]; in rb_event_data()
267 return (void *)&event->array[1]; in rb_event_data()
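
Editor's illustration of the length encoding read off the fragments above: small payloads store length / 4 in the 5-bit type_len field and their data starts at array[0]; larger events set type_len to 0, put the length in array[0], and their data starts at array[1]. The struct below mirrors the public ring_buffer_event layout (5-bit type_len, 27-bit time_delta); the 4-byte alignment, 4-byte header, and the 28-value limit on type_len are assumptions stated in the macros, and the toy_* names are invented. The kernel helpers additionally add RB_EVNT_HDR_SIZE to the value returned here.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct toy_event {
        uint32_t type_len:5, time_delta:27;
        uint32_t array[8];
};

#define TOY_ALIGNMENT           4U
#define TOY_HDR_SIZE            offsetof(struct toy_event, array)
#define TOY_MAX_SMALL_DATA      (28U * TOY_ALIGNMENT)   /* assumed limit */

/* Payload length only; the kernel also accounts for the 4-byte header. */
static unsigned int toy_data_length(const struct toy_event *e)
{
        return e->type_len ? e->type_len * TOY_ALIGNMENT : e->array[0];
}

int main(void)
{
        struct toy_event small = { .type_len = 3 };     /* 12 byte payload */
        struct toy_event big   = { .type_len = 0 };

        big.array[0] = 200;                             /* 200 byte payload */

        printf("small payload: %u bytes (data starts at array[0])\n",
               toy_data_length(&small));
        printf("big payload:   %u bytes (data starts at array[1])\n",
               toy_data_length(&big));
        printf("header size:   %zu bytes, small-data limit: %u bytes\n",
               TOY_HDR_SIZE, TOY_MAX_SMALL_DATA);
        return 0;
}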
271 * ring_buffer_event_data - return the data of the event
280 #define for_each_buffer_cpu(buffer, cpu) \ argument
281 for_each_cpu(cpu, buffer->cpumask)
283 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
284 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
287 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
294 ts = event->array[0]; in rb_event_time_stamp()
296 ts += event->time_delta; in rb_event_time_stamp()
323 unsigned read; /* index for next read */ member
346 local_set(&bpage->commit, 0); in rb_init_page()
355 free_page((unsigned long)bpage->page); in free_buffer_page()
369 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
371 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
372 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
429 * EXTEND - wants a time extend
430 * ABSOLUTE - the buffer requests all events to have absolute time stamps
431 * FORCE - force a full time stamp.
486 int cpu; member
497 struct buffer_page *head_page; /* read from head */
515 unsigned long read; member
572 * - Only need 59 bits (uses 60 to make it even).
573 * - Reads may fail if it interrupted a modification of the time stamp.
575 * the read itself is interrupted by a write.
578 * - Writes always succeed and will overwrite other writes and writes
581 * - A write followed by a read of the same time stamp will always succeed,
584 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
590 * The two most significant bits of each half holds a 2 bit counter (0-3).
596 #define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
620 * If the read is interrupted by a write, then the cnt will in __rb_time_read()
621 * be different. Loop until both top and bottom have been read in __rb_time_read()
625 c = local_read(&t->cnt); in __rb_time_read()
626 top = local_read(&t->top); in __rb_time_read()
627 bottom = local_read(&t->bottom); in __rb_time_read()
628 } while (c != local_read(&t->cnt)); in __rb_time_read()
672 cnt = local_inc_return(&t->cnt); in rb_time_set()
673 rb_time_val_set(&t->top, top, cnt); in rb_time_set()
674 rb_time_val_set(&t->bottom, bottom, cnt); in rb_time_set()
675 } while (cnt != local_read(&t->cnt)); in rb_time_set()
700 cnt = local_read(&t->cnt); in rb_time_cmpxchg()
714 if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2)) in rb_time_cmpxchg()
716 if (!rb_time_read_cmpxchg(&t->top, top, top2)) in rb_time_cmpxchg()
718 if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2)) in rb_time_cmpxchg()
729 *ret = local64_read(&t->time); in rb_time_read()
734 local64_set(&t->time, val); in rb_time_set()
740 val = local64_cmpxchg(&t->time, expect, set); in rb_time_cmpxchg()
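
Editor's illustration of the split-timestamp scheme the comment block above describes for 32-bit kernels: a roughly 60-bit value is kept as two 32-bit halves, each carrying 30 value bits plus a 2-bit sequence counter in its top bits, and a read is accepted only when both halves carry the same counter. This is a simplified single-threaded model; the kernel additionally keeps a separate cnt word and uses local_t/cmpxchg helpers, and the names below (pack_half, split_write, join_read) are invented.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define VAL_SHIFT       30
#define VAL_MASK        ((1U << VAL_SHIFT) - 1)

static uint32_t pack_half(uint32_t val30, unsigned int cnt)
{
        return (val30 & VAL_MASK) | ((cnt & 3U) << VAL_SHIFT);
}

static void split_write(uint64_t ts, unsigned int cnt,
                        uint32_t *top, uint32_t *bottom)
{
        *top    = pack_half((uint32_t)(ts >> VAL_SHIFT), cnt);
        *bottom = pack_half((uint32_t)(ts & VAL_MASK), cnt);
}

/* Returns false if the two halves were written by different writers. */
static bool join_read(uint32_t top, uint32_t bottom, uint64_t *ts)
{
        if ((top >> VAL_SHIFT) != (bottom >> VAL_SHIFT))
                return false;   /* torn read: counters disagree */
        *ts = ((uint64_t)(top & VAL_MASK) << VAL_SHIFT) | (bottom & VAL_MASK);
        return true;
}

int main(void)
{
        uint32_t top, bottom, stale_bottom;
        uint64_t ts;

        split_write(123456789012ULL, 1, &top, &bottom);
        stale_bottom = bottom;

        if (join_read(top, bottom, &ts))
                printf("consistent read: %llu\n", (unsigned long long)ts);

        /* Simulate being interrupted: a second write lands in between. */
        split_write(987654321098ULL, 2, &top, &bottom);
        if (!join_read(top, stale_bottom, &ts))
                printf("torn read detected, retry\n");
        return 0;
}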
756 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
757 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
768 commit = local_read(&page->page->commit); in verify_event()
769 write = local_read(&page->write); in verify_event()
770 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
771 addr < (unsigned long)&page->page->data[write]) in verify_event()
774 next = rb_list_head(page->list.next); in verify_event()
790 * ring_buffer_event_time_stamp - return the event's current time stamp
809 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
814 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) in ring_buffer_event_time_stamp()
817 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
822 /* Read the current saved nesting level time stamp */ in ring_buffer_event_time_stamp()
823 if (likely(--nest < MAX_NEST)) in ring_buffer_event_time_stamp()
824 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
831 if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) in ring_buffer_event_time_stamp()
832 /* Screw it, just read the current time */ in ring_buffer_event_time_stamp()
833 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
839 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
841 * @cpu: The cpu of the ring_buffer to get the number of pages from
845 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
847 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
851  * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
853 * @cpu: The cpu of the ring_buffer to get the number of pages from
857 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
859 size_t read; in ring_buffer_nr_dirty_pages() local
862 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
863 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
864 /* The reader can read an empty page, but not more than that */ in ring_buffer_nr_dirty_pages()
865 if (cnt < read) { in ring_buffer_nr_dirty_pages()
866 WARN_ON_ONCE(read > cnt + 1); in ring_buffer_nr_dirty_pages()
870 return cnt - read; in ring_buffer_nr_dirty_pages()
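
Editor's illustration of the dirty-page accounting above, in plain C rather than the kernel's local_t counters: "dirty" is simply the pages touched by writers minus the pages already handed to the reader. The percentage comparison against a watermark is an assumption added for illustration (the exact check lives in the wakeup path, which this listing does not show), and the toy_* names are invented.

#include <stdbool.h>
#include <stdio.h>

struct toy_cpu_buffer {
        unsigned long nr_pages;
        unsigned long pages_touched;    /* pages a writer moved the tail onto */
        unsigned long pages_read;       /* pages handed to the reader */
};

static unsigned long toy_nr_dirty_pages(const struct toy_cpu_buffer *cb)
{
        /* The reader can read an empty page, but not more than that. */
        if (cb->pages_touched < cb->pages_read)
                return 0;
        return cb->pages_touched - cb->pages_read;
}

/* Assumed watermark check: wake readers once @full percent of pages are dirty. */
static bool toy_full_hit(const struct toy_cpu_buffer *cb, unsigned int full)
{
        unsigned long dirty = toy_nr_dirty_pages(cb);

        return dirty * 100 >= (unsigned long)full * cb->nr_pages;
}

int main(void)
{
        struct toy_cpu_buffer cb = {
                .nr_pages = 8, .pages_touched = 5, .pages_read = 2,
        };

        printf("dirty pages: %lu\n", toy_nr_dirty_pages(&cb));
        printf("50%% full?   %s\n", toy_full_hit(&cb, 50) ? "yes" : "no");
        return 0;
}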
874 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
883 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
884 if (rbwork->wakeup_full) { in rb_wake_up_waiters()
885 rbwork->wakeup_full = false; in rb_wake_up_waiters()
886 wake_up_all(&rbwork->full_waiters); in rb_wake_up_waiters()
891 * ring_buffer_wait - wait for input to the ring buffer
893 * @cpu: the cpu buffer to wait on
894  * @full: wait until this percentage of pages is filled with data, if @cpu != RING_BUFFER_ALL_CPUS
896 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
897 * as data is added to any of the @buffer's cpu buffers. Otherwise
898 * it will wait for data to be added to a specific cpu buffer.
900 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
909 * data in any cpu buffer, or a specific buffer, put the in ring_buffer_wait()
912 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wait()
913 work = &buffer->irq_work; in ring_buffer_wait()
914 /* Full only makes sense on per cpu reads */ in ring_buffer_wait()
917 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
918 return -ENODEV; in ring_buffer_wait()
919 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
920 work = &cpu_buffer->irq_work; in ring_buffer_wait()
926 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
928 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
951 work->full_waiters_pending = true; in ring_buffer_wait()
953 work->waiters_pending = true; in ring_buffer_wait()
956 ret = -EINTR; in ring_buffer_wait()
960 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
963 if (cpu != RING_BUFFER_ALL_CPUS && in ring_buffer_wait()
964 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
973 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
974 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
975 nr_pages = cpu_buffer->nr_pages; in ring_buffer_wait()
976 dirty = ring_buffer_nr_dirty_pages(buffer, cpu); in ring_buffer_wait()
977 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
978 cpu_buffer->shortest_full < full) in ring_buffer_wait()
979 cpu_buffer->shortest_full = full; in ring_buffer_wait()
980 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
990 finish_wait(&work->full_waiters, &wait); in ring_buffer_wait()
992 finish_wait(&work->waiters, &wait); in ring_buffer_wait()
998 * ring_buffer_poll_wait - poll on buffer input
1000 * @cpu: the cpu buffer to wait on
1004 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1005 * as data is added to any of the @buffer's cpu buffers. Otherwise
1006 * it will wait for data to be added to a specific cpu buffer.
1011 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1017 if (cpu == RING_BUFFER_ALL_CPUS) in ring_buffer_poll_wait()
1018 work = &buffer->irq_work; in ring_buffer_poll_wait()
1020 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1021 return -EINVAL; in ring_buffer_poll_wait()
1023 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1024 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1027 poll_wait(filp, &work->waiters, poll_table); in ring_buffer_poll_wait()
1028 work->waiters_pending = true; in ring_buffer_poll_wait()
1044 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1045 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
1058 atomic_inc(&__b->buffer->record_disabled); \
1060 atomic_inc(&b->record_disabled); \
1073 /* Skip retpolines :-( */ in rb_time_stamp()
1074 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1077 ts = buffer->clock(); in rb_time_stamp()
1096 int cpu, u64 *ts) in ring_buffer_normalize_time_stamp() argument
1105 * Although writes only happen on the CPU that they are on,
1107 * happen on any CPU.
1135 * head->list->prev->next bit 1 bit 0
1136 * ------- -------
1143 * +----+ +-----+ +-----+
1144 * | |------>| T |---X--->| N |
1145 * | |<------| | | |
1146 * +----+ +-----+ +-----+
1148 * | +-----+ | |
1149 * +----------| R |----------+ |
1150 * | |<-----------+
1151 * +-----+
1153 * Key: ---X--> HEAD flag set in pointer
1183 * rb_list_head - remove any bit
1193 * rb_is_head_page - test if the given page is the head page
1205 val = (unsigned long)list->next; in rb_is_head_page()
1207 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1222 struct list_head *list = page->list.prev; in rb_is_reader_page()
1224 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
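
Editor's illustration of the tagged-pointer trick described in the comment above: list pointers are at least 4-byte aligned, so the two low bits of a "next" pointer are free to carry the HEAD and UPDATE flags (bits 0 and 1 per the comment). The kernel flips these bits with cmpxchg so writer and reader can race on the transition; the sketch below only shows the tagging itself, and its names (toy_list, list_tag, list_untag, list_flags) are invented.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define FLAG_HEAD       1UL
#define FLAG_UPDATE     2UL
#define FLAG_MASK       3UL

struct toy_list {
        struct toy_list *next;
        struct toy_list *prev;
};

/* Strip the flag bits so the pointer can be dereferenced. */
static struct toy_list *list_untag(struct toy_list *p)
{
        return (struct toy_list *)((uintptr_t)p & ~FLAG_MASK);
}

static unsigned long list_flags(struct toy_list *p)
{
        return (uintptr_t)p & FLAG_MASK;
}

static struct toy_list *list_tag(struct toy_list *p, unsigned long flags)
{
        return (struct toy_list *)(((uintptr_t)p & ~FLAG_MASK) | flags);
}

int main(void)
{
        static struct toy_list a, b;

        /* a -> b, with b marked as the head page. */
        a.next = list_tag(&b, FLAG_HEAD);
        b.prev = &a;

        assert(list_untag(a.next) == &b);
        printf("next points to b, HEAD flag %s\n",
               (list_flags(a.next) & FLAG_HEAD) ? "set" : "clear");

        /* Writer about to move the head: HEAD -> UPDATE. */
        a.next = list_tag(list_untag(a.next), FLAG_UPDATE);
        printf("after update: flags = %lu\n", list_flags(a.next));
        return 0;
}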
1228 * rb_set_list_to_head - set a list_head to be pointing to head.
1234 ptr = (unsigned long *)&list->next; in rb_set_list_to_head()
1240 * rb_head_page_activate - sets up head page
1246 head = cpu_buffer->head_page; in rb_head_page_activate()
1253 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1258 unsigned long *ptr = (unsigned long *)&list->next; in rb_list_head_clear()
1264 * rb_head_page_deactivate - clears head page ptr (for free list)
1272 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1274 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1284 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1287 list = &prev->list; in rb_head_page_set()
1291 ret = cmpxchg((unsigned long *)&list->next, in rb_head_page_set()
1330 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1343 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1347 list = cpu_buffer->pages; in rb_set_head_page()
1348 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1351 page = head = cpu_buffer->head_page; in rb_set_head_page()
1360 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
1361 cpu_buffer->head_page = page; in rb_set_head_page()
1376 unsigned long *ptr = (unsigned long *)&old->list.prev->next; in rb_head_page_replace()
1383 ret = cmpxchg(ptr, val, (unsigned long)&new->list); in rb_head_page_replace()
1389 * rb_tail_page_update - move the tail page forward
1407 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); in rb_tail_page_update()
1408 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1410 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1422 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1437 (void)local_cmpxchg(&next_page->write, old_write, val); in rb_tail_page_update()
1438 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1445 local_set(&next_page->page->commit, 0); in rb_tail_page_update()
1448 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
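
Editor's illustration of the "advance the tail only if nobody beat us to it" pattern used by rb_tail_page_update() above, with C11 atomics standing in for the kernel's cmpxchg()/local_t primitives. An interrupting writer that already moved the tail makes the interrupted writer's compare-exchange fail harmlessly. The toy_page name and the scenario are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>

struct toy_page { int id; };

int main(void)
{
        struct toy_page p0 = { 0 }, p1 = { 1 }, p2 = { 2 };
        _Atomic(struct toy_page *) tail = &p0;
        struct toy_page *expected;

        /* First writer: tail is still p0, so the swap to p1 succeeds. */
        expected = &p0;
        if (atomic_compare_exchange_strong(&tail, &expected, &p1))
                printf("advanced tail to page %d\n", atomic_load(&tail)->id);

        /* A late writer still thinks the tail is p0; its swap fails. */
        expected = &p0;
        if (!atomic_compare_exchange_strong(&tail, &expected, &p2))
                printf("tail already moved (now page %d), nothing to do\n",
                       atomic_load(&tail)->id);
        return 0;
}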
1464 * rb_check_list - make sure a pointer to a list has the last bits zero
1469 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) in rb_check_list()
1471 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) in rb_check_list()
1477 * rb_check_pages - integrity check of buffer pages
1478 * @cpu_buffer: CPU buffer with pages to test
1485 struct list_head *head = cpu_buffer->pages; in rb_check_pages()
1489 if (cpu_buffer->head_page) in rb_check_pages()
1494 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
1495 return -1; in rb_check_pages()
1496 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
1497 return -1; in rb_check_pages()
1500 return -1; in rb_check_pages()
1504 bpage->list.next->prev != &bpage->list)) in rb_check_pages()
1505 return -1; in rb_check_pages()
1507 bpage->list.prev->next != &bpage->list)) in rb_check_pages()
1508 return -1; in rb_check_pages()
1509 if (rb_check_list(cpu_buffer, &bpage->list)) in rb_check_pages()
1510 return -1; in rb_check_pages()
1522 bool user_thread = current->mm != NULL; in __rb_allocate_pages()
1535 return -ENOMEM; in __rb_allocate_pages()
1539 * gracefully without invoking oom-killer and the system is not in __rb_allocate_pages()
1559 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1565 list_add(&bpage->list, pages); in __rb_allocate_pages()
1567 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1570 bpage->page = page_address(page); in __rb_allocate_pages()
1571 rb_init_page(bpage->page); in __rb_allocate_pages()
1583 list_del_init(&bpage->list); in __rb_allocate_pages()
1589 return -ENOMEM; in __rb_allocate_pages()
1600 return -ENOMEM; in rb_allocate_pages()
1607 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1610 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1618 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1626 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1630 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1631 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1632 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1633 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1634 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1635 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1636 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1637 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1638 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1639 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1642 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1648 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1649 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); in rb_allocate_cpu_buffer()
1652 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
1653 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
1655 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1656 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1662 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1663 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1664 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1671 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1680 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1683 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1689 list_del_init(&bpage->list); in rb_free_cpu_buffer()
1700 * __ring_buffer_alloc - allocate a new ring_buffer
1701 * @size: the size in bytes per cpu that is needed.
1716 int cpu; in __ring_buffer_alloc() local
1725 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1729 buffer->flags = flags; in __ring_buffer_alloc()
1730 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1731 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1733 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1734 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1740 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1743 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1745 if (!buffer->buffers) in __ring_buffer_alloc()
1748 cpu = raw_smp_processor_id(); in __ring_buffer_alloc()
1749 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1750 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1751 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1754 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1758 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1763 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1764 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1765 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1767 kfree(buffer->buffers); in __ring_buffer_alloc()
1770 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1779 * ring_buffer_free - free a ring buffer.
1785 int cpu; in ring_buffer_free() local
1787 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1789 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1790 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1792 kfree(buffer->buffers); in ring_buffer_free()
1793 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1802 buffer->clock = clock; in ring_buffer_set_clock()
1807 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1812 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
1819 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
1824 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
1839 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1840 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1850 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1856 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1857 tail_page = rb_list_head(tail_page->next); in rb_remove_pages()
1861 first_page = list_entry(rb_list_head(to_remove->next), in rb_remove_pages()
1865 to_remove = rb_list_head(to_remove)->next; in rb_remove_pages()
1869 next_page = rb_list_head(to_remove)->next; in rb_remove_pages()
1876 tail_page->next = (struct list_head *)((unsigned long)next_page | in rb_remove_pages()
1879 next_page->prev = tail_page; in rb_remove_pages()
1882 cpu_buffer->pages = next_page; in rb_remove_pages()
1886 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1890 * change read pointer to make sure any read iterators reset in rb_remove_pages()
1893 cpu_buffer->read = 0; in rb_remove_pages()
1896 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1897 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1899 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1921 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1922 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_remove_pages()
1930 nr_removed--; in rb_remove_pages()
1942 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1945 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1953 * 2. We cmpxchg the prev_page->next to point from head page to the in rb_insert_pages()
1955 * 3. Finally, we update the head->prev to the end of new list. in rb_insert_pages()
1962 while (retries--) { in rb_insert_pages()
1967 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
1970 prev_page = head_page->prev; in rb_insert_pages()
1972 first_page = pages->next; in rb_insert_pages()
1973 last_page = pages->prev; in rb_insert_pages()
1978 last_page->next = head_page_with_bit; in rb_insert_pages()
1979 first_page->prev = prev_page; in rb_insert_pages()
1981 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); in rb_insert_pages()
1989 head_page->prev = last_page; in rb_insert_pages()
2002 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
2007 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2009 list_del_init(&bpage->list); in rb_insert_pages()
2020 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2024 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2027 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2035 complete(&cpu_buffer->update_done); in update_pages_handler()
2039 * ring_buffer_resize - resize the ring buffer
2042 * @cpu_id: the cpu buffer to resize
2053 int cpu, err; in ring_buffer_resize() local
2056 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2063 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2073 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2082 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2083 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2084 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2085 err = -EBUSY; in ring_buffer_resize()
2091 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2092 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2094 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2095 cpu_buffer->nr_pages; in ring_buffer_resize()
2099 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2105 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2106 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2107 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2109 err = -ENOMEM; in ring_buffer_resize()
2120 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2121 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2122 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2125 /* Can't run something on an offline CPU. */ in ring_buffer_resize()
2126 if (!cpu_online(cpu)) { in ring_buffer_resize()
2128 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2130 schedule_work_on(cpu, in ring_buffer_resize()
2131 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2136 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2137 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2138 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2141 if (cpu_online(cpu)) in ring_buffer_resize()
2142 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2143 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2148 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2150 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2158 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2159 err = -EBUSY; in ring_buffer_resize()
2163 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2164 cpu_buffer->nr_pages; in ring_buffer_resize()
2166 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2167 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2168 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2169 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2170 err = -ENOMEM; in ring_buffer_resize()
2176 /* Can't run something on an offline CPU. */ in ring_buffer_resize()
2181 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2182 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2185 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2197 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2198 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2206 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2207 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2210 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2213 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2217 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2220 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2221 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2223 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2226 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
2228 list_del_init(&bpage->list); in ring_buffer_resize()
2233 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2240 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2242 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2244 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2245 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2251 return bpage->page->data + index; in __rb_page_index()
2257 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2258 cpu_buffer->reader_page->read); in rb_reader_event()
2263 return local_read(&bpage->page->commit); in rb_page_commit()
2270 struct buffer_page *iter_head_page = iter->head_page; in rb_iter_head_event()
2274 if (iter->head != iter->next_event) in rb_iter_head_event()
2275 return iter->event; in rb_iter_head_event()
2284 event = __rb_page_index(iter_head_page, iter->head); in rb_iter_head_event()
2293 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) in rb_iter_head_event()
2294 /* Writer corrupted the read? */ in rb_iter_head_event()
2297 memcpy(iter->event, event, length); in rb_iter_head_event()
2304 /* Make sure the page didn't change since we read this */ in rb_iter_head_event()
2305 if (iter->page_stamp != iter_head_page->page->time_stamp || in rb_iter_head_event()
2309 iter->next_event = iter->head + length; in rb_iter_head_event()
2310 return iter->event; in rb_iter_head_event()
2313 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_head_event()
2314 iter->head = 0; in rb_iter_head_event()
2315 iter->next_event = 0; in rb_iter_head_event()
2316 iter->missed_events = 1; in rb_iter_head_event()
2329 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2337 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; in rb_event_index()
2342 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
2350 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2351 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2353 rb_inc_page(&iter->head_page); in rb_inc_iter()
2355 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter()
2356 iter->head = 0; in rb_inc_iter()
2357 iter->next_event = 0; in rb_inc_iter()
2361 * rb_handle_head_page - writer hit the head page
2365 * -1 on error
2389 * NORMAL - an interrupt already moved it for us in rb_handle_head_page()
2390 * HEAD - we are the first to get here. in rb_handle_head_page()
2391 * UPDATE - we are the interrupt interrupting in rb_handle_head_page()
2393 * MOVED - a reader on another CPU moved the next in rb_handle_head_page()
2405 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2406 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); in rb_handle_head_page()
2431 * The reader is on another CPU and just did in rb_handle_head_page()
2438 return -1; in rb_handle_head_page()
2445 * The reader (on another CPU) will spin till in rb_handle_head_page()
2463 * HEAD - an interrupt came in and already set it. in rb_handle_head_page()
2464 * NORMAL - One of two things: in rb_handle_head_page()
2476 return -1; in rb_handle_head_page()
2492 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2515 return -1; in rb_handle_head_page()
2525 struct buffer_page *tail_page = info->tail_page; in rb_reset_tail()
2527 unsigned long length = info->length; in rb_reset_tail()
2540 tail_page->real_end = 0; in rb_reset_tail()
2542 local_sub(length, &tail_page->write); in rb_reset_tail()
2549 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2556 tail_page->real_end = tail; in rb_reset_tail()
2569 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { in rb_reset_tail()
2576 local_sub(length, &tail_page->write); in rb_reset_tail()
2581 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
2582 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
2584 event->time_delta = 1; in rb_reset_tail()
2587 length = (tail + length) - BUF_PAGE_SIZE; in rb_reset_tail()
2588 local_sub(length, &tail_page->write); in rb_reset_tail()
2600 struct buffer_page *tail_page = info->tail_page; in rb_move_tail()
2601 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2602 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2616 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2624 * could be on another CPU trying to swap its reader in rb_move_tail()
2634 if (rb_is_head_page(next_page, &tail_page->list)) { in rb_move_tail()
2640 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2645 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2646 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2668 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2669 cpu_buffer->tail_page) && in rb_move_tail()
2670 (cpu_buffer->commit_page == in rb_move_tail()
2671 cpu_buffer->reader_page))) { in rb_move_tail()
2672 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2687 local_inc(&cpu_buffer->committing); in rb_move_tail()
2690 return ERR_PTR(-EAGAIN); in rb_move_tail()
2704 event->type_len = RINGBUF_TYPE_TIME_STAMP; in rb_add_time_stamp()
2706 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
2710 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
2711 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
2714 event->time_delta = 0; in rb_add_time_stamp()
2715 event->array[0] = 0; in rb_add_time_stamp()
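
Editor's illustration of the time-extend encoding used just above: a delta too large for the 27-bit time_delta field is split so the low TS_SHIFT bits go into time_delta and the remaining bits into array[0], and rb_event_time_stamp() further up the listing reassembles it in the opposite order. TS_SHIFT is assumed to be 27 here, matching the 27-bit bitfield; the toy_event name is invented.

#include <stdint.h>
#include <stdio.h>

#define TS_SHIFT        27
#define TS_MASK         ((1ULL << TS_SHIFT) - 1)

struct toy_event {
        uint32_t type_len:5, time_delta:27;
        uint32_t array[1];
};

int main(void)
{
        uint64_t delta = 5000000000ULL;         /* ~5s in ns, needs > 27 bits */
        struct toy_event ev = { 0 };
        uint64_t decoded;

        /* Encode, as rb_add_time_stamp() does in the listing. */
        ev.time_delta = delta & TS_MASK;
        ev.array[0] = delta >> TS_SHIFT;

        /* Decode, as rb_event_time_stamp() does further up the listing. */
        decoded = ev.array[0];
        decoded <<= TS_SHIFT;
        decoded += ev.time_delta;

        printf("delta %llu -> low 27 bits %u, high bits %u -> %llu\n",
               (unsigned long long)delta, (unsigned int)ev.time_delta,
               ev.array[0], (unsigned long long)decoded);
        return 0;
}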
2735 (unsigned long long)info->delta, in rb_check_timestamp()
2736 (unsigned long long)info->ts, in rb_check_timestamp()
2737 (unsigned long long)info->before, in rb_check_timestamp()
2738 (unsigned long long)info->after, in rb_check_timestamp()
2739 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2753 bool abs = info->add_timestamp & in rb_add_timestamp()
2756 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
2758 if (info->before == info->after && info->before > info->ts) { in rb_add_timestamp()
2768 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
2769 info->before, info->ts); in rb_add_timestamp()
2774 info->delta = 0; in rb_add_timestamp()
2776 *event = rb_add_time_stamp(*event, info->delta, abs); in rb_add_timestamp()
2777 *length -= RB_LEN_TIME_EXTEND; in rb_add_timestamp()
2782 * rb_update_event - update event type and data
2783 * @cpu_buffer: The per cpu buffer of the @event
2797 unsigned length = info->length; in rb_update_event()
2798 u64 delta = info->delta; in rb_update_event()
2799 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
2802 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2808 if (unlikely(info->add_timestamp)) in rb_update_event()
2811 event->time_delta = delta; in rb_update_event()
2812 length -= RB_EVNT_HDR_SIZE; in rb_update_event()
2814 event->type_len = 0; in rb_update_event()
2815 event->array[0] = length; in rb_update_event()
2817 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
2854 switch (event->type_len) { in rb_time_delta()
2865 return event->time_delta; in rb_time_delta()
2887 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
2891 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) in rb_try_to_discard()
2894 /* Make sure the write stamp is read before testing the location */ in rb_try_to_discard()
2897 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
2899 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
2903 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, in rb_try_to_discard()
2904 write_stamp, write_stamp - delta)) in rb_try_to_discard()
2916 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
2934 index = local_cmpxchg(&bpage->write, old_index, new_index); in rb_try_to_discard()
2937 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2948 local_inc(&cpu_buffer->committing); in rb_start_commit()
2949 local_inc(&cpu_buffer->commits); in rb_start_commit()
2958 * We only race with interrupts and NMIs on this CPU. in rb_set_commit_to_write()
2966 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
2968 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
2969 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
2972 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2974 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2975 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2976 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
2981 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
2983 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2984 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2986 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
2999 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3008 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3012 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3015 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3018 local_dec(&cpu_buffer->committing); in rb_end_commit()
3028 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3029 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3030 local_inc(&cpu_buffer->committing); in rb_end_commit()
3041 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
3042 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
3044 if (!event->time_delta) in rb_event_discard()
3045 event->time_delta = 1; in rb_event_discard()
3051 local_inc(&cpu_buffer->entries); in rb_commit()
3062 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3063 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3065 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3068 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3069 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3071 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3074 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3077 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3080 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3083 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3085 full = cpu_buffer->shortest_full; in rb_wakeups()
3086 nr_pages = cpu_buffer->nr_pages; in rb_wakeups()
3087 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu); in rb_wakeups()
3091 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3092 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3094 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3131 * 101 - 1 = 100
3134 * 1010 - 1 = 1001
3169 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3179 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3186 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3192 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3193 cpu_buffer->current_context = val; in trace_recursive_lock()
3201 cpu_buffer->current_context &= in trace_recursive_unlock()
3202 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
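
Editor's illustration of the per-context recursion guard above, without the kernel's preemption handling, nest offset, or transition context. Each context level owns one bit; taking the lock twice in the same context fails, and unlock clears the most recently set bit with the "x & (x - 1)" trick shown in the comment fragments (it clears the lowest set bit). The toy_* names and the four-level enum are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

enum toy_ctx { TOY_NMI, TOY_IRQ, TOY_SOFTIRQ, TOY_NORMAL };

static unsigned int current_context;

static bool toy_recursive_lock(enum toy_ctx ctx)
{
        unsigned int bit = 1U << ctx;

        if (current_context & bit)
                return false;           /* same context re-entered: drop event */
        current_context |= bit;
        return true;
}

static void toy_recursive_unlock(void)
{
        /* e.g. 0b1010 & 0b1001 = 0b1000: the newest (lowest) bit is cleared */
        current_context &= current_context - 1;
}

int main(void)
{
        /* Normal context starts writing, then an IRQ nests on top. */
        printf("normal lock: %d\n", toy_recursive_lock(TOY_NORMAL));
        printf("irq lock:    %d\n", toy_recursive_lock(TOY_IRQ));

        /* Recursion inside the IRQ handler is refused. */
        printf("irq again:   %d\n", toy_recursive_lock(TOY_IRQ));

        toy_recursive_unlock();         /* releases the IRQ bit */
        toy_recursive_unlock();         /* releases the normal bit */
        printf("context now: 0x%x\n", current_context);
        return 0;
}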
3209 * ring_buffer_nest_start - Allow to trace while nested
3224 int cpu; in ring_buffer_nest_start() local
3228 cpu = raw_smp_processor_id(); in ring_buffer_nest_start()
3229 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3231 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3235 * ring_buffer_nest_end - Allow to trace while nested
3244 int cpu; in ring_buffer_nest_end() local
3247 cpu = raw_smp_processor_id(); in ring_buffer_nest_end()
3248 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3250 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3255 * ring_buffer_unlock_commit - commit a reserved
3267 int cpu = raw_smp_processor_id(); in ring_buffer_unlock_commit() local
3269 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3295 ts = bpage->time_stamp; in dump_buffer_page()
3300 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
3302 switch (event->type_len) { in dump_buffer_page()
3317 ts += event->time_delta; in dump_buffer_page()
3318 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); in dump_buffer_page()
3322 ts += event->time_delta; in dump_buffer_page()
3323 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); in dump_buffer_page()
3349 bpage = info->tail_page->page; in check_buffer()
3353 tail = local_read(&bpage->commit); in check_buffer()
3354 } else if (info->add_timestamp & in check_buffer()
3364 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
3373 ts = bpage->time_stamp; in check_buffer()
3377 event = (struct ring_buffer_event *)(bpage->data + e); in check_buffer()
3379 switch (event->type_len) { in check_buffer()
3392 if (event->time_delta == 1) in check_buffer()
3396 ts += event->time_delta; in check_buffer()
3403 if ((full && ts > info->ts) || in check_buffer()
3404 (!full && ts + info->delta != info->ts)) { in check_buffer()
3410 atomic_inc(&cpu_buffer->record_disabled); in check_buffer()
3413 …pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%… in check_buffer()
3414 cpu_buffer->cpu, in check_buffer()
3415 ts + info->delta, info->ts, info->delta, in check_buffer()
3416 info->before, info->after, in check_buffer()
3420 /* Do not re-enable checking */ in check_buffer()
3444 /* Don't let the compiler play games with cpu_buffer->tail_page */ in __rb_reserve_next()
3445 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3447 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; in __rb_reserve_next()
3449 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3450 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3452 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3454 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { in __rb_reserve_next()
3455 info->delta = info->ts; in __rb_reserve_next()
3462 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { in __rb_reserve_next()
3463 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3464 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3466 info->delta = info->ts - info->after; in __rb_reserve_next()
3467 if (unlikely(test_time_stamp(info->delta))) { in __rb_reserve_next()
3468 info->add_timestamp |= RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3469 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3474 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3476 /*C*/ write = local_add_return(info->length, &tail_page->write); in __rb_reserve_next()
3481 tail = write - info->length; in __rb_reserve_next()
3486 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3487 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3488 if (a_ok && b_ok && info->before != info->after) in __rb_reserve_next()
3489 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, in __rb_reserve_next()
3490 info->before, info->after); in __rb_reserve_next()
3501 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3503 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); in __rb_reserve_next()
3505 if (likely(!(info->add_timestamp & in __rb_reserve_next()
3508 info->delta = info->ts - info->after; in __rb_reserve_next()
3511 info->delta = info->ts; in __rb_reserve_next()
3514 if (unlikely(info->ts != save_before)) { in __rb_reserve_next()
3515 /* SLOW PATH - Interrupted between C and E */ in __rb_reserve_next()
3517 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3521 if (save_before > info->after) { in __rb_reserve_next()
3526 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3527 info->after, save_before); in __rb_reserve_next()
3532 /* SLOW PATH - Interrupted between A and C */ in __rb_reserve_next()
3533 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3536 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3538 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && in __rb_reserve_next()
3539 info->after < ts && in __rb_reserve_next()
3540 rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3541 info->after, ts)) { in __rb_reserve_next()
3543 info->delta = ts - info->after; in __rb_reserve_next()
3553 info->delta = 0; in __rb_reserve_next()
3555 info->ts = ts; in __rb_reserve_next()
3556 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; in __rb_reserve_next()
3563 if (unlikely(!tail && !(info->add_timestamp & in __rb_reserve_next()
3565 info->delta = 0; in __rb_reserve_next()
3572 local_inc(&tail_page->entries); in __rb_reserve_next()
3579 tail_page->page->time_stamp = info->ts; in __rb_reserve_next()
3582 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3602 * Due to the ability to swap a cpu buffer from a buffer in rb_reserve_next_event()
3608 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3609 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3610 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3617 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3642 if (unlikely(PTR_ERR(event) == -EAGAIN)) { in rb_reserve_next_event()
3644 info.length -= RB_LEN_TIME_EXTEND; in rb_reserve_next_event()
3656 * ring_buffer_lock_reserve - reserve a part of the buffer
3675 int cpu; in ring_buffer_lock_reserve() local
3680 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3683 cpu = raw_smp_processor_id(); in ring_buffer_lock_reserve()
3685 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3688 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3690 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3724 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3730 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
3731 local_dec(&bpage->entries); in rb_decrement_entry()
3742 if (bpage->page == (void *)addr) { in rb_decrement_entry()
3743 local_dec(&bpage->entries); in rb_decrement_entry()
3754 * ring_buffer_discard_commit - discard an event that has not been committed
3760 * and then that event will not be read later.
3776 int cpu; in ring_buffer_discard_commit() local
3781 cpu = smp_processor_id(); in ring_buffer_discard_commit()
3782 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3789 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3806 * ring_buffer_write - write data to the buffer without reserving
3825 int ret = -EBUSY; in ring_buffer_write()
3826 int cpu; in ring_buffer_write() local
3830 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3833 cpu = raw_smp_processor_id(); in ring_buffer_write()
3835 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3838 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3840 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3875 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3877 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
3884 if (reader->read != rb_page_commit(reader)) in rb_per_cpu_empty()
3889 * committed content has been read, the ring buffer is empty. in rb_per_cpu_empty()
3896 * and head page, there should always be content to read. in rb_per_cpu_empty()
3904 * swap reader page with head page when it is to read data. in rb_per_cpu_empty()
3910 * ring_buffer_record_disable - stop all writes into the buffer
3920 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
3925 * ring_buffer_record_enable - enable writes to the buffer
3933 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
3938 * ring_buffer_record_off - stop all writes into the buffer
3954 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
3956 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_off()
3961 * ring_buffer_record_on - restart writes into the buffer
3977 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
3979 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); in ring_buffer_record_on()
3984 * ring_buffer_record_is_on - return true if the ring buffer can write
3991 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
3995 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4007 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
4011 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4013 * @cpu: The CPU buffer to stop
4020 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
4024 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4027 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4028 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4033 * ring_buffer_record_enable_cpu - enable writes to the buffer
4035 * @cpu: The CPU to enable.
4040 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
4044 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4047 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4048 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4055 * the entries read from the ring buffer and the number of
4061 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4062 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
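
Editor's note: a worked example of the bookkeeping in rb_num_of_entries() above, using plain integers instead of local_t counters. What is left to read is everything written minus what was overwritten and what the reader already consumed; the numbers are made up.

#include <stdio.h>

int main(void)
{
        unsigned long entries = 1000;   /* events written */
        unsigned long overrun = 120;    /* events lost to overwrite */
        unsigned long read = 300;       /* events already consumed */

        printf("entries remaining: %lu\n", entries - (overrun + read));
        return 0;
}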
4066 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4068 * @cpu: The per CPU buffer to read from.
4070 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
4077 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4080 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4081 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4086 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4087 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4091 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
4092 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4099 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
4101 * @cpu: The per CPU buffer to read from.
4103 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
4108 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4111 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4112 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4119 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4121 * @cpu: The per CPU buffer to get the entries from.
4123 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
4127 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4130 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4137 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4140 * @cpu: The per CPU buffer to get the number of overruns from
4142 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4147 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4151 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4158 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4162 * @cpu: The per CPU buffer to get the number of overruns from
4165 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4170 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4173 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4174 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4181 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4184 * @cpu: The per CPU buffer to get the number of overruns from
4187 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4192 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4195 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4196 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4203 * ring_buffer_read_events_cpu - get the number of events successfully read
4205 * @cpu: The per CPU buffer to get the number of events read
4208 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4212 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4215 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4216 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4221 * ring_buffer_entries - get the number of entries in a buffer
4225 * (all CPU entries)
4231 int cpu; in ring_buffer_entries() local
4234 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4235 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4244 * ring_buffer_overruns - get the number of overruns in buffer
4248 * (all CPU entries)
4254 int cpu; in ring_buffer_overruns() local
4257 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4258 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4259 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4268 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
4271 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4272 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4273 iter->next_event = iter->head; in rb_iter_reset()
4275 iter->cache_reader_page = iter->head_page; in rb_iter_reset()
4276 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4278 if (iter->head) { in rb_iter_reset()
4279 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4280 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4282 iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_reset()
4283 iter->page_stamp = iter->read_stamp; in rb_iter_reset()
4288 * ring_buffer_iter_reset - reset an iterator
4302 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4304 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4306 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4311 * ring_buffer_iter_empty - check if an iterator has no more to read
4325 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4326 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4327 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4328 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4329 commit_ts = commit_page->page->time_stamp; in ring_buffer_iter_empty()
4342 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4343 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); in ring_buffer_iter_empty()
4351 return ((iter->head_page == commit_page && iter->head >= commit) || in ring_buffer_iter_empty()
4352 (iter->head_page == reader && commit_page == head_page && in ring_buffer_iter_empty()
4353 head_page->read == commit && in ring_buffer_iter_empty()
4354 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4364 switch (event->type_len) { in rb_update_read_stamp()
4370 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4375 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4379 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4394 switch (event->type_len) { in rb_update_iter_read_stamp()
4400 iter->read_stamp += delta; in rb_update_iter_read_stamp()
4405 iter->read_stamp = delta; in rb_update_iter_read_stamp()
4409 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
4413 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4428 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4442 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4444 /* If there's more to read, return this page */ in rb_get_reader_page()
4445 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4450 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4455 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4465 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4466 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4467 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4468 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4477 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4478 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4481 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
4485 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4488 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
4491 * We want to make sure we read the overruns after we set up our in rb_get_reader_page()
4500 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4513 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4526 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4527 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
4529 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4532 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4533 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4535 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4536 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4537 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4544 if (reader && reader->read == 0) in rb_get_reader_page()
4545 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4547 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4567 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
4568 cpu_buffer->read++; in rb_advance_reader()
4573 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4580 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4583 if (iter->head == iter->next_event) { in rb_advance_iter()
4589 iter->head = iter->next_event; in rb_advance_iter()
4594 if (iter->next_event >= rb_page_size(iter->head_page)) { in rb_advance_iter()
4596 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4602 rb_update_iter_read_stamp(iter, iter->event); in rb_advance_iter()
4607 return cpu_buffer->lost_events; in rb_lost_events()
4636 switch (event->type_len) { in rb_buffer_peek()
4658 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4659 cpu_buffer->cpu, ts); in rb_buffer_peek()
4667 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4668 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4669 cpu_buffer->cpu, ts); in rb_buffer_peek()
4694 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4695 buffer = cpu_buffer->buffer; in rb_iter_peek()
4698 * Check if someone performed a consuming read to in rb_iter_peek()
4699 * the buffer. A consuming read invalidates the iterator in rb_iter_peek()
4702 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4703 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
4712 * to read, just give up if we fail to get an event after in rb_iter_peek()
4723 if (iter->head >= rb_page_size(iter->head_page)) { in rb_iter_peek()
4732 switch (event->type_len) { in rb_iter_peek()
4749 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4750 cpu_buffer->cpu, ts); in rb_iter_peek()
4758 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
4760 cpu_buffer->cpu, ts); in rb_iter_peek()
4775 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4784 * to do the read, but this can corrupt the ring buffer, in rb_reader_lock()
4788 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4792 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4800 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4805 * ring_buffer_peek - peek at the next event to be read
4806 * @buffer: The ring buffer to read
4807 * @cpu: The CPU to peek at
4811 * This will return the event that will be read next, but does
4815 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
4818 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4823 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4830 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4835 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
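/*
 * Example (sketch only, not from the original source): peeking at the
 * next event on a CPU without consuming it. The "buffer" and "cpu"
 * variables are assumed to exist in the caller.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		pr_info("next event: %u bytes at %llu ns\n",
 *			ring_buffer_event_length(event), ts);
 */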
4841 /** ring_buffer_iter_dropped - report if there are dropped events
4848 bool ret = iter->missed_events != 0; in ring_buffer_iter_dropped()
4850 iter->missed_events = 0; in ring_buffer_iter_dropped()
4856 * ring_buffer_iter_peek - peek at the next event to be read
4860 * This will return the event that will be read next, but does
4866 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
4871 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4873 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4875 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
4882 * ring_buffer_consume - return an event and consume it
4884 * @cpu: the cpu to read the buffer from
4893 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
4905 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
4908 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
4914 cpu_buffer->lost_events = 0; in ring_buffer_consume()
4924 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
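/*
 * Example (illustrative sketch): a consuming read that drains whatever is
 * currently in one CPU buffer. process_event() is a hypothetical helper
 * supplied by the caller; "buffer" and "cpu" are assumed to exist.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_warn("dropped %lu events\n", lost);
 *		process_event(ring_buffer_event_data(event), ts);
 *	}
 */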
4932 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4933 * @buffer: The ring buffer to read from
4934 * @cpu: The cpu buffer to iterate over
4942 * corrupted. This is not a consuming read, so a producer is not
4953 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
4958 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
4965 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); in ring_buffer_read_prepare()
4966 if (!iter->event) { in ring_buffer_read_prepare()
4971 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4973 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
4975 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
4982 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4996 * ring_buffer_read_start - start a non consuming read of the buffer
5015 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5017 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5018 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5020 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5021 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5026 * ring_buffer_read_finish - finish reading the iterator of the buffer
5029 * This re-enables the recording to the buffer, and frees the
5035 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5041 * Must prevent readers from trying to read, as the check in ring_buffer_read_finish()
5044 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5046 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5048 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5049 kfree(iter->event); in ring_buffer_read_finish()
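/*
 * Example (sketch, not part of the original source): the full life cycle
 * of a non consuming read using the iterator API above. handle() is a
 * hypothetical callback; "buffer" and "cpu" are assumed to exist in the
 * caller.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while (!ring_buffer_iter_empty(iter)) {
 *		event = ring_buffer_iter_peek(iter, &ts);
 *		if (!event)
 *			break;
 *		handle(ring_buffer_event_data(event), ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *
 *	ring_buffer_read_finish(iter);
 */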
5055 * ring_buffer_iter_advance - advance the iterator to the next location
5058 * Move the location of the iterator such that the next read will
5063 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
5066 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5070 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5075 * ring_buffer_size - return the size of the ring buffer (in bytes)
5077 * @cpu: The CPU to get ring buffer size from.
5079 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
5083 * BUF_PAGE_SIZE * buffer->nr_pages in ring_buffer_size()
5085 * return the per cpu buffer value. in ring_buffer_size()
5087 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5090 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
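/*
 * Example (sketch only): totalling the size of every online per CPU
 * buffer. "buffer" is assumed to be a valid struct trace_buffer pointer.
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += ring_buffer_size(buffer, cpu);
 */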
5099 cpu_buffer->head_page in rb_reset_cpu()
5100 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5101 local_set(&cpu_buffer->head_page->write, 0); in rb_reset_cpu()
5102 local_set(&cpu_buffer->head_page->entries, 0); in rb_reset_cpu()
5103 local_set(&cpu_buffer->head_page->page->commit, 0); in rb_reset_cpu()
5105 cpu_buffer->head_page->read = 0; in rb_reset_cpu()
5107 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5108 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5110 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5111 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5112 local_set(&cpu_buffer->reader_page->write, 0); in rb_reset_cpu()
5113 local_set(&cpu_buffer->reader_page->entries, 0); in rb_reset_cpu()
5114 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_reset_cpu()
5115 cpu_buffer->reader_page->read = 0; in rb_reset_cpu()
5117 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5118 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5119 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5120 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5121 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5122 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5123 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5124 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5125 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5126 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5127 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5128 cpu_buffer->read = 0; in rb_reset_cpu()
5129 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5131 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5132 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5134 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5136 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5137 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5142 /* Must have disabled the cpu buffer then done a synchronize_rcu */
5147 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5149 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5152 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5156 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5159 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5163 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5164 * @buffer: The ring buffer to reset a per cpu buffer of
5165 * @cpu: The CPU buffer to be reset
5167 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5169 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5171 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5175 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5177 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5178 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5185 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5186 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5188 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5193 * ring_buffer_reset_online_cpus - reset all online per CPU buffers of a ring buffer
5194 * @buffer: The ring buffer whose online per CPU buffers are to be reset
5200 int cpu; in ring_buffer_reset_online_cpus() local
5203 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5205 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5206 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5208 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5209 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5215 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5220 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5221 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5224 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5228 * ring_buffer_reset - reset a ring buffer
5229 * @buffer: The ring buffer to reset all cpu buffers of
5234 int cpu; in ring_buffer_reset() local
5236 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5237 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5239 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5240 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5246 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5247 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5251 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5252 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5258 * ring_buffer_empty - is the ring buffer empty?
5266 int cpu; in ring_buffer_empty() local
5270 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5271 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5287 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5289 * @cpu: The CPU buffer to test
5291 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5298 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5301 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5314 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5317 * @cpu: the CPU of the buffers to swap
5320 * of a CPU buffer and have another back up buffer lying around.
5321 * It is expected that the tracer handles the CPU buffer not being
5325 struct trace_buffer *buffer_b, int cpu) in ring_buffer_swap_cpu() argument
5329 int ret = -EINVAL; in ring_buffer_swap_cpu()
5331 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
5332 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
5335 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
5336 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
5339 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
5342 ret = -EAGAIN; in ring_buffer_swap_cpu()
5344 if (atomic_read(&buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5347 if (atomic_read(&buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5350 if (atomic_read(&cpu_buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5353 if (atomic_read(&cpu_buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5359 * Normally this will be called from the same CPU as cpu. in ring_buffer_swap_cpu()
5362 atomic_inc(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5363 atomic_inc(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
5365 ret = -EBUSY; in ring_buffer_swap_cpu()
5366 if (local_read(&cpu_buffer_a->committing)) in ring_buffer_swap_cpu()
5368 if (local_read(&cpu_buffer_b->committing)) in ring_buffer_swap_cpu()
5371 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
5372 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
5374 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5375 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5380 atomic_dec(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5381 atomic_dec(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
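/*
 * Example (illustrative sketch): a "snapshot" style swap between a live
 * buffer and a spare one, as a tracer might do. "live", "snapshot" and
 * "cpu" are hypothetical variables owned by the caller; both buffers
 * must have the same number of pages per CPU, and this is normally run
 * on the CPU being swapped.
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(snapshot, live, cpu);
 *	if (err)
 *		pr_warn("snapshot swap failed: %d\n", err);
 */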
5389 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5391 * @cpu: the CPU buffer to allocate the page for
5399 * the page that was allocated, with the read page of the buffer.
5404 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5411 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5412 return ERR_PTR(-ENODEV); in ring_buffer_alloc_read_page()
5414 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5416 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5418 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5419 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5420 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5423 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5429 page = alloc_pages_node(cpu_to_node(cpu), in ring_buffer_alloc_read_page()
5432 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
5444 * ring_buffer_free_read_page - free an allocated read page
5446 * @cpu: the cpu buffer the page came from
5451 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
5453 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5463 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5465 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5466 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5470 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5479 * ring_buffer_read_page - extract a page from the ring buffer
5483 * @cpu: the cpu of the buffer to extract
5492 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
5495 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5512 void **data_page, size_t len, int cpu, int full) in ring_buffer_read_page() argument
5514 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5521 unsigned int read; in ring_buffer_read_page() local
5523 int ret = -1; in ring_buffer_read_page()
5525 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5535 len -= BUF_PAGE_HDR_SIZE; in ring_buffer_read_page()
5544 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5552 read = reader->read; in ring_buffer_read_page()
5556 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5559 * If this page has been partially read or in ring_buffer_read_page()
5560 * if len is not big enough to read the rest of the page or in ring_buffer_read_page()
5565 if (read || (len < (commit - read)) || in ring_buffer_read_page()
5566 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5567 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5568 unsigned int rpos = read; in ring_buffer_read_page()
5575 if (len > (commit - read)) in ring_buffer_read_page()
5576 len = (commit - read); in ring_buffer_read_page()
5585 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5596 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
5598 len -= size; in ring_buffer_read_page()
5601 rpos = reader->read; in ring_buffer_read_page()
5613 local_set(&bpage->commit, pos); in ring_buffer_read_page()
5614 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
5617 read = 0; in ring_buffer_read_page()
5620 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5621 cpu_buffer->read_bytes += BUF_PAGE_SIZE; in ring_buffer_read_page()
5625 bpage = reader->page; in ring_buffer_read_page()
5626 reader->page = *data_page; in ring_buffer_read_page()
5627 local_set(&reader->write, 0); in ring_buffer_read_page()
5628 local_set(&reader->entries, 0); in ring_buffer_read_page()
5629 reader->read = 0; in ring_buffer_read_page()
5637 if (reader->real_end) in ring_buffer_read_page()
5638 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
5640 ret = read; in ring_buffer_read_page()
5642 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5644 commit = local_read(&bpage->commit); in ring_buffer_read_page()
5652 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
5653 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
5655 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
5658 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
5665 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); in ring_buffer_read_page()
5668 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
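/*
 * Example (sketch, not from the original source): extracting one full
 * page of events, splice style, and handing the swapped out page back.
 * process_page() is a hypothetical consumer; "buffer" and "cpu" are
 * assumed to exist in the caller.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 1);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 *	return ret;
 */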
5676 * We only allocate new buffers, never free them if the CPU goes down.
5680 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) in trace_rb_cpu_prepare() argument
5688 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
5693 /* check if all cpu sizes are same */ in trace_rb_cpu_prepare()
5695 /* fill in the size from first enabled cpu */ in trace_rb_cpu_prepare()
5697 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
5698 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
5706 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
5707 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
5708 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
5709 WARN(1, "failed to allocate ring buffer on CPU %u\n", in trace_rb_cpu_prepare()
5710 cpu); in trace_rb_cpu_prepare()
5711 return -ENOMEM; in trace_rb_cpu_prepare()
5714 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
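/*
 * Example (sketch of how a caller such as the tracing core typically
 * wires this up; treat the exact state name as an assumption): the
 * callback above is registered as a multi instance CPU hotplug state,
 * with each ring buffer added as an instance.
 *
 *	int ret;
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 *	...
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 */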
5722 * It will kick off a thread per CPU that will go into a loop
5723 * writing to the per cpu ring buffer various sizes of data.
5750 int cpu; member
5756 /* 1 meg per cpu */
5782 cnt = data->cnt + (nested ? 27 : 0); in rb_write_something()
5785 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); in rb_write_something()
5790 /* read rb_test_started before checking buffer enabled */ in rb_write_something()
5793 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
5798 data->bytes_dropped += len; in rb_write_something()
5800 data->bytes_dropped_nested += len; in rb_write_something()
5807 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
5811 item->size = size; in rb_write_something()
5812 memcpy(item->str, rb_string, size); in rb_write_something()
5815 data->bytes_alloc_nested += event_len; in rb_write_something()
5816 data->bytes_written_nested += len; in rb_write_something()
5817 data->events_nested++; in rb_write_something()
5818 if (!data->min_size_nested || len < data->min_size_nested) in rb_write_something()
5819 data->min_size_nested = len; in rb_write_something()
5820 if (len > data->max_size_nested) in rb_write_something()
5821 data->max_size_nested = len; in rb_write_something()
5823 data->bytes_alloc += event_len; in rb_write_something()
5824 data->bytes_written += len; in rb_write_something()
5825 data->events++; in rb_write_something()
5826 if (!data->min_size || len < data->min_size) in rb_write_something()
5827 data->min_size = len; in rb_write_something()
5828 if (len > data->max_size) in rb_write_something()
5829 data->max_size = len; in rb_write_something()
5833 ring_buffer_unlock_commit(data->buffer, event); in rb_write_something()
5844 data->cnt++; in rb_test()
5847 /* Now sleep between a min of 100-300us and a max of 1ms */ in rb_test()
5848 usleep_range(((data->cnt % 3) + 1) * 100, 1000); in rb_test()
5857 int cpu = smp_processor_id(); in rb_ipi() local
5859 data = &rb_data[cpu]; in rb_ipi()
5880 int cpu; in test_ringbuffer() local
5897 for_each_online_cpu(cpu) { in test_ringbuffer()
5898 rb_data[cpu].buffer = buffer; in test_ringbuffer()
5899 rb_data[cpu].cpu = cpu; in test_ringbuffer()
5900 rb_data[cpu].cnt = cpu; in test_ringbuffer()
5901 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], in test_ringbuffer()
5902 "rbtester/%d", cpu); in test_ringbuffer()
5903 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { in test_ringbuffer()
5905 ret = PTR_ERR(rb_threads[cpu]); in test_ringbuffer()
5909 kthread_bind(rb_threads[cpu], cpu); in test_ringbuffer()
5910 wake_up_process(rb_threads[cpu]); in test_ringbuffer()
5941 for_each_online_cpu(cpu) { in test_ringbuffer()
5942 if (!rb_threads[cpu]) in test_ringbuffer()
5944 kthread_stop(rb_threads[cpu]); in test_ringbuffer()
5953 for_each_online_cpu(cpu) { in test_ringbuffer()
5955 struct rb_test_data *data = &rb_data[cpu]; in test_ringbuffer()
5969 ret = -1; in test_ringbuffer()
5971 total_events = data->events + data->events_nested; in test_ringbuffer()
5972 total_written = data->bytes_written + data->bytes_written_nested; in test_ringbuffer()
5973 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; in test_ringbuffer()
5974 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; in test_ringbuffer()
5976 big_event_size = data->max_size + data->max_size_nested; in test_ringbuffer()
5977 small_event_size = data->min_size + data->min_size_nested; in test_ringbuffer()
5979 pr_info("CPU %d:\n", cpu); in test_ringbuffer()
5992 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
5996 total_size += item->size + sizeof(struct rb_item); in test_ringbuffer()
5997 if (memcmp(&item->str[0], rb_string, item->size) != 0) { in test_ringbuffer()
5999 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
6000 pr_info("expected: %.*s\n", item->size, rb_string); in test_ringbuffer()
6002 ret = -1; in test_ringbuffer()
6010 ret = -1; in test_ringbuffer()
6012 pr_info(" read events: %ld\n", total_read); in test_ringbuffer()