Lines Matching +full:cpu +full:- +full:read
1 // SPDX-License-Identifier: GPL-2.0
27 #include <linux/cpu.h>
66 * allocated for each CPU. A writer may only write to a buffer that is
67 * associated with the CPU it is currently executing on. A reader may read
68 * from any per cpu buffer.
70 * The reader is special. For each per cpu buffer, the reader has its own
71 * reader page. When a reader has read the entire reader page, this reader
80 * +------+
83 * +------+ +---+ +---+ +---+
84 * | |-->| |-->| |
85 * +---+ +---+ +---+
88 * +---------------+
91 * +------+
93 * |page |------------------v
94 * +------+ +---+ +---+ +---+
95 * | |-->| |-->| |
96 * +---+ +---+ +---+
99 * +---------------+
102 * +------+
104 * |page |------------------v
105 * +------+ +---+ +---+ +---+
106 * ^ | |-->| |-->| |
107 * | +---+ +---+ +---+
110 * +------------------------------+
113 * +------+
115 * |page |------------------v
116 * +------+ +---+ +---+ +---+
117 * ^ | | | |-->| |
118 * | New +---+ +---+ +---+
119 * | Reader------^ |
121 * +------------------------------+
164 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
168 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
174 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
175 event->time_delta = 0; in rb_event_set_padding()
183 if (event->type_len) in rb_event_data_length()
184 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
186 length = event->array[0]; in rb_event_data_length()
198 switch (event->type_len) { in rb_event_length()
202 return -1; in rb_event_length()
203 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
238 * ring_buffer_event_length - return the length of the event
255 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
257 length -= RB_EVNT_HDR_SIZE; in ring_buffer_event_length()
258 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
259 length -= sizeof(event->array[0]); in ring_buffer_event_length()
270 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
272 if (event->type_len) in rb_event_data()
273 return (void *)&event->array[0]; in rb_event_data()
275 return (void *)&event->array[1]; in rb_event_data()
279 * ring_buffer_event_data - return the data of the event
288 #define for_each_buffer_cpu(buffer, cpu) \ argument
289 for_each_cpu(cpu, buffer->cpumask)
291 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
292 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
295 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
302 ts = event->array[0]; in rb_event_time_stamp()
304 ts += event->time_delta; in rb_event_time_stamp()
331 unsigned read; /* index for next read */ member
354 local_set(&bpage->commit, 0); in rb_init_page()
359 return local_read(&bpage->page->commit); in rb_page_commit()
364 free_page((unsigned long)bpage->page); in free_buffer_page()
376 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
378 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
379 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
437 * EXTEND - wants a time extend
438 * ABSOLUTE - the buffer requests all events to have absolute time stamps
439 * FORCE - force a full time stamp.
495 int cpu; member
506 struct buffer_page *head_page; /* read from head */
525 unsigned long read; member
586 * - Reads may fail if it interrupted a modification of the time stamp.
588 * the read itself is interrupted by a write.
591 * - Writes always succeed and will overwrite other writes and writes
594 * - A write followed by a read of the same time stamp will always succeed,
597 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
603 * The two most significant bits of each half holds a 2 bit counter (0-3).
609 #define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
634 * If the read is interrupted by a write, then the cnt will in __rb_time_read()
635 * be different. Loop until both top and bottom have been read in __rb_time_read()
639 c = local_read(&t->cnt); in __rb_time_read()
640 top = local_read(&t->top); in __rb_time_read()
641 bottom = local_read(&t->bottom); in __rb_time_read()
642 msb = local_read(&t->msb); in __rb_time_read()
643 } while (c != local_read(&t->cnt)); in __rb_time_read()
690 cnt = local_inc_return(&t->cnt); in rb_time_set()
691 rb_time_val_set(&t->top, top, cnt); in rb_time_set()
692 rb_time_val_set(&t->bottom, bottom, cnt); in rb_time_set()
693 rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt); in rb_time_set()
694 } while (cnt != local_read(&t->cnt)); in rb_time_set()
716 cnt = local_read(&t->cnt); in rb_time_cmpxchg()
730 if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2)) in rb_time_cmpxchg()
732 if (!rb_time_read_cmpxchg(&t->msb, msb, msb2)) in rb_time_cmpxchg()
734 if (!rb_time_read_cmpxchg(&t->top, top, top2)) in rb_time_cmpxchg()
736 if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2)) in rb_time_cmpxchg()
747 *ret = local64_read(&t->time); in rb_time_read()
752 local64_set(&t->time, val); in rb_time_set()
757 return local64_try_cmpxchg(&t->time, &expect, set); in rb_time_cmpxchg()
772 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
773 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
784 commit = local_read(&page->page->commit); in verify_event()
785 write = local_read(&page->write); in verify_event()
786 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
787 addr < (unsigned long)&page->page->data[write]) in verify_event()
790 next = rb_list_head(page->list.next); in verify_event()
824 * ring_buffer_event_time_stamp - return the event's current time stamp
843 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
848 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { in ring_buffer_event_time_stamp()
850 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
853 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
858 /* Read the current saved nesting level time stamp */ in ring_buffer_event_time_stamp()
859 if (likely(--nest < MAX_NEST)) in ring_buffer_event_time_stamp()
860 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
867 if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) in ring_buffer_event_time_stamp()
868 /* Screw it, just read the current time */ in ring_buffer_event_time_stamp()
869 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
875 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
877 * @cpu: The cpu of the ring_buffer to get the number of pages from
881 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
883 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
887 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
889 * @cpu: The cpu of the ring_buffer to get the number of pages from
893 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
895 size_t read; in ring_buffer_nr_dirty_pages() local
899 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
900 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
901 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
906 cnt -= lost; in ring_buffer_nr_dirty_pages()
908 /* The reader can read an empty page, but not more than that */ in ring_buffer_nr_dirty_pages()
909 if (cnt < read) { in ring_buffer_nr_dirty_pages()
910 WARN_ON_ONCE(read > cnt + 1); in ring_buffer_nr_dirty_pages()
914 return cnt - read; in ring_buffer_nr_dirty_pages()
917 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
919 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
923 nr_pages = cpu_buffer->nr_pages; in full_hit()
927 dirty = ring_buffer_nr_dirty_pages(buffer, cpu); in full_hit()
933 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
942 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
943 if (rbwork->full_waiters_pending || rbwork->wakeup_full) { in rb_wake_up_waiters()
944 rbwork->wakeup_full = false; in rb_wake_up_waiters()
945 rbwork->full_waiters_pending = false; in rb_wake_up_waiters()
946 wake_up_all(&rbwork->full_waiters); in rb_wake_up_waiters()
951 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
953 * @cpu: The CPU buffer to wake waiters on
958 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
966 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wake_waiters()
969 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
970 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
972 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
974 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
976 if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) in ring_buffer_wake_waiters()
979 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
980 /* The CPU buffer may not have been initialized yet */ in ring_buffer_wake_waiters()
983 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
986 rbwork->wait_index++; in ring_buffer_wake_waiters()
990 rb_wake_up_waiters(&rbwork->work); in ring_buffer_wake_waiters()
994 * ring_buffer_wait - wait for input to the ring buffer
996 * @cpu: the cpu buffer to wait on
997 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
999 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1000 * as data is added to any of the @buffer's cpu buffers. Otherwise
1001 * it will wait for data to be added to a specific cpu buffer.
1003 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
1013 * data in any cpu buffer, or a specific buffer, put the in ring_buffer_wait()
1016 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wait()
1017 work = &buffer->irq_work; in ring_buffer_wait()
1018 /* Full only makes sense on per cpu reads */ in ring_buffer_wait()
1021 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
1022 return -ENODEV; in ring_buffer_wait()
1023 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
1024 work = &cpu_buffer->irq_work; in ring_buffer_wait()
1027 wait_index = READ_ONCE(work->wait_index); in ring_buffer_wait()
1031 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
1033 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
1056 work->full_waiters_pending = true; in ring_buffer_wait()
1058 work->waiters_pending = true; in ring_buffer_wait()
1061 ret = -EINTR; in ring_buffer_wait()
1065 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
1068 if (cpu != RING_BUFFER_ALL_CPUS && in ring_buffer_wait()
1069 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
1077 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1078 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
1079 done = !pagebusy && full_hit(buffer, cpu, full); in ring_buffer_wait()
1081 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
1082 cpu_buffer->shortest_full > full) in ring_buffer_wait()
1083 cpu_buffer->shortest_full = full; in ring_buffer_wait()
1084 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1093 if (wait_index != work->wait_index) in ring_buffer_wait()
1098 finish_wait(&work->full_waiters, &wait); in ring_buffer_wait()
1100 finish_wait(&work->waiters, &wait); in ring_buffer_wait()
1106 * ring_buffer_poll_wait - poll on buffer input
1108 * @cpu: the cpu buffer to wait on
1111 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
1113 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1114 * as data is added to any of the @buffer's cpu buffers. Otherwise
1115 * it will wait for data to be added to a specific cpu buffer.
1120 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1126 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_poll_wait()
1127 work = &buffer->irq_work; in ring_buffer_poll_wait()
1130 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1131 return -EINVAL; in ring_buffer_poll_wait()
1133 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1134 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1138 poll_wait(filp, &work->full_waiters, poll_table); in ring_buffer_poll_wait()
1139 work->full_waiters_pending = true; in ring_buffer_poll_wait()
1140 if (!cpu_buffer->shortest_full || in ring_buffer_poll_wait()
1141 cpu_buffer->shortest_full > full) in ring_buffer_poll_wait()
1142 cpu_buffer->shortest_full = full; in ring_buffer_poll_wait()
1144 poll_wait(filp, &work->waiters, poll_table); in ring_buffer_poll_wait()
1145 work->waiters_pending = true; in ring_buffer_poll_wait()
1164 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; in ring_buffer_poll_wait()
1166 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1167 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
1180 atomic_inc(&__b->buffer->record_disabled); \
1182 atomic_inc(&b->record_disabled); \
1195 /* Skip retpolines :-( */ in rb_time_stamp()
1196 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1199 ts = buffer->clock(); in rb_time_stamp()
1218 int cpu, u64 *ts) in ring_buffer_normalize_time_stamp() argument
1227 * Although writes only happen on the CPU that they are on,
1229 * happen on any CPU.
1257 * head->list->prev->next bit 1 bit 0
1258 * ------- -------
1265 * +----+ +-----+ +-----+
1266 * | |------>| T |---X--->| N |
1267 * | |<------| | | |
1268 * +----+ +-----+ +-----+
1270 * | +-----+ | |
1271 * +----------| R |----------+ |
1272 * | |<-----------+
1273 * +-----+
1275 * Key: ---X--> HEAD flag set in pointer
1305 * rb_list_head - remove any bit
1315 * rb_is_head_page - test if the given page is the head page
1327 val = (unsigned long)list->next; in rb_is_head_page()
1329 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1344 struct list_head *list = page->list.prev; in rb_is_reader_page()
1346 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
1350 * rb_set_list_to_head - set a list_head to be pointing to head.
1356 ptr = (unsigned long *)&list->next; in rb_set_list_to_head()
1362 * rb_head_page_activate - sets up head page
1368 head = cpu_buffer->head_page; in rb_head_page_activate()
1375 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1380 unsigned long *ptr = (unsigned long *)&list->next; in rb_list_head_clear()
1386 * rb_head_page_deactivate - clears head page ptr (for free list)
1394 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1396 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1406 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1409 list = &prev->list; in rb_head_page_set()
1413 ret = cmpxchg((unsigned long *)&list->next, in rb_head_page_set()
1452 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1465 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1469 list = cpu_buffer->pages; in rb_set_head_page()
1470 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1473 page = head = cpu_buffer->head_page; in rb_set_head_page()
1482 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
1483 cpu_buffer->head_page = page; in rb_set_head_page()
1498 unsigned long *ptr = (unsigned long *)&old->list.prev->next; in rb_head_page_replace()
1504 return try_cmpxchg(ptr, &val, (unsigned long)&new->list); in rb_head_page_replace()
1508 * rb_tail_page_update - move the tail page forward
1526 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); in rb_tail_page_update()
1527 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1529 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1541 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1556 (void)local_cmpxchg(&next_page->write, old_write, val); in rb_tail_page_update()
1557 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1564 local_set(&next_page->page->commit, 0); in rb_tail_page_update()
1567 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
1580 * rb_check_pages - integrity check of buffer pages
1581 * @cpu_buffer: CPU buffer with pages to test
1588 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1592 rb_list_head(rb_list_head(head->next)->prev) != head)) in rb_check_pages()
1596 rb_list_head(rb_list_head(head->prev)->next) != head)) in rb_check_pages()
1599 for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) { in rb_check_pages()
1601 rb_list_head(rb_list_head(tmp->next)->prev) != tmp)) in rb_check_pages()
1605 rb_list_head(rb_list_head(tmp->prev)->next) != tmp)) in rb_check_pages()
1614 bool user_thread = current->mm != NULL; in __rb_allocate_pages()
1627 return -ENOMEM; in __rb_allocate_pages()
1631 * gracefully without invoking oom-killer and the system is not in __rb_allocate_pages()
1651 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1657 list_add(&bpage->list, pages); in __rb_allocate_pages()
1659 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1662 bpage->page = page_address(page); in __rb_allocate_pages()
1663 rb_init_page(bpage->page); in __rb_allocate_pages()
1675 list_del_init(&bpage->list); in __rb_allocate_pages()
1681 return -ENOMEM; in __rb_allocate_pages()
1692 return -ENOMEM; in rb_allocate_pages()
1699 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1702 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1710 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1718 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1722 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1723 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1724 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1725 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1726 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1727 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1728 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1729 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1730 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1731 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1734 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1740 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1741 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); in rb_allocate_cpu_buffer()
1744 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
1745 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
1747 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1748 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1754 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1755 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1756 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1763 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1772 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1775 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
1777 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1783 list_del_init(&bpage->list); in rb_free_cpu_buffer()
1794 * __ring_buffer_alloc - allocate a new ring_buffer
1795 * @size: the size in bytes per cpu that is needed.
1810 int cpu; in __ring_buffer_alloc() local
1819 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1823 buffer->flags = flags; in __ring_buffer_alloc()
1824 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1825 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1827 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1828 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1834 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1837 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1839 if (!buffer->buffers) in __ring_buffer_alloc()
1842 cpu = raw_smp_processor_id(); in __ring_buffer_alloc()
1843 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1844 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1845 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1848 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1852 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1857 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1858 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1859 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1861 kfree(buffer->buffers); in __ring_buffer_alloc()
1864 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1873 * ring_buffer_free - free a ring buffer.
1879 int cpu; in ring_buffer_free() local
1881 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1883 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
1885 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1886 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1888 kfree(buffer->buffers); in ring_buffer_free()
1889 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1898 buffer->clock = clock; in ring_buffer_set_clock()
1903 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1908 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
1915 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
1920 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
1935 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1936 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1946 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1952 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1953 tail_page = rb_list_head(tail_page->next); in rb_remove_pages()
1957 first_page = list_entry(rb_list_head(to_remove->next), in rb_remove_pages()
1961 to_remove = rb_list_head(to_remove)->next; in rb_remove_pages()
1964 /* Read iterators need to reset themselves when some pages removed */ in rb_remove_pages()
1965 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
1967 next_page = rb_list_head(to_remove)->next; in rb_remove_pages()
1974 tail_page->next = (struct list_head *)((unsigned long)next_page | in rb_remove_pages()
1977 next_page->prev = tail_page; in rb_remove_pages()
1980 cpu_buffer->pages = next_page; in rb_remove_pages()
1984 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1988 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1989 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1991 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2013 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2014 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2015 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2023 nr_removed--; in rb_remove_pages()
2035 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2041 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2049 * 2. We cmpxchg the prev_page->next to point from head page to the in rb_insert_pages()
2051 * 3. Finally, we update the head->prev to the end of new list. in rb_insert_pages()
2058 while (retries--) { in rb_insert_pages()
2066 head_page = &hpage->list; in rb_insert_pages()
2067 prev_page = head_page->prev; in rb_insert_pages()
2069 first_page = pages->next; in rb_insert_pages()
2070 last_page = pages->prev; in rb_insert_pages()
2075 last_page->next = head_page_with_bit; in rb_insert_pages()
2076 first_page->prev = prev_page; in rb_insert_pages()
2078 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); in rb_insert_pages()
2086 head_page->prev = last_page; in rb_insert_pages()
2099 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2104 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2106 list_del_init(&bpage->list); in rb_insert_pages()
2117 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2121 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2124 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2132 complete(&cpu_buffer->update_done); in update_pages_handler()
2136 * ring_buffer_resize - resize the ring buffer
2139 * @cpu_id: the cpu buffer to resize
2150 int cpu, err; in ring_buffer_resize() local
2153 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2160 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2170 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2171 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2179 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2180 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2181 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2182 err = -EBUSY; in ring_buffer_resize()
2188 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2189 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2191 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2192 cpu_buffer->nr_pages; in ring_buffer_resize()
2196 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2202 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2203 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2204 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2206 err = -ENOMEM; in ring_buffer_resize()
2219 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2220 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2221 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2224 /* Can't run something on an offline CPU. */ in ring_buffer_resize()
2225 if (!cpu_online(cpu)) { in ring_buffer_resize()
2227 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2231 if (cpu != smp_processor_id()) { in ring_buffer_resize()
2233 schedule_work_on(cpu, in ring_buffer_resize()
2234 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2236 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
2243 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2244 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2245 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2248 if (cpu_online(cpu)) in ring_buffer_resize()
2249 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2250 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2255 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2257 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2265 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2266 err = -EBUSY; in ring_buffer_resize()
2270 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2271 cpu_buffer->nr_pages; in ring_buffer_resize()
2273 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2274 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2275 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2276 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2277 err = -ENOMEM; in ring_buffer_resize()
2283 /* Can't run something on an offline CPU. */ in ring_buffer_resize()
2295 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2296 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2300 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2312 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2313 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2321 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2322 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2325 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2328 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2329 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2333 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2336 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2337 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2339 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2342 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
2344 list_del_init(&bpage->list); in ring_buffer_resize()
2349 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2350 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2357 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2359 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2361 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2362 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2368 return bpage->page->data + index; in __rb_page_index()
2374 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2375 cpu_buffer->reader_page->read); in rb_reader_event()
2382 struct buffer_page *iter_head_page = iter->head_page; in rb_iter_head_event()
2386 if (iter->head != iter->next_event) in rb_iter_head_event()
2387 return iter->event; in rb_iter_head_event()
2398 if (iter->head > commit - 8) in rb_iter_head_event()
2401 event = __rb_page_index(iter_head_page, iter->head); in rb_iter_head_event()
2410 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) in rb_iter_head_event()
2411 /* Writer corrupted the read? */ in rb_iter_head_event()
2414 memcpy(iter->event, event, length); in rb_iter_head_event()
2421 /* Make sure the page didn't change since we read this */ in rb_iter_head_event()
2422 if (iter->page_stamp != iter_head_page->page->time_stamp || in rb_iter_head_event()
2426 iter->next_event = iter->head + length; in rb_iter_head_event()
2427 return iter->event; in rb_iter_head_event()
2430 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_head_event()
2431 iter->head = 0; in rb_iter_head_event()
2432 iter->next_event = 0; in rb_iter_head_event()
2433 iter->missed_events = 1; in rb_iter_head_event()
2446 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2454 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; in rb_event_index()
2459 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
2467 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2468 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2470 rb_inc_page(&iter->head_page); in rb_inc_iter()
2472 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter()
2473 iter->head = 0; in rb_inc_iter()
2474 iter->next_event = 0; in rb_inc_iter()
2478 * rb_handle_head_page - writer hit the head page
2482 * -1 on error
2506 * NORMAL - an interrupt already moved it for us in rb_handle_head_page()
2507 * HEAD - we are the first to get here. in rb_handle_head_page()
2508 * UPDATE - we are the interrupt interrupting in rb_handle_head_page()
2510 * MOVED - a reader on another CPU moved the next in rb_handle_head_page()
2522 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2523 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
2524 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
2549 * The reader is on another CPU and just did in rb_handle_head_page()
2556 return -1; in rb_handle_head_page()
2563 * The reader (on another CPU) will spin till in rb_handle_head_page()
2581 * HEAD - an interrupt came in and already set it. in rb_handle_head_page()
2582 * NORMAL - One of two things: in rb_handle_head_page()
2594 return -1; in rb_handle_head_page()
2610 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2633 return -1; in rb_handle_head_page()
2643 struct buffer_page *tail_page = info->tail_page; in rb_reset_tail()
2645 unsigned long length = info->length; in rb_reset_tail()
2658 tail_page->real_end = 0; in rb_reset_tail()
2660 local_sub(length, &tail_page->write); in rb_reset_tail()
2671 tail_page->real_end = tail; in rb_reset_tail()
2685 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { in rb_reset_tail()
2695 local_sub(length, &tail_page->write); in rb_reset_tail()
2700 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
2701 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
2703 event->time_delta = 1; in rb_reset_tail()
2706 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2708 /* Make sure the padding is visible before the tail_page->write update */ in rb_reset_tail()
2712 length = (tail + length) - BUF_PAGE_SIZE; in rb_reset_tail()
2713 local_sub(length, &tail_page->write); in rb_reset_tail()
2725 struct buffer_page *tail_page = info->tail_page; in rb_move_tail()
2726 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2727 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2741 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2749 * could be on another CPU trying to swap its reader in rb_move_tail()
2759 if (rb_is_head_page(next_page, &tail_page->list)) { in rb_move_tail()
2765 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2770 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2771 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2793 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2794 cpu_buffer->tail_page) && in rb_move_tail()
2795 (cpu_buffer->commit_page == in rb_move_tail()
2796 cpu_buffer->reader_page))) { in rb_move_tail()
2797 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2812 local_inc(&cpu_buffer->committing); in rb_move_tail()
2815 return ERR_PTR(-EAGAIN); in rb_move_tail()
2829 event->type_len = RINGBUF_TYPE_TIME_STAMP; in rb_add_time_stamp()
2831 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
2835 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
2836 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
2839 event->time_delta = 0; in rb_add_time_stamp()
2840 event->array[0] = 0; in rb_add_time_stamp()
2860 (unsigned long long)info->delta, in rb_check_timestamp()
2861 (unsigned long long)info->ts, in rb_check_timestamp()
2862 (unsigned long long)info->before, in rb_check_timestamp()
2863 (unsigned long long)info->after, in rb_check_timestamp()
2864 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2878 bool abs = info->add_timestamp & in rb_add_timestamp()
2881 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
2886 if (abs && (info->ts & TS_MSB)) { in rb_add_timestamp()
2887 info->delta &= ABS_TS_MASK; in rb_add_timestamp()
2890 } else if (info->before == info->after && info->before > info->ts) { in rb_add_timestamp()
2900 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
2901 info->before, info->ts); in rb_add_timestamp()
2906 info->delta = 0; in rb_add_timestamp()
2908 *event = rb_add_time_stamp(*event, info->delta, abs); in rb_add_timestamp()
2909 *length -= RB_LEN_TIME_EXTEND; in rb_add_timestamp()
2914 * rb_update_event - update event type and data
2915 * @cpu_buffer: The per cpu buffer of the @event
2929 unsigned length = info->length; in rb_update_event()
2930 u64 delta = info->delta; in rb_update_event()
2931 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
2934 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2940 if (unlikely(info->add_timestamp)) in rb_update_event()
2943 event->time_delta = delta; in rb_update_event()
2944 length -= RB_EVNT_HDR_SIZE; in rb_update_event()
2946 event->type_len = 0; in rb_update_event()
2947 event->array[0] = length; in rb_update_event()
2949 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
2986 switch (event->type_len) { in rb_time_delta()
2997 return event->time_delta; in rb_time_delta()
3018 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3022 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) in rb_try_to_discard()
3025 /* Make sure the write stamp is read before testing the location */ in rb_try_to_discard()
3028 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
3030 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
3034 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, in rb_try_to_discard()
3035 write_stamp, write_stamp - delta)) in rb_try_to_discard()
3047 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3067 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { in rb_try_to_discard()
3069 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3080 local_inc(&cpu_buffer->committing); in rb_start_commit()
3081 local_inc(&cpu_buffer->commits); in rb_start_commit()
3090 * We only race with interrupts and NMIs on this CPU. in rb_set_commit_to_write()
3098 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3100 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3101 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3104 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3110 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3111 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3112 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3117 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3121 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3122 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3124 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3137 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3146 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3150 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3153 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3156 local_dec(&cpu_buffer->committing); in rb_end_commit()
3166 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3167 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3168 local_inc(&cpu_buffer->committing); in rb_end_commit()
3179 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
3180 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
3182 if (!event->time_delta) in rb_event_discard()
3183 event->time_delta = 1; in rb_event_discard()
3188 local_inc(&cpu_buffer->entries); in rb_commit()
3195 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3196 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3198 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3201 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3202 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3204 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3207 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3210 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3213 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3216 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3218 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3221 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3222 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3224 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3261 * 101 - 1 = 100
3264 * 1010 - 1 = 1001
3299 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3302 bit = RB_CTX_NORMAL - bit; in trace_recursive_lock()
3304 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3311 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3317 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3318 cpu_buffer->current_context = val; in trace_recursive_lock()
3326 cpu_buffer->current_context &= in trace_recursive_unlock()
3327 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
3334 * ring_buffer_nest_start - Allow to trace while nested
3349 int cpu; in ring_buffer_nest_start() local
3353 cpu = raw_smp_processor_id(); in ring_buffer_nest_start()
3354 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3356 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
 3360 * ring_buffer_nest_end - Finish allowing to trace while nested
3369 int cpu; in ring_buffer_nest_end() local
3372 cpu = raw_smp_processor_id(); in ring_buffer_nest_end()
3373 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3375 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3380 * ring_buffer_unlock_commit - commit a reserved
3390 int cpu = raw_smp_processor_id(); in ring_buffer_unlock_commit() local
3392 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3418 ts = bpage->time_stamp; in dump_buffer_page()
3423 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
3425 switch (event->type_len) { in dump_buffer_page()
3440 ts += event->time_delta; in dump_buffer_page()
3441 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); in dump_buffer_page()
3445 ts += event->time_delta; in dump_buffer_page()
3446 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); in dump_buffer_page()
3472 bpage = info->tail_page->page; in check_buffer()
3476 tail = local_read(&bpage->commit); in check_buffer()
3477 } else if (info->add_timestamp & in check_buffer()
3487 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
3496 ts = bpage->time_stamp; in check_buffer()
3500 event = (struct ring_buffer_event *)(bpage->data + e); in check_buffer()
3502 switch (event->type_len) { in check_buffer()
3515 if (event->time_delta == 1) in check_buffer()
3519 ts += event->time_delta; in check_buffer()
3526 if ((full && ts > info->ts) || in check_buffer()
3527 (!full && ts + info->delta != info->ts)) { in check_buffer()
3533 atomic_inc(&cpu_buffer->record_disabled); in check_buffer()
3536 …pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%… in check_buffer()
3537 cpu_buffer->cpu, in check_buffer()
3538 ts + info->delta, info->ts, info->delta, in check_buffer()
3539 info->before, info->after, in check_buffer()
3543 /* Do not re-enable checking */ in check_buffer()
3567 /* Don't let the compiler play games with cpu_buffer->tail_page */ in __rb_reserve_next()
3568 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3570 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; in __rb_reserve_next()
3572 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3573 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3575 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3577 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { in __rb_reserve_next()
3578 info->delta = info->ts; in __rb_reserve_next()
3585 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { in __rb_reserve_next()
3586 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3587 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3589 info->delta = info->ts - info->after; in __rb_reserve_next()
3590 if (unlikely(test_time_stamp(info->delta))) { in __rb_reserve_next()
3591 info->add_timestamp |= RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3592 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3597 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3599 /*C*/ write = local_add_return(info->length, &tail_page->write); in __rb_reserve_next()
3604 tail = write - info->length; in __rb_reserve_next()
3609 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3610 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3611 if (a_ok && b_ok && info->before != info->after) in __rb_reserve_next()
3612 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, in __rb_reserve_next()
3613 info->before, info->after); in __rb_reserve_next()
3624 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3626 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); in __rb_reserve_next()
3628 if (likely(!(info->add_timestamp & in __rb_reserve_next()
3631 info->delta = info->ts - info->after; in __rb_reserve_next()
3634 info->delta = info->ts; in __rb_reserve_next()
3637 if (unlikely(info->ts != save_before)) { in __rb_reserve_next()
3638 /* SLOW PATH - Interrupted between C and E */ in __rb_reserve_next()
3640 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3644 if (save_before > info->after) { in __rb_reserve_next()
3649 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3650 info->after, save_before); in __rb_reserve_next()
3655 /* SLOW PATH - Interrupted between A and C */ in __rb_reserve_next()
3656 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3659 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3661 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && in __rb_reserve_next()
3662 info->after < ts && in __rb_reserve_next()
3663 rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3664 info->after, ts)) { in __rb_reserve_next()
3666 info->delta = ts - info->after; in __rb_reserve_next()
3676 info->delta = 0; in __rb_reserve_next()
3678 info->ts = ts; in __rb_reserve_next()
3679 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; in __rb_reserve_next()
3686 if (unlikely(!tail && !(info->add_timestamp & in __rb_reserve_next()
3688 info->delta = 0; in __rb_reserve_next()
3695 local_inc(&tail_page->entries); in __rb_reserve_next()
3702 tail_page->page->time_stamp = info->ts; in __rb_reserve_next()
3705 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3725 * Due to the ability to swap a cpu buffer from a buffer in rb_reserve_next_event()
3731 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3732 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3733 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3740 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3765 if (unlikely(PTR_ERR(event) == -EAGAIN)) { in rb_reserve_next_event()
3767 info.length -= RB_LEN_TIME_EXTEND; in rb_reserve_next_event()
3779 * ring_buffer_lock_reserve - reserve a part of the buffer
3798 int cpu; in ring_buffer_lock_reserve() local
3803 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3806 cpu = raw_smp_processor_id(); in ring_buffer_lock_reserve()
3808 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3811 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3813 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3847 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3853 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
3854 local_dec(&bpage->entries); in rb_decrement_entry()
3865 if (bpage->page == (void *)addr) { in rb_decrement_entry()
3866 local_dec(&bpage->entries); in rb_decrement_entry()
3877 * ring_buffer_discard_commit - discard an event that has not been committed
3883 * and then that event will not be read later.
3899 int cpu; in ring_buffer_discard_commit() local
3904 cpu = smp_processor_id(); in ring_buffer_discard_commit()
3905 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3912 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3929 * ring_buffer_write - write data to the buffer without reserving
3948 int ret = -EBUSY; in ring_buffer_write()
3949 int cpu; in ring_buffer_write() local
3953 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3956 cpu = raw_smp_processor_id(); in ring_buffer_write()
3958 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3961 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3963 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3998 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
4000 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
4007 if (reader->read != rb_page_commit(reader)) in rb_per_cpu_empty()
4012 * committed content has been read, the ring buffer is empty. in rb_per_cpu_empty()
4019 * and head page, there should always be content to read. in rb_per_cpu_empty()
4027 * swap reader page with head page when it is to read data. in rb_per_cpu_empty()
4033 * ring_buffer_record_disable - stop all writes into the buffer
4043 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
4048 * ring_buffer_record_enable - enable writes to the buffer
4056 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
4061 * ring_buffer_record_off - stop all writes into the buffer
4076 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4079 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
4084 * ring_buffer_record_on - restart writes into the buffer
4099 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4102 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
4107 * ring_buffer_record_is_on - return true if the ring buffer can write
4114 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4118 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4130 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
4134 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4136 * @cpu: The CPU buffer to stop
4143 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
4147 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4151 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4156 * ring_buffer_record_enable_cpu - enable writes to the buffer
4158 * @cpu: The CPU to enable.
4163 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
4167 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4170 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4171 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4178 * the entries read from the ring buffer and the number of
4184 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4185 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4189 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4191 * @cpu: The per CPU buffer to read from.
4193 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
4200 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4203 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4204 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4209 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4210 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4214 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
4215 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4222 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4224 * @cpu: The per CPU buffer to read from.
4226 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
4231 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4234 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4235 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4242 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4244 * @cpu: The per CPU buffer to get the entries from.
4246 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
4250 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4253 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4260 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4263 * @cpu: The per CPU buffer to get the number of overruns from
4265 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4270 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4273 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4274 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4281 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4285 * @cpu: The per CPU buffer to get the number of overruns from
4288 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4293 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4296 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4297 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4304 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4307 * @cpu: The per CPU buffer to get the number of overruns from
4310 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4315 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4318 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4319 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4326 * ring_buffer_read_events_cpu - get the number of events successfully read
4328 * @cpu: The per CPU buffer to get the number of events read
4331 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4335 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4338 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4339 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4344 * ring_buffer_entries - get the number of entries in a buffer
4348 * (all CPU entries)
4354 int cpu; in ring_buffer_entries() local
4357 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4358 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4367 * ring_buffer_overruns - get the number of overruns in buffer
4371 * (all CPU entries)
4377 int cpu; in ring_buffer_overruns() local
4380 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4381 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4382 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4391 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
4394 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4395 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4396 iter->next_event = iter->head; in rb_iter_reset()
4398 iter->cache_reader_page = iter->head_page; in rb_iter_reset()
4399 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4400 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
4402 if (iter->head) { in rb_iter_reset()
4403 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4404 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4406 iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_reset()
4407 iter->page_stamp = iter->read_stamp; in rb_iter_reset()
4412 * ring_buffer_iter_reset - reset an iterator
4426 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4428 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4430 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4435 * ring_buffer_iter_empty - check if an iterator has no more to read
4449 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4450 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4451 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4452 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4453 commit_ts = commit_page->page->time_stamp; in ring_buffer_iter_empty()
4466 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4467 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); in ring_buffer_iter_empty()
4475 return ((iter->head_page == commit_page && iter->head >= commit) || in ring_buffer_iter_empty()
4476 (iter->head_page == reader && commit_page == head_page && in ring_buffer_iter_empty()
4477 head_page->read == commit && in ring_buffer_iter_empty()
4478 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4488 switch (event->type_len) { in rb_update_read_stamp()
4494 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4499 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
4500 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4504 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4518 switch (event->type_len) { in rb_update_iter_read_stamp()
4524 iter->read_stamp += delta; in rb_update_iter_read_stamp()
4529 delta = rb_fix_abs_ts(delta, iter->read_stamp); in rb_update_iter_read_stamp()
4530 iter->read_stamp = delta; in rb_update_iter_read_stamp()
4534 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
4538 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4552 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4566 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4568 /* If there's more to read, return this page */ in rb_get_reader_page()
4569 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4574 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4579 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4589 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4590 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4591 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4592 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4601 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4602 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4605 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
4609 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4612 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
4615 * We want to make sure we read the overruns after we set up our in rb_get_reader_page()
4624 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4637 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4650 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4651 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
4653 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4656 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4657 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4659 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4660 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4661 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4668 if (reader && reader->read == 0) in rb_get_reader_page()
4669 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4671 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4699 * if the page has not been fully filled, so the read barrier in rb_get_reader_page()
4723 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
4724 cpu_buffer->read++; in rb_advance_reader()
4729 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4730 cpu_buffer->read_bytes += length; in rb_advance_reader()
4737 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4740 if (iter->head == iter->next_event) { in rb_advance_iter()
4746 iter->head = iter->next_event; in rb_advance_iter()
4751 if (iter->next_event >= rb_page_size(iter->head_page)) { in rb_advance_iter()
4753 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4759 rb_update_iter_read_stamp(iter, iter->event); in rb_advance_iter()
4764 return cpu_buffer->lost_events; in rb_lost_events()
4793 switch (event->type_len) { in rb_buffer_peek()
4815 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); in rb_buffer_peek()
4816 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4817 cpu_buffer->cpu, ts); in rb_buffer_peek()
4825 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4826 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4827 cpu_buffer->cpu, ts); in rb_buffer_peek()
4852 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4853 buffer = cpu_buffer->buffer; in rb_iter_peek()
4856 * Check if someone performed a consuming read to the buffer in rb_iter_peek()
4860 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4861 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
4862 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
4871 * to read, just give up if we fail to get an event after in rb_iter_peek()
4882 if (iter->head >= rb_page_size(iter->head_page)) { in rb_iter_peek()
4891 switch (event->type_len) { in rb_iter_peek()
4908 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); in rb_iter_peek()
4909 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4910 cpu_buffer->cpu, ts); in rb_iter_peek()
4918 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
4920 cpu_buffer->cpu, ts); in rb_iter_peek()
4935 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4944 * to do the read, but this can corrupt the ring buffer, in rb_reader_lock()
4948 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4952 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4960 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4964 * ring_buffer_peek - peek at the next event to be read
4965 * @buffer: The ring buffer to read
4966 * @cpu: The cpu to peak at
4970 * This will return the event that will be read next, but does
4974 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
4977 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4982 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4989 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4994 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
5000 /** ring_buffer_iter_dropped - report if there are dropped events
5007 bool ret = iter->missed_events != 0; in ring_buffer_iter_dropped()
5009 iter->missed_events = 0; in ring_buffer_iter_dropped()
5015 * ring_buffer_iter_peek - peek at the next event to be read
5019 * This will return the event that will be read next, but does
5025 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5030 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5032 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5034 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
5041 * ring_buffer_consume - return an event and consume it
5043 * @cpu: the cpu to read the buffer from
5052 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
5064 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5067 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5073 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5083 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
5091 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5092 * @buffer: The ring buffer to read from
5093 * @cpu: The cpu buffer to iterate over
5101 * corrupted. This is not a consuming read, so a producer is not
5112 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
5117 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5124 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); in ring_buffer_read_prepare()
5125 if (!iter->event) { in ring_buffer_read_prepare()
5130 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5132 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5134 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5141 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5155 * ring_buffer_read_start - start a non consuming read of the buffer
5174 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5176 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5177 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5179 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5180 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5185 * ring_buffer_read_finish - finish reading the iterator of the buffer
5188 * This re-enables the recording to the buffer, and frees the
5194 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5200 * Must prevent readers from trying to read, as the check in ring_buffer_read_finish()
5203 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5205 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5207 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5208 kfree(iter->event); in ring_buffer_read_finish()
5214 * ring_buffer_iter_advance - advance the iterator to the next location
5217 * Move the location of the iterator such that the next read will
5222 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
5225 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5229 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5234 * ring_buffer_size - return the size of the ring buffer (in bytes)
5236 * @cpu: The CPU to get ring buffer size from.
5238 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
5242 * BUF_PAGE_SIZE * buffer->nr_pages in ring_buffer_size()
5244 * return the per cpu buffer value. in ring_buffer_size()
5246 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5249 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5255 local_set(&page->write, 0); in rb_clear_buffer_page()
5256 local_set(&page->entries, 0); in rb_clear_buffer_page()
5257 rb_init_page(page->page); in rb_clear_buffer_page()
5258 page->read = 0; in rb_clear_buffer_page()
5268 cpu_buffer->head_page in rb_reset_cpu()
5269 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5270 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5271 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5275 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5276 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5278 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5279 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5280 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5282 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5283 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5284 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5285 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5286 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5287 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5288 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5289 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5290 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5291 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5292 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5293 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5294 cpu_buffer->read = 0; in rb_reset_cpu()
5295 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5297 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5298 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5300 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5302 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5303 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5306 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
5309 /* Must have disabled the cpu buffer then done a synchronize_rcu */
5314 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5316 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5319 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5323 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5326 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5330 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5331 * @buffer: The ring buffer to reset a per cpu buffer of
5332 * @cpu: The CPU buffer to be reset
5334 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5336 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5338 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5342 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5344 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5345 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5352 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5353 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5355 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5363 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5364 * @buffer: The ring buffer to reset a per cpu buffer of
5369 int cpu; in ring_buffer_reset_online_cpus() local
5372 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5374 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5375 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5377 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5378 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5384 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5385 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5388 * If a CPU came online during the synchronize_rcu(), then in ring_buffer_reset_online_cpus()
5391 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
5396 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5397 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5400 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5404 * ring_buffer_reset - reset a ring buffer
5405 * @buffer: The ring buffer to reset all cpu buffers
5410 int cpu; in ring_buffer_reset() local
5413 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5415 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5416 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5418 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5419 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5425 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5426 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5430 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5431 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5434 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
5439 * ring_buffer_empty - is the ring buffer empty?
5448 int cpu; in ring_buffer_empty() local
5451 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5452 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5468 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5470 * @cpu: The CPU buffer to test
5472 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5479 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5482 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5495 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5498 * @cpu: the CPU of the buffers to swap
5501 * of a CPU buffer and has another back up buffer lying around.
5502 * it is expected that the tracer handles the cpu buffer not being
5506 struct trace_buffer *buffer_b, int cpu) in ring_buffer_swap_cpu() argument
5510 int ret = -EINVAL; in ring_buffer_swap_cpu()
5512 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
5513 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
5516 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
5517 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
5520 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
5523 ret = -EAGAIN; in ring_buffer_swap_cpu()
5525 if (atomic_read(&buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5528 if (atomic_read(&buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5531 if (atomic_read(&cpu_buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5534 if (atomic_read(&cpu_buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5540 * Normally this will be called from the same CPU as cpu. in ring_buffer_swap_cpu()
5543 atomic_inc(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5544 atomic_inc(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
5546 ret = -EBUSY; in ring_buffer_swap_cpu()
5547 if (local_read(&cpu_buffer_a->committing)) in ring_buffer_swap_cpu()
5549 if (local_read(&cpu_buffer_b->committing)) in ring_buffer_swap_cpu()
5554 * it will mess the state of the cpu buffer. in ring_buffer_swap_cpu()
5556 if (atomic_read(&buffer_a->resizing)) in ring_buffer_swap_cpu()
5558 if (atomic_read(&buffer_b->resizing)) in ring_buffer_swap_cpu()
5561 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
5562 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
5564 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5565 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5570 atomic_dec(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5571 atomic_dec(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
5579 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5581 * @cpu: the cpu buffer to allocate.
5589 * the page that was allocated, with the read page of the buffer.
5594 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5601 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5602 return ERR_PTR(-ENODEV); in ring_buffer_alloc_read_page()
5604 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5606 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5608 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5609 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5610 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5613 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5619 page = alloc_pages_node(cpu_to_node(cpu), in ring_buffer_alloc_read_page()
5622 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
5634 * ring_buffer_free_read_page - free an allocated read page
5636 * @cpu: the cpu buffer the page came from
5641 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
5648 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5651 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5658 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5660 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5661 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5665 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5674 * ring_buffer_read_page - extract a page from the ring buffer
5678 * @cpu: the cpu of the buffer to extract
5687 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
5690 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5707 void **data_page, size_t len, int cpu, int full) in ring_buffer_read_page() argument
5709 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5716 unsigned int read; in ring_buffer_read_page() local
5718 int ret = -1; in ring_buffer_read_page()
5720 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5730 len -= BUF_PAGE_HDR_SIZE; in ring_buffer_read_page()
5739 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5747 read = reader->read; in ring_buffer_read_page()
5751 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5754 * If this page has been partially read or in ring_buffer_read_page()
5755 * if len is not big enough to read the rest of the page or in ring_buffer_read_page()
5760 if (read || (len < (commit - read)) || in ring_buffer_read_page()
5761 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5762 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5763 unsigned int rpos = read; in ring_buffer_read_page()
5769 * if there's been a previous partial read and the in ring_buffer_read_page()
5770 * rest of the page can be read and the commit page is off in ring_buffer_read_page()
5774 (!read || (len < (commit - read)) || in ring_buffer_read_page()
5775 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
5778 if (len > (commit - read)) in ring_buffer_read_page()
5779 len = (commit - read); in ring_buffer_read_page()
5788 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5799 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
5801 len -= size; in ring_buffer_read_page()
5804 rpos = reader->read; in ring_buffer_read_page()
5816 local_set(&bpage->commit, pos); in ring_buffer_read_page()
5817 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
5820 read = 0; in ring_buffer_read_page()
5823 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5824 cpu_buffer->read_bytes += rb_page_commit(reader); in ring_buffer_read_page()
5828 bpage = reader->page; in ring_buffer_read_page()
5829 reader->page = *data_page; in ring_buffer_read_page()
5830 local_set(&reader->write, 0); in ring_buffer_read_page()
5831 local_set(&reader->entries, 0); in ring_buffer_read_page()
5832 reader->read = 0; in ring_buffer_read_page()
5840 if (reader->real_end) in ring_buffer_read_page()
5841 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
5843 ret = read; in ring_buffer_read_page()
5845 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5847 commit = local_read(&bpage->commit); in ring_buffer_read_page()
5855 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
5856 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
5858 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
5861 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
5868 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); in ring_buffer_read_page()
5871 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5879 * We only allocate new buffers, never free them if the CPU goes down.
5883 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) in trace_rb_cpu_prepare() argument
5891 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
5896 /* check if all cpu sizes are same */ in trace_rb_cpu_prepare()
5898 /* fill in the size from first enabled cpu */ in trace_rb_cpu_prepare()
5900 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
5901 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
5909 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
5910 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
5911 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
5912 WARN(1, "failed to allocate ring buffer on CPU %u\n", in trace_rb_cpu_prepare()
5913 cpu); in trace_rb_cpu_prepare()
5914 return -ENOMEM; in trace_rb_cpu_prepare()
5917 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
5925 * It will kick off a thread per CPU that will go into a loop
5926 * writing to the per cpu ring buffer various sizes of data.
5953 int cpu; member
5959 /* 1 meg per cpu */
5985 cnt = data->cnt + (nested ? 27 : 0); in rb_write_something()
5988 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); in rb_write_something()
5993 /* read rb_test_started before checking buffer enabled */ in rb_write_something()
5996 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
6001 data->bytes_dropped += len; in rb_write_something()
6003 data->bytes_dropped_nested += len; in rb_write_something()
6010 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
6014 item->size = size; in rb_write_something()
6015 memcpy(item->str, rb_string, size); in rb_write_something()
6018 data->bytes_alloc_nested += event_len; in rb_write_something()
6019 data->bytes_written_nested += len; in rb_write_something()
6020 data->events_nested++; in rb_write_something()
6021 if (!data->min_size_nested || len < data->min_size_nested) in rb_write_something()
6022 data->min_size_nested = len; in rb_write_something()
6023 if (len > data->max_size_nested) in rb_write_something()
6024 data->max_size_nested = len; in rb_write_something()
6026 data->bytes_alloc += event_len; in rb_write_something()
6027 data->bytes_written += len; in rb_write_something()
6028 data->events++; in rb_write_something()
6029 if (!data->min_size || len < data->min_size) in rb_write_something()
6030 data->max_size = len; in rb_write_something()
6031 if (len > data->max_size) in rb_write_something()
6032 data->max_size = len; in rb_write_something()
6036 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
6047 data->cnt++; in rb_test()
6050 /* Now sleep between a min of 100-300us and a max of 1ms */ in rb_test()
6051 usleep_range(((data->cnt % 3) + 1) * 100, 1000); in rb_test()
6060 int cpu = smp_processor_id(); in rb_ipi() local
6062 data = &rb_data[cpu]; in rb_ipi()
6083 int cpu; in test_ringbuffer() local
6100 for_each_online_cpu(cpu) { in test_ringbuffer()
6101 rb_data[cpu].buffer = buffer; in test_ringbuffer()
6102 rb_data[cpu].cpu = cpu; in test_ringbuffer()
6103 rb_data[cpu].cnt = cpu; in test_ringbuffer()
6104 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu], in test_ringbuffer()
6105 cpu, "rbtester/%u"); in test_ringbuffer()
6106 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { in test_ringbuffer()
6108 ret = PTR_ERR(rb_threads[cpu]); in test_ringbuffer()
6141 for_each_online_cpu(cpu) { in test_ringbuffer()
6142 if (!rb_threads[cpu]) in test_ringbuffer()
6144 kthread_stop(rb_threads[cpu]); in test_ringbuffer()
6153 for_each_online_cpu(cpu) { in test_ringbuffer()
6155 struct rb_test_data *data = &rb_data[cpu]; in test_ringbuffer()
6169 ret = -1; in test_ringbuffer()
6171 total_events = data->events + data->events_nested; in test_ringbuffer()
6172 total_written = data->bytes_written + data->bytes_written_nested; in test_ringbuffer()
6173 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; in test_ringbuffer()
6174 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; in test_ringbuffer()
6176 big_event_size = data->max_size + data->max_size_nested; in test_ringbuffer()
6177 small_event_size = data->min_size + data->min_size_nested; in test_ringbuffer()
6179 pr_info("CPU %d:\n", cpu); in test_ringbuffer()
6192 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
6196 total_size += item->size + sizeof(struct rb_item); in test_ringbuffer()
6197 if (memcmp(&item->str[0], rb_string, item->size) != 0) { in test_ringbuffer()
6199 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
6200 pr_info("expected: %.*s\n", item->size, rb_string); in test_ringbuffer()
6202 ret = -1; in test_ringbuffer()
6210 ret = -1; in test_ringbuffer()
6212 pr_info(" read events: %ld\n", total_read); in test_ringbuffer()