Lines Matching full:buffer
3 * Generic ring buffer
33 * The "absolute" timestamp in the buffer is only 59 bits.
43 * The ring buffer header is special. We must manually keep it up to date.
65 * The ring buffer is made up of a list of pages. A separate list of pages is
66 * allocated for each CPU. A writer may only write to a buffer that is
68 * from any per cpu buffer.
70 * The reader is special. For each per cpu buffer, the reader has its own
72 * page is swapped with another page in the ring buffer.
76 * again (as long as it is out of the ring buffer).
81 * |reader| RING BUFFER
92 * |reader| RING BUFFER
103 * |reader| RING BUFFER
114 * |buffer| RING BUFFER
126 * and swap that into the ring buffer.
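The swap the diagrams describe can be sketched, very loosely, as list surgery. Below is a hypothetical, simplified model only; the in-tree version uses a doubly linked list with flag bits stored in the pointers and cmpxchg so the swap stays lock-free:

    /* Hypothetical, simplified model of the reader-page swap. */
    struct rb_page { struct rb_page *next; /* + data */ };

    /*
     * Splice the spare reader page in where head was and hand the old
     * head page to the reader. @head_prev is the page before head.
     */
    static struct rb_page *swap_reader_page(struct rb_page *reader,
                                            struct rb_page *head_prev)
    {
            struct rb_page *head = head_prev->next;

            reader->next = head->next;      /* reader page joins the ring */
            head_prev->next = reader;
            head->next = NULL;              /* old head leaves the ring */
            return head;                    /* now owned by the reader */
    }

Once the old head page is out of the ring, the writer will never write to it again, which is what lets the reader parse it without locks.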
264 /* inline for ring buffer fast paths */
288 #define for_each_buffer_cpu(buffer, cpu) \ argument
289 for_each_cpu(cpu, buffer->cpumask)
291 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
292 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
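These iterators walk @cpu over every CPU that has a per-CPU buffer (the second form also intersects with the online mask). A minimal usage sketch, assuming a valid struct trace_buffer *buffer:

    int cpu;

    for_each_buffer_cpu(buffer, cpu) {
            struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];

            /* per-CPU work, e.g. draining or resetting cpu_buffer */
    }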
317 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
321 * Note, the buffer_page list must be first. The buffer pages
322 * are allocated in cache lines, which means that each buffer
325 * add flags in the list struct pointers, to make the ring buffer
329 struct list_head list; /* list of buffer pages */
338 * The buffer page counters, write and entries, must be reset
438 * ABSOLUTE - the buffer requests all events to have absolute time stamps
492 * head_page == tail_page && head == tail then buffer is empty.
498 struct trace_buffer *buffer; member
533 /* ring buffer pages to update, > 0 to add, < 0 to remove */
580 * buffer doesn't need all the features of a true 64 bit atomic,
583 * For the ring buffer, the 64 bit operations required for the time are
764 * is on the buffer that it passed in.
821 static inline u64 rb_time_stamp(struct trace_buffer *buffer);
825 * @buffer: The buffer that the event is on
829 * committed to the ring buffer. It must be called from the same
836 * the max nesting, then the write_stamp of the buffer is returned,
840 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, in ring_buffer_event_time_stamp() argument
843 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
869 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
875 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
876 * @buffer: The ring_buffer to get the number of pages from
879 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
881 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
883 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
887 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
888 * @buffer: The ring_buffer to get the number of pages from
891 * Returns the number of pages that have content in the ring buffer.
893 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
899 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
900 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
901 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
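From the three counters read above, the dirty-page count is, in essence, the touched pages minus the lost and already-read ones. A sketch of that arithmetic (the in-tree function wraps the same comparisons in WARN_ON_ONCE() checks):

    static size_t nr_dirty_sketch(size_t touched, size_t read, size_t lost)
    {
            if (touched < lost)
                    return 0;
            touched -= lost;
            if (touched < read)     /* the reader may hold one empty page */
                    return 0;
            return touched - read;
    }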
917 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
919 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
927 dirty = ring_buffer_nr_dirty_pages(buffer, cpu); in full_hit()
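Here @full is a percentage of the per-CPU buffer's pages, and full_hit() reports whether the dirty pages just computed meet it. A hedged sketch of the comparison, written to avoid division:

    static bool full_hit_sketch(size_t dirty, size_t nr_pages, int full)
    {
            /* no pages or no threshold: treat as already full */
            if (!nr_pages || !full)
                    return true;
            return dirty * 100 > (size_t)full * nr_pages;
    }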
933 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
936 * ring buffer waiters queue.
951 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
952 * @buffer: The ring buffer to wake waiters on
953 * @cpu: The CPU buffer to wake waiters on
955 * In the case where a file that represents a ring buffer is closing,
958 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
963 if (!buffer) in ring_buffer_wake_waiters()
969 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
970 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
972 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
974 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
979 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
980 /* The CPU buffer may not have been initialized yet */ in ring_buffer_wake_waiters()
994 * ring_buffer_wait - wait for input to the ring buffer
995 * @buffer: buffer to wait on
996 * @cpu: the cpu buffer to wait on
1000 * as data is added to any of the @buffer's cpu buffers. Otherwise
1001 * it will wait for data to be added to a specific cpu buffer.
1003 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
1013 * data in any cpu buffer, or a specific buffer, put the in ring_buffer_wait()
1017 work = &buffer->irq_work; in ring_buffer_wait()
1021 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
1023 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
1042 * We don't clear it even if the buffer is no longer in ring_buffer_wait()
1065 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
1069 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
1079 done = !pagebusy && full_hit(buffer, cpu, full); in ring_buffer_wait()
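A caller-side sketch, assuming the three-argument signature above and that the function returns 0 on success or a negative errno (e.g. when interrupted by a signal):

    int err;

    /* block until any per-CPU buffer has data; 0 == no fill threshold */
    err = ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, 0);
    if (err)
            return err;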
1106 * ring_buffer_poll_wait - poll on buffer input
1107 * @buffer: buffer to wait on
1108 * @cpu: the cpu buffer to wait on
1114 * as data is added to any of the @buffer's cpu buffers. Otherwise
1115 * it will wait for data to be added to a specific cpu buffer.
1120 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1127 work = &buffer->irq_work; in ring_buffer_poll_wait()
1130 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1133 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1150 * checking if the ring buffer is empty. Once the waiters_pending bit in ring_buffer_poll_wait()
1157 * the buffer goes from empty to having content. But as this race is in ring_buffer_poll_wait()
1164 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; in ring_buffer_poll_wait()
1166 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1167 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
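ring_buffer_poll_wait() is typically wired into a file_operations ->poll handler. A sketch, assuming the (buffer, cpu, filp, poll_table, full) parameter order implied above and a hypothetical private_data layout:

    static __poll_t my_trace_poll(struct file *filp, poll_table *pt)
    {
            struct trace_buffer *buffer = filp->private_data; /* hypothetical */

            /* full == 0: readable as soon as any data is present */
            return ring_buffer_poll_wait(buffer, RING_BUFFER_ALL_CPUS,
                                         filp, pt, 0);
    }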
1172 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
1180 atomic_inc(&__b->buffer->record_disabled); \
1191 static inline u64 rb_time_stamp(struct trace_buffer *buffer) in rb_time_stamp() argument
1196 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1199 ts = buffer->clock(); in rb_time_stamp()
1205 u64 ring_buffer_time_stamp(struct trace_buffer *buffer) in ring_buffer_time_stamp() argument
1210 time = rb_time_stamp(buffer); in ring_buffer_time_stamp()
1217 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, in ring_buffer_normalize_time_stamp() argument
1226 * Making the ring buffer lockless makes things tricky.
1231 * The reader page is always off the ring buffer, but when the
1233 * a new one from the buffer. The reader needs to take from
1283 * the reader page with a page in the buffer, but before it
1580 * rb_check_pages - integrity check of buffer pages
1581 * @cpu_buffer: CPU buffer with pages to test
1695 * The ring buffer page list is a circular list that does not in rb_allocate_pages()
1710 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1723 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1725 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1796 * @flags: attributes to set for the ring buffer.
1797 * @key: ring buffer reader_lock_key.
1800 * flag. This flag means that the buffer will overwrite old data
1801 * when the buffer wraps. If this flag is not set, the buffer will
1807 struct trace_buffer *buffer; in __ring_buffer_alloc() local
1814 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in __ring_buffer_alloc()
1816 if (!buffer) in __ring_buffer_alloc()
1819 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1823 buffer->flags = flags; in __ring_buffer_alloc()
1824 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1825 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1827 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1828 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1834 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1837 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1839 if (!buffer->buffers) in __ring_buffer_alloc()
1843 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1844 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1845 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1848 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1852 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1854 return buffer; in __ring_buffer_alloc()
1857 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1858 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1859 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1861 kfree(buffer->buffers); in __ring_buffer_alloc()
1864 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1867 kfree(buffer); in __ring_buffer_alloc()
1873 * ring_buffer_free - free a ring buffer.
1874 * @buffer: the buffer to free.
1877 ring_buffer_free(struct trace_buffer *buffer) in ring_buffer_free() argument
1881 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1883 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
1885 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1886 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1888 kfree(buffer->buffers); in ring_buffer_free()
1889 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1891 kfree(buffer); in ring_buffer_free()
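Taken together with the allocator above, the lifecycle is allocate, use, free. A sketch using the ring_buffer_alloc() wrapper, as the self-test at the end of this file does; the size here is a hypothetical value in bytes (rounded up to pages, per CPU):

    struct trace_buffer *buffer;

    buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
    if (!buffer)
            return -ENOMEM;

    /* ... reserve/commit or write events, read them back ... */

    ring_buffer_free(buffer);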
1895 void ring_buffer_set_clock(struct trace_buffer *buffer, in ring_buffer_set_clock() argument
1898 buffer->clock = clock; in ring_buffer_set_clock()
1901 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) in ring_buffer_set_time_stamp_abs() argument
1903 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1906 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) in ring_buffer_time_stamp_abs() argument
1908 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
1950 * from the ring buffer in rb_remove_pages()
1979 /* make sure pages points to a valid page in the ring buffer */ in rb_remove_pages()
1993 /* last buffer page to remove */ in rb_remove_pages()
2010 * bytes consumed in ring buffer from here. in rb_remove_pages()
2044 * in the ring buffer. Now we are racing with the writer trying to in rb_insert_pages()
2136 * ring_buffer_resize - resize the ring buffer
2137 * @buffer: the buffer to resize.
2139 * @cpu_id: the cpu buffer to resize
2145 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, in ring_buffer_resize() argument
2153 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2155 if (!buffer) in ring_buffer_resize()
2158 /* Make sure the requested buffer exists */ in ring_buffer_resize()
2160 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2169 /* prevent another thread from changing buffer sizes */ in ring_buffer_resize()
2170 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2171 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2176 * manipulating the ring buffer and is expecting a sane state while in ring_buffer_resize()
2179 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2180 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2188 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2189 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2217 * since we can change their buffer sizes without any race. in ring_buffer_resize()
2219 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2220 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2243 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2244 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2255 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2262 * manipulating the ring buffer and is expecting a sane state while in ring_buffer_resize()
2306 * The ring buffer resize can happen with the ring buffer in ring_buffer_resize()
2308 * as possible. But if the buffer is disabled, we do not need in ring_buffer_resize()
2310 * that the buffer is not corrupt. in ring_buffer_resize()
2312 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2313 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2315 * Even though the buffer was disabled, we must make sure in ring_buffer_resize()
2321 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2322 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2325 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2328 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2329 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2333 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2336 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2349 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2350 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2355 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) in ring_buffer_change_overwrite() argument
2357 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2359 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2361 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2362 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2711 /* Set write to end of buffer */ in rb_reset_tail()
2727 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail() local
2737 * it all the way around the buffer, bail, and warn in rb_move_tail()
2750 * page with the buffer head. in rb_move_tail()
2756 * the buffer, unless the commit page is still on the in rb_move_tail()
2770 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2786 * page. We could have a small buffer, and in rb_move_tail()
2787 * have filled up the buffer with events in rb_move_tail()
2884 * is added to the buffer, it will lose those bits. in rb_add_timestamp()
2900 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
2915 * @cpu_buffer: The per cpu buffer of the @event
2920 * is the actual size that is written to the ring buffer,
3193 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3195 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3196 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3198 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3218 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3283 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3335 * @buffer: The ring buffer to modify
3337 * The ring buffer has a safety mechanism to prevent recursion.
3346 void ring_buffer_nest_start(struct trace_buffer *buffer) in ring_buffer_nest_start() argument
3354 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3361 * @buffer: The ring buffer to modify
3366 void ring_buffer_nest_end(struct trace_buffer *buffer) in ring_buffer_nest_end() argument
3373 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3381 * @buffer: The buffer to commit to
3383 * This commits the data to the ring buffer, and releases any locks held.
3387 int ring_buffer_unlock_commit(struct trace_buffer *buffer) in ring_buffer_unlock_commit() argument
3392 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3396 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3460 * the buffer page.
3575 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3606 /* See if we shot past the end of this buffer page */ in __rb_reserve_next()
3659 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3690 /* We reserved something on the buffer */ in __rb_reserve_next()
3711 rb_reserve_next_event(struct trace_buffer *buffer, in rb_reserve_next_event() argument
3725 * Due to the ability to swap a cpu buffer from a buffer in rb_reserve_next_event()
3731 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3740 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3779 * ring_buffer_lock_reserve - reserve a part of the buffer
3780 * @buffer: the ring buffer to reserve from
3783 * Returns a reserved event on the ring buffer to copy directly to.
3794 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) in ring_buffer_lock_reserve() argument
3803 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3808 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3811 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3822 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
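The canonical discrete-commit sequence pairs this with ring_buffer_event_data() and ring_buffer_unlock_commit(). A sketch with a hypothetical struct my_entry payload:

    struct ring_buffer_event *event;
    struct my_entry *payload;

    event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
    if (!event)
            return -EBUSY;          /* disabled, or no room */

    payload = ring_buffer_event_data(event);
    *payload = entry;               /* fill in the reserved slot */

    ring_buffer_unlock_commit(buffer);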
3872 /* commit not part of this buffer?? */ in rb_decrement_entry()
3878 * @buffer: the ring buffer
3881 * Sometimes an event that is in the ring buffer needs to be ignored.
3882 * This function lets the user discard an event in the ring buffer
3886 * committed. It will try to free the event from the ring buffer
3895 void ring_buffer_discard_commit(struct trace_buffer *buffer, in ring_buffer_discard_commit() argument
3905 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3912 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3929 * ring_buffer_write - write data to the buffer without reserving
3930 * @buffer: The ring buffer to write to.
3932 * @data: The data to write to the buffer.
3935 * one function. If you already have the data to write to the buffer, it
3941 int ring_buffer_write(struct trace_buffer *buffer, in ring_buffer_write() argument
3953 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3958 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3961 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3972 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3982 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
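When the data already exists, the one-shot variant above replaces the reserve/commit pair. A sketch, again with a hypothetical payload type; a nonzero return is assumed to mean the write was dropped (recording disabled or no space):

    struct my_entry entry = { /* hypothetical payload */ };

    if (ring_buffer_write(buffer, sizeof(entry), &entry))
            return -EBUSY;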
4012 * committed content has been read, the ring buffer is empty. in rb_per_cpu_empty()
4033 * ring_buffer_record_disable - stop all writes into the buffer
4034 * @buffer: The ring buffer to stop writes to.
4036 * This prevents all writes to the buffer. Any attempt to write
4037 * to the buffer after this will fail and return NULL.
4041 void ring_buffer_record_disable(struct trace_buffer *buffer) in ring_buffer_record_disable() argument
4043 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
4048 * ring_buffer_record_enable - enable writes to the buffer
4049 * @buffer: The ring buffer to enable writes
4054 void ring_buffer_record_enable(struct trace_buffer *buffer) in ring_buffer_record_enable() argument
4056 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
4061 * ring_buffer_record_off - stop all writes into the buffer
4062 * @buffer: The ring buffer to stop writes to.
4064 * This prevents all writes to the buffer. Any attempt to write
4065 * to the buffer after this will fail and return NULL.
4071 void ring_buffer_record_off(struct trace_buffer *buffer) in ring_buffer_record_off() argument
4076 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4079 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
4084 * ring_buffer_record_on - restart writes into the buffer
4085 * @buffer: The ring buffer to start writes to.
4087 * This enables all writes to the buffer that was disabled by
4094 void ring_buffer_record_on(struct trace_buffer *buffer) in ring_buffer_record_on() argument
4099 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4102 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
4107 * ring_buffer_record_is_on - return true if the ring buffer can write
4108 * @buffer: The ring buffer to see if write is enabled
4110 * Returns true if the ring buffer is in a state that it accepts writes.
4112 bool ring_buffer_record_is_on(struct trace_buffer *buffer) in ring_buffer_record_is_on() argument
4114 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4118 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4119 * @buffer: The ring buffer to see if write is set enabled
4121 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4124 * It may return true when the ring buffer has been disabled by
4126 * the ring buffer.
4128 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) in ring_buffer_record_is_set_on() argument
4130 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
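The two pairs above are intentionally different mechanisms: disable/enable is a nesting counter, while off/on latches the RB_BUFFER_OFF bit via the cmpxchg loops shown. A usage contrast:

    /* counter-based: nests, every disable must be balanced by an enable */
    ring_buffer_record_disable(buffer);
    /* ... critical section ... */
    ring_buffer_record_enable(buffer);

    /* bit-based: a latch, safe to call without pairing or nesting */
    ring_buffer_record_off(buffer);
    /* ... */
    ring_buffer_record_on(buffer);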
4135 * @buffer: The ring buffer to stop writes to.
4136 * @cpu: The CPU buffer to stop
4138 * This prevents all writes to the buffer. Any attempt to write
4139 * to the buffer after this will fail and return NULL.
4143 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
4147 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4156 * ring_buffer_record_enable_cpu - enable writes to the buffer
4157 * @buffer: The ring buffer to enable writes
4163 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
4167 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4170 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4176 * The total entries in the ring buffer is the running counter
4177 * of entries entered into the ring buffer, minus the sum of
4178 * the entries read from the ring buffer and the number of
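The (truncated) comment above describes a simple running-counter identity. As a sketch:

    /* entries still in the buffer, per the counters described above */
    static unsigned long entries_sketch(unsigned long written,
                                        unsigned long read,
                                        unsigned long overwritten)
    {
            return written - (read + overwritten);
    }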
4189 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4190 * @buffer: The ring buffer
4191 * @cpu: The per CPU buffer to read from.
4193 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
4200 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4203 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4222 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4223 * @buffer: The ring buffer
4224 * @cpu: The per CPU buffer to read from.
4226 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
4231 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4234 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4242 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4243 * @buffer: The ring buffer
4244 * @cpu: The per CPU buffer to get the entries from.
4246 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
4250 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4253 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4261 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4262 * @buffer: The ring buffer
4263 * @cpu: The per CPU buffer to get the number of overruns from
4265 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4270 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4273 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4282 * commits failing due to the buffer wrapping around while there are uncommitted
4284 * @buffer: The ring buffer
4285 * @cpu: The per CPU buffer to get the number of overruns from
4288 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4293 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4296 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4305 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4306 * @buffer: The ring buffer
4307 * @cpu: The per CPU buffer to get the number of overruns from
4310 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4315 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4318 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4327 * @buffer: The ring buffer
4328 * @cpu: The per CPU buffer to get the number of events read
4331 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4335 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4338 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4344 * ring_buffer_entries - get the number of entries in a buffer
4345 * @buffer: The ring buffer
4347 * Returns the total number of entries in the ring buffer
4350 unsigned long ring_buffer_entries(struct trace_buffer *buffer) in ring_buffer_entries() argument
4356 /* if you care about this being correct, lock the buffer */ in ring_buffer_entries()
4357 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4358 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4367 * ring_buffer_overruns - get the number of overruns in buffer
4368 * @buffer: The ring buffer
4370 * Returns the total number of overruns in the ring buffer
4373 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) in ring_buffer_overruns() argument
4379 /* if you care about this being correct, lock the buffer */ in ring_buffer_overruns()
4380 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4381 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4582 /* Don't bother swapping if the ring buffer is empty */ in rb_get_reader_page()
4605 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
4606 * has no specific buffer page to point to. Let's move it out in rb_get_reader_page()
4717 /* This function should not be called when buffer is empty */ in rb_advance_reader()
4749 * Check if we are at the end of the buffer. in rb_advance_iter()
4816 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4826 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4844 struct trace_buffer *buffer; in rb_iter_peek() local
4853 buffer = cpu_buffer->buffer; in rb_iter_peek()
4856 * Check if someone performed a consuming read to the buffer in rb_iter_peek()
4857 * or removed some pages from the buffer. In these cases, in rb_iter_peek()
4873 * the ring buffer with an active write as the consumer is. in rb_iter_peek()
4909 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4919 ring_buffer_normalize_time_stamp(buffer, in rb_iter_peek()
4940 * If an NMI die dumps out the content of the ring buffer in rb_reader_lock()
4942 * preempted a task that holds the ring buffer locks. If in rb_reader_lock()
4944 * to do the read, but this can corrupt the ring buffer, in rb_reader_lock()
4951 /* Continue without locking, but disable the ring buffer */ in rb_reader_lock()
4965 * @buffer: The ring buffer to read
4974 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
4977 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4982 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
5001 * @iter: The ring buffer iterator
5016 * @iter: The ring buffer iterator
5042 * @buffer: The ring buffer to get the next event from
5043 * @cpu: the cpu to read the buffer from
5047 * Returns the next event in the ring buffer, and that event is consumed.
5049 * and eventually empty the ring buffer if the producer is slower.
5052 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
5064 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5067 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5091 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5092 * @buffer: The ring buffer to read from
5093 * @cpu: The cpu buffer to iterate over
5097 * through the buffer. Memory is allocated, buffer recording
5100 * Disabling buffer recording prevents the reading from being
5112 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
5117 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5130 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5155 * ring_buffer_read_start - start a non consuming read of the buffer
5158 * This finalizes the startup of an iteration through the buffer.
5185 * ring_buffer_read_finish - finish reading the iterator of the buffer
5188 * This re-enables the recording to the buffer, and frees the
5198 * Ring buffer is disabled from recording; here's a good place in ring_buffer_read_finish()
5199 * to check the integrity of the ring buffer. in ring_buffer_read_finish()
5215 * @iter: The ring buffer iterator
5234 * ring_buffer_size - return the size of the ring buffer (in bytes)
5235 * @buffer: The ring buffer.
5236 * @cpu: The CPU to get ring buffer size from.
5238 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
5242 * BUF_PAGE_SIZE * buffer->nr_pages in ring_buffer_size()
5244 * return the per cpu buffer value. in ring_buffer_size()
5246 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5249 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5309 /* Must have disabled the cpu buffer, then done a synchronize_rcu */
5330 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5331 * @buffer: The ring buffer to reset a per cpu buffer of
5332 * @cpu: The CPU buffer to be reset
5334 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5336 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5338 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5341 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset_cpu()
5342 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5355 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5363 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5364 * @buffer: The ring buffer to reset a per cpu buffer of
5366 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) in ring_buffer_reset_online_cpus() argument
5371 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset_online_cpus()
5372 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5374 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5375 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5384 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5385 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5400 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5404 * ring_buffer_reset - reset a ring buffer
5405 * @buffer: The ring buffer to reset all cpu buffers
5407 void ring_buffer_reset(struct trace_buffer *buffer) in ring_buffer_reset() argument
5412 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset()
5413 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5415 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5416 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5425 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5426 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5434 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
5439 * ring_buffer_empty - is the ring buffer empty?
5440 * @buffer: The ring buffer to test
5442 bool ring_buffer_empty(struct trace_buffer *buffer) in ring_buffer_empty() argument
5450 /* yes this is racy, but if you don't like the race, lock the buffer */ in ring_buffer_empty()
5451 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5452 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5468 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5469 * @buffer: The ring buffer
5470 * @cpu: The CPU buffer to test
5472 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5479 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5482 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5495 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5496 * @buffer_a: One buffer to swap with
5497 * @buffer_b: The other buffer to swap with
5501 * of a CPU buffer and has another backup buffer lying around.
5502 * It is expected that the tracer handles the cpu buffer not being
5554 * it will mess up the state of the cpu buffer. in ring_buffer_swap_cpu()
5564 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5565 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5579 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5580 * @buffer: the buffer to allocate for.
5581 * @cpu: the cpu buffer to allocate.
5584 * When reading a full page from the ring buffer, these functions
5587 * needs to get pages from the ring buffer, it passes the result
5589 * the page that was allocated, with the read page of the buffer.
5594 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5601 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5604 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5635 * @buffer: the buffer the page was allocated for
5636 * @cpu: the cpu buffer the page came from
5641 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
5648 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5651 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5674 * ring_buffer_read_page - extract a page from the ring buffer
5675 * @buffer: buffer to extract from
5678 * @cpu: the cpu of the buffer to extract
5681 * This function will pull out a page from the ring buffer and consume it.
5684 * to swap with a page in the ring buffer.
5687 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
5690 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5698 * The ring buffer can be used anywhere in the kernel and cannot
5699 * blindly call wake_up. The layer that uses the ring buffer must be
5706 int ring_buffer_read_page(struct trace_buffer *buffer, in ring_buffer_read_page() argument
5709 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5720 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5757 * we must copy the data from the page to the buffer. in ring_buffer_read_page()
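Fleshing out the usage snippet from the kerneldoc at 5687-5690, with hedged error handling; PAGE_SIZE as @len and full == 0 ("take whatever is there") are assumptions:

    void *rpage;
    int ret;

    rpage = ring_buffer_alloc_read_page(buffer, cpu);
    if (IS_ERR(rpage))
            return PTR_ERR(rpage);

    ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
    if (ret >= 0) {
            /* rpage now holds up to a page of consumed events */
    }

    ring_buffer_free_read_page(buffer, cpu, rpage);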
5880 * If we were to free the buffer, then the user would lose any trace that was in
5881 * the buffer.
5885 struct trace_buffer *buffer; in trace_rb_cpu_prepare() local
5890 buffer = container_of(node, struct trace_buffer, node); in trace_rb_cpu_prepare()
5891 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
5897 for_each_buffer_cpu(buffer, cpu_i) { in trace_rb_cpu_prepare()
5900 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
5901 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
5909 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
5910 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
5911 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
5912 WARN(1, "failed to allocate ring buffer on CPU %u\n", in trace_rb_cpu_prepare()
5917 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
5923 * This is a basic integrity check of the ring buffer.
5926 * writing to the per cpu ring buffer various sizes of data.
5930 * IPIs to the other CPUs to also write into the ring buffer.
5931 * This is to test the nesting ability of the buffer.
5934 * ring buffer should happen that's not expected, a big warning
5940 struct trace_buffer *buffer; member
5993 /* read rb_test_started before checking buffer enabled */ in rb_write_something()
5996 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
6010 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
6036 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
6082 struct trace_buffer *buffer; in test_ringbuffer() local
6087 pr_warn("Lockdown is enabled, skipping ring buffer tests\n"); in test_ringbuffer()
6091 pr_info("Running ring buffer tests...\n"); in test_ringbuffer()
6093 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); in test_ringbuffer()
6094 if (WARN_ON(!buffer)) in test_ringbuffer()
6097 /* Disable buffer so that threads can't write to it yet */ in test_ringbuffer()
6098 ring_buffer_record_off(buffer); in test_ringbuffer()
6101 rb_data[cpu].buffer = buffer; in test_ringbuffer()
6121 ring_buffer_record_on(buffer); in test_ringbuffer()
6123 * Show buffer is enabled before setting rb_test_started. in test_ringbuffer()
6126 * buffer gets enabled, there will always be some kind of in test_ringbuffer()
6129 * the threads see that the buffer is active. in test_ringbuffer()
6147 ring_buffer_free(buffer); in test_ringbuffer()
6187 if (RB_WARN_ON(buffer, total_dropped)) in test_ringbuffer()
6192 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
6199 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
6201 RB_WARN_ON(buffer, 1); in test_ringbuffer()
6221 if (RB_WARN_ON(buffer, total_len != total_alloc || in test_ringbuffer()
6225 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) in test_ringbuffer()
6231 pr_info("Ring buffer PASSED!\n"); in test_ringbuffer()
6233 ring_buffer_free(buffer); in test_ringbuffer()