/* ring_buffer.c: Simple ring buffer API */

/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/ring_buffer.h>
#include <string.h>

/* LCOV_EXCL_START */
/* Weak function that a test can override in order to trigger index
 * rewinding earlier than RING_BUFFER_MAX_SIZE.
 */
uint32_t __weak ring_buf_get_rewind_threshold(void)
{
	return RING_BUFFER_MAX_SIZE;
}
/* LCOV_EXCL_STOP */
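
/* Illustrative sketch (not part of this file): a test suite could override
 * the weak symbol above so that rewinding is exercised after only a few
 * words instead of near the uint32_t limit:
 *
 *   uint32_t ring_buf_get_rewind_threshold(void)
 *   {
 *       return 64U;
 *   }
 */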

/**
 * Internal data structure for a buffer header.
 *
 * We want all of this to fit in a single uint32_t. Every item stored in the
 * ring buffer will be one of these headers plus any extra data supplied.
 */
struct ring_element {
	uint32_t type   :16; /**< Application-specific */
	uint32_t length :8;  /**< length in 32-bit chunks */
	uint32_t value  :8;  /**< Room for small integral values */
};
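
/* The header is relied on to occupy exactly one 32-bit word. A minimal
 * compile-time check (sketch; assumes Zephyr's BUILD_ASSERT() macro is
 * visible in this translation unit):
 *
 *   BUILD_ASSERT(sizeof(struct ring_element) == sizeof(uint32_t));
 */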

/* Reduce an index into the buffer's range. Power-of-two sized buffers have
 * a non-zero mask and use a cheap bitwise AND; other sizes fall back to the
 * modulo operator.
 */
static uint32_t mod(struct ring_buf *buf, uint32_t val)
{
	return likely(buf->mask) ? val & buf->mask : val % buf->size;
}
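
/* Worked example (illustrative): with size == 8 the mask is 7, so
 * mod(buf, 10) == (10 & 7) == 2. With size == 6 the mask is 0, so
 * mod(buf, 10) == (10 % 6) == 4.
 */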

/* Largest multiple of buf_size that does not exceed threshold. Subtracting
 * this value from an index leaves mod(buf, index) unchanged.
 */
static uint32_t get_rewind_value(uint32_t buf_size, uint32_t threshold)
{
	return buf_size * (threshold / buf_size);
}
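
/* Worked example (illustrative): with buf_size == 100 and threshold == 250,
 * get_rewind_value() returns 100 * (250 / 100) == 200. Rewinding head and
 * tail by 200 keeps both pointing at the same buffer slots while pulling
 * the indexes away from the uint32_t wrap-around point.
 */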

int ring_buf_is_empty(struct ring_buf *buf)
{
	uint32_t tail = buf->tail;
	uint32_t head = buf->head;

	if (tail < head) {
		tail += get_rewind_value(buf->size,
					 ring_buf_get_rewind_threshold());
	}

	return (head == tail);
}

uint32_t ring_buf_size_get(struct ring_buf *buf)
{
	uint32_t tail = buf->tail;
	uint32_t head = buf->head;

	if (tail < head) {
		tail += get_rewind_value(buf->size,
					 ring_buf_get_rewind_threshold());
	}

	return tail - head;
}

uint32_t ring_buf_space_get(struct ring_buf *buf)
{
	return buf->size - ring_buf_size_get(buf);
}

int ring_buf_item_put(struct ring_buf *buf, uint16_t type, uint8_t value,
		      uint32_t *data, uint8_t size32)
{
	uint32_t i, space, index, rc;
	uint32_t threshold = ring_buf_get_rewind_threshold();
	uint32_t rew;

	space = ring_buf_space_get(buf);
	if (space >= (size32 + 1)) {
		struct ring_element *header =
			(struct ring_element *)&buf->buf.buf32[mod(buf, buf->tail)];

		header->type = type;
		header->length = size32;
		header->value = value;

		if (likely(buf->mask)) {
			for (i = 0U; i < size32; ++i) {
				index = (i + buf->tail + 1) & buf->mask;
				buf->buf.buf32[index] = data[i];
			}
		} else {
			for (i = 0U; i < size32; ++i) {
				index = (i + buf->tail + 1) % buf->size;
				buf->buf.buf32[index] = data[i];
			}
		}

		/* Check if the indexes should be rewound. */
		if (buf->tail > threshold) {
			rew = get_rewind_value(buf->size, threshold);
		} else {
			rew = 0;
		}

		buf->tail = buf->tail + (size32 + 1 - rew);
		rc = 0U;
	} else {
		buf->misc.item_mode.dropped_put_count++;
		rc = -EMSGSIZE;
	}

	return rc;
}
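
/* Usage sketch (illustrative; the buffer name is an assumption, and
 * RING_BUF_ITEM_DECLARE_SIZE() is taken from <sys/ring_buffer.h>):
 *
 *   RING_BUF_ITEM_DECLARE_SIZE(my_ring, 16);
 *
 *   uint32_t payload[2] = { 0xAAAA, 0xBBBB };
 *   int ret = ring_buf_item_put(&my_ring, 0x42, 0, payload, 2);
 *
 *   if (ret == -EMSGSIZE) {
 *       // fewer than three free words (one header word + two data words)
 *   }
 */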

int ring_buf_item_get(struct ring_buf *buf, uint16_t *type, uint8_t *value,
		      uint32_t *data, uint8_t *size32)
{
	struct ring_element *header;
	uint32_t i, index;
	uint32_t tail = buf->tail;
	uint32_t rew;

	/* Tail is always ahead; if it is not, it is only because it was
	 * rewound.
	 */
	if (tail < buf->head) {
		/* Locally undo the rewind to get tail aligned with head. */
		rew = get_rewind_value(buf->size,
				       ring_buf_get_rewind_threshold());
		tail += rew;
	} else if (ring_buf_is_empty(buf)) {
		return -EAGAIN;
	} else {
		rew = 0;
	}

	header = (struct ring_element *) &buf->buf.buf32[mod(buf, buf->head)];

	if (data && (header->length > *size32)) {
		*size32 = header->length;
		return -EMSGSIZE;
	}

	*size32 = header->length;
	*type = header->type;
	*value = header->value;

	if (data) {
		if (likely(buf->mask)) {
			for (i = 0U; i < header->length; ++i) {
				index = (i + buf->head + 1) & buf->mask;
				data[i] = buf->buf.buf32[index];
			}
		} else {
			for (i = 0U; i < header->length; ++i) {
				index = (i + buf->head + 1) % buf->size;
				data[i] = buf->buf.buf32[index];
			}
		}
	}

	/* Advance head, including any potential rewinding. */
	buf->head = buf->head + header->length + 1 - rew;

	return 0;
}
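
/* Usage sketch (illustrative; my_ring is the hypothetical buffer from the
 * put example above):
 *
 *   uint32_t payload[2];
 *   uint16_t type;
 *   uint8_t value;
 *   uint8_t size32 = ARRAY_SIZE(payload);
 *
 *   int ret = ring_buf_item_get(&my_ring, &type, &value, payload, &size32);
 *
 *   if (ret == -EAGAIN) {
 *       // buffer is empty
 *   } else if (ret == -EMSGSIZE) {
 *       // payload[] too small; size32 now holds the required length
 *   }
 */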

/** @brief Wrap an index that may have run past the end of the buffer.
 *
 * @param val Index to wrap; must be less than 2 * max.
 * @param max Buffer size used as the modulus.
 *
 * @return val % max, given the precondition above.
 */
static inline uint32_t wrap(uint32_t val, uint32_t max)
{
	return val >= max ? (val - max) : val;
}
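
/* Note (illustrative): the single subtraction only equals val % max while
 * val < 2 * max, e.g. wrap(9, 8) == 1 but wrap(17, 8) == 9, not 1.
 */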

uint32_t ring_buf_put_claim(struct ring_buf *buf, uint8_t **data, uint32_t size)
{
	uint32_t space, trail_size, allocated, tmp_trail_mod;

	tmp_trail_mod = mod(buf, buf->misc.byte_mode.tmp_tail);
	space = (buf->head + buf->size) - buf->misc.byte_mode.tmp_tail;
	trail_size = buf->size - tmp_trail_mod;

	/* Limit requested size to available size. */
	size = MIN(size, space);

	/* Limit allocated size to trail size. */
	allocated = MIN(trail_size, size);
	*data = &buf->buf.buf8[tmp_trail_mod];

	buf->misc.byte_mode.tmp_tail =
		buf->misc.byte_mode.tmp_tail + allocated;

	return allocated;
}

int ring_buf_put_finish(struct ring_buf *buf, uint32_t size)
{
	uint32_t rew;
	uint32_t threshold = ring_buf_get_rewind_threshold();

	if ((buf->tail + size) > (buf->head + buf->size)) {
		return -EINVAL;
	}

	/* Check if the indexes should be rewound. */
	if (buf->tail > threshold) {
		rew = get_rewind_value(buf->size, threshold);
	} else {
		rew = 0;
	}

	buf->tail += (size - rew);
	buf->misc.byte_mode.tmp_tail = buf->tail;

	return 0;
}
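
/* Usage sketch for the write-side claim/finish pair (illustrative; the
 * buffer name and RING_BUF_DECLARE() from <sys/ring_buffer.h> are
 * assumptions, fill_frame() is a hypothetical producer):
 *
 *   RING_BUF_DECLARE(byte_ring, 64);
 *
 *   uint8_t *dst;
 *   uint32_t claimed = ring_buf_put_claim(&byte_ring, &dst, 8);
 *
 *   fill_frame(dst, claimed);
 *   (void)ring_buf_put_finish(&byte_ring, claimed);
 */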

uint32_t ring_buf_put(struct ring_buf *buf, const uint8_t *data, uint32_t size)
{
	uint8_t *dst;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	do {
		partial_size = ring_buf_put_claim(buf, &dst, size);
		memcpy(dst, data, partial_size);
		total_size += partial_size;
		size -= partial_size;
		data += partial_size;
	} while (size && partial_size);

	err = ring_buf_put_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);

	return total_size;
}
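
/* Copy-based usage sketch (illustrative; byte_ring as above). The return
 * value can be less than the requested size when the buffer is nearly full:
 *
 *   uint8_t msg[] = { 1, 2, 3, 4 };
 *   uint32_t written = ring_buf_put(&byte_ring, msg, sizeof(msg));
 *
 *   if (written < sizeof(msg)) {
 *       // only part of msg[] fit; handle the shortfall
 *   }
 */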

uint32_t ring_buf_get_claim(struct ring_buf *buf, uint8_t **data, uint32_t size)
{
	uint32_t space, granted_size, trail_size, tmp_head_mod;
	uint32_t tail = buf->tail;

	/* Tail is always ahead; if it is not, it is only because it was
	 * rewound.
	 */
	if (tail < buf->misc.byte_mode.tmp_head) {
		/* Locally, increment it to the pre-rewind value. */
		tail += get_rewind_value(buf->size,
					 ring_buf_get_rewind_threshold());
	}

	tmp_head_mod = mod(buf, buf->misc.byte_mode.tmp_head);
	space = tail - buf->misc.byte_mode.tmp_head;
	trail_size = buf->size - tmp_head_mod;

	/* Limit requested size to available size. */
	granted_size = MIN(size, space);

	/* Limit allocated size to trail size. */
	granted_size = MIN(trail_size, granted_size);

	*data = &buf->buf.buf8[tmp_head_mod];
	buf->misc.byte_mode.tmp_head += granted_size;

	return granted_size;
}

int ring_buf_get_finish(struct ring_buf *buf, uint32_t size)
{
	uint32_t tail = buf->tail;
	uint32_t rew;

	/* Tail is always ahead; if it is not, it is only because it was
	 * rewound.
	 */
	if (tail < buf->misc.byte_mode.tmp_head) {
		/* Tail was rewound. Locally, increment it to the pre-rewind
		 * value.
		 */
		rew = get_rewind_value(buf->size,
				       ring_buf_get_rewind_threshold());
		tail += rew;
	} else {
		rew = 0;
	}

	if ((buf->head + size) > tail) {
		return -EINVAL;
	}

	/* Include potential rewinding. */
	buf->head += (size - rew);
	buf->misc.byte_mode.tmp_head = buf->head;

	return 0;
}
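
/* Usage sketch for the read-side claim/finish pair (illustrative; byte_ring
 * as above, consume_frame() is a hypothetical consumer):
 *
 *   uint8_t *src;
 *   uint32_t claimed = ring_buf_get_claim(&byte_ring, &src, 8);
 *
 *   consume_frame(src, claimed);
 *   (void)ring_buf_get_finish(&byte_ring, claimed);
 */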

uint32_t ring_buf_get(struct ring_buf *buf, uint8_t *data, uint32_t size)
{
	uint8_t *src;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	do {
		partial_size = ring_buf_get_claim(buf, &src, size);
		if (data) {
			memcpy(data, src, partial_size);
			data += partial_size;
		}
		total_size += partial_size;
		size -= partial_size;
	} while (size && partial_size);

	err = ring_buf_get_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);

	return total_size;
}
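
/* Usage sketch (illustrative; byte_ring as above). Passing NULL for data
 * discards the bytes instead of copying them out:
 *
 *   uint8_t out[8];
 *   uint32_t read = ring_buf_get(&byte_ring, out, sizeof(out));
 *   uint32_t dropped = ring_buf_get(&byte_ring, NULL, 4);
 */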

uint32_t ring_buf_peek(struct ring_buf *buf, uint8_t *data, uint32_t size)
{
	uint8_t *src;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	size = MIN(size, ring_buf_size_get(buf));

	do {
		partial_size = ring_buf_get_claim(buf, &src, size);
		__ASSERT_NO_MSG(data != NULL);
		memcpy(data, src, partial_size);
		data += partial_size;
		total_size += partial_size;
		size -= partial_size;
	} while (size && partial_size);

	/* Finishing with size 0 effectively unclaims all total_size bytes,
	 * so the data stays in the buffer.
	 */
	err = ring_buf_get_finish(buf, 0);
	__ASSERT_NO_MSG(err == 0);

	return total_size;
}
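
/* Usage sketch (illustrative; byte_ring as above). Peeking copies data out
 * without consuming it, so a subsequent ring_buf_get() returns the same
 * bytes:
 *
 *   uint8_t preview[4];
 *   uint32_t peeked = ring_buf_peek(&byte_ring, preview, sizeof(preview));
 */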