/*
 * Copyright (c) 2021 Nordic Semiconductor
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/sys/mpsc_pbuf.h>

#define MPSC_PBUF_DEBUG 0

#define MPSC_PBUF_DBG(buffer, ...) do { \
	if (MPSC_PBUF_DEBUG) { \
		printk(__VA_ARGS__); \
		if (buffer) { \
			mpsc_state_print(buffer); \
		} \
	} \
} while (false)

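/*
 * Overview of the index scheme used throughout this file:
 *
 * - tmp_wr_idx: advanced when space is allocated or a word is written;
 *   packets between wr_idx and tmp_wr_idx are not yet committed.
 * - wr_idx:     advanced when a packet is committed and becomes visible
 *   to the consumer.
 * - tmp_rd_idx: advanced when a packet is claimed.
 * - rd_idx:     advanced when a packet is freed or skipped; only then is
 *   the space available to producers again.
 *
 * MPSC_PBUF_FULL is set when tmp_wr_idx catches up with rd_idx and is
 * cleared whenever rd_idx moves forward.
 */
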
static inline void mpsc_state_print(struct mpsc_pbuf_buffer *buffer)
{
	if (MPSC_PBUF_DEBUG) {
		printk(", wr:%d/%d, rd:%d/%d\n",
		       buffer->wr_idx, buffer->tmp_wr_idx,
		       buffer->rd_idx, buffer->tmp_rd_idx);
	}
}

void mpsc_pbuf_init(struct mpsc_pbuf_buffer *buffer,
		    const struct mpsc_pbuf_buffer_config *cfg)
{
	memset(buffer, 0, offsetof(struct mpsc_pbuf_buffer, buf));
	buffer->get_wlen = cfg->get_wlen;
	buffer->notify_drop = cfg->notify_drop;
	buffer->buf = cfg->buf;
	buffer->size = cfg->size;
	buffer->max_usage = 0;
	buffer->flags = cfg->flags;

	if (is_power_of_two(buffer->size)) {
		buffer->flags |= MPSC_PBUF_SIZE_POW2;
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		int err;

		err = k_sem_init(&buffer->sem, 0, 1);
		__ASSERT_NO_MSG(err == 0);
		ARG_UNUSED(err);
	}
}
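
/*
 * Illustrative initialization sketch (not part of this file; names such as
 * "storage", "log_buf" and the trivial get_wlen_cb callback are examples
 * only). The configuration fields shown are the ones consumed by
 * mpsc_pbuf_init(); size is given in 32-bit words.
 *
 *	static uint32_t storage[128];
 *	static struct mpsc_pbuf_buffer log_buf;
 *
 *	static uint32_t get_wlen_cb(const union mpsc_pbuf_generic *item)
 *	{
 *		return 1; // all packets are one word long in this sketch
 *	}
 *
 *	static const struct mpsc_pbuf_buffer_config cfg = {
 *		.buf = storage,
 *		.size = ARRAY_SIZE(storage),
 *		.get_wlen = get_wlen_cb,
 *		.notify_drop = NULL, // drop notification is optional
 *		.flags = 0,
 *	};
 *
 *	mpsc_pbuf_init(&log_buf, &cfg);
 */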

/* Calculate free space available or till end of buffer.
 *
 * @param buffer Buffer.
 * @param[out] res Destination where free space is written.
 *
 * @retval true When space was calculated until the end of the buffer (and
 * there might be more space available after wrapping).
 * @retval false When the result is the total free space.
 */
static inline bool free_space(struct mpsc_pbuf_buffer *buffer, uint32_t *res)
{
	if (buffer->flags & MPSC_PBUF_FULL) {
		*res = 0;
		return false;
	}

	if (buffer->rd_idx > buffer->tmp_wr_idx) {
		*res = buffer->rd_idx - buffer->tmp_wr_idx;
		return false;
	}
	*res = buffer->size - buffer->tmp_wr_idx;

	return true;
}
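
/*
 * Worked example (assuming size = 8, rd_idx = 2, tmp_wr_idx = 5, not full):
 * rd_idx <= tmp_wr_idx, so 3 free words (indexes 5..7) are reported and true
 * is returned; the words before rd_idx become usable only after wrapping.
 */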

/* Get amount of valid data.
 *
 * @param buffer Buffer.
 * @param[out] res Destination where the amount of available data is written.
 *
 * @retval true When the amount was calculated until the end of the buffer
 * (and there might be more data available after wrapping).
 * @retval false When the result is the total amount of available data.
 */
static inline bool available(struct mpsc_pbuf_buffer *buffer, uint32_t *res)
{
	if (buffer->flags & MPSC_PBUF_FULL || buffer->tmp_rd_idx > buffer->wr_idx) {
		*res = buffer->size - buffer->tmp_rd_idx;
		return true;
	}

	*res = (buffer->wr_idx - buffer->tmp_rd_idx);

	return false;
}

static inline uint32_t get_usage(struct mpsc_pbuf_buffer *buffer)
{
	uint32_t f;

	if (free_space(buffer, &f)) {
		f += (buffer->rd_idx - 1);
	}

	return buffer->size - 1 - f;
}
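
/*
 * Worked example (assuming size = 8, rd_idx = 2, tmp_wr_idx = 5, not full):
 * free_space() reports 3 words up to the end of the buffer, so
 * f = 3 + (rd_idx - 1) = 4 (one word stays reserved to distinguish a full
 * buffer from an empty one) and usage = 8 - 1 - 4 = 3 words in use.
 */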

static inline void max_utilization_update(struct mpsc_pbuf_buffer *buffer)
{
	if (!(buffer->flags & MPSC_PBUF_MAX_UTILIZATION)) {
		return;
	}

	buffer->max_usage = MAX(buffer->max_usage, get_usage(buffer));
}

static inline bool is_valid(union mpsc_pbuf_generic *item)
{
	return item->hdr.valid;
}

static inline bool is_invalid(union mpsc_pbuf_generic *item)
{
	return !item->hdr.valid && !item->hdr.busy;
}

static inline uint32_t idx_inc(struct mpsc_pbuf_buffer *buffer,
			       uint32_t idx, int32_t val)
{
	uint32_t i = idx + val;

	if (buffer->flags & MPSC_PBUF_SIZE_POW2) {
		return i & (buffer->size - 1);
	}

	return (i >= buffer->size) ? i - buffer->size : i;
}

static inline uint32_t get_skip(union mpsc_pbuf_generic *item)
{
	if (item->hdr.busy && !item->hdr.valid) {
		return item->skip.len;
	}

	return 0;
}

static ALWAYS_INLINE void tmp_wr_idx_inc(struct mpsc_pbuf_buffer *buffer, int32_t wlen)
{
	buffer->tmp_wr_idx = idx_inc(buffer, buffer->tmp_wr_idx, wlen);
	if (buffer->tmp_wr_idx == buffer->rd_idx) {
		buffer->flags |= MPSC_PBUF_FULL;
	}
}

static void rd_idx_inc(struct mpsc_pbuf_buffer *buffer, int32_t wlen)
{
	buffer->rd_idx = idx_inc(buffer, buffer->rd_idx, wlen);
	buffer->flags &= ~MPSC_PBUF_FULL;
}

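/*
 * A skip packet is a marker with busy = 1 and valid = 0 whose length field
 * tells the reader how many words to jump over. It is used to mark the
 * unusable tail of the buffer when a packet does not fit before the wrap
 * and to mark regions invalidated while dropping packets.
 */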
static void add_skip_item(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	union mpsc_pbuf_generic skip = {
		.skip = { .valid = 0, .busy = 1, .len = wlen }
	};

	buffer->buf[buffer->tmp_wr_idx] = skip.raw;
	tmp_wr_idx_inc(buffer, wlen);
	buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
}

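/*
 * Called under the lock when a producer finds no room. Three outcomes are
 * possible:
 * - the oldest packet is a skip packet: it is consumed to free space;
 * - overwrite mode, oldest packet is busy (claimed): all indexes are moved
 *   past it so its content is not overwritten;
 * - overwrite mode, oldest packet is valid and not busy: it is dropped and
 *   returned through item_to_drop so that notify_drop can run outside the
 *   lock, while tmp_wr_idx_shift records the region reserved until
 *   post_drop_action() runs.
 */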
static bool drop_item_locked(struct mpsc_pbuf_buffer *buffer,
			     uint32_t free_wlen,
			     union mpsc_pbuf_generic **item_to_drop,
			     uint32_t *tmp_wr_idx_shift)
{
	union mpsc_pbuf_generic *item;
	uint32_t skip_wlen;

	item = (union mpsc_pbuf_generic *)&buffer->buf[buffer->rd_idx];
	skip_wlen = get_skip(item);
	*item_to_drop = NULL;
	*tmp_wr_idx_shift = 0;

	if (skip_wlen) {
		/* Skip packet found, can be dropped to free some space. */
		MPSC_PBUF_DBG(buffer, "no space: Found skip packet %d len", skip_wlen);

		rd_idx_inc(buffer, skip_wlen);
		buffer->tmp_rd_idx = buffer->rd_idx;
		return true;
	}

	/* Other options for dropping are available only in overwrite mode. */
	if (!(buffer->flags & MPSC_PBUF_MODE_OVERWRITE)) {
		return false;
	}

	uint32_t rd_wlen = buffer->get_wlen(item);

	/* If the packet is busy it needs to be omitted. */
	if (!is_valid(item)) {
		return false;
	} else if (item->hdr.busy) {
		MPSC_PBUF_DBG(buffer, "no space: Found busy packet %p (len:%d)", item, rd_wlen);
		/* Add a skip packet before the claimed packet. */
		if (free_wlen) {
			add_skip_item(buffer, free_wlen);
			MPSC_PBUF_DBG(buffer, "no space: Added skip packet (len:%d)", free_wlen);
		}
		/* Move all indexes forward, past the claimed packet. */
		buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, rd_wlen);

		/* If the allocation wrapped around the buffer and found a busy
		 * packet that was already omitted, skip it again.
		 */
		if (buffer->rd_idx == buffer->tmp_rd_idx) {
			buffer->tmp_rd_idx = idx_inc(buffer, buffer->tmp_rd_idx, rd_wlen);
		}

		buffer->tmp_wr_idx = buffer->tmp_rd_idx;
		buffer->rd_idx = buffer->tmp_rd_idx;
		buffer->flags |= MPSC_PBUF_FULL;
	} else {
		/* Prepare packet dropping. */
		rd_idx_inc(buffer, rd_wlen);
		buffer->tmp_rd_idx = buffer->rd_idx;
		/* Temporarily move tmp_wr_idx forward to ensure that the
		 * packet will not be dropped twice and its content will not
		 * be overwritten.
		 */
		if (free_wlen) {
			/* Mark the free location as invalid to prevent
			 * reading incomplete data.
			 */
			union mpsc_pbuf_generic invalid = {
				.hdr = {
					.valid = 0,
					.busy = 0
				}
			};

			buffer->buf[buffer->tmp_wr_idx] = invalid.raw;
		}

		*tmp_wr_idx_shift = rd_wlen + free_wlen;
		buffer->tmp_wr_idx = idx_inc(buffer, buffer->tmp_wr_idx, *tmp_wr_idx_shift);
		buffer->flags |= MPSC_PBUF_FULL;
		item->hdr.valid = 0;
		*item_to_drop = item;
		MPSC_PBUF_DBG(buffer, "no space: dropping packet %p (len: %d)",
			      item, rd_wlen);
	}

	return true;
}

static void post_drop_action(struct mpsc_pbuf_buffer *buffer,
			     uint32_t prev_tmp_wr_idx,
			     uint32_t tmp_wr_idx_shift)
{
	uint32_t cmp_tmp_wr_idx = idx_inc(buffer, prev_tmp_wr_idx, tmp_wr_idx_shift);

	if (cmp_tmp_wr_idx == buffer->tmp_wr_idx) {
		/* Operation not interrupted by another alloc. */
		buffer->tmp_wr_idx = prev_tmp_wr_idx;
		buffer->flags &= ~MPSC_PBUF_FULL;
		return;
	}

	/* Operation interrupted, mark area as to be skipped. */
	union mpsc_pbuf_generic skip = {
		.skip = {
			.valid = 0,
			.busy = 1,
			.len = tmp_wr_idx_shift
		}
	};

	buffer->buf[prev_tmp_wr_idx] = skip.raw;
	buffer->wr_idx = idx_inc(buffer,
				 buffer->wr_idx,
				 tmp_wr_idx_shift);
	/* full flag? */
}
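
/*
 * The spinlock is released between dropping a packet and calling
 * notify_drop, so another producer may allocate in the meantime. On the
 * next loop iteration post_drop_action() checks whether tmp_wr_idx still
 * matches the reservation: if so, the indexes are rolled back and the FULL
 * flag is cleared; if not, the reserved region is turned into a skip packet
 * so the consumer can step over it.
 */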

void mpsc_pbuf_put_word(struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic item)
{
	bool cont;
	uint32_t free_wlen;
	k_spinlock_key_t key;
	union mpsc_pbuf_generic *dropped_item = NULL;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		key = k_spin_lock(&buffer->lock);

		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		(void)free_space(buffer, &free_wlen);

		MPSC_PBUF_DBG(buffer, "put_word (%d free space)", (int)free_wlen);

		if (free_wlen) {
			buffer->buf[buffer->tmp_wr_idx] = item.raw;
			tmp_wr_idx_inc(buffer, 1);
			cont = false;
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, 1);
			max_utilization_update(buffer);
		} else {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}
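
/*
 * Illustrative usage sketch (names are examples only): a single-word packet
 * is stored in one call. Note that put_word writes the raw word as-is, so
 * the caller provides it already marked as valid.
 *
 *	union mpsc_pbuf_generic word = {
 *		.hdr = { .valid = 1, .busy = 0 }
 *	};
 *
 *	// remaining bits of the word carry user-defined payload
 *	mpsc_pbuf_put_word(&log_buf, word);
 */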

union mpsc_pbuf_generic *mpsc_pbuf_alloc(struct mpsc_pbuf_buffer *buffer,
					 size_t wlen, k_timeout_t timeout)
{
	union mpsc_pbuf_generic *item = NULL;
	union mpsc_pbuf_generic *dropped_item = NULL;
	bool cont = true;
	uint32_t free_wlen;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	MPSC_PBUF_DBG(buffer, "alloc %d words", (int)wlen);

	if (wlen > (buffer->size)) {
		MPSC_PBUF_DBG(buffer, "Failed to alloc");
		return NULL;
	}

	do {
		k_spinlock_key_t key;
		bool wrap;

		key = k_spin_lock(&buffer->lock);
		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= wlen) {
			item = (union mpsc_pbuf_generic *)&buffer->buf[buffer->tmp_wr_idx];
			item->hdr.valid = 0;
			item->hdr.busy = 0;
			tmp_wr_idx_inc(buffer, wlen);
			cont = false;
		} else if (wrap) {
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else if (IS_ENABLED(CONFIG_MULTITHREADING) && !K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
			   !k_is_in_isr()) {
			int err;

			k_spin_unlock(&buffer->lock, key);
			err = k_sem_take(&buffer->sem, timeout);
			key = k_spin_lock(&buffer->lock);
			cont = (err == 0) ? true : false;
		} else if (cont) {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}
		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);

	MPSC_PBUF_DBG(buffer, "allocated %p", item);

	if (IS_ENABLED(CONFIG_MPSC_CLEAR_ALLOCATED) && item) {
		/* During test fill with 0's to simplify message comparison */
		memset(item, 0, sizeof(int) * wlen);
	}

	return item;
}
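
/*
 * Illustrative producer sketch (names are examples only): allocate room for
 * a multi-word packet, fill it, then commit it so it becomes visible to the
 * consumer. The user-provided get_wlen callback must report the same length
 * for the committed packet, since mpsc_pbuf_commit() uses it to advance
 * wr_idx.
 *
 *	union mpsc_pbuf_generic *pkt;
 *
 *	pkt = mpsc_pbuf_alloc(&log_buf, 4, K_NO_WAIT);
 *	if (pkt != NULL) {
 *		// fill the packet payload here (layout is user-defined)
 *		mpsc_pbuf_commit(&log_buf, pkt);
 *	}
 */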

void mpsc_pbuf_commit(struct mpsc_pbuf_buffer *buffer,
		      union mpsc_pbuf_generic *item)
{
	uint32_t wlen = buffer->get_wlen(item);

	k_spinlock_key_t key = k_spin_lock(&buffer->lock);

	item->hdr.valid = 1;
	buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
	max_utilization_update(buffer);
	k_spin_unlock(&buffer->lock, key);
	MPSC_PBUF_DBG(buffer, "committed %p", item);
}

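/*
 * mpsc_pbuf_put_word_ext() stores the header word followed by a raw pointer,
 * so a packet occupies (sizeof(item) + sizeof(data)) / sizeof(uint32_t)
 * words: typically 2 on 32-bit targets and 3 on 64-bit ones.
 */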
void mpsc_pbuf_put_word_ext(struct mpsc_pbuf_buffer *buffer,
			    const union mpsc_pbuf_generic item,
			    const void *data)
{
	static const size_t l =
		(sizeof(item) + sizeof(data)) / sizeof(uint32_t);
	union mpsc_pbuf_generic *dropped_item = NULL;
	bool cont;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		k_spinlock_key_t key;
		uint32_t free_wlen;
		bool wrap;

		key = k_spin_lock(&buffer->lock);

		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= l) {
			buffer->buf[buffer->tmp_wr_idx] = item.raw;
			void **p =
				(void **)&buffer->buf[buffer->tmp_wr_idx + 1];

			*p = (void *)data;
			tmp_wr_idx_inc(buffer, l);
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, l);
			cont = false;
			max_utilization_update(buffer);
		} else if (wrap) {
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}

void mpsc_pbuf_put_data(struct mpsc_pbuf_buffer *buffer, const uint32_t *data,
			size_t wlen)
{
	bool cont;
	union mpsc_pbuf_generic *dropped_item = NULL;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		uint32_t free_wlen;
		k_spinlock_key_t key;
		bool wrap;

		key = k_spin_lock(&buffer->lock);

		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= wlen) {
			memcpy(&buffer->buf[buffer->tmp_wr_idx], data,
			       wlen * sizeof(uint32_t));
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
			tmp_wr_idx_inc(buffer, wlen);
			cont = false;
			max_utilization_update(buffer);
		} else if (wrap) {
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			dropped_item->hdr.valid = 0;
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}

const union mpsc_pbuf_generic *mpsc_pbuf_claim(struct mpsc_pbuf_buffer *buffer)
{
	union mpsc_pbuf_generic *item;
	bool cont;

	do {
		uint32_t a;
		k_spinlock_key_t key;

		cont = false;
		key = k_spin_lock(&buffer->lock);
		(void)available(buffer, &a);
		item = (union mpsc_pbuf_generic *)&buffer->buf[buffer->tmp_rd_idx];

		if (!a || is_invalid(item)) {
			MPSC_PBUF_DBG(buffer, "invalid claim %d: %p", a, item);
			item = NULL;
		} else {
			uint32_t skip = get_skip(item);

			if (skip || !is_valid(item)) {
				uint32_t inc =
					skip ? skip : buffer->get_wlen(item);

				buffer->tmp_rd_idx =
					idx_inc(buffer, buffer->tmp_rd_idx, inc);
				rd_idx_inc(buffer, inc);
				cont = true;
			} else {
				item->hdr.busy = 1;
				buffer->tmp_rd_idx =
					idx_inc(buffer, buffer->tmp_rd_idx,
						buffer->get_wlen(item));
			}
		}

		if (!cont) {
			MPSC_PBUF_DBG(buffer, ">>claimed %d: %p", a, item);
		}
		k_spin_unlock(&buffer->lock, key);
	} while (cont);

	return item;
}
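
/*
 * Illustrative consumer sketch (names are examples only): drain all pending
 * packets. mpsc_pbuf_claim() returns NULL when nothing is ready, and every
 * claimed packet must be released with mpsc_pbuf_free().
 *
 *	const union mpsc_pbuf_generic *pkt;
 *
 *	while ((pkt = mpsc_pbuf_claim(&log_buf)) != NULL) {
 *		// process the packet (layout is user-defined)
 *		mpsc_pbuf_free(&log_buf, pkt);
 *	}
 */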

void mpsc_pbuf_free(struct mpsc_pbuf_buffer *buffer,
		    const union mpsc_pbuf_generic *item)
{
	uint32_t wlen = buffer->get_wlen(item);
	k_spinlock_key_t key = k_spin_lock(&buffer->lock);
	union mpsc_pbuf_generic *witem = (union mpsc_pbuf_generic *)item;

	witem->hdr.valid = 0;
	if (!(buffer->flags & MPSC_PBUF_MODE_OVERWRITE) ||
	    ((uint32_t *)item == &buffer->buf[buffer->rd_idx])) {
		witem->hdr.busy = 0;
		if (buffer->rd_idx == buffer->tmp_rd_idx) {
			/* There is a chance that so many new packets were
			 * added between claim and free that rd_idx points at
			 * the claimed item again, and tmp_rd_idx then points
			 * at the same location. In that case also increment
			 * tmp_rd_idx, which marks the freed packet as the
			 * only free space in the buffer.
			 */
			buffer->tmp_rd_idx = idx_inc(buffer, buffer->tmp_rd_idx, wlen);
		}
		rd_idx_inc(buffer, wlen);
	} else {
		MPSC_PBUF_DBG(buffer, "Allocation occurred during claim");
		witem->skip.len = wlen;
	}
	MPSC_PBUF_DBG(buffer, "<<freed: %p", item);

	k_spin_unlock(&buffer->lock, key);
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_sem_give(&buffer->sem);
	}
}

bool mpsc_pbuf_is_pending(struct mpsc_pbuf_buffer *buffer)
{
	uint32_t a;
	k_spinlock_key_t key = k_spin_lock(&buffer->lock);

	(void)available(buffer, &a);
	k_spin_unlock(&buffer->lock, key);

	return a ? true : false;
}

void mpsc_pbuf_get_utilization(struct mpsc_pbuf_buffer *buffer,
			       uint32_t *size, uint32_t *now)
{
	/* One word is left for full/empty distinction. */
	*size = (buffer->size - 1) * sizeof(int);
	*now = get_usage(buffer) * sizeof(int);
}

int mpsc_pbuf_get_max_utilization(struct mpsc_pbuf_buffer *buffer, uint32_t *max)
{
	if (!(buffer->flags & MPSC_PBUF_MAX_UTILIZATION)) {
		return -ENOTSUP;
	}

	*max = buffer->max_usage * sizeof(int);
	return 0;
}