/*
 * Copyright (c) 2021 Nordic Semiconductor
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/sys/mpsc_pbuf.h>

#define MPSC_PBUF_DEBUG 0

#define MPSC_PBUF_DBG(buffer, ...) do { \
	if (MPSC_PBUF_DEBUG) { \
		printk(__VA_ARGS__); \
		if (buffer) { \
			mpsc_state_print(buffer); \
		} \
	} \
} while (0)

static inline void mpsc_state_print(struct mpsc_pbuf_buffer *buffer)
{
	if (MPSC_PBUF_DEBUG) {
		printk(", wr:%d/%d, rd:%d/%d\n",
			buffer->wr_idx, buffer->tmp_wr_idx,
			buffer->rd_idx, buffer->tmp_rd_idx);
	}
}

void mpsc_pbuf_init(struct mpsc_pbuf_buffer *buffer,
		    const struct mpsc_pbuf_buffer_config *cfg)
{
	int err;

	memset(buffer, 0, offsetof(struct mpsc_pbuf_buffer, buf));
	buffer->get_wlen = cfg->get_wlen;
	buffer->notify_drop = cfg->notify_drop;
	buffer->buf = cfg->buf;
	buffer->size = cfg->size;
	buffer->max_usage = 0;
	buffer->flags = cfg->flags;

	if (is_power_of_two(buffer->size)) {
		buffer->flags |= MPSC_PBUF_SIZE_POW2;
	}

	err = k_sem_init(&buffer->sem, 0, 1);
	__ASSERT_NO_MSG(err == 0);
	ARG_UNUSED(err);
}
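
/* Illustrative initialization sketch. Storage size, flags and the
 * my_get_wlen()/my_notify_drop() callbacks are hypothetical placeholders;
 * only the config fields used above are assumed.
 *
 *	static uint32_t storage[128];
 *	static struct mpsc_pbuf_buffer pbuf;
 *	static const struct mpsc_pbuf_buffer_config cfg = {
 *		.buf = storage,
 *		.size = ARRAY_SIZE(storage),
 *		.get_wlen = my_get_wlen,
 *		.notify_drop = my_notify_drop,
 *		.flags = 0,
 *	};
 *
 *	mpsc_pbuf_init(&pbuf, &cfg);
 */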

/* Calculate free space, either total or up to the end of the buffer.
 *
 * @param buffer Buffer.
 * @param[out] res Destination where free space is written.
 *
 * @retval true when space was calculated up to the end of the buffer (and
 * there might be more space available after wrapping).
 * @retval false when the result is the total free space.
 */
static inline bool free_space(struct mpsc_pbuf_buffer *buffer, uint32_t *res)
{
	if (buffer->flags & MPSC_PBUF_FULL) {
		*res = 0;
		return false;
	}

	if (buffer->rd_idx > buffer->tmp_wr_idx) {
		*res = buffer->rd_idx - buffer->tmp_wr_idx;
		return false;
	}
	*res = buffer->size - buffer->tmp_wr_idx;

	return true;
}

/* Get the amount of valid data.
 *
 * @param buffer Buffer.
 * @param[out] res Destination where the amount of data is written.
 *
 * @retval true when the amount was calculated up to the end of the buffer
 * (and there might be more data available after wrapping).
 * @retval false when the result is the total amount of valid data.
 */
static inline bool available(struct mpsc_pbuf_buffer *buffer, uint32_t *res)
{
	if (buffer->flags & MPSC_PBUF_FULL || buffer->tmp_rd_idx > buffer->wr_idx) {
		*res = buffer->size - buffer->tmp_rd_idx;
		return true;
	}

	*res = (buffer->wr_idx - buffer->tmp_rd_idx);

	return false;
}

static inline uint32_t get_usage(struct mpsc_pbuf_buffer *buffer)
{
	uint32_t f;

	if (free_space(buffer, &f)) {
		f += (buffer->rd_idx - 1);
	}

	return buffer->size - 1 - f;
}

static inline void max_utilization_update(struct mpsc_pbuf_buffer *buffer)
{
	if (!(buffer->flags & MPSC_PBUF_MAX_UTILIZATION)) {
		return;
	}

	buffer->max_usage = MAX(buffer->max_usage, get_usage(buffer));
}

static inline bool is_valid(union mpsc_pbuf_generic *item)
{
	return item->hdr.valid;
}

static inline bool is_invalid(union mpsc_pbuf_generic *item)
{
	return !item->hdr.valid && !item->hdr.busy;
}

static inline uint32_t idx_inc(struct mpsc_pbuf_buffer *buffer,
				uint32_t idx, int32_t val)
{
	uint32_t i = idx + val;

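	/* Power-of-two sized buffers can wrap the index with a simple mask;
	 * otherwise fall back to a conditional subtraction.
	 */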
	if (buffer->flags & MPSC_PBUF_SIZE_POW2) {
		return i & (buffer->size - 1);
	}

	return (i >= buffer->size) ? i - buffer->size : i;
}

static inline uint32_t get_skip(union mpsc_pbuf_generic *item)
{
	if (item->hdr.busy && !item->hdr.valid) {
		return item->skip.len;
	}

	return 0;
}

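/* Advance the temporary write index; when it catches up with the read index
 * the buffer is marked as full.
 */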
static ALWAYS_INLINE void tmp_wr_idx_inc(struct mpsc_pbuf_buffer *buffer, int32_t wlen)
{
	buffer->tmp_wr_idx = idx_inc(buffer, buffer->tmp_wr_idx, wlen);
	if (buffer->tmp_wr_idx == buffer->rd_idx) {
		buffer->flags |= MPSC_PBUF_FULL;
	}
}

static void rd_idx_inc(struct mpsc_pbuf_buffer *buffer, int32_t wlen)
{
	buffer->rd_idx = idx_inc(buffer, buffer->rd_idx, wlen);
	buffer->flags &= ~MPSC_PBUF_FULL;
}

static void add_skip_item(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	union mpsc_pbuf_generic skip = {
		.skip = { .valid = 0, .busy = 1, .len = wlen }
	};

	buffer->buf[buffer->tmp_wr_idx] = skip.raw;
	tmp_wr_idx_inc(buffer, wlen);
	buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
}

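/* Attempt to make room by dropping the oldest packet (called with the lock
 * held). A skip packet at the read index is simply consumed. Anything else
 * can only be dropped in overwrite mode: a busy (claimed) packet is stepped
 * over, while a valid packet is invalidated and returned through
 * @p item_to_drop so that the caller can run the notify_drop callback outside
 * of the lock. @p tmp_wr_idx_shift reports how far tmp_wr_idx was temporarily
 * advanced; it is undone later by post_drop_action().
 *
 * @return true if the caller should retry the operation, false otherwise.
 */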
static bool drop_item_locked(struct mpsc_pbuf_buffer *buffer,
			     uint32_t free_wlen,
			     union mpsc_pbuf_generic **item_to_drop,
			     uint32_t *tmp_wr_idx_shift)
{
	union mpsc_pbuf_generic *item;
	uint32_t skip_wlen;

	item = (union mpsc_pbuf_generic *)&buffer->buf[buffer->rd_idx];
	skip_wlen = get_skip(item);
	*item_to_drop = NULL;
	*tmp_wr_idx_shift = 0;

	if (skip_wlen) {
		/* Skip packet found, can be dropped to free some space. */
		MPSC_PBUF_DBG(buffer, "no space: Found skip packet %d len", skip_wlen);

		rd_idx_inc(buffer, skip_wlen);
		buffer->tmp_rd_idx = buffer->rd_idx;
		return true;
	}

	/* Other options for dropping are available only in overwrite mode. */
	if (!(buffer->flags & MPSC_PBUF_MODE_OVERWRITE)) {
		return false;
	}

	uint32_t rd_wlen = buffer->get_wlen(item);

	/* If the packet is busy it needs to be omitted. */
	if (!is_valid(item)) {
		return false;
	} else if (item->hdr.busy) {
		MPSC_PBUF_DBG(buffer, "no space: Found busy packet %p (len:%d)", item, rd_wlen);
		/* Add skip packet before claimed packet. */
		if (free_wlen) {
			add_skip_item(buffer, free_wlen);
			MPSC_PBUF_DBG(buffer, "no space: Added skip packet (len:%d)", free_wlen);
		}
		/* Move all indexes forward, past the claimed packet. */
		buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, rd_wlen);

		/* If allocation wrapped around the buffer and found a busy
		 * packet that was already omitted, skip it again.
		 */
		if (buffer->rd_idx == buffer->tmp_rd_idx) {
			buffer->tmp_rd_idx = idx_inc(buffer, buffer->tmp_rd_idx, rd_wlen);
		}

		buffer->tmp_wr_idx = buffer->tmp_rd_idx;
		buffer->rd_idx = buffer->tmp_rd_idx;
		buffer->flags |= MPSC_PBUF_FULL;
	} else {
		/* Prepare packet dropping. */
		rd_idx_inc(buffer, rd_wlen);
		buffer->tmp_rd_idx = buffer->rd_idx;
		/* Temporarily move tmp_wr_idx forward to ensure that the
		 * packet will not be dropped twice and its content will not
		 * be overwritten.
		 */
		if (free_wlen) {
			/* Mark the free location as invalid to prevent
			 * reading incomplete data.
			 */
			union mpsc_pbuf_generic invalid = {
				.hdr = {
					.valid = 0,
					.busy = 0
				}
			};

			buffer->buf[buffer->tmp_wr_idx] = invalid.raw;
		}

		*tmp_wr_idx_shift = rd_wlen + free_wlen;
		buffer->tmp_wr_idx = idx_inc(buffer, buffer->tmp_wr_idx, *tmp_wr_idx_shift);
		buffer->flags |= MPSC_PBUF_FULL;
		item->hdr.valid = 0;
		*item_to_drop = item;
		MPSC_PBUF_DBG(buffer, "no space: dropping packet %p (len: %d)",
			       item, rd_wlen);
	}

	return true;
}

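/* Finish a drop started by drop_item_locked() once the lock has been taken
 * again. If no other allocation happened in the meantime, tmp_wr_idx is
 * simply restored and the full flag cleared. Otherwise the reserved area is
 * turned into a skip packet so readers step over it.
 */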
static void post_drop_action(struct mpsc_pbuf_buffer *buffer,
			     uint32_t prev_tmp_wr_idx,
			     uint32_t tmp_wr_idx_shift)
{
	uint32_t cmp_tmp_wr_idx = idx_inc(buffer, prev_tmp_wr_idx, tmp_wr_idx_shift);

	if (cmp_tmp_wr_idx == buffer->tmp_wr_idx) {
		/* Operation not interrupted by another alloc. */
		buffer->tmp_wr_idx = prev_tmp_wr_idx;
		buffer->flags &= ~MPSC_PBUF_FULL;
		return;
	}

	/* Operation interrupted, mark the area to be skipped. */
	union mpsc_pbuf_generic skip = {
		.skip = {
			.valid = 0,
			.busy = 1,
			.len = tmp_wr_idx_shift
		}
	};

	buffer->buf[prev_tmp_wr_idx] = skip.raw;
	buffer->wr_idx = idx_inc(buffer,
				 buffer->wr_idx,
				 tmp_wr_idx_shift);
	/* full flag? */
}

void mpsc_pbuf_put_word(struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic item)
{
	bool cont;
	uint32_t free_wlen;
	k_spinlock_key_t key;
	union mpsc_pbuf_generic *dropped_item = NULL;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		key = k_spin_lock(&buffer->lock);

		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		(void)free_space(buffer, &free_wlen);

		MPSC_PBUF_DBG(buffer, "put_word (%d free space)", (int)free_wlen);

		if (free_wlen) {
			buffer->buf[buffer->tmp_wr_idx] = item.raw;
			tmp_wr_idx_inc(buffer, 1);
			cont = false;
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, 1);
			max_utilization_update(buffer);
		} else {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}

union mpsc_pbuf_generic *mpsc_pbuf_alloc(struct mpsc_pbuf_buffer *buffer,
					 size_t wlen, k_timeout_t timeout)
{
	union mpsc_pbuf_generic *item = NULL;
	union mpsc_pbuf_generic *dropped_item = NULL;
	bool cont = true;
	uint32_t free_wlen;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	MPSC_PBUF_DBG(buffer, "alloc %d words", (int)wlen);

	if (wlen > (buffer->size)) {
		MPSC_PBUF_DBG(buffer, "Failed to alloc");
		return NULL;
	}

	do {
		k_spinlock_key_t key;
		bool wrap;

		key = k_spin_lock(&buffer->lock);
		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= wlen) {
			item =
			    (union mpsc_pbuf_generic *)&buffer->buf[buffer->tmp_wr_idx];
			item->hdr.valid = 0;
			item->hdr.busy = 0;
			tmp_wr_idx_inc(buffer, wlen);
			cont = false;
		} else if (wrap) {
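			/* Not enough contiguous space before the end of the
			 * buffer: add a skip packet and retry the allocation
			 * from the beginning of the buffer.
			 */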
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) && !k_is_in_isr()) {
			int err;

			k_spin_unlock(&buffer->lock, key);
			err = k_sem_take(&buffer->sem, timeout);
			key = k_spin_lock(&buffer->lock);
			cont = (err == 0) ? true : false;
		} else if (cont) {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}
		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);

	MPSC_PBUF_DBG(buffer, "allocated %p", item);

	if (IS_ENABLED(CONFIG_MPSC_CLEAR_ALLOCATED) && item) {
		/* During tests, fill with zeros to simplify message comparison. */
		memset(item, 0, sizeof(int) * wlen);
	}

	return item;
}

void mpsc_pbuf_commit(struct mpsc_pbuf_buffer *buffer,
		       union mpsc_pbuf_generic *item)
{
	uint32_t wlen = buffer->get_wlen(item);

	k_spinlock_key_t key = k_spin_lock(&buffer->lock);

	item->hdr.valid = 1;
	buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
	max_utilization_update(buffer);
	k_spin_unlock(&buffer->lock, key);
	MPSC_PBUF_DBG(buffer, "committed %p", item);
}

void mpsc_pbuf_put_word_ext(struct mpsc_pbuf_buffer *buffer,
			    const union mpsc_pbuf_generic item,
			    const void *data)
{
	static const size_t l =
		(sizeof(item) + sizeof(data)) / sizeof(uint32_t);
	union mpsc_pbuf_generic *dropped_item = NULL;
	bool cont;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		k_spinlock_key_t key;
		uint32_t free_wlen;
		bool wrap;

		key = k_spin_lock(&buffer->lock);

		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= l) {
			buffer->buf[buffer->tmp_wr_idx] = item.raw;
			void **p =
				(void **)&buffer->buf[buffer->tmp_wr_idx + 1];

			*p = (void *)data;
			tmp_wr_idx_inc(buffer, l);
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, l);
			cont = false;
			max_utilization_update(buffer);
		} else if (wrap) {
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						 &dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}

void mpsc_pbuf_put_data(struct mpsc_pbuf_buffer *buffer, const uint32_t *data,
			size_t wlen)
{
	bool cont;
	union mpsc_pbuf_generic *dropped_item = NULL;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		uint32_t free_wlen;
		k_spinlock_key_t key;
		bool wrap;

		key = k_spin_lock(&buffer->lock);

		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= wlen) {
			memcpy(&buffer->buf[buffer->tmp_wr_idx], data,
				wlen * sizeof(uint32_t));
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
			tmp_wr_idx_inc(buffer, wlen);
			cont = false;
			max_utilization_update(buffer);
		} else if (wrap) {
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else {
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						 &dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			dropped_item->hdr.valid = 0;
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}

const union mpsc_pbuf_generic *mpsc_pbuf_claim(struct mpsc_pbuf_buffer *buffer)
{
	union mpsc_pbuf_generic *item;
	bool cont;

	do {
		uint32_t a;
		k_spinlock_key_t key;

		cont = false;
		key = k_spin_lock(&buffer->lock);
		(void)available(buffer, &a);
		item = (union mpsc_pbuf_generic *)
			&buffer->buf[buffer->tmp_rd_idx];

		if (!a || is_invalid(item)) {
			MPSC_PBUF_DBG(buffer, "invalid claim %d: %p", a, item);
			item = NULL;
		} else {
			uint32_t skip = get_skip(item);

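			/* Skip packets, and packets whose valid bit has been
			 * cleared, are consumed here; the loop continues until
			 * a valid packet is found or no data is left.
			 */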
			if (skip || !is_valid(item)) {
				uint32_t inc =
					skip ? skip : buffer->get_wlen(item);

				buffer->tmp_rd_idx =
				      idx_inc(buffer, buffer->tmp_rd_idx, inc);
				rd_idx_inc(buffer, inc);
				cont = true;
			} else {
				item->hdr.busy = 1;
				buffer->tmp_rd_idx =
					idx_inc(buffer, buffer->tmp_rd_idx,
						buffer->get_wlen(item));
			}
		}

		if (!cont) {
			MPSC_PBUF_DBG(buffer, ">>claimed %d: %p", a, item);
		}
		k_spin_unlock(&buffer->lock, key);
	} while (cont);

	return item;
}

void mpsc_pbuf_free(struct mpsc_pbuf_buffer *buffer,
		     const union mpsc_pbuf_generic *item)
{
	uint32_t wlen = buffer->get_wlen(item);
	k_spinlock_key_t key = k_spin_lock(&buffer->lock);
	union mpsc_pbuf_generic *witem = (union mpsc_pbuf_generic *)item;

	witem->hdr.valid = 0;
	if (!(buffer->flags & MPSC_PBUF_MODE_OVERWRITE) ||
		 ((uint32_t *)item == &buffer->buf[buffer->rd_idx])) {
		witem->hdr.busy = 0;
		if (buffer->rd_idx == buffer->tmp_rd_idx) {
			/* There is a chance that so many new packets were
			 * added between claim and free that rd_idx points
			 * again at the claimed item; tmp_rd_idx then points at
			 * the same location. In that case also increment
			 * tmp_rd_idx, which marks the freed packet as the only
			 * free space in the buffer.
			 */
			buffer->tmp_rd_idx = idx_inc(buffer, buffer->tmp_rd_idx, wlen);
		}
		rd_idx_inc(buffer, wlen);
	} else {
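		/* The packet is no longer at the read index (indexes were
		 * moved past it while it was claimed), so it cannot be
		 * released in place. Convert it into a skip packet instead so
		 * that the space can be reclaimed later.
		 */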
		MPSC_PBUF_DBG(buffer, "Allocation occurred during claim");
		witem->skip.len = wlen;
	}
	MPSC_PBUF_DBG(buffer, "<<freed: %p", item);

	k_spin_unlock(&buffer->lock, key);
	k_sem_give(&buffer->sem);
}

bool mpsc_pbuf_is_pending(struct mpsc_pbuf_buffer *buffer)
{
	uint32_t a;

	(void)available(buffer, &a);

	return a ? true : false;
}

void mpsc_pbuf_get_utilization(struct mpsc_pbuf_buffer *buffer,
			       uint32_t *size, uint32_t *now)
{
	/* One word is left for full/empty distinction. */
	*size = (buffer->size - 1) * sizeof(int);
	*now = get_usage(buffer) * sizeof(int);
}

int mpsc_pbuf_get_max_utilization(struct mpsc_pbuf_buffer *buffer, uint32_t *max)
{
	if (!(buffer->flags & MPSC_PBUF_MAX_UTILIZATION)) {
		return -ENOTSUP;
	}

	*max = buffer->max_usage * sizeof(int);
	return 0;
}