1 /*
2  * Copyright (c) 2021 Nordic Semiconductor
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/sys/mpsc_pbuf.h>
7 
8 #define MPSC_PBUF_DEBUG 0
9 
/* Debug helper: print a formatted message followed by the buffer index state.
 * Entire body is dead-code eliminated when MPSC_PBUF_DEBUG is 0.
 */
#define MPSC_PBUF_DBG(buffer, ...) do { \
	if (MPSC_PBUF_DEBUG) { \
		printk(__VA_ARGS__); \
		if (buffer) { \
			mpsc_state_print(buffer); \
		} \
	} \
} while (false)
18 
mpsc_state_print(struct mpsc_pbuf_buffer * buffer)19 static inline void mpsc_state_print(struct mpsc_pbuf_buffer *buffer)
20 {
21 	if (MPSC_PBUF_DEBUG) {
22 		printk(", wr:%d/%d, rd:%d/%d\n",
23 			buffer->wr_idx, buffer->tmp_wr_idx,
24 			buffer->rd_idx, buffer->tmp_rd_idx);
25 	}
26 }
27 
mpsc_pbuf_init(struct mpsc_pbuf_buffer * buffer,const struct mpsc_pbuf_buffer_config * cfg)28 void mpsc_pbuf_init(struct mpsc_pbuf_buffer *buffer,
29 		    const struct mpsc_pbuf_buffer_config *cfg)
30 {
31 	memset(buffer, 0, offsetof(struct mpsc_pbuf_buffer, buf));
32 	buffer->get_wlen = cfg->get_wlen;
33 	buffer->notify_drop = cfg->notify_drop;
34 	buffer->buf = cfg->buf;
35 	buffer->size = cfg->size;
36 	buffer->max_usage = 0;
37 	buffer->flags = cfg->flags;
38 
39 	if (is_power_of_two(buffer->size)) {
40 		buffer->flags |= MPSC_PBUF_SIZE_POW2;
41 	}
42 
43 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
44 		int err;
45 
46 		err = k_sem_init(&buffer->sem, 0, 1);
47 		__ASSERT_NO_MSG(err == 0);
48 		ARG_UNUSED(err);
49 	}
50 }
51 
/* Calculate free space available or till end of buffer.
 *
 * @param buffer Buffer.
 * @param[out] res Destination where free space is written.
 *
 * @retval true when space was calculated until end of buffer (and there might
 * be more space available after wrapping).
 * @retval false When result is total free space.
 */
free_space(struct mpsc_pbuf_buffer * buffer,uint32_t * res)61 static inline bool free_space(struct mpsc_pbuf_buffer *buffer, uint32_t *res)
62 {
63 	if (buffer->flags & MPSC_PBUF_FULL) {
64 		*res = 0;
65 		return false;
66 	}
67 
68 	if (buffer->rd_idx > buffer->tmp_wr_idx) {
69 		*res =  buffer->rd_idx - buffer->tmp_wr_idx;
70 		return false;
71 	}
72 	*res = buffer->size - buffer->tmp_wr_idx;
73 
74 	return true;
75 }
76 
/* Get amount of valid data.
 *
 * @param buffer Buffer.
 * @param[out] res Destination where amount of available data is written.
 *
 * @retval true when amount was calculated until end of buffer (and there might
 * be more data available after wrapping).
 * @retval false When result is total amount of available data.
 */
available(struct mpsc_pbuf_buffer * buffer,uint32_t * res)86 static inline bool available(struct mpsc_pbuf_buffer *buffer, uint32_t *res)
87 {
88 	if (buffer->flags & MPSC_PBUF_FULL || buffer->tmp_rd_idx > buffer->wr_idx) {
89 		*res = buffer->size - buffer->tmp_rd_idx;
90 		return true;
91 	}
92 
93 	*res = (buffer->wr_idx - buffer->tmp_rd_idx);
94 
95 	return false;
96 }
97 
/* Get the number of words currently in use.
 *
 * Derived from free_space(): when free space wraps, the free words at the
 * beginning of the buffer are added to the tally before subtracting from the
 * usable capacity (size - 1, one word reserved for full/empty distinction).
 */
static inline uint32_t get_usage(struct mpsc_pbuf_buffer *buffer)
{
	uint32_t f;

	if (free_space(buffer, &f)) {
		/* NOTE(review): adds rd_idx - 1, presumably folding the
		 * reserved word into the wrapped free region — TODO confirm.
		 */
		f += (buffer->rd_idx - 1);
	}

	return buffer->size - 1 - f;
}
108 
max_utilization_update(struct mpsc_pbuf_buffer * buffer)109 static inline void max_utilization_update(struct mpsc_pbuf_buffer *buffer)
110 {
111 	if (!(buffer->flags & MPSC_PBUF_MAX_UTILIZATION)) {
112 		return;
113 	}
114 
115 	buffer->max_usage = MAX(buffer->max_usage, get_usage(buffer));
116 }
117 
is_valid(union mpsc_pbuf_generic * item)118 static inline bool is_valid(union mpsc_pbuf_generic *item)
119 {
120 	return item->hdr.valid;
121 }
122 
is_invalid(union mpsc_pbuf_generic * item)123 static inline bool is_invalid(union mpsc_pbuf_generic *item)
124 {
125 	return !item->hdr.valid && !item->hdr.busy;
126 }
127 
idx_inc(struct mpsc_pbuf_buffer * buffer,uint32_t idx,int32_t val)128 static inline uint32_t idx_inc(struct mpsc_pbuf_buffer *buffer,
129 				uint32_t idx, int32_t val)
130 {
131 	uint32_t i = idx + val;
132 
133 	if (buffer->flags & MPSC_PBUF_SIZE_POW2) {
134 		return i & (buffer->size - 1);
135 	}
136 
137 	return (i >= buffer->size) ? i - buffer->size : i;
138 }
139 
get_skip(union mpsc_pbuf_generic * item)140 static inline uint32_t get_skip(union mpsc_pbuf_generic *item)
141 {
142 	if (item->hdr.busy && !item->hdr.valid) {
143 		return item->skip.len;
144 	}
145 
146 	return 0;
147 }
148 
149 
tmp_wr_idx_inc(struct mpsc_pbuf_buffer * buffer,int32_t wlen)150 static ALWAYS_INLINE void tmp_wr_idx_inc(struct mpsc_pbuf_buffer *buffer, int32_t wlen)
151 {
152 	buffer->tmp_wr_idx = idx_inc(buffer, buffer->tmp_wr_idx, wlen);
153 	if (buffer->tmp_wr_idx == buffer->rd_idx) {
154 		buffer->flags |= MPSC_PBUF_FULL;
155 	}
156 }
157 
/* Advance the read index past consumed data; consuming always clears the
 * full flag.
 */
static void rd_idx_inc(struct mpsc_pbuf_buffer *buffer, int32_t wlen)
{
	buffer->rd_idx = idx_inc(buffer, buffer->rd_idx, wlen);
	buffer->flags &= ~MPSC_PBUF_FULL;
}
163 
add_skip_item(struct mpsc_pbuf_buffer * buffer,uint32_t wlen)164 static void add_skip_item(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
165 {
166 	union mpsc_pbuf_generic skip = {
167 		.skip = { .valid = 0, .busy = 1, .len = wlen }
168 	};
169 
170 	buffer->buf[buffer->tmp_wr_idx] = skip.raw;
171 	tmp_wr_idx_inc(buffer, wlen);
172 	buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
173 }
174 
/* Attempt to reclaim space by dropping the oldest packet. Must be called with
 * the buffer lock held.
 *
 * A skip packet at the read index is consumed immediately. Everything else is
 * only possible in overwrite mode: a committed (valid) packet is dropped and
 * returned via @p item_to_drop so the caller can run the drop notification
 * outside of the lock; a busy (claimed) packet is jumped over.
 *
 * @param buffer Buffer.
 * @param free_wlen Contiguous free space (words) up to the end of the buffer.
 * @param[out] item_to_drop Dropped packet to be reported, or NULL.
 * @param[out] tmp_wr_idx_shift Words by which tmp_wr_idx was temporarily
 *	       advanced; caller must later settle it via post_drop_action().
 *
 * @retval true A packet was dropped or skipped; caller should retry.
 * @retval false No progress could be made.
 */
static bool drop_item_locked(struct mpsc_pbuf_buffer *buffer,
			     uint32_t free_wlen,
			     union mpsc_pbuf_generic **item_to_drop,
			     uint32_t *tmp_wr_idx_shift)
{
	union mpsc_pbuf_generic *item;
	uint32_t skip_wlen;

	item = (union mpsc_pbuf_generic *)&buffer->buf[buffer->rd_idx];
	skip_wlen = get_skip(item);
	*item_to_drop = NULL;
	*tmp_wr_idx_shift = 0;

	if (skip_wlen) {
		/* Skip packet found, can be dropped to free some space */
		MPSC_PBUF_DBG(buffer, "no space: Found skip packet %d len", skip_wlen);

		rd_idx_inc(buffer, skip_wlen);
		buffer->tmp_rd_idx = buffer->rd_idx;
		return true;
	}

	/* Other options for dropping available only in overwrite mode. */
	if (!(buffer->flags & MPSC_PBUF_MODE_OVERWRITE)) {
		return false;
	}

	uint32_t rd_wlen = buffer->get_wlen(item);

	/* If packet is busy need to be omitted. */
	if (!is_valid(item)) {
		/* Uncommitted allocation at the read index - cannot drop. */
		return false;
	} else if (item->hdr.busy) {
		bool ret = true;

		MPSC_PBUF_DBG(buffer, "no space: Found busy packet %p (len:%d)", item, rd_wlen);
		/* Add skip packet before claimed packet. */
		if (free_wlen) {
			add_skip_item(buffer, free_wlen);
			MPSC_PBUF_DBG(buffer, "no space: Added skip packet (len:%d)", free_wlen);
		}
		/* Move all indexes forward, after claimed packet. */
		buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, rd_wlen);

		/* If allocation wrapped around the buffer and found busy packet
		 * that was already omitted, skip it again and indicate that no
		 * packet was dropped.
		 */
		if (buffer->rd_idx == buffer->tmp_rd_idx) {
			buffer->tmp_rd_idx = idx_inc(buffer, buffer->tmp_rd_idx, rd_wlen);
			ret = false;
		}

		buffer->tmp_wr_idx = buffer->tmp_rd_idx;
		buffer->rd_idx = buffer->tmp_rd_idx;
		buffer->flags |= MPSC_PBUF_FULL;
		return ret;
	} else {
		/* Prepare packet dropping. */
		rd_idx_inc(buffer, rd_wlen);
		buffer->tmp_rd_idx = buffer->rd_idx;
		/* Temporary move tmp_wr idx forward to ensure that packet
		 * will not be dropped twice and content will not be
		 * overwritten.
		 */
		if (free_wlen) {
			/* Free location mark as invalid to prevent
			 * reading incomplete data.
			 */
			union mpsc_pbuf_generic invalid = {
				.hdr = {
					.valid = 0,
					.busy = 0
				}
			};

			buffer->buf[buffer->tmp_wr_idx] = invalid.raw;
		}

		*tmp_wr_idx_shift = rd_wlen + free_wlen;
		buffer->tmp_wr_idx = idx_inc(buffer, buffer->tmp_wr_idx, *tmp_wr_idx_shift);
		buffer->flags |= MPSC_PBUF_FULL;
		item->hdr.valid = 0;
		*item_to_drop = item;
		MPSC_PBUF_DBG(buffer, "no space: dropping packet %p (len: %d)",
			       item, rd_wlen);
	}

	return true;
}
265 
/* Settle a drop that temporarily advanced tmp_wr_idx. Must be called with the
 * buffer lock held, after the drop notification ran outside of the lock.
 *
 * If no other allocation happened in the meantime, tmp_wr_idx is restored so
 * the reclaimed area can be reused. Otherwise the reserved area can no longer
 * be handed out and is converted into a skip packet.
 *
 * @param buffer Buffer.
 * @param prev_tmp_wr_idx tmp_wr_idx value captured before the drop.
 * @param tmp_wr_idx_shift Words by which the drop advanced tmp_wr_idx.
 */
static void post_drop_action(struct mpsc_pbuf_buffer *buffer,
			     uint32_t prev_tmp_wr_idx,
			     uint32_t tmp_wr_idx_shift)
{
	uint32_t cmp_tmp_wr_idx = idx_inc(buffer, prev_tmp_wr_idx, tmp_wr_idx_shift);

	if (cmp_tmp_wr_idx == buffer->tmp_wr_idx) {
		/* Operation not interrupted by another alloc. */
		buffer->tmp_wr_idx = prev_tmp_wr_idx;
		buffer->flags &= ~MPSC_PBUF_FULL;
		return;
	}

	/* Operation interrupted, mark area as to be skipped. */
	union mpsc_pbuf_generic skip = {
		.skip = {
			.valid = 0,
			.busy = 1,
			.len = tmp_wr_idx_shift
		}
	};

	buffer->buf[prev_tmp_wr_idx] = skip.raw;
	buffer->wr_idx = idx_inc(buffer,
				 buffer->wr_idx,
				 tmp_wr_idx_shift);
	/* full flag? */
}
294 
/* Put a single-word packet into the buffer.
 *
 * If there is no space and the buffer is in overwrite mode, oldest packets
 * are dropped (with notification outside of the lock) until the word fits.
 */
void mpsc_pbuf_put_word(struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic item)
{
	bool cont;
	uint32_t free_wlen;
	k_spinlock_key_t key;
	union mpsc_pbuf_generic *dropped_item = NULL;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		key = k_spin_lock(&buffer->lock);

		/* Settle a drop performed in the previous iteration (its
		 * notification ran with the lock released).
		 */
		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		(void)free_space(buffer, &free_wlen);

		MPSC_PBUF_DBG(buffer, "put_word (%d free space)", (int)free_wlen);

		if (free_wlen) {
			/* A single word fits in any non-empty free region,
			 * so no wrap handling is needed here.
			 */
			buffer->buf[buffer->tmp_wr_idx] = item.raw;
			tmp_wr_idx_inc(buffer, 1);
			cont = false;
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, 1);
			max_utilization_update(buffer);
		} else {
			/* No space - try to drop the oldest packet. */
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}
340 
/* Allocate space for a packet of @p wlen words.
 *
 * When contiguous space is short of the request, a skip packet covers the
 * remainder at the buffer end (wrap), the caller may pend on the semaphore
 * (when context and @p timeout allow), or the oldest packets are dropped
 * (overwrite mode). The returned area must be published with
 * mpsc_pbuf_commit(). Returns NULL when the request exceeds the buffer
 * capacity or space could not be obtained.
 */
union mpsc_pbuf_generic *mpsc_pbuf_alloc(struct mpsc_pbuf_buffer *buffer,
					 size_t wlen, k_timeout_t timeout)
{
	union mpsc_pbuf_generic *item = NULL;
	union mpsc_pbuf_generic *dropped_item = NULL;
	bool cont = true;
	uint32_t free_wlen;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	MPSC_PBUF_DBG(buffer, "alloc %d words", (int)wlen);

	if (wlen > (buffer->size)) {
		/* Request can never be satisfied. */
		MPSC_PBUF_DBG(buffer, "Failed to alloc");
		return NULL;
	}

	do {
		k_spinlock_key_t key;
		bool wrap;

		key = k_spin_lock(&buffer->lock);
		/* Settle a drop performed in the previous iteration. */
		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= wlen) {
			/* Reserve the area; it is not visible to the consumer
			 * until committed (valid and busy cleared here).
			 */
			item =
			    (union mpsc_pbuf_generic *)&buffer->buf[buffer->tmp_wr_idx];
			item->hdr.valid = 0;
			item->hdr.busy = 0;
			tmp_wr_idx_inc(buffer, wlen);
			cont = false;
		} else if (wrap) {
			/* Not enough room before the buffer end; mark it to be
			 * skipped and retry from the beginning.
			 */
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else if (IS_ENABLED(CONFIG_MULTITHREADING) && !K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
			   !k_is_in_isr() && arch_irq_unlocked(key.key)) {
			int err;

			/* Thread context with interrupts unlocked: wait for
			 * the consumer to free space.
			 */
			k_spin_unlock(&buffer->lock, key);
			err = k_sem_take(&buffer->sem, timeout);
			key = k_spin_lock(&buffer->lock);
			cont = (err == 0) ? true : false;
		} else if (cont) {
			/* Last resort: drop the oldest packet. */
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						&dropped_item, &tmp_wr_idx_shift);
		}
		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);


	MPSC_PBUF_DBG(buffer, "allocated %p", item);

	if (IS_ENABLED(CONFIG_MPSC_CLEAR_ALLOCATED) && item) {
		/* During test fill with 0's to simplify message comparison */
		memset(item, 0, sizeof(int) * wlen);
	}

	return item;
}
414 
mpsc_pbuf_commit(struct mpsc_pbuf_buffer * buffer,union mpsc_pbuf_generic * item)415 void mpsc_pbuf_commit(struct mpsc_pbuf_buffer *buffer,
416 		       union mpsc_pbuf_generic *item)
417 {
418 	uint32_t wlen = buffer->get_wlen(item);
419 
420 	k_spinlock_key_t key = k_spin_lock(&buffer->lock);
421 
422 	item->hdr.valid = 1;
423 	buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
424 	max_utilization_update(buffer);
425 	k_spin_unlock(&buffer->lock, key);
426 	MPSC_PBUF_DBG(buffer, "committed %p", item);
427 }
428 
/* Put a packet consisting of a header word and a pointer into the buffer.
 *
 * Packet length (in words) is derived from the sizes of the header union and
 * a data pointer. Follows the same wrap/drop/retry scheme as the other put
 * functions.
 */
void mpsc_pbuf_put_word_ext(struct mpsc_pbuf_buffer *buffer,
			    const union mpsc_pbuf_generic item,
			    const void *data)
{
	/* Packet word length: header word(s) plus a pointer. */
	static const size_t l =
		(sizeof(item) + sizeof(data)) / sizeof(uint32_t);
	union mpsc_pbuf_generic *dropped_item = NULL;
	bool cont;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		k_spinlock_key_t key;
		uint32_t free_wlen;
		bool wrap;

		key = k_spin_lock(&buffer->lock);

		/* Settle a drop performed in the previous iteration. */
		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= l) {
			/* Store header word, then the pointer right after. */
			buffer->buf[buffer->tmp_wr_idx] = item.raw;
			void **p =
				(void **)&buffer->buf[buffer->tmp_wr_idx + 1];

			*p = (void *)data;
			tmp_wr_idx_inc(buffer, l);
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, l);
			cont = false;
			max_utilization_update(buffer);
		} else if (wrap) {
			/* Skip the remainder at the end and retry. */
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else {
			/* No space - try to drop the oldest packet. */
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						 &dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}
484 
/* Copy a complete packet of @p wlen words into the buffer.
 *
 * The first word of @p data is expected to be a packet header (it is marked
 * invalid before the drop notification). Follows the same wrap/drop/retry
 * scheme as the other put functions.
 */
void mpsc_pbuf_put_data(struct mpsc_pbuf_buffer *buffer, const uint32_t *data,
			size_t wlen)
{
	bool cont;
	union mpsc_pbuf_generic *dropped_item = NULL;
	uint32_t tmp_wr_idx_shift = 0;
	uint32_t tmp_wr_idx_val = 0;

	do {
		uint32_t free_wlen;
		k_spinlock_key_t key;
		bool wrap;

		key = k_spin_lock(&buffer->lock);

		/* Settle a drop performed in the previous iteration. */
		if (tmp_wr_idx_shift) {
			post_drop_action(buffer, tmp_wr_idx_val, tmp_wr_idx_shift);
			tmp_wr_idx_shift = 0;
		}

		wrap = free_space(buffer, &free_wlen);

		if (free_wlen >= wlen) {
			memcpy(&buffer->buf[buffer->tmp_wr_idx], data,
				wlen * sizeof(uint32_t));
			buffer->wr_idx = idx_inc(buffer, buffer->wr_idx, wlen);
			tmp_wr_idx_inc(buffer, wlen);
			cont = false;
			max_utilization_update(buffer);
		} else if (wrap) {
			/* Skip the remainder at the end and retry. */
			add_skip_item(buffer, free_wlen);
			cont = true;
		} else {
			/* No space - try to drop the oldest packet. */
			tmp_wr_idx_val = buffer->tmp_wr_idx;
			cont = drop_item_locked(buffer, free_wlen,
						 &dropped_item, &tmp_wr_idx_shift);
		}

		k_spin_unlock(&buffer->lock, key);

		if (dropped_item) {
			/* Notify about item being dropped. */
			dropped_item->hdr.valid = 0;
			if (buffer->notify_drop) {
				buffer->notify_drop(buffer, dropped_item);
			}
			dropped_item = NULL;
		}
	} while (cont);
}
535 
/* Claim the oldest committed packet for processing.
 *
 * Skip packets and packets invalidated by the producer are consumed
 * transparently. The returned packet is marked busy and must be released
 * with mpsc_pbuf_free(). Returns NULL when there is nothing to claim.
 */
const union mpsc_pbuf_generic *mpsc_pbuf_claim(struct mpsc_pbuf_buffer *buffer)
{
	union mpsc_pbuf_generic *item;
	bool cont;

	do {
		uint32_t a;
		k_spinlock_key_t key;

		cont = false;
		key = k_spin_lock(&buffer->lock);
		(void)available(buffer, &a);
		item = (union mpsc_pbuf_generic *)
			&buffer->buf[buffer->tmp_rd_idx];

		if (!a || is_invalid(item)) {
			/* Nothing committed at the read position. */
			MPSC_PBUF_DBG(buffer, "invalid claim %d: %p", a, item);
			item = NULL;
		} else {
			uint32_t skip = get_skip(item);

			if (skip || !is_valid(item)) {
				/* Skip packet or dropped packet - consume it
				 * and look at the next one.
				 */
				uint32_t inc =
					skip ? skip : buffer->get_wlen(item);

				buffer->tmp_rd_idx =
				      idx_inc(buffer, buffer->tmp_rd_idx, inc);
				rd_idx_inc(buffer, inc);
				cont = true;
			} else {
				/* Claim: mark busy and move the temporary
				 * read index past the packet.
				 */
				item->hdr.busy = 1;
				buffer->tmp_rd_idx =
					idx_inc(buffer, buffer->tmp_rd_idx,
						buffer->get_wlen(item));
			}
		}

		if (!cont) {
			MPSC_PBUF_DBG(buffer, ">>claimed %d: %p", a, item);
		}
		k_spin_unlock(&buffer->lock, key);
	} while (cont);

	return item;
}
581 
/* Release a packet previously obtained via mpsc_pbuf_claim().
 *
 * Normally the packet area is freed right away (read index advances past it).
 * In overwrite mode, when producers moved the read index past the claimed
 * packet while it was being processed, the area cannot be freed in order and
 * is turned into a skip packet instead. Finally a semaphore give wakes a
 * producer that may be pending on free space.
 */
void mpsc_pbuf_free(struct mpsc_pbuf_buffer *buffer,
		     const union mpsc_pbuf_generic *item)
{
	uint32_t wlen = buffer->get_wlen(item);
	k_spinlock_key_t key = k_spin_lock(&buffer->lock);
	union mpsc_pbuf_generic *witem = (union mpsc_pbuf_generic *)item;

	witem->hdr.valid = 0;
	if (!(buffer->flags & MPSC_PBUF_MODE_OVERWRITE) ||
		 ((uint32_t *)item == &buffer->buf[buffer->rd_idx])) {
		witem->hdr.busy = 0;
		if (buffer->rd_idx == buffer->tmp_rd_idx) {
			/* There is a chance that there are so many new packets
			 * added between claim and free that rd_idx points again
			 * at claimed item. In that case tmp_rd_idx points at
			 * the same location. In that case increment also tmp_rd_idx
			 * which will mark freed buffer as the only free space in
			 * the buffer.
			 */
			buffer->tmp_rd_idx = idx_inc(buffer, buffer->tmp_rd_idx, wlen);
		}
		rd_idx_inc(buffer, wlen);
	} else {
		MPSC_PBUF_DBG(buffer, "Allocation occurred during claim");
		witem->skip.len = wlen;
	}
	MPSC_PBUF_DBG(buffer, "<<freed: %p", item);

	k_spin_unlock(&buffer->lock, key);
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_sem_give(&buffer->sem);
	}
}
615 
mpsc_pbuf_is_pending(struct mpsc_pbuf_buffer * buffer)616 bool mpsc_pbuf_is_pending(struct mpsc_pbuf_buffer *buffer)
617 {
618 	uint32_t a;
619 	k_spinlock_key_t key = k_spin_lock(&buffer->lock);
620 
621 	(void)available(buffer, &a);
622 	k_spin_unlock(&buffer->lock, key);
623 
624 	return a ? true : false;
625 }
626 
mpsc_pbuf_get_utilization(struct mpsc_pbuf_buffer * buffer,uint32_t * size,uint32_t * now)627 void mpsc_pbuf_get_utilization(struct mpsc_pbuf_buffer *buffer,
628 			       uint32_t *size, uint32_t *now)
629 {
630 	k_spinlock_key_t key = k_spin_lock(&buffer->lock);
631 
632 	/* One byte is left for full/empty distinction. */
633 	*size = (buffer->size - 1) * sizeof(int);
634 	*now = get_usage(buffer) * sizeof(int);
635 
636 	k_spin_unlock(&buffer->lock, key);
637 }
638 
mpsc_pbuf_get_max_utilization(struct mpsc_pbuf_buffer * buffer,uint32_t * max)639 int mpsc_pbuf_get_max_utilization(struct mpsc_pbuf_buffer *buffer, uint32_t *max)
640 {
641 	int rc;
642 	k_spinlock_key_t key = k_spin_lock(&buffer->lock);
643 
644 	if (buffer->flags & MPSC_PBUF_MAX_UTILIZATION) {
645 		*max = buffer->max_usage * sizeof(int);
646 		rc = 0;
647 	} else {
648 		rc = -ENOTSUP;
649 	}
650 
651 	k_spin_unlock(&buffer->lock, key);
652 
653 	return rc;
654 }
655