/* buf.c - Buffer management */

/*
 * Copyright (c) 2015-2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_MODULE_NAME net_buf
#define LOG_LEVEL CONFIG_NET_BUF_LOG_LEVEL

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/net_buf.h>

#if defined(CONFIG_NET_BUF_LOG)
#define NET_BUF_DBG(fmt, ...) LOG_DBG("(%p) " fmt, k_current_get(), \
				      ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#else

#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_LOG */

#define NET_BUF_ASSERT(cond, ...) __ASSERT(cond, "" __VA_ARGS__)

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

/* Linker-defined symbol bound to the static pool structs */
STRUCT_SECTION_START_EXTERN(net_buf_pool);
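
/* Pools end up in this iterable section via the NET_BUF_POOL_*_DEFINE()
 * macros. Illustrative sketch only; the pool name and sizes below are
 * hypothetical:
 *
 *	NET_BUF_POOL_DEFINE(my_pool, 8, 128, 0, NULL);
 *
 *	STRUCT_SECTION_FOREACH(net_buf_pool, pool) {
 *		printk("pool %p has %d buffers\n", pool, pool->buf_count);
 *	}
 */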

struct net_buf_pool *net_buf_pool_get(int id)
{
	struct net_buf_pool *pool;

	STRUCT_SECTION_GET(net_buf_pool, id, &pool);

	return pool;
}

static int pool_id(struct net_buf_pool *pool)
{
	return pool - TYPE_SECTION_START(net_buf_pool);
}

int net_buf_id(const struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				__alignof__(struct net_buf));
	ptrdiff_t offset = (uint8_t *)buf - (uint8_t *)pool->__bufs;

	return offset / struct_size;
}
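
/* Worked example of the index math above (sizes are illustrative): with
 * sizeof(struct net_buf) == 32, user_data_size == 4 and an 8-byte
 * alignment requirement, struct_size rounds up to 40, so the buffer at
 * byte offset 80 from pool->__bufs has id 2.
 */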

static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
					      uint16_t uninit_count)
{
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				__alignof__(struct net_buf));
	size_t byte_offset = (pool->buf_count - uninit_count) * struct_size;
	struct net_buf *buf;

	buf = (struct net_buf *)(((uint8_t *)pool->__bufs) + byte_offset);

	buf->pool_id = pool_id(pool);
	buf->user_data_size = pool->user_data_size;

	return buf;
}

void net_buf_reset(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf->flags == 0U);
	__ASSERT_NO_MSG(buf->frags == NULL);

	net_buf_simple_reset(&buf->b);
}

static uint8_t *generic_data_ref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	(*ref_count)++;

	return data;
}

static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
				 k_timeout_t timeout)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	/* Reserve a pointer-sized prefix to hold the ref-count (uint8_t) */
	void *b = k_heap_alloc(pool, sizeof(void *) + *size, timeout);

	if (b == NULL) {
		return NULL;
	}

	ref_count = (uint8_t *)b;
	*ref_count = 1U;

	/* Return the data area that follows the ref-count prefix */
	return ref_count + sizeof(void *);
}

static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	if (--(*ref_count)) {
		return;
	}

	/* Last reference dropped: free the block, including the ref-count prefix */
	k_heap_free(pool, ref_count);
}

const struct net_buf_data_cb net_buf_var_cb = {
	.alloc = mem_pool_data_alloc,
	.ref   = generic_data_ref,
	.unref = mem_pool_data_unref,
};
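
/* These callbacks back variable-size pools. Illustrative sketch only; the
 * pool name and sizes are hypothetical, and payload is assumed to point at
 * 200 bytes of source data:
 *
 *	NET_BUF_POOL_VAR_DEFINE(var_pool, 4, 1024, 0, NULL);
 *
 *	struct net_buf *buf = net_buf_alloc_len(&var_pool, 200, K_NO_WAIT);
 *
 *	if (buf) {
 *		net_buf_add_mem(buf, payload, 200);
 *		net_buf_unref(buf);
 *	}
 */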

static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
			      k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	*size = pool->alloc->max_alloc_size;

	return fixed->data_pool + *size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
	/* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
	.alloc = fixed_data_alloc,
	.unref = fixed_data_unref,
};
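
/* These callbacks back fixed-size pools, where each buffer gets a static
 * slice of a contiguous data area indexed by net_buf_id(). Illustrative
 * sketch only; the pool name and sizes are hypothetical:
 *
 *	NET_BUF_POOL_FIXED_DEFINE(rx_pool, 8, 256, 0, NULL);
 *
 *	struct net_buf *buf = net_buf_alloc(&rx_pool, K_FOREVER);
 *
 *	net_buf_add_u8(buf, 0x01);
 *	net_buf_unref(buf);
 */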

#if (K_HEAP_MEM_POOL_SIZE > 0)

static uint8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
			     k_timeout_t timeout)
{
	uint8_t *ref_count;

	ref_count = k_malloc(sizeof(void *) + *size);
	if (!ref_count) {
		return NULL;
	}

	*ref_count = 1U;

	return ref_count + sizeof(void *);
}

static void heap_data_unref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	if (--(*ref_count)) {
		return;
	}

	k_free(ref_count);
}

static const struct net_buf_data_cb net_buf_heap_cb = {
	.alloc = heap_data_alloc,
	.ref   = generic_data_ref,
	.unref = heap_data_unref,
};

const struct net_buf_data_alloc net_buf_heap_alloc = {
	.cb = &net_buf_heap_cb,
	.max_alloc_size = 0,
};

#endif /* K_HEAP_MEM_POOL_SIZE > 0 */

static uint8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->alloc(buf, size, timeout);
}

static uint8_t *data_ref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->ref(buf, data);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
					k_timeout_t timeout, const char *func,
					int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
				  k_timeout_t timeout)
#endif
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(pool);

	NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size);

	/* We need to prevent race conditions
	 * when accessing pool->uninit_count.
	 */
	key = k_spin_lock(&pool->lock);

	/* If there are uninitialized buffers we're guaranteed to succeed
	 * with the allocation one way or another.
	 */
	if (pool->uninit_count) {
		uint16_t uninit_count;

		/* If this is not the first access to the pool, we can
		 * be opportunistic and try to fetch a previously used
		 * buffer from the LIFO with K_NO_WAIT.
		 */
		if (pool->uninit_count < pool->buf_count) {
			buf = k_lifo_get(&pool->free, K_NO_WAIT);
			if (buf) {
				k_spin_unlock(&pool->lock, key);
				goto success;
			}
		}

		uninit_count = pool->uninit_count--;
		k_spin_unlock(&pool->lock, key);

		buf = pool_get_uninit(pool, uninit_count);
		goto success;
	}

	k_spin_unlock(&pool->lock, key);

#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		uint32_t ref = k_uptime_get_32();
		buf = k_lifo_get(&pool->free, K_NO_WAIT);
		while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
				     func, line, pool->name);
#else
			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
				     func, line, pool);
#endif
			buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
				     func, line, pool->name,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
			NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
				     func, line, pool,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
		}
	} else {
		buf = k_lifo_get(&pool->free, timeout);
	}
#else
	buf = k_lifo_get(&pool->free, timeout);
#endif
	if (!buf) {
		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
		return NULL;
	}

success:
	NET_BUF_DBG("allocated buf %p", buf);

	if (size) {
#if __ASSERT_ON
		size_t req_size = size;
#endif
		timeout = sys_timepoint_timeout(end);
		buf->__buf = data_alloc(buf, &size, timeout);
		if (!buf->__buf) {
			NET_BUF_ERR("%s():%d: Failed to allocate data",
				    func, line);
			net_buf_destroy(buf);
			return NULL;
		}

#if __ASSERT_ON
		NET_BUF_ASSERT(req_size <= size);
#endif
	} else {
		buf->__buf = NULL;
	}

	buf->ref   = 1U;
	buf->flags = 0U;
	buf->frags = NULL;
	buf->size  = size;
	memset(buf->user_data, 0, buf->user_data_size);
	net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	atomic_dec(&pool->avail_count);
	__ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
	pool->max_used = MAX(pool->max_used,
			     pool->buf_count - atomic_get(&pool->avail_count));
#endif
	return buf;
}
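
/* Typical allocation from application code (illustrative; the pool name,
 * data pointer, length and timeout are hypothetical):
 *
 *	struct net_buf *buf = net_buf_alloc_len(&my_pool, 64, K_MSEC(100));
 *
 *	if (!buf) {
 *		return -ENOMEM;
 *	}
 *	net_buf_add_mem(buf, data, 64);
 *	net_buf_unref(buf);
 */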

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
					  k_timeout_t timeout, const char *func,
					  int line)
{
	return net_buf_alloc_len_debug(pool, pool->alloc->max_alloc_size, timeout, func,
				       line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
				    k_timeout_t timeout)
{
	return net_buf_alloc_len(pool, pool->alloc->max_alloc_size, timeout);
}
#endif

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
					      void *data, size_t size,
					      k_timeout_t timeout,
					      const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
					void *data, size_t size,
					k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

#if defined(CONFIG_NET_BUF_LOG)
	buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
	buf = net_buf_alloc_len(pool, 0, timeout);
#endif
	if (!buf) {
		return NULL;
	}

	net_buf_simple_init_with_data(&buf->b, data, size);
	buf->flags = NET_BUF_EXTERNAL_DATA;

	return buf;
}
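
/* Wrapping externally owned storage (illustrative; the array and pool name
 * are hypothetical). The caller keeps ownership of the storage and must keep
 * it valid for the lifetime of the net_buf:
 *
 *	static uint8_t dma_region[512];
 *
 *	struct net_buf *buf = net_buf_alloc_with_data(&my_pool, dma_region,
 *						      sizeof(dma_region),
 *						      K_NO_WAIT);
 */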

static struct k_spinlock net_buf_slist_lock;

void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);
	__ASSERT_NO_MSG(buf);

	key = k_spin_lock(&net_buf_slist_lock);
	sys_slist_append(list, &buf->node);
	k_spin_unlock(&net_buf_slist_lock, key);
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);

	key = k_spin_lock(&net_buf_slist_lock);

	buf = (void *)sys_slist_get(list);

	k_spin_unlock(&net_buf_slist_lock, key);

	return buf;
}

#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
	__ASSERT_NO_MSG(buf);

	while (buf) {
		struct net_buf *frags = buf->frags;
		struct net_buf_pool *pool;

#if defined(CONFIG_NET_BUF_LOG)
		if (!buf->ref) {
			NET_BUF_ERR("%s():%d: buf %p double free", func, line,
				    buf);
			return;
		}
#endif
		NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
			    buf->pool_id, buf->frags);

		if (--buf->ref > 0) {
			return;
		}

		buf->data = NULL;
		buf->frags = NULL;

		pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		atomic_inc(&pool->avail_count);
		__ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count);
#endif

		if (pool->destroy) {
			pool->destroy(buf);
		} else {
			net_buf_destroy(buf);
		}

		buf = frags;
	}
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
		    buf, buf->ref, buf->pool_id);
	buf->ref++;
	return buf;
}

struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf_pool *pool;
	struct net_buf *clone;

	__ASSERT_NO_MSG(buf);

	pool = net_buf_pool_get(buf->pool_id);

	clone = net_buf_alloc_len(pool, 0, timeout);
	if (!clone) {
		return NULL;
	}

	/* If the pool supports data referencing use that. Otherwise
	 * we need to allocate new data and make a copy.
	 */
	if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
		clone->__buf = buf->__buf ? data_ref(buf, buf->__buf) : NULL;
		clone->data = buf->data;
		clone->len = buf->len;
		clone->size = buf->size;
	} else {
		size_t size = buf->size;

		timeout = sys_timepoint_timeout(end);

		clone->__buf = data_alloc(clone, &size, timeout);
		if (!clone->__buf || size < buf->size) {
			net_buf_destroy(clone);
			return NULL;
		}

		clone->size = size;
		clone->data = clone->__buf + net_buf_headroom(buf);
		net_buf_add_mem(clone, buf->data, buf->len);
	}

	/* user_data_size should be the same for buffers from the same pool */
	__ASSERT(buf->user_data_size == clone->user_data_size, "Unexpected user data size");

	memcpy(clone->user_data, buf->user_data, clone->user_data_size);

	return clone;
}

int net_buf_user_data_copy(struct net_buf *dst, const struct net_buf *src)
{
	__ASSERT_NO_MSG(dst);
	__ASSERT_NO_MSG(src);

	if (dst == src) {
		return 0;
	}

	if (dst->user_data_size < src->user_data_size) {
		return -EINVAL;
	}

	memcpy(dst->user_data, src->user_data, src->user_data_size);

	return 0;
}

struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	while (buf->frags) {
		buf = buf->frags;
	}

	return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
	__ASSERT_NO_MSG(parent);
	__ASSERT_NO_MSG(frag);

	if (parent->frags) {
		net_buf_frag_last(frag)->frags = parent->frags;
	}
	/* Take ownership of the fragment reference */
	parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
	__ASSERT_NO_MSG(frag);

	if (!head) {
		return net_buf_ref(frag);
	}

	net_buf_frag_insert(net_buf_frag_last(head), frag);

	return head;
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
	struct net_buf *next_frag;

	__ASSERT_NO_MSG(frag);

	if (parent) {
		__ASSERT_NO_MSG(parent->frags);
		__ASSERT_NO_MSG(parent->frags == frag);
		parent->frags = frag->frags;
	}

	next_frag = frag->frags;

	frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
	net_buf_unref_debug(frag, func, line);
#else
	net_buf_unref(frag);
#endif

	return next_frag;
}

size_t net_buf_linearize(void *dst, size_t dst_len, const struct net_buf *src,
			 size_t offset, size_t len)
{
	const struct net_buf *frag;
	size_t to_copy;
	size_t copied;

	len = MIN(len, dst_len);

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	copied = 0;
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);

		copied += to_copy;

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	return copied;
}
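
/* Gathering a fragment chain into a flat array (illustrative; the pool name,
 * hdr/payload pointers and their lengths are hypothetical):
 *
 *	struct net_buf *head = net_buf_alloc(&my_pool, K_FOREVER);
 *	struct net_buf *frag = net_buf_alloc(&my_pool, K_FOREVER);
 *	uint8_t flat[64];
 *	size_t n;
 *
 *	net_buf_add_mem(head, hdr, hdr_len);
 *	net_buf_add_mem(frag, payload, payload_len);
 *	head = net_buf_frag_add(head, frag);
 *
 *	n = net_buf_linearize(flat, sizeof(flat), head, 0, hdr_len + payload_len);
 */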

/* Helper to append multiple bytes to a buffer. If the current fragment runs
 * out of tailroom, a new fragment is allocated and added to the chain.
 * Assumes the buffer already has at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
			    const void *value, k_timeout_t timeout,
			    net_buf_allocator_cb allocate_cb, void *user_data)
{
	struct net_buf *frag = net_buf_frag_last(buf);
	size_t added_len = 0;
	const uint8_t *value8 = value;
	size_t max_size;

	do {
		uint16_t count = MIN(len, net_buf_tailroom(frag));

		net_buf_add_mem(frag, value8, count);
		len -= count;
		added_len += count;
		value8 += count;

		if (len == 0) {
			return added_len;
		}

		if (allocate_cb) {
			frag = allocate_cb(timeout, user_data);
		} else {
			struct net_buf_pool *pool;

			/* Allocate from the original pool if no callback has
			 * been provided.
			 */
			pool = net_buf_pool_get(buf->pool_id);
			max_size = pool->alloc->max_alloc_size;
			frag = net_buf_alloc_len(pool,
						 max_size ? MIN(len, max_size) : len,
						 timeout);
		}

		if (!frag) {
			return added_len;
		}

		net_buf_frag_add(buf, frag);
	} while (1);

	/* Unreachable */
	return 0;
}
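
/* Appending a large payload in one call; fragments are allocated from the
 * buffer's own pool as needed (illustrative; big_payload is hypothetical):
 *
 *	size_t written = net_buf_append_bytes(buf, sizeof(big_payload),
 *					      big_payload, K_NO_WAIT,
 *					      NULL, NULL);
 *
 *	if (written < sizeof(big_payload)) {
 *		// pool exhausted; only part of the payload was appended
 *	}
 */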

size_t net_buf_data_match(const struct net_buf *buf, size_t offset, const void *data, size_t len)
{
	const uint8_t *dptr = data;
	const uint8_t *bptr;
	size_t compared = 0;
	size_t to_compare;

	if (!buf || !data) {
		return compared;
	}

	/* find the right fragment to start comparison */
	while (buf && offset >= buf->len) {
		offset -= buf->len;
		buf = buf->frags;
	}

	while (buf && len > 0) {
		bptr = buf->data + offset;
		to_compare = MIN(len, buf->len - offset);

		for (size_t i = 0; i < to_compare; ++i) {
			if (dptr[compared] != bptr[i]) {
				return compared;
			}
			compared++;
		}

		len -= to_compare;
		buf = buf->frags;
		offset = 0;
	}

	return compared;
}
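
/* Checking whether a (possibly fragmented) buffer starts with a known
 * byte sequence (illustrative; the magic array is hypothetical):
 *
 *	static const uint8_t magic[] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *	if (net_buf_data_match(buf, 0, magic, sizeof(magic)) == sizeof(magic)) {
 *		// buffer begins with the magic sequence
 *	}
 */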