/* buf.c - Buffer management */

/*
 * Copyright (c) 2015-2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_MODULE_NAME net_buf
#define LOG_LEVEL CONFIG_NET_BUF_LOG_LEVEL

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/net/buf.h>

#if defined(CONFIG_NET_BUF_LOG)
#define NET_BUF_DBG(fmt, ...) LOG_DBG("(%p) " fmt, k_current_get(), \
				      ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#else

#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_LOG */

#define NET_BUF_ASSERT(cond, ...) __ASSERT(cond, "" __VA_ARGS__)

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

/* Linker-defined symbol bound to the static pool structs */
STRUCT_SECTION_START_EXTERN(net_buf_pool);

struct net_buf_pool *net_buf_pool_get(int id)
{
	struct net_buf_pool *pool;

	STRUCT_SECTION_GET(net_buf_pool, id, &pool);

	return pool;
}

static int pool_id(struct net_buf_pool *pool)
{
	return pool - TYPE_SECTION_START(net_buf_pool);
}

int net_buf_id(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				__alignof__(struct net_buf));
	ptrdiff_t offset = (uint8_t *)buf - (uint8_t *)pool->__bufs;

	return offset / struct_size;
}

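/* Worked example (illustrative only, not part of the build): if struct
 * net_buf plus user data rounds up to, say, 32 bytes, then the buffer at
 * byte offset 64 into pool->__bufs yields net_buf_id() == 2.
 */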
static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
					      uint16_t uninit_count)
{
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				__alignof__(struct net_buf));
	size_t byte_offset = (pool->buf_count - uninit_count) * struct_size;
	struct net_buf *buf;

	buf = (struct net_buf *)(((uint8_t *)pool->__bufs) + byte_offset);

	buf->pool_id = pool_id(pool);
	buf->user_data_size = pool->user_data_size;

	return buf;
}

void net_buf_reset(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf->flags == 0U);
	__ASSERT_NO_MSG(buf->frags == NULL);

	net_buf_simple_reset(&buf->b);
}

static uint8_t *generic_data_ref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	(*ref_count)++;

	return data;
}

static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
				 k_timeout_t timeout)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	/* Reserve extra space in front of the data for the ref count. Only
	 * one byte is used, but a full pointer-sized slot keeps the returned
	 * data pointer aligned.
	 */
	void *b = k_heap_alloc(pool, sizeof(void *) + *size, timeout);

	if (b == NULL) {
		return NULL;
	}

	ref_count = (uint8_t *)b;
	*ref_count = 1U;

	/* Return a pointer to the data area, just past the ref-count slot */
	return ref_count + sizeof(void *);
}

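/* Resulting allocation layout (sketch; assumes sizeof(void *) == 4):
 *
 *   |<-- sizeof(void *) -->|<-------- *size -------->|
 *   +----------------------+-------------------------+
 *   | ref count (1B used)  |          data           |
 *   +----------------------+-------------------------+
 *   ^ k_heap_alloc() result ^ pointer returned to caller
 */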
static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	if (--(*ref_count)) {
		return;
	}

	/* The allocation starts at the ref count, so free from there */
	k_heap_free(pool, ref_count);
}

const struct net_buf_data_cb net_buf_var_cb = {
	.alloc = mem_pool_data_alloc,
	.ref   = generic_data_ref,
	.unref = mem_pool_data_unref,
};

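/* Usage sketch for a variable-size pool built on net_buf_var_cb. The macro
 * is the public API from <zephyr/net/buf.h>; the pool name and sizes here
 * are hypothetical:
 *
 *   NET_BUF_POOL_VAR_DEFINE(var_pool, 8, 1024, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc_len(&var_pool, 64, K_NO_WAIT);
 *   if (buf) {
 *           net_buf_unref(buf);
 *   }
 */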
static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
			      k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	*size = MIN(fixed->data_size, *size);

	return fixed->data_pool + fixed->data_size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
	/* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
	.alloc = fixed_data_alloc,
	.unref = fixed_data_unref,
};

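/* Usage sketch for a fixed-size pool (hypothetical names). Every buffer is
 * backed by a block of exactly data_size bytes; fixed_data_alloc() above
 * silently clamps larger requests:
 *
 *   NET_BUF_POOL_FIXED_DEFINE(fixed_pool, 4, 128, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc(&fixed_pool, K_FOREVER);
 *   net_buf_add_u8(buf, 0x42);
 *   net_buf_unref(buf);
 */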
#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)

static uint8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
			     k_timeout_t timeout)
{
	uint8_t *ref_count;

	ref_count = k_malloc(sizeof(void *) + *size);
	if (!ref_count) {
		return NULL;
	}

	*ref_count = 1U;

	return ref_count + sizeof(void *);
}

static void heap_data_unref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	if (--(*ref_count)) {
		return;
	}

	k_free(ref_count);
}

static const struct net_buf_data_cb net_buf_heap_cb = {
	.alloc = heap_data_alloc,
	.ref   = generic_data_ref,
	.unref = heap_data_unref,
};

const struct net_buf_data_alloc net_buf_heap_alloc = {
	.cb = &net_buf_heap_cb,
};

#endif /* CONFIG_HEAP_MEM_POOL_SIZE > 0 */

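/* Usage sketch for a heap-backed pool (hypothetical names; requires
 * CONFIG_HEAP_MEM_POOL_SIZE > 0). Data blocks come from the system heap
 * via k_malloc():
 *
 *   NET_BUF_POOL_HEAP_DEFINE(heap_pool, 4, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc_len(&heap_pool, 256, K_MSEC(100));
 */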
static uint8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->alloc(buf, size, timeout);
}

static uint8_t *data_ref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->ref(buf, data);
}

static void data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	if (buf->flags & NET_BUF_EXTERNAL_DATA) {
		return;
	}

	pool->alloc->cb->unref(buf, data);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
					k_timeout_t timeout, const char *func,
					int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
				  k_timeout_t timeout)
#endif
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(pool);

	NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size);

	/* We need to prevent race conditions
	 * when accessing pool->uninit_count.
	 */
	key = k_spin_lock(&pool->lock);

	/* If there are uninitialized buffers we're guaranteed to succeed
	 * with the allocation one way or another.
	 */
	if (pool->uninit_count) {
		uint16_t uninit_count;

		/* If this is not the first access to the pool, we can
		 * be opportunistic and try to fetch a previously used
		 * buffer from the LIFO with K_NO_WAIT.
		 */
		if (pool->uninit_count < pool->buf_count) {
			buf = k_lifo_get(&pool->free, K_NO_WAIT);
			if (buf) {
				k_spin_unlock(&pool->lock, key);
				goto success;
			}
		}

		uninit_count = pool->uninit_count--;
		k_spin_unlock(&pool->lock, key);

		buf = pool_get_uninit(pool, uninit_count);
		goto success;
	}

	k_spin_unlock(&pool->lock, key);

#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		uint32_t ref = k_uptime_get_32();
		buf = k_lifo_get(&pool->free, K_NO_WAIT);
		while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
				     func, line, pool->name);
#else
			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
				     func, line, pool);
#endif
			buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
				     func, line, pool->name,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
			NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
				     func, line, pool,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
		}
	} else {
		buf = k_lifo_get(&pool->free, timeout);
	}
#else
	buf = k_lifo_get(&pool->free, timeout);
#endif
	if (!buf) {
		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
		return NULL;
	}

success:
	NET_BUF_DBG("allocated buf %p", buf);

	if (size) {
#if __ASSERT_ON
		size_t req_size = size;
#endif
		timeout = sys_timepoint_timeout(end);
		buf->__buf = data_alloc(buf, &size, timeout);
		if (!buf->__buf) {
			NET_BUF_ERR("%s():%d: Failed to allocate data",
				    func, line);
			net_buf_destroy(buf);
			return NULL;
		}

#if __ASSERT_ON
		NET_BUF_ASSERT(req_size <= size);
#endif
	} else {
		buf->__buf = NULL;
	}

	buf->ref   = 1U;
	buf->flags = 0U;
	buf->frags = NULL;
	buf->size  = size;
	net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	atomic_dec(&pool->avail_count);
	__ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
#endif
	return buf;
}

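/* Typical call site (illustrative): the timeout bounds the combined wait
 * for a free buffer and for its data allocation, thanks to the timepoint
 * bookkeeping above:
 *
 *   struct net_buf *buf = net_buf_alloc_len(&pool, 64, K_MSEC(50));
 *   if (!buf) {
 *           return -ENOMEM;
 *   }
 */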
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
					  k_timeout_t timeout, const char *func,
					  int line)
{
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func,
				       line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
				    k_timeout_t timeout)
{
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	return net_buf_alloc_len(pool, fixed->data_size, timeout);
}
#endif

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
					      void *data, size_t size,
					      k_timeout_t timeout,
					      const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
					void *data, size_t size,
					k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

#if defined(CONFIG_NET_BUF_LOG)
	buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
	buf = net_buf_alloc_len(pool, 0, timeout);
#endif
	if (!buf) {
		return NULL;
	}

	net_buf_simple_init_with_data(&buf->b, data, size);
	buf->flags = NET_BUF_EXTERNAL_DATA;

	return buf;
}

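/* Illustrative external-data use (hypothetical names): the buffer wraps
 * caller-owned storage, and the NET_BUF_EXTERNAL_DATA flag makes
 * data_unref() above leave that storage alone on the final unref:
 *
 *   static uint8_t rx_frame[256];
 *
 *   struct net_buf *buf = net_buf_alloc_with_data(&pool, rx_frame,
 *                                                 sizeof(rx_frame),
 *                                                 K_NO_WAIT);
 */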
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, k_timeout_t timeout,
				  const char *func, int line)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

	NET_BUF_DBG("%s():%d: fifo %p", func, line, fifo);

	buf = k_fifo_get(fifo, timeout);
	if (!buf) {
		return NULL;
	}

	NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);

	return buf;
}

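/* Illustrative producer/consumer pairing with net_buf_put() further below
 * (fifo name hypothetical):
 *
 *   net_buf_put(&tx_fifo, buf);              // producer
 *   buf = net_buf_get(&tx_fifo, K_FOREVER);  // consumer blocks until data
 */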
static struct k_spinlock net_buf_slist_lock;

void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);
	__ASSERT_NO_MSG(buf);

	key = k_spin_lock(&net_buf_slist_lock);
	sys_slist_append(list, &buf->node);
	k_spin_unlock(&net_buf_slist_lock, key);
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);

	key = k_spin_lock(&net_buf_slist_lock);

	buf = (void *)sys_slist_get(list);

	k_spin_unlock(&net_buf_slist_lock, key);

	return buf;
}

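/* Illustrative slist usage (list name hypothetical). These helpers
 * serialize access through net_buf_slist_lock, so callers need no extra
 * locking of their own:
 *
 *   static sys_slist_t pending = SYS_SLIST_STATIC_INIT(&pending);
 *
 *   net_buf_slist_put(&pending, buf);
 *   buf = net_buf_slist_get(&pending);
 */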
void net_buf_put(struct k_fifo *fifo, struct net_buf *buf)
{
	__ASSERT_NO_MSG(fifo);
	__ASSERT_NO_MSG(buf);

	k_fifo_put(fifo, buf);
}

#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
	__ASSERT_NO_MSG(buf);

	while (buf) {
		struct net_buf *frags = buf->frags;
		struct net_buf_pool *pool;

#if defined(CONFIG_NET_BUF_LOG)
		if (!buf->ref) {
			NET_BUF_ERR("%s():%d: buf %p double free", func, line,
				    buf);
			return;
		}
#endif
		NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
			    buf->pool_id, buf->frags);

		if (--buf->ref > 0) {
			return;
		}

		if (buf->__buf) {
			data_unref(buf, buf->__buf);
			buf->__buf = NULL;
		}

		buf->data = NULL;
		buf->frags = NULL;

		pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		atomic_inc(&pool->avail_count);
		__ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count);
#endif

		if (pool->destroy) {
			pool->destroy(buf);
		} else {
			net_buf_destroy(buf);
		}

		buf = frags;
	}
}

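/* Reference-count lifecycle sketch: every net_buf_ref() must be balanced
 * by a net_buf_unref(), and the final unref also releases the whole
 * fragment chain, as the loop above shows:
 *
 *   struct net_buf *extra = net_buf_ref(buf);  // ref: 1 -> 2
 *   net_buf_unref(buf);                        // ref: 2 -> 1, still valid
 *   net_buf_unref(extra);                      // ref: 1 -> 0, freed
 */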
struct net_buf *net_buf_ref(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
		    buf, buf->ref, buf->pool_id);
	buf->ref++;
	return buf;
}

struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf_pool *pool;
	struct net_buf *clone;

	__ASSERT_NO_MSG(buf);

	pool = net_buf_pool_get(buf->pool_id);

	clone = net_buf_alloc_len(pool, 0, timeout);
	if (!clone) {
		return NULL;
	}

	/* If the pool supports data referencing, use that. Otherwise
	 * we need to allocate new data and make a copy.
	 */
	if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
		clone->__buf = data_ref(buf, buf->__buf);
		clone->data = buf->data;
		clone->len = buf->len;
		clone->size = buf->size;
	} else {
		size_t size = buf->size;

		timeout = sys_timepoint_timeout(end);

		clone->__buf = data_alloc(clone, &size, timeout);
		if (!clone->__buf || size < buf->size) {
			net_buf_destroy(clone);
			return NULL;
		}

		clone->size = size;
		clone->data = clone->__buf + net_buf_headroom(buf);
		net_buf_add_mem(clone, buf->data, buf->len);
	}

	return clone;
}

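/* Illustrative clone call: with a ref-capable pool the clone shares the
 * original data block; otherwise the contents are copied into a fresh
 * allocation, so the call may block up to the given timeout:
 *
 *   struct net_buf *copy = net_buf_clone(buf, K_MSEC(10));
 *   if (!copy) {
 *           return -ENOMEM;
 *   }
 */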
struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	while (buf->frags) {
		buf = buf->frags;
	}

	return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
	__ASSERT_NO_MSG(parent);
	__ASSERT_NO_MSG(frag);

	if (parent->frags) {
		net_buf_frag_last(frag)->frags = parent->frags;
	}
	/* Take ownership of the fragment reference */
	parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
	__ASSERT_NO_MSG(frag);

	if (!head) {
		return net_buf_ref(frag);
	}

	net_buf_frag_insert(net_buf_frag_last(head), frag);

	return head;
}

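/* Illustrative chain construction (hypothetical names). Seeding from NULL
 * takes an extra reference on the first fragment; later appends take
 * ownership of the reference passed in:
 *
 *   struct net_buf *head = NULL;
 *
 *   head = net_buf_frag_add(head, frag1);  // head == frag1, extra ref taken
 *   head = net_buf_frag_add(head, frag2);  // frag2 chained to the tail
 */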
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
	struct net_buf *next_frag;

	__ASSERT_NO_MSG(frag);

	if (parent) {
		__ASSERT_NO_MSG(parent->frags);
		__ASSERT_NO_MSG(parent->frags == frag);
		parent->frags = frag->frags;
	}

	next_frag = frag->frags;

	frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
	net_buf_unref_debug(frag, func, line);
#else
	net_buf_unref(frag);
#endif

	return next_frag;
}

size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
			 size_t offset, size_t len)
{
	struct net_buf *frag;
	size_t to_copy;
	size_t copied;

	len = MIN(len, dst_len);

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	copied = 0;
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);

		copied += to_copy;

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	return copied;
}

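/* Illustrative flattening of a fragment chain into contiguous storage
 * (hypothetical buffer). The return value is the number of bytes actually
 * copied, which may be less than requested if the chain is short:
 *
 *   uint8_t flat[512];
 *   size_t n = net_buf_linearize(flat, sizeof(flat), buf, 0,
 *                                net_buf_frags_len(buf));
 */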
/* This helper appends multiple bytes to the buffer. If there is no room
 * for the data in the current fragment, a new fragment is allocated and
 * chained onto the buffer. The buffer must contain at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
			    const void *value, k_timeout_t timeout,
			    net_buf_allocator_cb allocate_cb, void *user_data)
{
	struct net_buf *frag = net_buf_frag_last(buf);
	size_t added_len = 0;
	const uint8_t *value8 = value;

	do {
		uint16_t count = MIN(len, net_buf_tailroom(frag));

		net_buf_add_mem(frag, value8, count);
		len -= count;
		added_len += count;
		value8 += count;

		if (len == 0) {
			return added_len;
		}

		if (allocate_cb) {
			frag = allocate_cb(timeout, user_data);
		} else {
			struct net_buf_pool *pool;

			/* Allocate from the original pool if no callback has
			 * been provided.
			 */
			pool = net_buf_pool_get(buf->pool_id);
			frag = net_buf_alloc_len(pool, len, timeout);
		}

		if (!frag) {
			return added_len;
		}

		net_buf_frag_add(buf, frag);
	} while (1);

	/* Unreachable */
	return 0;
}
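
/* Illustrative append across fragments (hypothetical names). With no
 * allocator callback, overflow fragments come from the buffer's own pool:
 *
 *   size_t n = net_buf_append_bytes(buf, sizeof(payload), payload,
 *                                   K_NO_WAIT, NULL, NULL);
 *   if (n < sizeof(payload)) {
 *           // ran out of buffers; only n bytes were appended
 *   }
 */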