1 /* buf.c - Buffer management */
2 
3 /*
4  * Copyright (c) 2015-2019 Intel Corporation
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 #define LOG_MODULE_NAME net_buf
10 #define LOG_LEVEL CONFIG_NET_BUF_LOG_LEVEL
11 
12 #include <logging/log.h>
13 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
14 
15 #include <stdio.h>
16 #include <errno.h>
17 #include <stddef.h>
18 #include <string.h>
19 #include <sys/byteorder.h>
20 
21 #include <net/buf.h>
22 
#if defined(CONFIG_NET_BUF_LOG)
/* Prefix debug output with the current thread to disambiguate
 * interleaved allocations.
 */
#define NET_BUF_DBG(fmt, ...) LOG_DBG("(%p) " fmt, k_current_get(), \
				      ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#else

/* Logging disabled: the macros expand to nothing, discarding their
 * arguments unevaluated.
 */
#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_LOG */

#define NET_BUF_ASSERT(cond, ...) __ASSERT(cond, "" __VA_ARGS__)

/* How often to repeat the "pool low on buffers" warning while blocked
 * waiting with K_FOREVER (see the allocation path below).
 */
#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

/* Linker-defined symbol bound to the static pool structs */
extern struct net_buf_pool _net_buf_pool_list[];
47 
net_buf_pool_get(int id)48 struct net_buf_pool *net_buf_pool_get(int id)
49 {
50 	return &_net_buf_pool_list[id];
51 }
52 
pool_id(struct net_buf_pool * pool)53 static int pool_id(struct net_buf_pool *pool)
54 {
55 	return pool - _net_buf_pool_list;
56 }
57 
net_buf_id(struct net_buf * buf)58 int net_buf_id(struct net_buf *buf)
59 {
60 	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
61 
62 	return buf - pool->__bufs;
63 }
64 
/* Hand out the next never-used buffer from @pool. @uninit_count is the
 * pool's uninitialized-buffer counter as sampled by the caller, so the
 * buffers are dispensed in array order starting from index 0.
 */
static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
					      uint16_t uninit_count)
{
	struct net_buf *buf = &pool->__bufs[pool->buf_count - uninit_count];

	/* Record the owning pool so the buffer can find it later */
	buf->pool_id = pool_id(pool);

	return buf;
}
76 
/* Reset the data pointers/length of @buf. The buffer must already have
 * been detached from any fragment chain and have its flags cleared.
 */
void net_buf_reset(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf->flags == 0U);
	__ASSERT_NO_MSG(buf->frags == NULL);

	net_buf_simple_reset(&buf->b);
}
84 
/* Take an extra reference on ref-counted buffer data. The count lives
 * in the byte immediately preceding the payload.
 */
static uint8_t *generic_data_ref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count = data - 1;

	*ref_count += 1U;

	return data;
}
94 
mem_pool_data_alloc(struct net_buf * buf,size_t * size,k_timeout_t timeout)95 static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
96 				 k_timeout_t timeout)
97 {
98 	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
99 	struct k_heap *pool = buf_pool->alloc->alloc_data;
100 	uint8_t *ref_count;
101 
102 	/* Reserve extra space for a ref-count (uint8_t) */
103 	void *b = k_heap_alloc(pool, 1 + *size, timeout);
104 
105 	if (b == NULL) {
106 		return NULL;
107 	}
108 
109 	ref_count = (uint8_t *)b;
110 	*ref_count = 1U;
111 
112 	/* Return pointer to the byte following the ref count */
113 	return ref_count + 1;
114 }
115 
/* Drop one reference on k_heap-backed buffer data; free the underlying
 * allocation once the last reference is gone.
 */
static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	/* The ref count lives in the byte just before the payload */
	ref_count = data - 1;
	if (--(*ref_count)) {
		return;
	}

	/* Last reference gone: the heap allocation begins at the ref count */
	k_heap_free(pool, ref_count);
}
130 
/* Callbacks for variable-size (k_heap backed) data pools; supports
 * reference sharing between cloned buffers.
 */
const struct net_buf_data_cb net_buf_var_cb = {
	.alloc = mem_pool_data_alloc,
	.ref   = generic_data_ref,
	.unref = mem_pool_data_unref,
};
136 
/* "Allocate" data for a buffer in a fixed-size pool: each buffer owns a
 * dedicated slot in the pool's data region, so this cannot block and
 * @timeout is unused. The requested size is clamped to the slot size.
 */
static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
			      k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	*size = MIN(fixed->data_size, *size);

	/* Slot is addressed by the buffer's index within the pool */
	return fixed->data_pool + fixed->data_size * net_buf_id(buf);
}
147 
/* Fixed-size slots are statically owned by their buffer, so releasing
 * the data is a no-op.
 */
static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
	/* Nothing needed for fixed-size data pools */
}
152 
/* Callbacks for fixed-size data pools. No .ref callback: fixed slots
 * cannot be shared, so cloning falls back to copying the data.
 */
const struct net_buf_data_cb net_buf_fixed_cb = {
	.alloc = fixed_data_alloc,
	.unref = fixed_data_unref,
};
157 
158 #if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
159 
heap_data_alloc(struct net_buf * buf,size_t * size,k_timeout_t timeout)160 static uint8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
161 			     k_timeout_t timeout)
162 {
163 	uint8_t *ref_count;
164 
165 	ref_count = k_malloc(1 + *size);
166 	if (!ref_count) {
167 		return NULL;
168 	}
169 
170 	*ref_count = 1U;
171 
172 	return ref_count + 1;
173 }
174 
/* Drop one reference on system-heap data; k_free() the allocation
 * (which starts at the ref-count byte) when it was the last one.
 */
static void heap_data_unref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count = data - 1;

	if (--(*ref_count) > 0) {
		return;
	}

	k_free(ref_count);
}
186 
/* Callbacks for system-heap backed data; supports reference sharing. */
static const struct net_buf_data_cb net_buf_heap_cb = {
	.alloc = heap_data_alloc,
	.ref   = generic_data_ref,
	.unref = heap_data_unref,
};

/* Public allocator descriptor for pools using the system heap. */
const struct net_buf_data_alloc net_buf_heap_alloc = {
	.cb = &net_buf_heap_cb,
};
196 
197 #endif /* CONFIG_HEAP_MEM_POOL_SIZE > 0 */
198 
/* Dispatch data allocation to the buffer's pool-specific callback.
 * May shrink or grow *size depending on the backing allocator.
 */
static uint8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->alloc(buf, size, timeout);
}
205 
/* Dispatch a data reference-increment to the pool-specific callback.
 * Callers must check that the pool provides a .ref callback first.
 */
static uint8_t *data_ref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->ref(buf, data);
}
212 
/* Dispatch a data release to the pool-specific callback. Externally
 * supplied data (NET_BUF_EXTERNAL_DATA) is never owned by the pool and
 * is left untouched.
 */
static void data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	if (buf->flags & NET_BUF_EXTERNAL_DATA) {
		return;
	}

	pool->alloc->cb->unref(buf, data);
}
223 
/* Allocate a buffer from @pool with @size bytes of data, waiting up to
 * @timeout for one to become available. Returns NULL on timeout or if
 * the data allocation fails. With CONFIG_NET_BUF_LOG, @func/@line
 * identify the caller for diagnostics.
 */
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
					k_timeout_t timeout, const char *func,
					int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
				  k_timeout_t timeout)
#endif
{
	/* Absolute deadline, used to shrink the timeout left for the
	 * data allocation after waiting for the buffer itself.
	 */
	uint64_t end = sys_clock_timeout_end_calc(timeout);
	struct net_buf *buf;
	unsigned int key;

	__ASSERT_NO_MSG(pool);

	NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size);

	/* We need to lock interrupts temporarily to prevent race conditions
	 * when accessing pool->uninit_count.
	 */
	key = irq_lock();

	/* If there are uninitialized buffers we're guaranteed to succeed
	 * with the allocation one way or another.
	 */
	if (pool->uninit_count) {
		uint16_t uninit_count;

		/* If this is not the first access to the pool, we can
		 * be opportunistic and try to fetch a previously used
		 * buffer from the LIFO with K_NO_WAIT.
		 */
		if (pool->uninit_count < pool->buf_count) {
			buf = k_lifo_get(&pool->free, K_NO_WAIT);
			if (buf) {
				irq_unlock(key);
				goto success;
			}
		}

		uninit_count = pool->uninit_count--;
		irq_unlock(key);

		buf = pool_get_uninit(pool, uninit_count);
		goto success;
	}

	irq_unlock(key);

	/* All buffers initialized: must wait for one to be freed. With
	 * warning-level logging and K_FOREVER, poll in WARN_ALLOC_INTERVAL
	 * steps so starvation shows up in the log.
	 */
#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		uint32_t ref = k_uptime_get_32();
		buf = k_lifo_get(&pool->free, K_NO_WAIT);
		while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
				     func, line, pool->name);
#else
			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
				     func, line, pool);
#endif
			buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
				     func, line, pool->name,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
			NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
				     func, line, pool,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
		}
	} else {
		buf = k_lifo_get(&pool->free, timeout);
	}
#else
	buf = k_lifo_get(&pool->free, timeout);
#endif
	if (!buf) {
		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
		return NULL;
	}

success:
	NET_BUF_DBG("allocated buf %p", buf);

	/* Allocate the actual data storage, unless none was requested */
	if (size) {
#if __ASSERT_ON
		size_t req_size = size;
#endif
		/* Deduct the time already spent waiting for the buffer */
		if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
		    !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			int64_t remaining = end - sys_clock_tick_get();

			if (remaining <= 0) {
				timeout = K_NO_WAIT;
			} else {
				timeout = Z_TIMEOUT_TICKS(remaining);
			}
		}

		buf->__buf = data_alloc(buf, &size, timeout);
		if (!buf->__buf) {
			NET_BUF_ERR("%s():%d: Failed to allocate data",
				    func, line);
			net_buf_destroy(buf);
			return NULL;
		}

#if __ASSERT_ON
		NET_BUF_ASSERT(req_size <= size);
#endif
	} else {
		buf->__buf = NULL;
	}

	buf->ref   = 1U;
	buf->flags = 0U;
	buf->frags = NULL;
	buf->size  = size;
	net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	atomic_dec(&pool->avail_count);
	__ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
#endif
	return buf;
}
352 
/* Allocate a buffer from a fixed-size pool, requesting exactly the
 * pool's per-buffer data size.
 */
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
					  k_timeout_t timeout, const char *func,
					  int line)
{
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func,
				       line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
				    k_timeout_t timeout)
{
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	return net_buf_alloc_len(pool, fixed->data_size, timeout);
}
#endif
372 
/* Allocate a buffer that wraps externally provided @data of @size bytes
 * instead of allocating storage from the pool. The buffer is flagged
 * NET_BUF_EXTERNAL_DATA so the data is never freed by the pool.
 */
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
					      void *data, size_t size,
					      k_timeout_t timeout,
					      const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
					void *data, size_t size,
					k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

	/* Request zero bytes of pool data; only the metadata is allocated */
#if defined(CONFIG_NET_BUF_LOG)
	buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
	buf = net_buf_alloc_len(pool, 0, timeout);
#endif
	if (!buf) {
		return NULL;
	}

	net_buf_simple_init_with_data(&buf->b, data, size);
	buf->flags = NET_BUF_EXTERNAL_DATA;

	return buf;
}
400 
/* Get a buffer from a FIFO, waiting up to @timeout. Returns NULL if no
 * buffer became available in time.
 */
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, k_timeout_t timeout,
				  const char *func, int line)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

	NET_BUF_DBG("%s():%d: fifo %p", func, line, fifo);

	buf = k_fifo_get(fifo, timeout);
	if (!buf) {
		return NULL;
	}

	NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);

	return buf;
}
421 
/* Initialize a net_buf_simple to wrap an external region of @size bytes
 * that is considered entirely filled with data (len == size).
 */
void net_buf_simple_init_with_data(struct net_buf_simple *buf,
				   void *data, size_t size)
{
	buf->__buf = data;
	buf->data = data;
	buf->len = size;
	buf->size = size;
}
430 
/* Reserve @reserve bytes of headroom at the start of an empty buffer by
 * advancing the data pointer. Must be called before any data is added.
 */
void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve)
{
	__ASSERT_NO_MSG(buf);
	__ASSERT_NO_MSG(buf->len == 0U);
	NET_BUF_DBG("buf %p reserve %zu", buf, reserve);

	buf->data = buf->__buf + reserve;
}
439 
/* Append @buf to a singly-linked list, guarding the list with an
 * IRQ lock so it is safe against concurrent access.
 */
void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
	unsigned int key;

	__ASSERT_NO_MSG(list);
	__ASSERT_NO_MSG(buf);

	key = irq_lock();
	sys_slist_append(list, &buf->node);
	irq_unlock(key);
}
451 
/* Pop the first buffer off a singly-linked list (NULL if empty), with
 * the same IRQ-lock protection as net_buf_slist_put().
 */
struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
	struct net_buf *buf;
	unsigned int key;

	__ASSERT_NO_MSG(list);

	key = irq_lock();
	buf = (void *)sys_slist_get(list);
	irq_unlock(key);

	return buf;
}
465 
/* Append @buf to the end of a FIFO. */
void net_buf_put(struct k_fifo *fifo, struct net_buf *buf)
{
	__ASSERT_NO_MSG(fifo);
	__ASSERT_NO_MSG(buf);

	k_fifo_put(fifo, buf);
}
473 
/* Drop a reference on @buf; once the count reaches zero, release its
 * data and return it to its pool. The fragment chain is walked
 * iteratively so long chains do not recurse.
 */
#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
	__ASSERT_NO_MSG(buf);

	while (buf) {
		/* Save the next fragment before the buffer is destroyed */
		struct net_buf *frags = buf->frags;
		struct net_buf_pool *pool;

#if defined(CONFIG_NET_BUF_LOG)
		if (!buf->ref) {
			NET_BUF_ERR("%s():%d: buf %p double free", func, line,
				    buf);
			return;
		}
#endif
		NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
			    buf->pool_id, buf->frags);

		/* Still referenced elsewhere: nothing more to do */
		if (--buf->ref > 0) {
			return;
		}

		/* Release the (possibly shared) data before the buffer */
		if (buf->__buf) {
			data_unref(buf, buf->__buf);
			buf->__buf = NULL;
		}

		buf->data = NULL;
		buf->frags = NULL;

		pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		atomic_inc(&pool->avail_count);
		__ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count);
#endif

		/* Pools may override how buffers are returned */
		if (pool->destroy) {
			pool->destroy(buf);
		} else {
			net_buf_destroy(buf);
		}

		buf = frags;
	}
}
524 
net_buf_ref(struct net_buf * buf)525 struct net_buf *net_buf_ref(struct net_buf *buf)
526 {
527 	__ASSERT_NO_MSG(buf);
528 
529 	NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
530 		    buf, buf->ref, buf->pool_id);
531 	buf->ref++;
532 	return buf;
533 }
534 
/* Duplicate @buf (not its fragments) from the same pool. If the pool
 * supports data referencing the clone shares the original's data;
 * otherwise new storage is allocated and the contents copied. Returns
 * NULL on allocation failure or timeout.
 */
struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
	/* Absolute deadline so the data allocation only gets the time
	 * left over after allocating the buffer itself.
	 */
	int64_t end = sys_clock_timeout_end_calc(timeout);
	struct net_buf_pool *pool;
	struct net_buf *clone;

	__ASSERT_NO_MSG(buf);

	pool = net_buf_pool_get(buf->pool_id);

	clone = net_buf_alloc_len(pool, 0, timeout);
	if (!clone) {
		return NULL;
	}

	/* If the pool supports data referencing use that. Otherwise
	 * we need to allocate new data and make a copy.
	 */
	if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
		clone->__buf = data_ref(buf, buf->__buf);
		clone->data = buf->data;
		clone->len = buf->len;
		clone->size = buf->size;
	} else {
		size_t size = buf->size;

		/* Deduct the time already spent on the buffer allocation */
		if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
		    !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			int64_t remaining = end - sys_clock_tick_get();

			if (remaining <= 0) {
				timeout = K_NO_WAIT;
			} else {
				timeout = Z_TIMEOUT_TICKS(remaining);
			}
		}

		clone->__buf = data_alloc(clone, &size, timeout);
		if (!clone->__buf || size < buf->size) {
			net_buf_destroy(clone);
			return NULL;
		}

		clone->size = size;
		/* Preserve the original's headroom before copying */
		clone->data = clone->__buf + net_buf_headroom(buf);
		net_buf_add_mem(clone, buf->data, buf->len);
	}

	return clone;
}
585 
net_buf_frag_last(struct net_buf * buf)586 struct net_buf *net_buf_frag_last(struct net_buf *buf)
587 {
588 	__ASSERT_NO_MSG(buf);
589 
590 	while (buf->frags) {
591 		buf = buf->frags;
592 	}
593 
594 	return buf;
595 }
596 
/* Insert @frag (and any fragments it carries) right after @parent,
 * splicing parent's existing fragments onto the end of frag's chain.
 * The parent takes over the caller's reference to @frag.
 */
void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
	__ASSERT_NO_MSG(parent);
	__ASSERT_NO_MSG(frag);

	if (parent->frags) {
		net_buf_frag_last(frag)->frags = parent->frags;
	}
	/* Take ownership of the fragment reference */
	parent->frags = frag;
}
608 
/* Append @frag to the end of @head's fragment chain and return the head
 * of the resulting chain. With a NULL @head, @frag becomes the head and
 * an extra reference is taken on it.
 */
struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
	__ASSERT_NO_MSG(frag);

	if (head) {
		net_buf_frag_insert(net_buf_frag_last(head), frag);
		return head;
	}

	return net_buf_ref(frag);
}
621 
/* Detach @frag from @parent's chain (parent may be NULL when frag is
 * the chain head), drop the chain's reference on it, and return the
 * fragment that followed it.
 */
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
	struct net_buf *next_frag;

	__ASSERT_NO_MSG(frag);

	/* @frag must be the immediate fragment of @parent */
	if (parent) {
		__ASSERT_NO_MSG(parent->frags);
		__ASSERT_NO_MSG(parent->frags == frag);
		parent->frags = frag->frags;
	}

	next_frag = frag->frags;

	/* Detach before unref so the rest of the chain is not freed */
	frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
	net_buf_unref_debug(frag, func, line);
#else
	net_buf_unref(frag);
#endif

	return next_frag;
}
652 
net_buf_linearize(void * dst,size_t dst_len,struct net_buf * src,size_t offset,size_t len)653 size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
654 			 size_t offset, size_t len)
655 {
656 	struct net_buf *frag;
657 	size_t to_copy;
658 	size_t copied;
659 
660 	len = MIN(len, dst_len);
661 
662 	frag = src;
663 
664 	/* find the right fragment to start copying from */
665 	while (frag && offset >= frag->len) {
666 		offset -= frag->len;
667 		frag = frag->frags;
668 	}
669 
670 	/* traverse the fragment chain until len bytes are copied */
671 	copied = 0;
672 	while (frag && len > 0) {
673 		to_copy = MIN(len, frag->len - offset);
674 		memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);
675 
676 		copied += to_copy;
677 
678 		/* to_copy is always <= len */
679 		len -= to_copy;
680 		frag = frag->frags;
681 
682 		/* after the first iteration, this value will be 0 */
683 		offset = 0;
684 	}
685 
686 	return copied;
687 }
688 
689 /* This helper routine will append multiple bytes, if there is no place for
690  * the data in current fragment then create new fragment and add it to
691  * the buffer. It assumes that the buffer has at least one fragment.
692  */
net_buf_append_bytes(struct net_buf * buf,size_t len,const void * value,k_timeout_t timeout,net_buf_allocator_cb allocate_cb,void * user_data)693 size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
694 			    const void *value, k_timeout_t timeout,
695 			    net_buf_allocator_cb allocate_cb, void *user_data)
696 {
697 	struct net_buf *frag = net_buf_frag_last(buf);
698 	size_t added_len = 0;
699 	const uint8_t *value8 = value;
700 
701 	do {
702 		uint16_t count = MIN(len, net_buf_tailroom(frag));
703 
704 		net_buf_add_mem(frag, value8, count);
705 		len -= count;
706 		added_len += count;
707 		value8 += count;
708 
709 		if (len == 0) {
710 			return added_len;
711 		}
712 
713 		if (allocate_cb) {
714 			frag = allocate_cb(timeout, user_data);
715 		} else {
716 			struct net_buf_pool *pool;
717 
718 			/* Allocate from the original pool if no callback has
719 			 * been provided.
720 			 */
721 			pool = net_buf_pool_get(buf->pool_id);
722 			frag = net_buf_alloc_len(pool, len, timeout);
723 		}
724 
725 		if (!frag) {
726 			return added_len;
727 		}
728 
729 		net_buf_frag_add(buf, frag);
730 	} while (1);
731 
732 	/* Unreachable */
733 	return 0;
734 }
735 
/* Separate log macros for the net_buf_simple API so its logging can be
 * enabled independently of the main net_buf logging.
 */
#if defined(CONFIG_NET_BUF_SIMPLE_LOG)
#define NET_BUF_SIMPLE_DBG(fmt, ...) NET_BUF_DBG(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ERR(fmt, ...) NET_BUF_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_WARN(fmt, ...) NET_BUF_WARN(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_INFO(fmt, ...) NET_BUF_INFO(fmt, ##__VA_ARGS__)
#else
#define NET_BUF_SIMPLE_DBG(fmt, ...)
#define NET_BUF_SIMPLE_ERR(fmt, ...)
#define NET_BUF_SIMPLE_WARN(fmt, ...)
#define NET_BUF_SIMPLE_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_SIMPLE_LOG */
747 
net_buf_simple_clone(const struct net_buf_simple * original,struct net_buf_simple * clone)748 void net_buf_simple_clone(const struct net_buf_simple *original,
749 			  struct net_buf_simple *clone)
750 {
751 	memcpy(clone, original, sizeof(struct net_buf_simple));
752 }
753 
/* Grow the buffer by @len bytes at the tail and return a pointer to the
 * newly added (uninitialized) region.
 */
void *net_buf_simple_add(struct net_buf_simple *buf, size_t len)
{
	uint8_t *tail;

	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	__ASSERT_NO_MSG(net_buf_simple_tailroom(buf) >= len);

	tail = net_buf_simple_tail(buf);
	buf->len += len;

	return tail;
}
765 
/* Copy @len bytes from @mem to the tail of the buffer; returns a
 * pointer to the start of the appended region.
 */
void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
			     size_t len)
{
	void *dst;

	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	dst = net_buf_simple_add(buf, len);
	memcpy(dst, mem, len);

	return dst;
}
773 
/* Append a single byte; returns a pointer to it inside the buffer. */
uint8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, uint8_t val)
{
	uint8_t *slot;

	NET_BUF_SIMPLE_DBG("buf %p val 0x%02x", buf, val);

	slot = net_buf_simple_add(buf, 1);
	*slot = val;

	return slot;
}
785 
/* Append @val as 16-bit little-endian. */
void net_buf_simple_add_le16(struct net_buf_simple *buf, uint16_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_add(buf, sizeof(val));
	sys_put_le16(val, dst);
}
792 
/* Append @val as 16-bit big-endian. */
void net_buf_simple_add_be16(struct net_buf_simple *buf, uint16_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_add(buf, sizeof(val));
	sys_put_be16(val, dst);
}
799 
/* Append the low 24 bits of @val as little-endian. */
void net_buf_simple_add_le24(struct net_buf_simple *buf, uint32_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_add(buf, 3);
	sys_put_le24(val, dst);
}
806 
/* Append the low 24 bits of @val as big-endian. */
void net_buf_simple_add_be24(struct net_buf_simple *buf, uint32_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_add(buf, 3);
	sys_put_be24(val, dst);
}
813 
/* Append @val as 32-bit little-endian. */
void net_buf_simple_add_le32(struct net_buf_simple *buf, uint32_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_add(buf, sizeof(val));
	sys_put_le32(val, dst);
}
820 
/* Append @val as 32-bit big-endian. */
void net_buf_simple_add_be32(struct net_buf_simple *buf, uint32_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_add(buf, sizeof(val));
	sys_put_be32(val, dst);
}
827 
/* Append the low 48 bits of @val as little-endian. */
void net_buf_simple_add_le48(struct net_buf_simple *buf, uint64_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_add(buf, 6);
	sys_put_le48(val, dst);
}
834 
/* Append the low 48 bits of @val as big-endian. */
void net_buf_simple_add_be48(struct net_buf_simple *buf, uint64_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_add(buf, 6);
	sys_put_be48(val, dst);
}
841 
/* Append @val as 64-bit little-endian. */
void net_buf_simple_add_le64(struct net_buf_simple *buf, uint64_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_add(buf, sizeof(val));
	sys_put_le64(val, dst);
}
848 
/* Append @val as 64-bit big-endian. */
void net_buf_simple_add_be64(struct net_buf_simple *buf, uint64_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_add(buf, sizeof(val));
	sys_put_be64(val, dst);
}
855 
/* Shrink the buffer by @len bytes at the tail and return a pointer to
 * the removed region (still readable until the space is reused).
 */
void *net_buf_simple_remove_mem(struct net_buf_simple *buf, size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	__ASSERT_NO_MSG(buf->len >= len);

	buf->len -= len;

	return &buf->data[buf->len];
}
865 
net_buf_simple_remove_u8(struct net_buf_simple * buf)866 uint8_t net_buf_simple_remove_u8(struct net_buf_simple *buf)
867 {
868 	uint8_t val;
869 	void *ptr;
870 
871 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
872 	val = *(uint8_t *)ptr;
873 
874 	return val;
875 }
876 
net_buf_simple_remove_le16(struct net_buf_simple * buf)877 uint16_t net_buf_simple_remove_le16(struct net_buf_simple *buf)
878 {
879 	uint16_t val;
880 	void *ptr;
881 
882 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
883 	val = UNALIGNED_GET((uint16_t *)ptr);
884 
885 	return sys_le16_to_cpu(val);
886 }
887 
net_buf_simple_remove_be16(struct net_buf_simple * buf)888 uint16_t net_buf_simple_remove_be16(struct net_buf_simple *buf)
889 {
890 	uint16_t val;
891 	void *ptr;
892 
893 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
894 	val = UNALIGNED_GET((uint16_t *)ptr);
895 
896 	return sys_be16_to_cpu(val);
897 }
898 
net_buf_simple_remove_le24(struct net_buf_simple * buf)899 uint32_t net_buf_simple_remove_le24(struct net_buf_simple *buf)
900 {
901 	struct uint24 {
902 		uint32_t u24 : 24;
903 	} __packed val;
904 	void *ptr;
905 
906 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
907 	val = UNALIGNED_GET((struct uint24 *)ptr);
908 
909 	return sys_le24_to_cpu(val.u24);
910 }
911 
net_buf_simple_remove_be24(struct net_buf_simple * buf)912 uint32_t net_buf_simple_remove_be24(struct net_buf_simple *buf)
913 {
914 	struct uint24 {
915 		uint32_t u24 : 24;
916 	} __packed val;
917 	void *ptr;
918 
919 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
920 	val = UNALIGNED_GET((struct uint24 *)ptr);
921 
922 	return sys_be24_to_cpu(val.u24);
923 }
924 
net_buf_simple_remove_le32(struct net_buf_simple * buf)925 uint32_t net_buf_simple_remove_le32(struct net_buf_simple *buf)
926 {
927 	uint32_t val;
928 	void *ptr;
929 
930 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
931 	val = UNALIGNED_GET((uint32_t *)ptr);
932 
933 	return sys_le32_to_cpu(val);
934 }
935 
net_buf_simple_remove_be32(struct net_buf_simple * buf)936 uint32_t net_buf_simple_remove_be32(struct net_buf_simple *buf)
937 {
938 	uint32_t val;
939 	void *ptr;
940 
941 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
942 	val = UNALIGNED_GET((uint32_t *)ptr);
943 
944 	return sys_be32_to_cpu(val);
945 }
946 
net_buf_simple_remove_le48(struct net_buf_simple * buf)947 uint64_t net_buf_simple_remove_le48(struct net_buf_simple *buf)
948 {
949 	struct uint48 {
950 		uint64_t u48 : 48;
951 	} __packed val;
952 	void *ptr;
953 
954 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
955 	val = UNALIGNED_GET((struct uint48 *)ptr);
956 
957 	return sys_le48_to_cpu(val.u48);
958 }
959 
net_buf_simple_remove_be48(struct net_buf_simple * buf)960 uint64_t net_buf_simple_remove_be48(struct net_buf_simple *buf)
961 {
962 	struct uint48 {
963 		uint64_t u48 : 48;
964 	} __packed val;
965 	void *ptr;
966 
967 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
968 	val = UNALIGNED_GET((struct uint48 *)ptr);
969 
970 	return sys_be48_to_cpu(val.u48);
971 }
972 
net_buf_simple_remove_le64(struct net_buf_simple * buf)973 uint64_t net_buf_simple_remove_le64(struct net_buf_simple *buf)
974 {
975 	uint64_t val;
976 	void *ptr;
977 
978 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
979 	val = UNALIGNED_GET((uint64_t *)ptr);
980 
981 	return sys_le64_to_cpu(val);
982 }
983 
net_buf_simple_remove_be64(struct net_buf_simple * buf)984 uint64_t net_buf_simple_remove_be64(struct net_buf_simple *buf)
985 {
986 	uint64_t val;
987 	void *ptr;
988 
989 	ptr = net_buf_simple_remove_mem(buf, sizeof(val));
990 	val = UNALIGNED_GET((uint64_t *)ptr);
991 
992 	return sys_be64_to_cpu(val);
993 }
994 
/* Prepend @len bytes of headroom-backed space to the front of the
 * buffer and return a pointer to the new start of data.
 */
void *net_buf_simple_push(struct net_buf_simple *buf, size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	__ASSERT_NO_MSG(net_buf_simple_headroom(buf) >= len);

	buf->len += len;
	buf->data -= len;

	return buf->data;
}
1005 
/* Copy @len bytes from @mem to the front of the buffer; returns the new
 * start of data.
 */
void *net_buf_simple_push_mem(struct net_buf_simple *buf, const void *mem,
			      size_t len)
{
	void *dst;

	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	dst = net_buf_simple_push(buf, len);
	memcpy(dst, mem, len);

	return dst;
}
1013 
/* Prepend @val as 16-bit little-endian. */
void net_buf_simple_push_le16(struct net_buf_simple *buf, uint16_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_push(buf, sizeof(val));
	sys_put_le16(val, dst);
}
1020 
/* Prepend @val as 16-bit big-endian. */
void net_buf_simple_push_be16(struct net_buf_simple *buf, uint16_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_push(buf, sizeof(val));
	sys_put_be16(val, dst);
}
1027 
/* Prepend a single byte to the buffer. */
void net_buf_simple_push_u8(struct net_buf_simple *buf, uint8_t val)
{
	uint8_t *dst;

	dst = net_buf_simple_push(buf, 1);
	*dst = val;
}
1034 
/* Prepend the low 24 bits of @val as little-endian. */
void net_buf_simple_push_le24(struct net_buf_simple *buf, uint32_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_push(buf, 3);
	sys_put_le24(val, dst);
}
1041 
/* Prepend the low 24 bits of @val as big-endian. */
void net_buf_simple_push_be24(struct net_buf_simple *buf, uint32_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_push(buf, 3);
	sys_put_be24(val, dst);
}
1048 
/* Prepend @val as 32-bit little-endian. */
void net_buf_simple_push_le32(struct net_buf_simple *buf, uint32_t val)
{
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_push(buf, sizeof(val));
	sys_put_le32(val, dst);
}
1055 
void net_buf_simple_push_be32(struct net_buf_simple *buf, uint32_t val)
{
	/* Prepend @a val in big-endian byte order. */
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	dst = net_buf_simple_push(buf, sizeof(val));
	sys_put_be32(val, dst);
}
1062 
void net_buf_simple_push_le48(struct net_buf_simple *buf, uint64_t val)
{
	/* Prepend the low 48 bits of @a val in little-endian order. */
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_push(buf, 6);
	sys_put_le48(val, dst);
}
1069 
void net_buf_simple_push_be48(struct net_buf_simple *buf, uint64_t val)
{
	/* Prepend the low 48 bits of @a val in big-endian order. */
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_push(buf, 6);
	sys_put_be48(val, dst);
}
1076 
void net_buf_simple_push_le64(struct net_buf_simple *buf, uint64_t val)
{
	/* Prepend @a val in little-endian byte order. */
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_push(buf, sizeof(val));
	sys_put_le64(val, dst);
}
1083 
void net_buf_simple_push_be64(struct net_buf_simple *buf, uint64_t val)
{
	/* Prepend @a val in big-endian byte order. */
	uint8_t *dst;

	NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

	dst = net_buf_simple_push(buf, sizeof(val));
	sys_put_be64(val, dst);
}
1090 
net_buf_simple_pull(struct net_buf_simple * buf,size_t len)1091 void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len)
1092 {
1093 	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);
1094 
1095 	__ASSERT_NO_MSG(buf->len >= len);
1096 
1097 	buf->len -= len;
1098 	return buf->data += len;
1099 }
1100 
net_buf_simple_pull_mem(struct net_buf_simple * buf,size_t len)1101 void *net_buf_simple_pull_mem(struct net_buf_simple *buf, size_t len)
1102 {
1103 	void *data = buf->data;
1104 
1105 	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);
1106 
1107 	__ASSERT_NO_MSG(buf->len >= len);
1108 
1109 	buf->len -= len;
1110 	buf->data += len;
1111 
1112 	return data;
1113 }
1114 
uint8_t net_buf_simple_pull_u8(struct net_buf_simple *buf)
{
	/* Consume and return a single byte from the front of the buffer. */
	return *(uint8_t *)net_buf_simple_pull_mem(buf, 1);
}
1124 
uint16_t net_buf_simple_pull_le16(struct net_buf_simple *buf)
{
	/* Consume a 16-bit little-endian value and return it in host
	 * byte order (sys_get_le16 handles unaligned access).
	 */
	return sys_get_le16(net_buf_simple_pull_mem(buf, sizeof(uint16_t)));
}
1134 
uint16_t net_buf_simple_pull_be16(struct net_buf_simple *buf)
{
	/* Consume a 16-bit big-endian value and return it in host
	 * byte order (sys_get_be16 handles unaligned access).
	 */
	return sys_get_be16(net_buf_simple_pull_mem(buf, sizeof(uint16_t)));
}
1144 
uint32_t net_buf_simple_pull_le24(struct net_buf_simple *buf)
{
	/* Consume a 24-bit little-endian value and return it in host
	 * byte order; sys_get_le24 reads the 3 bytes directly, avoiding
	 * the packed-bitfield + UNALIGNED_GET construction.
	 */
	return sys_get_le24(net_buf_simple_pull_mem(buf, 3));
}
1156 
uint32_t net_buf_simple_pull_be24(struct net_buf_simple *buf)
{
	/* Consume a 24-bit big-endian value and return it in host
	 * byte order; sys_get_be24 reads the 3 bytes directly, avoiding
	 * the packed-bitfield + UNALIGNED_GET construction.
	 */
	return sys_get_be24(net_buf_simple_pull_mem(buf, 3));
}
1168 
uint32_t net_buf_simple_pull_le32(struct net_buf_simple *buf)
{
	/* Consume a 32-bit little-endian value and return it in host
	 * byte order (sys_get_le32 handles unaligned access).
	 */
	return sys_get_le32(net_buf_simple_pull_mem(buf, sizeof(uint32_t)));
}
1178 
uint32_t net_buf_simple_pull_be32(struct net_buf_simple *buf)
{
	/* Consume a 32-bit big-endian value and return it in host
	 * byte order (sys_get_be32 handles unaligned access).
	 */
	return sys_get_be32(net_buf_simple_pull_mem(buf, sizeof(uint32_t)));
}
1188 
uint64_t net_buf_simple_pull_le48(struct net_buf_simple *buf)
{
	/* Consume a 48-bit little-endian value and return it in host
	 * byte order; sys_get_le48 reads the 6 bytes directly, avoiding
	 * the packed-bitfield + UNALIGNED_GET construction.
	 */
	return sys_get_le48(net_buf_simple_pull_mem(buf, 6));
}
1200 
uint64_t net_buf_simple_pull_be48(struct net_buf_simple *buf)
{
	/* Consume a 48-bit big-endian value and return it in host
	 * byte order; sys_get_be48 reads the 6 bytes directly, avoiding
	 * the packed-bitfield + UNALIGNED_GET construction.
	 */
	return sys_get_be48(net_buf_simple_pull_mem(buf, 6));
}
1212 
uint64_t net_buf_simple_pull_le64(struct net_buf_simple *buf)
{
	/* Consume a 64-bit little-endian value and return it in host
	 * byte order (sys_get_le64 handles unaligned access).
	 */
	return sys_get_le64(net_buf_simple_pull_mem(buf, sizeof(uint64_t)));
}
1222 
uint64_t net_buf_simple_pull_be64(struct net_buf_simple *buf)
{
	/* Consume a 64-bit big-endian value and return it in host
	 * byte order (sys_get_be64 handles unaligned access).
	 */
	return sys_get_be64(net_buf_simple_pull_mem(buf, sizeof(uint64_t)));
}
1232 
net_buf_simple_headroom(struct net_buf_simple * buf)1233 size_t net_buf_simple_headroom(struct net_buf_simple *buf)
1234 {
1235 	return buf->data - buf->__buf;
1236 }
1237 
net_buf_simple_tailroom(struct net_buf_simple * buf)1238 size_t net_buf_simple_tailroom(struct net_buf_simple *buf)
1239 {
1240 	return buf->size - net_buf_simple_headroom(buf) - buf->len;
1241 }
1242 
net_buf_simple_max_len(struct net_buf_simple * buf)1243 uint16_t net_buf_simple_max_len(struct net_buf_simple *buf)
1244 {
1245 	return buf->size - net_buf_simple_headroom(buf);
1246 }
1247