/* buf.c - Buffer management */

/*
 * Copyright (c) 2015-2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_MODULE_NAME net_buf
#define LOG_LEVEL CONFIG_NET_BUF_LOG_LEVEL

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/net_buf.h>

#if defined(CONFIG_NET_BUF_LOG)
#define NET_BUF_DBG(fmt, ...) LOG_DBG("(%p) " fmt, k_current_get(), \
                                      ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#else

#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_LOG */

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

#define GET_ALIGN(pool) MAX(sizeof(void *), pool->alloc->alignment)

/* Linker-defined symbol bound to the static pool structs */
STRUCT_SECTION_START_EXTERN(net_buf_pool);

struct net_buf_pool *net_buf_pool_get(int id)
{
        struct net_buf_pool *pool;

        STRUCT_SECTION_GET(net_buf_pool, id, &pool);

        return pool;
}

static int pool_id(struct net_buf_pool *pool)
{
        return pool - TYPE_SECTION_START(net_buf_pool);
}

int net_buf_id(const struct net_buf *buf)
{
        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
        size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
                                      __alignof__(struct net_buf));
        ptrdiff_t offset = (uint8_t *)buf - (uint8_t *)pool->__bufs;

        return offset / struct_size;
}

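/*
 * Worked example (illustrative only; the sizes below are made up): if
 * sizeof(struct net_buf) were 28 and a pool declared user_data_size of 4,
 * each slot in __bufs would occupy ROUND_UP(28 + 4, __alignof__(struct net_buf))
 * bytes, e.g. 32. A buffer sitting 96 bytes into __bufs would then have
 * id 96 / 32 == 3. pool_get_uninit() below performs the inverse mapping
 * when handing out buffers that have never been used.
 */
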
static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
                                              uint16_t uninit_count)
{
        size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
                                      __alignof__(struct net_buf));
        size_t byte_offset = (pool->buf_count - uninit_count) * struct_size;
        struct net_buf *buf;

        buf = (struct net_buf *)(((uint8_t *)pool->__bufs) + byte_offset);

        buf->pool_id = pool_id(pool);
        buf->user_data_size = pool->user_data_size;

        return buf;
}

void net_buf_reset(struct net_buf *buf)
{
        __ASSERT_NO_MSG(buf->frags == NULL);

        net_buf_simple_reset(&buf->b);
}

static uint8_t *generic_data_ref(struct net_buf *buf, uint8_t *data)
{
        struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
        uint8_t *ref_count;

        ref_count = data - GET_ALIGN(buf_pool);
        (*ref_count)++;

        return data;
}

static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
                                    k_timeout_t timeout)
{
        struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
        struct k_heap *pool = buf_pool->alloc->alloc_data;
        uint8_t *ref_count;
        void *b;

        if (buf_pool->alloc->alignment == 0) {
                /* Reserve extra space for a ref-count (uint8_t) */
                b = k_heap_alloc(pool, sizeof(void *) + *size, timeout);

        } else {
                if (*size < buf_pool->alloc->alignment) {
                        NET_BUF_DBG("Requested size %zu is smaller than alignment %zu",
                                    *size, buf_pool->alloc->alignment);
                        return NULL;
                }

                /* Reserve extra space for a ref-count (uint8_t) */
                b = k_heap_aligned_alloc(pool,
                                         buf_pool->alloc->alignment,
                                         GET_ALIGN(buf_pool) +
                                         ROUND_UP(*size, buf_pool->alloc->alignment),
                                         timeout);
        }

        if (b == NULL) {
                return NULL;
        }

        ref_count = (uint8_t *)b;
        *ref_count = 1U;

        /* Return pointer to the byte following the ref count */
        return ref_count + GET_ALIGN(buf_pool);
}

static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
{
        struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
        struct k_heap *pool = buf_pool->alloc->alloc_data;
        uint8_t *ref_count;

        ref_count = data - GET_ALIGN(buf_pool);
        if (--(*ref_count)) {
                return;
        }

        /* Last reference dropped: free the whole allocation, ref-count header included */
        k_heap_free(pool, ref_count);
}

const struct net_buf_data_cb net_buf_var_cb = {
        .alloc = mem_pool_data_alloc,
        .ref = generic_data_ref,
        .unref = mem_pool_data_unref,
};

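/*
 * Sketch of how net_buf_var_cb is typically reached (illustrative; the
 * pool name, sizes and timeout below are arbitrary): a variable-size pool
 * backed by a k_heap is declared with NET_BUF_POOL_VAR_DEFINE, and each
 * allocation carves its data, plus the ref-count header, out of that heap.
 *
 *   NET_BUF_POOL_VAR_DEFINE(var_pool, 8, 1024, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc_len(&var_pool, 128, K_MSEC(50));
 *   if (buf) {
 *           net_buf_add_mem(buf, payload, payload_len);
 *           net_buf_unref(buf);
 *   }
 */
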
static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
                                 k_timeout_t timeout)
{
        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
        const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

        *size = pool->alloc->max_alloc_size;

        return fixed->data_pool + *size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
        /* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
        .alloc = fixed_data_alloc,
        .unref = fixed_data_unref,
};

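/*
 * Sketch of the fixed allocator in use (illustrative; the pool name and
 * sizes are arbitrary): NET_BUF_POOL_DEFINE creates a pool whose data
 * blocks all share one size, so fixed_data_alloc() simply indexes into
 * the statically reserved data area and no per-block ref count is kept.
 *
 *   NET_BUF_POOL_DEFINE(fixed_pool, 4, 128, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc(&fixed_pool, K_NO_WAIT);
 *   if (buf) {
 *           net_buf_add_u8(buf, 0x01);
 *           net_buf_unref(buf);
 *   }
 */
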
#if (K_HEAP_MEM_POOL_SIZE > 0)

static uint8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
                                k_timeout_t timeout)
{
        struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
        uint8_t *ref_count;

        ref_count = k_malloc(GET_ALIGN(buf_pool) + *size);
        if (!ref_count) {
                return NULL;
        }

        *ref_count = 1U;

        return ref_count + GET_ALIGN(buf_pool);
}

static void heap_data_unref(struct net_buf *buf, uint8_t *data)
{
        struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
        uint8_t *ref_count;

        ref_count = data - GET_ALIGN(buf_pool);
        if (--(*ref_count)) {
                return;
        }

        k_free(ref_count);
}

static const struct net_buf_data_cb net_buf_heap_cb = {
        .alloc = heap_data_alloc,
        .ref = generic_data_ref,
        .unref = heap_data_unref,
};

const struct net_buf_data_alloc net_buf_heap_alloc = {
        .cb = &net_buf_heap_cb,
        .max_alloc_size = 0,
};

#endif /* K_HEAP_MEM_POOL_SIZE > 0 */

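/*
 * Sketch for the system-heap variant (illustrative, only compiled when
 * K_HEAP_MEM_POOL_SIZE > 0; the pool name and sizes are arbitrary):
 * NET_BUF_POOL_HEAP_DEFINE routes data allocations through k_malloc()/
 * k_free() via net_buf_heap_cb above.
 *
 *   NET_BUF_POOL_HEAP_DEFINE(heap_pool, 4, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc_len(&heap_pool, 256, K_NO_WAIT);
 *   if (buf) {
 *           net_buf_add_le16(buf, 0x1234);
 *           net_buf_unref(buf);
 *   }
 */
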
static uint8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

        return pool->alloc->cb->alloc(buf, size, timeout);
}

static uint8_t *data_ref(struct net_buf *buf, uint8_t *data)
{
        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

        return pool->alloc->cb->ref(buf, data);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
                                        k_timeout_t timeout, const char *func,
                                        int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
                                  k_timeout_t timeout)
#endif
{
        k_timepoint_t end = sys_timepoint_calc(timeout);
        struct net_buf *buf;
        k_spinlock_key_t key;

        __ASSERT_NO_MSG(pool);

        NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size);

        /* We need to prevent race conditions
         * when accessing pool->uninit_count.
         */
        key = k_spin_lock(&pool->lock);

        /* If there are uninitialized buffers we're guaranteed to succeed
         * with the allocation one way or another.
         */
        if (pool->uninit_count) {
                uint16_t uninit_count;

                /* If this is not the first access to the pool, we can
                 * be opportunistic and try to fetch a previously used
                 * buffer from the LIFO with K_NO_WAIT.
                 */
                if (pool->uninit_count < pool->buf_count) {
                        buf = k_lifo_get(&pool->free, K_NO_WAIT);
                        if (buf) {
                                k_spin_unlock(&pool->lock, key);
                                goto success;
                        }
                }

                uninit_count = pool->uninit_count--;
                k_spin_unlock(&pool->lock, key);

                buf = pool_get_uninit(pool, uninit_count);
                goto success;
        }

        k_spin_unlock(&pool->lock, key);

#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
        if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                uint32_t ref = k_uptime_get_32();
                buf = k_lifo_get(&pool->free, K_NO_WAIT);
                while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
                        NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
                                     func, line, pool->name);
#else
                        NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
                                     func, line, pool);
#endif
                        buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
                        NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
                                     func, line, pool->name,
                                     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
                        NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
                                     func, line, pool,
                                     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
                }
        } else {
                buf = k_lifo_get(&pool->free, timeout);
        }
#else
        buf = k_lifo_get(&pool->free, timeout);
#endif
        if (!buf) {
                NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
                return NULL;
        }

success:
        NET_BUF_DBG("allocated buf %p", buf);

        if (size) {
                __maybe_unused size_t req_size = size;

                timeout = sys_timepoint_timeout(end);
                buf->__buf = data_alloc(buf, &size, timeout);
                if (!buf->__buf) {
                        NET_BUF_ERR("%s():%d: Failed to allocate data",
                                    func, line);
                        net_buf_destroy(buf);
                        return NULL;
                }

                __ASSERT_NO_MSG(req_size <= size);
        } else {
                buf->__buf = NULL;
        }

        buf->ref = 1U;
        buf->flags = 0U;
        buf->frags = NULL;
        buf->size = size;
        memset(buf->user_data, 0, buf->user_data_size);
        net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
        atomic_dec(&pool->avail_count);
        __ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
        pool->max_used = MAX(pool->max_used,
                             pool->buf_count - atomic_get(&pool->avail_count));
#endif
        return buf;
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
                                          k_timeout_t timeout, const char *func,
                                          int line)
{
        return net_buf_alloc_len_debug(pool, pool->alloc->max_alloc_size, timeout, func,
                                       line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
                                    k_timeout_t timeout)
{
        return net_buf_alloc_len(pool, pool->alloc->max_alloc_size, timeout);
}
#endif

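/*
 * Typical call sequence (illustrative; the pool and timeout are
 * assumptions): for fixed pools the net_buf_alloc() convenience wrapper
 * ends up here, and the timeout bounds both the wait for a free buffer
 * and, for variable pools, the wait for the data allocation.
 *
 *   struct net_buf *buf = net_buf_alloc(&fixed_pool, K_MSEC(100));
 *   if (!buf) {
 *           return -ENOMEM;
 *   }
 *   net_buf_add_mem(buf, hdr, sizeof(hdr));
 *   ...
 *   net_buf_unref(buf);
 */
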
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
                                              void *data, size_t size,
                                              k_timeout_t timeout,
                                              const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
                                        void *data, size_t size,
                                        k_timeout_t timeout)
#endif
{
        struct net_buf *buf;

#if defined(CONFIG_NET_BUF_LOG)
        buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
        buf = net_buf_alloc_len(pool, 0, timeout);
#endif
        if (!buf) {
                return NULL;
        }

        net_buf_simple_init_with_data(&buf->b, data, size);
        buf->flags = NET_BUF_EXTERNAL_DATA;

        return buf;
}

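/*
 * Sketch of wrapping caller-owned memory (illustrative; the array and
 * pool are assumptions): the buffer points at the external storage and
 * NET_BUF_EXTERNAL_DATA marks it so that cloning copies the data rather
 * than reference-counting it.
 *
 *   static uint8_t rx_frame[256];
 *
 *   struct net_buf *buf = net_buf_alloc_with_data(&fixed_pool, rx_frame,
 *                                                 sizeof(rx_frame), K_NO_WAIT);
 *   if (buf) {
 *           ...
 *           net_buf_unref(buf);
 *   }
 */
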
static struct k_spinlock net_buf_slist_lock;

void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
        k_spinlock_key_t key;

        __ASSERT_NO_MSG(list);
        __ASSERT_NO_MSG(buf);

        key = k_spin_lock(&net_buf_slist_lock);
        sys_slist_append(list, &buf->node);
        k_spin_unlock(&net_buf_slist_lock, key);
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
        struct net_buf *buf;
        k_spinlock_key_t key;

        __ASSERT_NO_MSG(list);

        key = k_spin_lock(&net_buf_slist_lock);

        buf = (void *)sys_slist_get(list);

        k_spin_unlock(&net_buf_slist_lock, key);

        return buf;
}

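/*
 * Sketch of the slist helpers (illustrative; the list name and the
 * producer/consumer split are assumptions): the global spinlock above
 * makes the append/get pair safe to call from different contexts.
 *
 *   static sys_slist_t tx_queue = SYS_SLIST_STATIC_INIT(&tx_queue);
 *
 *   net_buf_slist_put(&tx_queue, buf);                    // producer
 *
 *   struct net_buf *next = net_buf_slist_get(&tx_queue);  // consumer
 *   if (next) {
 *           net_buf_unref(next);
 *   }
 */
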
#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
        __ASSERT_NO_MSG(buf);

        while (buf) {
                struct net_buf *frags = buf->frags;
                struct net_buf_pool *pool;

                __ASSERT(buf->ref, "buf %p double free", buf);
                if (!buf->ref) {
#if defined(CONFIG_NET_BUF_LOG)
                        NET_BUF_ERR("%s():%d: buf %p double free", func, line,
                                    buf);
#endif
                        return;
                }
                NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
                            buf->pool_id, buf->frags);

                if (--buf->ref > 0) {
                        return;
                }

                buf->data = NULL;
                buf->frags = NULL;

                pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
                atomic_inc(&pool->avail_count);
                __ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count);
#endif

                if (pool->destroy) {
                        pool->destroy(buf);
                } else {
                        net_buf_destroy(buf);
                }

                buf = frags;
        }
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
        __ASSERT_NO_MSG(buf);

        NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
                    buf, buf->ref, buf->pool_id);
        buf->ref++;
        return buf;
}

struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
        k_timepoint_t end = sys_timepoint_calc(timeout);
        struct net_buf_pool *pool;
        struct net_buf *clone;

        __ASSERT_NO_MSG(buf);

        pool = net_buf_pool_get(buf->pool_id);

        clone = net_buf_alloc_len(pool, 0, timeout);
        if (!clone) {
                return NULL;
        }

        /* If the pool supports data referencing use that. Otherwise
         * we need to allocate new data and make a copy.
         */
        if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
                clone->__buf = buf->__buf ? data_ref(buf, buf->__buf) : NULL;
                clone->data = buf->data;
                clone->len = buf->len;
                clone->size = buf->size;
        } else {
                size_t size = buf->size;

                timeout = sys_timepoint_timeout(end);

                clone->__buf = data_alloc(clone, &size, timeout);
                if (!clone->__buf || size < buf->size) {
                        net_buf_destroy(clone);
                        return NULL;
                }

                clone->size = size;
                clone->data = clone->__buf + net_buf_headroom(buf);
                net_buf_add_mem(clone, buf->data, buf->len);
        }

        /* user_data_size should be the same for buffers from the same pool */
        __ASSERT(buf->user_data_size == clone->user_data_size, "Unexpected user data size");

        memcpy(clone->user_data, buf->user_data, clone->user_data_size);

        return clone;
}

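/*
 * Sketch of cloning (illustrative; the buffer and timeout are
 * assumptions): for pools that implement a .ref callback the clone
 * shares the original data block and only bumps its ref count, so
 * modifying the data through either buffer affects both.
 *
 *   struct net_buf *dup = net_buf_clone(buf, K_MSEC(10));
 *   if (dup) {
 *           ...
 *           net_buf_unref(dup);
 *   }
 */
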
int net_buf_user_data_copy(struct net_buf *dst, const struct net_buf *src)
{
        __ASSERT_NO_MSG(dst);
        __ASSERT_NO_MSG(src);

        if (dst == src) {
                return 0;
        }

        if (dst->user_data_size < src->user_data_size) {
                return -EINVAL;
        }

        memcpy(dst->user_data, src->user_data, src->user_data_size);

        return 0;
}

struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
        __ASSERT_NO_MSG(buf);

        while (buf->frags) {
                buf = buf->frags;
        }

        return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
        __ASSERT_NO_MSG(parent);
        __ASSERT_NO_MSG(frag);

        if (parent->frags) {
                net_buf_frag_last(frag)->frags = parent->frags;
        }
        /* Take ownership of the fragment reference */
        parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
        __ASSERT_NO_MSG(frag);

        if (!head) {
                return net_buf_ref(frag);
        }

        net_buf_frag_insert(net_buf_frag_last(head), frag);

        return head;
}

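/*
 * Sketch of building a fragment chain (illustrative; the pool name is an
 * assumption): net_buf_frag_add() takes over the caller's reference to
 * the fragment, so releasing the head is enough to release the chain.
 *
 *   struct net_buf *head = net_buf_alloc(&fixed_pool, K_NO_WAIT);
 *   struct net_buf *frag = net_buf_alloc(&fixed_pool, K_NO_WAIT);
 *
 *   head = net_buf_frag_add(head, frag);
 *   ...
 *   net_buf_unref(head);   // walks the frags chain and unrefs frag too
 */
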
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
                                       struct net_buf *frag,
                                       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
        struct net_buf *next_frag;

        __ASSERT_NO_MSG(frag);

        if (parent) {
                __ASSERT_NO_MSG(parent->frags);
                __ASSERT_NO_MSG(parent->frags == frag);
                parent->frags = frag->frags;
        }

        next_frag = frag->frags;

        frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
        net_buf_unref_debug(frag, func, line);
#else
        net_buf_unref(frag);
#endif

        return next_frag;
}

size_t net_buf_linearize(void *dst, size_t dst_len, const struct net_buf *src,
                         size_t offset, size_t len)
{
        const struct net_buf *frag;
        size_t to_copy;
        size_t copied;

        len = MIN(len, dst_len);

        frag = src;

        /* find the right fragment to start copying from */
        while (frag && offset >= frag->len) {
                offset -= frag->len;
                frag = frag->frags;
        }

        /* traverse the fragment chain until len bytes are copied */
        copied = 0;
        while (frag && len > 0) {
                to_copy = MIN(len, frag->len - offset);
                memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);

                copied += to_copy;

                /* to_copy is always <= len */
                len -= to_copy;
                frag = frag->frags;

                /* after the first iteration, this value will be 0 */
                offset = 0;
        }

        return copied;
}

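/*
 * Sketch of flattening a chain (illustrative; the destination size is an
 * assumption): net_buf_linearize() copies at most MIN(len, dst_len) bytes
 * starting at the given offset into the caller's contiguous buffer and
 * returns how many bytes were actually copied.
 *
 *   uint8_t flat[512];
 *   size_t n = net_buf_linearize(flat, sizeof(flat), head, 0,
 *                                net_buf_frags_len(head));
 */
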
/* This helper routine appends multiple bytes; if there is no room for
 * the data in the current fragment, it creates a new fragment and adds it
 * to the buffer. It assumes that the buffer has at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
                            const void *value, k_timeout_t timeout,
                            net_buf_allocator_cb allocate_cb, void *user_data)
{
        struct net_buf *frag = net_buf_frag_last(buf);
        size_t added_len = 0;
        const uint8_t *value8 = value;
        size_t max_size;

        do {
                uint16_t count = MIN(len, net_buf_tailroom(frag));

                net_buf_add_mem(frag, value8, count);
                len -= count;
                added_len += count;
                value8 += count;

                if (len == 0) {
                        return added_len;
                }

                if (allocate_cb) {
                        frag = allocate_cb(timeout, user_data);
                } else {
                        struct net_buf_pool *pool;

                        /* Allocate from the original pool if no callback has
                         * been provided.
                         */
                        pool = net_buf_pool_get(buf->pool_id);
                        max_size = pool->alloc->max_alloc_size;
                        frag = net_buf_alloc_len(pool,
                                                 max_size ? MIN(len, max_size) : len,
                                                 timeout);
                }

                if (!frag) {
                        return added_len;
                }

                net_buf_frag_add(buf, frag);
        } while (1);

        /* Unreachable */
        return 0;
}

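/*
 * Sketch of appending across fragments (illustrative; the payload and
 * timeout are assumptions): without an allocate_cb the new fragments
 * come from the same pool as the head buffer.
 *
 *   size_t written = net_buf_append_bytes(head, payload_len, payload,
 *                                         K_NO_WAIT, NULL, NULL);
 *   if (written < payload_len) {
 *           // ran out of buffers; the partial data is already appended
 *   }
 */
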
size_t net_buf_data_match(const struct net_buf *buf, size_t offset, const void *data, size_t len)
{
        const uint8_t *dptr = data;
        const uint8_t *bptr;
        size_t compared = 0;
        size_t to_compare;

        if (!buf || !data) {
                return compared;
        }

        /* find the right fragment to start comparison */
        while (buf && offset >= buf->len) {
                offset -= buf->len;
                buf = buf->frags;
        }

        while (buf && len > 0) {
                bptr = buf->data + offset;
                to_compare = MIN(len, buf->len - offset);

                for (size_t i = 0; i < to_compare; ++i) {
                        if (dptr[compared] != bptr[i]) {
                                return compared;
                        }
                        compared++;
                }

                len -= to_compare;
                buf = buf->frags;
                offset = 0;
        }

        return compared;
}

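/*
 * Sketch of comparing data in place (illustrative; the pattern is an
 * assumption): net_buf_data_match() walks the fragment chain without
 * linearizing it and returns the number of matching bytes, so a full
 * match is detected by comparing the return value with the pattern length.
 *
 *   static const uint8_t magic[] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *   if (net_buf_data_match(head, 0, magic, sizeof(magic)) == sizeof(magic)) {
 *           // header matches
 *   }
 */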