/*
 * Copyright (c) 2015 Intel Corporation
 * Additional Copyright (c) 2018 Espressif Systems (Shanghai) PTE LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include "mesh_common.h"

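/* Return the zero-based index of 'buf' within its pool, derived from its
 * offset in the pool's backing buffer array.
 */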
int net_buf_id(struct net_buf *buf)
{
    struct net_buf_pool *pool = buf->pool;

    return buf - pool->__bufs;
}

static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
                                              uint16_t uninit_count)
{
    struct net_buf *buf = NULL;

    buf = &pool->__bufs[pool->buf_count - uninit_count];

    buf->pool = pool;

    return buf;
}

void net_buf_simple_clone(const struct net_buf_simple *original,
                          struct net_buf_simple *clone)
{
    memcpy(clone, original, sizeof(struct net_buf_simple));
}

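/* The net_buf_simple_add*() helpers append data at the tail of the buffer,
 * asserting that enough tailroom is available and advancing buf->len.
 */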
void *net_buf_simple_add(struct net_buf_simple *buf, size_t len)
{
    uint8_t *tail = net_buf_simple_tail(buf);

    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);

    NET_BUF_SIMPLE_ASSERT(net_buf_simple_tailroom(buf) >= len);

    buf->len += len;
    return tail;
}

void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
                             size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);

    return memcpy(net_buf_simple_add(buf, len), mem, len);
}

uint8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, uint8_t val)
{
    uint8_t *u8 = NULL;

    NET_BUF_SIMPLE_DBG("buf %p val 0x%02x", buf, val);

    u8 = net_buf_simple_add(buf, 1);
    *u8 = val;

    return u8;
}

void net_buf_simple_add_le16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_le24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_be24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_le32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le32(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be32(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_le48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le48(val, net_buf_simple_add(buf, 6));
}

void net_buf_simple_add_be48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be48(val, net_buf_simple_add(buf, 6));
}

void net_buf_simple_add_le64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le64(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be64(val, net_buf_simple_add(buf, sizeof(val)));
}

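/* The net_buf_simple_push*() helpers prepend data by moving buf->data back
 * into the reserved headroom, asserting that enough headroom is available.
 */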
void *net_buf_simple_push(struct net_buf_simple *buf, size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);

    NET_BUF_SIMPLE_ASSERT(net_buf_simple_headroom(buf) >= len);

    buf->data -= len;
    buf->len += len;
    return buf->data;
}

void net_buf_simple_push_le16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be16(struct net_buf_simple *buf, uint16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_u8(struct net_buf_simple *buf, uint8_t val)
{
    uint8_t *data = net_buf_simple_push(buf, 1);

    *data = val;
}

void net_buf_simple_push_le24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le24(val, net_buf_simple_push(buf, 3));
}

void net_buf_simple_push_be24(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be24(val, net_buf_simple_push(buf, 3));
}

void net_buf_simple_push_le32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le32(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be32(struct net_buf_simple *buf, uint32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be32(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_le48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le48(val, net_buf_simple_push(buf, 6));
}

void net_buf_simple_push_be48(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be48(val, net_buf_simple_push(buf, 6));
}

void net_buf_simple_push_le64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le64(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be64(struct net_buf_simple *buf, uint64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be64(val, net_buf_simple_push(buf, sizeof(val)));
}

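/* The net_buf_simple_pull*() helpers consume data from the beginning of the
 * buffer, advancing buf->data and shrinking buf->len.
 */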
void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %u", buf, len);

    NET_BUF_SIMPLE_ASSERT(buf->len >= len);

    buf->len -= len;
    return buf->data += len;
}

void *net_buf_simple_pull_mem(struct net_buf_simple *buf, size_t len)
{
    void *data = buf->data;

    NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

    NET_BUF_SIMPLE_ASSERT(buf->len >= len);

    buf->len -= len;
    buf->data += len;

    return data;
}

uint8_t net_buf_simple_pull_u8(struct net_buf_simple *buf)
{
    uint8_t val = 0U;

    val = buf->data[0];
    net_buf_simple_pull(buf, 1);

    return val;
}

uint16_t net_buf_simple_pull_le16(struct net_buf_simple *buf)
{
    uint16_t val = 0U;

    val = UNALIGNED_GET((uint16_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le16_to_cpu(val);
}

uint16_t net_buf_simple_pull_be16(struct net_buf_simple *buf)
{
    uint16_t val = 0U;

    val = UNALIGNED_GET((uint16_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be16_to_cpu(val);
}

uint32_t net_buf_simple_pull_le24(struct net_buf_simple *buf)
{
    struct uint24 {
        uint32_t u24:24;
    } __packed val;

    val = UNALIGNED_GET((struct uint24 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le24_to_cpu(val.u24);
}

uint32_t net_buf_simple_pull_be24(struct net_buf_simple *buf)
{
    struct uint24 {
        uint32_t u24:24;
    } __packed val;

    val = UNALIGNED_GET((struct uint24 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be24_to_cpu(val.u24);
}

uint32_t net_buf_simple_pull_le32(struct net_buf_simple *buf)
{
    uint32_t val = 0U;

    val = UNALIGNED_GET((uint32_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le32_to_cpu(val);
}

uint32_t net_buf_simple_pull_be32(struct net_buf_simple *buf)
{
    uint32_t val = 0U;

    val = UNALIGNED_GET((uint32_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be32_to_cpu(val);
}

uint64_t net_buf_simple_pull_le48(struct net_buf_simple *buf)
{
    struct uint48 {
        uint64_t u48:48;
    } __packed val;

    val = UNALIGNED_GET((struct uint48 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le48_to_cpu(val.u48);
}

uint64_t net_buf_simple_pull_be48(struct net_buf_simple *buf)
{
    struct uint48 {
        uint64_t u48:48;
    } __packed val;

    val = UNALIGNED_GET((struct uint48 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be48_to_cpu(val.u48);
}

uint64_t net_buf_simple_pull_le64(struct net_buf_simple *buf)
{
    uint64_t val;

    val = UNALIGNED_GET((uint64_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le64_to_cpu(val);
}

uint64_t net_buf_simple_pull_be64(struct net_buf_simple *buf)
{
    uint64_t val;

    val = UNALIGNED_GET((uint64_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be64_to_cpu(val);
}

size_t net_buf_simple_headroom(struct net_buf_simple *buf)
{
    return buf->data - buf->__buf;
}

size_t net_buf_simple_tailroom(struct net_buf_simple *buf)
{
    return buf->size - net_buf_simple_headroom(buf) - buf->len;
}

void net_buf_reset(struct net_buf *buf)
{
    NET_BUF_ASSERT(buf->flags == 0);
    NET_BUF_ASSERT(buf->frags == NULL);

    net_buf_simple_reset(&buf->b);
}

void net_buf_simple_init_with_data(struct net_buf_simple *buf,
                                   void *data, size_t size)
{
    buf->__buf = data;
    buf->data = data;
    buf->size = size;
    buf->len = size;
}

void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve)
{
    NET_BUF_ASSERT(buf);
    NET_BUF_ASSERT(buf->len == 0U);
    NET_BUF_DBG("buf %p reserve %zu", buf, reserve);

    buf->data = buf->__buf + reserve;
}

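/* net_buf_slist_put()/net_buf_slist_get() keep fragment chains intact across
 * the list: the NET_BUF_FRAGS flag marks entries that are followed by their
 * fragments, so get() can re-link the chain when removing the head.
 */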
void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
    struct net_buf *tail = NULL;

    NET_BUF_ASSERT(list);
    NET_BUF_ASSERT(buf);

    for (tail = buf; tail->frags; tail = tail->frags) {
        tail->flags |= NET_BUF_FRAGS;
    }

    bt_mesh_list_lock();
    sys_slist_append_list(list, &buf->node, &tail->node);
    bt_mesh_list_unlock();
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
    struct net_buf *buf = NULL, *frag = NULL;

    NET_BUF_ASSERT(list);

    bt_mesh_list_lock();
    buf = (void *)sys_slist_get(list);
    bt_mesh_list_unlock();

    if (!buf) {
        return NULL;
    }

    /* Get any fragments belonging to this buffer */
    for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
        bt_mesh_list_lock();
        frag->frags = (void *)sys_slist_get(list);
        bt_mesh_list_unlock();

        NET_BUF_ASSERT(frag->frags);

        /* The fragments flag is only for list-internal usage */
        frag->flags &= ~NET_BUF_FRAGS;
    }

    /* Mark the end of the fragment list */
    frag->frags = NULL;

    return buf;
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
    NET_BUF_ASSERT(buf);

    NET_BUF_DBG("buf %p (old) ref %u pool %p", buf, buf->ref, buf->pool);

    buf->ref++;
    return buf;
}

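/* Drop one reference. When the count reaches zero the buffer is returned to
 * its pool and the walk continues with the buffer's fragment chain.
 */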
#if defined(CONFIG_BLE_MESH_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
    NET_BUF_ASSERT(buf);

    while (buf) {
        struct net_buf *frags = buf->frags;
        struct net_buf_pool *pool = NULL;

#if defined(CONFIG_BLE_MESH_NET_BUF_LOG)
        if (!buf->ref) {
            NET_BUF_ERR("%s():%d: buf %p double free", func, line,
                        buf);
            return;
        }
#endif
        NET_BUF_DBG("buf %p ref %u pool %p frags %p", buf, buf->ref,
                    buf->pool, buf->frags);

        /* Changed by Espressif. Check !buf->ref to avoid decrementing
         * the reference count below zero.
         */
        if (!buf->ref || --buf->ref > 0) {
            return;
        }

        buf->frags = NULL;

        pool = buf->pool;

        pool->uninit_count++;
#if defined(CONFIG_BLE_MESH_NET_BUF_POOL_USAGE)
        pool->avail_count++;
        NET_BUF_DBG("Unref, pool %p, avail_count %d, uninit_count %d",
                    pool, pool->avail_count, pool->uninit_count);
        NET_BUF_ASSERT(pool->avail_count <= pool->buf_count);
#endif

        if (pool->destroy) {
            pool->destroy(buf);
        }

        buf = frags;
    }
}

static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size, int32_t timeout)
{
    struct net_buf_pool *pool = buf->pool;
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    *size = MIN(fixed->data_size, *size);

    return fixed->data_pool + fixed->data_size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
    /* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
    .alloc = fixed_data_alloc,
    .unref = fixed_data_unref,
};

static uint8_t *data_alloc(struct net_buf *buf, size_t *size, int32_t timeout)
{
    struct net_buf_pool *pool = buf->pool;

    return pool->alloc->cb->alloc(buf, size, timeout);
}

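/* Allocate a buffer from 'pool' and, unless 'size' is zero, a data block of
 * at most 'size' bytes through the pool's allocator callbacks.
 */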
#if defined(CONFIG_BLE_MESH_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
                                        int32_t timeout, const char *func, int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
                                  int32_t timeout)
#endif
{
    struct net_buf *buf = NULL;
    int i;

    NET_BUF_ASSERT(pool);

    NET_BUF_DBG("Alloc, pool %p, uninit_count %d, buf_count %d",
                pool, pool->uninit_count, pool->buf_count);

    /* We need to lock interrupts temporarily to prevent race conditions
     * when accessing pool->uninit_count.
     */
    bt_mesh_buf_lock();

    /* If there are uninitialized buffers we're guaranteed to succeed
     * with the allocation one way or another.
     */
    if (pool->uninit_count) {
        /* Changed by Espressif. Only hand out a buffer whose ref count is 0 */
        for (i = pool->buf_count; i > 0; i--) {
            buf = pool_get_uninit(pool, i);
            if (!buf->ref) {
                bt_mesh_buf_unlock();
                goto success;
            }
        }
    }

    bt_mesh_buf_unlock();

    NET_BUF_ERR("Out of free buffers, pool %p", pool);
    return NULL;

success:
    NET_BUF_DBG("allocated buf %p", buf);

    if (size) {
        buf->__buf = data_alloc(buf, &size, timeout);
        if (!buf->__buf) {
            NET_BUF_ERR("Out of data, buf %p", buf);
            return NULL;
        }
    } else {
        NET_BUF_WARN("Zero data size, buf %p", buf);
        buf->__buf = NULL;
    }

    buf->ref = 1;
    buf->flags = 0;
    buf->frags = NULL;
    buf->size = size;
    net_buf_reset(buf);

    pool->uninit_count--;
#if defined(CONFIG_BLE_MESH_NET_BUF_POOL_USAGE)
    pool->avail_count--;
    NET_BUF_ASSERT(pool->avail_count >= 0);
#endif

    return buf;
}

#if defined(CONFIG_BLE_MESH_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
                                          int32_t timeout, const char *func,
                                          int line)
{
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func, line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, int32_t timeout)
{
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    return net_buf_alloc_len(pool, fixed->data_size, timeout);
}
#endif

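/* Fragment chain helpers: fragments are singly linked through buf->frags;
 * net_buf_frag_insert() takes ownership of the fragment's reference.
 */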
struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
    NET_BUF_ASSERT(buf);

    while (buf->frags) {
        buf = buf->frags;
    }

    return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
    NET_BUF_ASSERT(parent);
    NET_BUF_ASSERT(frag);

    if (parent->frags) {
        net_buf_frag_last(frag)->frags = parent->frags;
    }
    /* Take ownership of the fragment reference */
    parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
    NET_BUF_ASSERT(frag);

    if (!head) {
        return net_buf_ref(frag);
    }

    net_buf_frag_insert(net_buf_frag_last(head), frag);

    return head;
}

#if defined(CONFIG_BLE_MESH_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
                                       struct net_buf *frag,
                                       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
    struct net_buf *next_frag = NULL;

    NET_BUF_ASSERT(frag);

    if (parent) {
        NET_BUF_ASSERT(parent->frags);
        NET_BUF_ASSERT(parent->frags == frag);
        parent->frags = frag->frags;
    }

    next_frag = frag->frags;

    frag->frags = NULL;

#if defined(CONFIG_BLE_MESH_NET_BUF_LOG)
    net_buf_unref_debug(frag, func, line);
#else
    net_buf_unref(frag);
#endif

    return next_frag;
}

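/* Copy up to 'len' bytes (bounded by 'dst_len') starting at 'offset' from the
 * fragment chain 'src' into the flat buffer 'dst'; returns the bytes copied.
 */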
size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
                         size_t offset, size_t len)
{
    struct net_buf *frag = NULL;
    size_t to_copy = 0U;
    size_t copied = 0U;

    len = MIN(len, dst_len);

    frag = src;

    /* find the right fragment to start copying from */
    while (frag && offset >= frag->len) {
        offset -= frag->len;
        frag = frag->frags;
    }

    /* traverse the fragment chain until len bytes are copied */
    copied = 0;
    while (frag && len > 0) {
        to_copy = MIN(len, frag->len - offset);
        memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);

        copied += to_copy;

        /* to_copy is always <= len */
        len -= to_copy;
        frag = frag->frags;

        /* after the first iteration, this value will be 0 */
        offset = 0;
    }

    return copied;
}

/* This helper routine appends multiple bytes. If there is no room left in the
 * current fragment, a new fragment is allocated via allocate_cb and added to
 * the buffer. It assumes that the buffer has at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
                            const void *value, int32_t timeout,
                            net_buf_allocator_cb allocate_cb, void *user_data)
{
    struct net_buf *frag = net_buf_frag_last(buf);
    size_t added_len = 0U;
    const uint8_t *value8 = value;

    do {
        uint16_t count = MIN(len, net_buf_tailroom(frag));

        net_buf_add_mem(frag, value8, count);
        len -= count;
        added_len += count;
        value8 += count;

        if (len == 0) {
            return added_len;
        }

        frag = allocate_cb(timeout, user_data);
        if (!frag) {
            return added_len;
        }

        net_buf_frag_add(buf, frag);
    } while (1);

    /* Unreachable */
    return 0;
}