/* main.c - Application main entry point */

/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/types.h>
#include <stddef.h>
#include <string.h>
#include <zephyr/sys/printk.h>

#include <zephyr/net_buf.h>

#include <zephyr/ztest.h>

#define TEST_TIMEOUT K_SECONDS(1)

#define USER_DATA_HEAP	4
#define USER_DATA_FIXED	0
#define USER_DATA_VAR	63
#define FIXED_BUFFER_SIZE 128

struct bt_data {
	void *hci_sync;

	union {
		uint16_t hci_opcode;
		uint16_t acl_handle;
	};

	uint8_t type;
};

struct in6_addr {
	union {
		uint8_t u6_addr8[16];
		uint16_t u6_addr16[8];          /* In big endian */
		uint32_t u6_addr32[4];          /* In big endian */
	} in6_u;
#define s6_addr         in6_u.u6_addr8
#define s6_addr16       in6_u.u6_addr16
#define s6_addr32       in6_u.u6_addr32
};

struct ipv6_hdr {
	uint8_t vtc;
	uint8_t tcflow;
	uint16_t flow;
	uint8_t len[2];
	uint8_t nexthdr;
	uint8_t hop_limit;
	struct in6_addr src;
	struct in6_addr dst;
} __attribute__((__packed__));

struct udp_hdr {
	uint16_t src_port;
	uint16_t dst_port;
	uint16_t len;
	uint16_t chksum;
} __attribute__((__packed__));

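/* Number of times a pool destroy callback has run; tests reset this before counting. */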
static int destroy_called;

static void buf_destroy(struct net_buf *buf);
static void fixed_destroy(struct net_buf *buf);
static void var_destroy(struct net_buf *buf);
static void var_destroy_aligned(struct net_buf *buf);
static void var_destroy_aligned_small(struct net_buf *buf);

#define VAR_POOL_ALIGN 8
#define VAR_POOL_ALIGN_SMALL 4
#define VAR_POOL_DATA_COUNT 4
#define VAR_POOL_DATA_SIZE (VAR_POOL_DATA_COUNT * 64)

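/* Pools under test: heap-allocated data, fixed-size data blocks and a shared variable-size data area. */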
NET_BUF_POOL_HEAP_DEFINE(bufs_pool, 10, USER_DATA_HEAP, buf_destroy);
NET_BUF_POOL_FIXED_DEFINE(fixed_pool, 10, FIXED_BUFFER_SIZE, USER_DATA_FIXED, fixed_destroy);
NET_BUF_POOL_VAR_DEFINE(var_pool, 10, 1024, USER_DATA_VAR, var_destroy);

/* Two pools, one with buffers aligned to 8 bytes and one with buffers
 * aligned to 4 bytes. The aligned pools are used to test that the
 * alignment works correctly.
 */
NET_BUF_POOL_VAR_ALIGN_DEFINE(var_pool_aligned, VAR_POOL_DATA_COUNT,
			      VAR_POOL_DATA_SIZE, USER_DATA_VAR,
			      var_destroy_aligned, VAR_POOL_ALIGN);
NET_BUF_POOL_VAR_ALIGN_DEFINE(var_pool_aligned_small, VAR_POOL_DATA_COUNT,
			      VAR_POOL_DATA_SIZE, USER_DATA_VAR,
			      var_destroy_aligned_small, VAR_POOL_ALIGN_SMALL);

static void buf_destroy(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	destroy_called++;
	zassert_equal(pool, &bufs_pool, "Invalid free pointer in buffer");
	net_buf_destroy(buf);
}

static void fixed_destroy(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	destroy_called++;
	zassert_equal(pool, &fixed_pool, "Invalid free pointer in buffer");
	net_buf_destroy(buf);
}

static void var_destroy(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	destroy_called++;
	zassert_equal(pool, &var_pool, "Invalid free pointer in buffer");
	net_buf_destroy(buf);
}

static void var_destroy_aligned(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	destroy_called++;
	zassert_equal(pool, &var_pool_aligned, "Invalid free pointer in buffer");
	net_buf_destroy(buf);
}

static void var_destroy_aligned_small(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	destroy_called++;
	zassert_equal(pool, &var_pool_aligned_small, "Invalid free pointer in buffer");
	net_buf_destroy(buf);
}

static const char example_data[] = "0123456789"
				   "abcdefghijklmnopqrstuvxyz"
				   "!#¤%&/()=?";

ZTEST(net_buf_tests, test_net_buf_1)
{
	struct net_buf *bufs[bufs_pool.buf_count];
	struct net_buf *buf;
	int i;

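	/* Drain the whole pool while checking the availability and max-used statistics. */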
	for (i = 0; i < bufs_pool.buf_count; i++) {
		zassert_equal(bufs_pool.buf_count - i, net_buf_get_available(&bufs_pool));
		/* Assertion requires that this test runs first */
		zassert_equal(i, net_buf_get_max_used(&bufs_pool));
		buf = net_buf_alloc_len(&bufs_pool, 74, K_NO_WAIT);
		zassert_not_null(buf, "Failed to get buffer");
		bufs[i] = buf;
	}

	for (i = 0; i < ARRAY_SIZE(bufs); i++) {
		zassert_equal(i, net_buf_get_available(&bufs_pool));
		zassert_equal(ARRAY_SIZE(bufs), net_buf_get_max_used(&bufs_pool));
		net_buf_unref(bufs[i]);
	}
	zassert_equal(bufs_pool.buf_count, net_buf_get_available(&bufs_pool));

	zassert_equal(destroy_called, ARRAY_SIZE(bufs),
		      "Incorrect destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_2)
{
	struct net_buf *frag, *head;
	static struct k_fifo fifo;
	int i;

	head = net_buf_alloc_len(&bufs_pool, 74, K_NO_WAIT);
	zassert_not_null(head, "Failed to get fragment list head");

	frag = head;
	for (i = 0; i < bufs_pool.buf_count - 1; i++) {
		frag->frags = net_buf_alloc_len(&bufs_pool, 74, K_NO_WAIT);
		zassert_not_null(frag->frags, "Failed to get fragment");
		frag = frag->frags;
	}

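	/* Pass the fragment chain through a FIFO, then free everything with a single unref of the head. */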
	k_fifo_init(&fifo);
	k_fifo_put(&fifo, head);
	head = k_fifo_get(&fifo, K_NO_WAIT);

	destroy_called = 0;
	net_buf_unref(head);
	zassert_equal(destroy_called, bufs_pool.buf_count,
		      "Incorrect fragment destroy callback count");
}

static void test_3_thread(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg3);

	struct k_fifo *fifo = (struct k_fifo *)arg1;
	struct k_sem *sema = (struct k_sem *)arg2;
	struct net_buf *buf;

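	/* Signal the main thread that this thread is ready to receive the buffer. */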
	k_sem_give(sema);

	buf = k_fifo_get(fifo, TEST_TIMEOUT);
	zassert_not_null(buf, "Unable to get buffer");

	destroy_called = 0;
	net_buf_unref(buf);
	zassert_equal(destroy_called, bufs_pool.buf_count,
		      "Incorrect destroy callback count");

	k_sem_give(sema);
}

static K_THREAD_STACK_DEFINE(test_3_thread_stack, 1024 + CONFIG_TEST_EXTRA_STACK_SIZE);

ZTEST(net_buf_tests, test_net_buf_3)
{
	static struct k_thread test_3_thread_data;
	struct net_buf *frag, *head;
	static struct k_fifo fifo;
	static struct k_sem sema;
	int i;

	head = net_buf_alloc_len(&bufs_pool, 74, K_NO_WAIT);
	zassert_not_null(head, "Failed to get fragment list head");

	frag = head;
	for (i = 0; i < bufs_pool.buf_count - 1; i++) {
		frag->frags = net_buf_alloc_len(&bufs_pool, 74, K_NO_WAIT);
		zassert_not_null(frag->frags, "Failed to get fragment");
		frag = frag->frags;
	}

	k_fifo_init(&fifo);
	k_sem_init(&sema, 0, UINT_MAX);

	k_thread_create(&test_3_thread_data, test_3_thread_stack,
			K_THREAD_STACK_SIZEOF(test_3_thread_stack),
			test_3_thread, &fifo, &sema, NULL,
			K_PRIO_COOP(7), 0, K_NO_WAIT);

	zassert_true(k_sem_take(&sema, TEST_TIMEOUT) == 0,
		     "Timeout while waiting for semaphore");

	k_fifo_put(&fifo, head);

	zassert_true(k_sem_take(&sema, TEST_TIMEOUT) == 0,
		     "Timeout while waiting for semaphore");
}

ZTEST(net_buf_tests, test_net_buf_4)
{
	struct net_buf *frags[bufs_pool.buf_count - 1];
	struct net_buf *buf, *frag;
	int i, removed;

	destroy_called = 0;

	/* Create a buf that does not have any data to store; it just
	 * links to the fragments.
	 */
	buf = net_buf_alloc_len(&bufs_pool, 0, K_FOREVER);

	zassert_equal(buf->size, 0, "Invalid buffer size");

	/* Test the fragments by appending after last fragment */
	for (i = 0; i < bufs_pool.buf_count - 2; i++) {
		frag = net_buf_alloc_len(&bufs_pool, 74, K_FOREVER);
		net_buf_frag_add(buf, frag);
		frags[i] = frag;
	}

	/* And one as a first fragment */
	frag = net_buf_alloc_len(&bufs_pool, 74, K_FOREVER);
	net_buf_frag_insert(buf, frag);
	frags[i] = frag;

	frag = buf->frags;

	i = 0;
	while (frag) {
		frag = frag->frags;
		i++;
	}

	zassert_equal(i, bufs_pool.buf_count - 1, "Incorrect fragment count");

	/* Remove about half of the fragments and verify count */
	i = removed = 0;
	frag = buf->frags;
	while (frag) {
		struct net_buf *next = frag->frags;

		if ((i % 2) && next) {
			net_buf_frag_del(frag, next);
			removed++;
		} else {
			frag = next;
		}
		i++;
	}

	i = 0;
	frag = buf->frags;
	while (frag) {
		frag = frag->frags;
		i++;
	}

	zassert_equal(1 + i + removed, bufs_pool.buf_count,
		      "Incorrect removed fragment count");

	removed = 0;

	while (buf->frags) {
		struct net_buf *frag2 = buf->frags;

		net_buf_frag_del(buf, frag2);
		removed++;
	}

	zassert_equal(removed, i, "Incorrect removed fragment count");
	zassert_equal(destroy_called, bufs_pool.buf_count - 1,
		      "Incorrect frag destroy callback count");

	/* Add the fragments back and verify that they are properly
	 * unreferenced by freeing the top buf.
	 */
	for (i = 0; i < bufs_pool.buf_count - 4; i++) {
		net_buf_frag_add(buf,
				 net_buf_alloc_len(&bufs_pool, 74, K_FOREVER));
	}

	/* Create a fragment list and add it to the frags list after the
	 * first element.
	 */
	frag = net_buf_alloc_len(&bufs_pool, 74, K_FOREVER);
	net_buf_frag_add(frag, net_buf_alloc_len(&bufs_pool, 74, K_FOREVER));
	net_buf_frag_insert(frag, net_buf_alloc_len(&bufs_pool, 74, K_FOREVER));
	net_buf_frag_insert(buf->frags->frags, frag);

	i = 0;
	frag = buf->frags;
	while (frag) {
		frag = frag->frags;
		i++;
	}

	zassert_equal(i, bufs_pool.buf_count - 1, "Incorrect fragment count");

	destroy_called = 0;

	net_buf_unref(buf);

	zassert_equal(destroy_called, bufs_pool.buf_count,
		      "Incorrect frag destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_big_buf)
{
	struct net_buf *big_frags[bufs_pool.buf_count];
	struct net_buf *buf, *frag;
	struct ipv6_hdr *ipv6;
	struct udp_hdr *udp;
	int i, len;

	destroy_called = 0;

	buf = net_buf_alloc_len(&bufs_pool, 0, K_FOREVER);

	/* We reserve some space in front of the buffer for protocol
	 * headers (IPv6 + UDP). Link layer headers are ignored in
	 * this example.
	 */
#define PROTO_HEADERS (sizeof(struct ipv6_hdr) + sizeof(struct udp_hdr))
	frag = net_buf_alloc_len(&bufs_pool, 1280, K_FOREVER);
	net_buf_reserve(frag, PROTO_HEADERS);
	big_frags[0] = frag;

	/* First add some application data */
	len = strlen(example_data);
	for (i = 0; i < 2; i++) {
		zassert_true(net_buf_tailroom(frag) >= len,
			     "Allocated buffer is too small");
		memcpy(net_buf_add(frag, len), example_data, len);
	}

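	/* Compute where the IPv6 and UDP headers would go in the reserved headroom. */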
	ipv6 = (struct ipv6_hdr *)(frag->data - net_buf_headroom(frag));
	udp = (struct udp_hdr *)((uint8_t *)ipv6 + sizeof(*ipv6));

	net_buf_frag_add(buf, frag);
	net_buf_unref(buf);

	zassert_equal(destroy_called, 2, "Incorrect destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_multi_frags)
{
	struct net_buf *frags[bufs_pool.buf_count];
	struct net_buf *buf;
	struct ipv6_hdr *ipv6;
	struct udp_hdr *udp;
	int i, len, avail = 0, occupied = 0;

	destroy_called = 0;

	/* Example of multi fragment scenario with IPv6 */
	buf = net_buf_alloc_len(&bufs_pool, 0, K_FOREVER);

	/* We reserve some space in front of the buffer for link layer headers.
	 * In this example, we use the minimum MTU (81 bytes) defined in
	 * RFC 4944 ch. 4.
	 *
	 * Note that with IEEE 802.15.4 we typically cannot have zero-copy
	 * on the sending side because of the IPv6 header compression.
	 */

#define LL_HEADERS (127 - 81)
	for (i = 0; i < bufs_pool.buf_count - 2; i++) {
		frags[i] = net_buf_alloc_len(&bufs_pool, 128, K_FOREVER);
		net_buf_reserve(frags[i], LL_HEADERS);
		avail += net_buf_tailroom(frags[i]);
		net_buf_frag_add(buf, frags[i]);
	}

	/* Place the IP + UDP header in the first fragment */
	frags[i] = net_buf_alloc_len(&bufs_pool, 128, K_FOREVER);
	net_buf_reserve(frags[i], LL_HEADERS + (sizeof(struct ipv6_hdr) +
						sizeof(struct udp_hdr)));
	avail += net_buf_tailroom(frags[i]);
	net_buf_frag_insert(buf, frags[i]);

	/* First add some application data */
	len = strlen(example_data);
	for (i = 0; i < bufs_pool.buf_count - 2; i++) {
		zassert_true(net_buf_tailroom(frags[i]) >= len,
			     "Allocated buffer is too small");
		memcpy(net_buf_add(frags[i], len), example_data, len);
		occupied += frags[i]->len;
	}

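	/* Point at the headroom reserved for the IPv6 and UDP headers in the first fragment. */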
	ipv6 = (struct ipv6_hdr *)(frags[i]->data - net_buf_headroom(frags[i]));
	udp = (struct udp_hdr *)((uint8_t *)ipv6 + sizeof(*ipv6));

	net_buf_unref(buf);

	zassert_equal(destroy_called, bufs_pool.buf_count,
		      "Incorrect frag destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_clone_ref_count)
{
	struct net_buf *buf, *clone;

	destroy_called = 0;

	/* Heap pool supports reference counting */
	buf = net_buf_alloc_len(&bufs_pool, 74, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");

	clone = net_buf_clone(buf, K_NO_WAIT);
	zassert_not_null(clone, "Failed to get clone buffer");
	zassert_equal(buf->data, clone->data, "Incorrect clone data pointer");

	net_buf_unref(buf);
	net_buf_unref(clone);

	zassert_equal(destroy_called, 2, "Incorrect destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_clone_no_ref_count)
{
	struct net_buf *buf, *clone;
	const uint8_t data[3] = {0x11, 0x22, 0x33};

	destroy_called = 0;

	/* Fixed pool does not support reference counting */
	buf = net_buf_alloc_len(&fixed_pool, 3, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");
	net_buf_add_mem(buf, data, sizeof(data));

	clone = net_buf_clone(buf, K_NO_WAIT);
	zassert_not_null(clone, "Failed to get clone buffer");
	zassert_not_equal(buf->data, clone->data,
			  "No reference counting support, different pointers expected");
	zassert_mem_equal(clone->data, data, sizeof(data));

	net_buf_unref(buf);
	net_buf_unref(clone);

	zassert_equal(destroy_called, 2, "Incorrect destroy callback count");
}

/* Regression test: zero-sized buffers must be copyable and must not trigger a NULL pointer dereference */
ZTEST(net_buf_tests, test_net_buf_clone_reference_counted_zero_sized_buffer)
{
	struct net_buf *buf, *clone;

	buf = net_buf_alloc_len(&var_pool, 0, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");

	clone = net_buf_clone(buf, K_NO_WAIT);
	zassert_not_null(clone, "Failed to clone zero sized buffer");

	net_buf_unref(buf);
}

ZTEST(net_buf_tests, test_net_buf_clone_user_data)
{
	struct net_buf *original, *clone;
	uint32_t *buf_user_data, *clone_user_data;

	/* Requesting size 1 because all we are interested in is the user data */
	original = net_buf_alloc_len(&bufs_pool, 1, K_NO_WAIT);
	zassert_not_null(original, "Failed to get buffer");
	buf_user_data = net_buf_user_data(original);
	*buf_user_data = 0xAABBCCDD;

	clone = net_buf_clone(original, K_NO_WAIT);
	zassert_not_null(clone, "Failed to get clone buffer");
	clone_user_data = net_buf_user_data(clone);
	zexpect_equal(*clone_user_data, 0xAABBCCDD, "User data copy is invalid");

	net_buf_unref(original);
	net_buf_unref(clone);
}

ZTEST(net_buf_tests, test_net_buf_fixed_pool)
{
	struct net_buf *buf;

	destroy_called = 0;

	buf = net_buf_alloc_len(&fixed_pool, 20, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");

	/* Verify the buffer's size and len: even though we requested fewer
	 * bytes, we should get a buffer with the fixed size.
	 */
	zassert_equal(buf->size, FIXED_BUFFER_SIZE, "Invalid fixed buffer size");
	zassert_equal(buf->len, 0, "Invalid fixed buffer length");

	net_buf_unref(buf);

	zassert_equal(destroy_called, 1, "Incorrect destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_var_pool)
{
	struct net_buf *buf1, *buf2, *buf3;

	destroy_called = 0;

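	/* Allocate two buffers of different lengths from the variable-size pool and clone the second one. */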
	buf1 = net_buf_alloc_len(&var_pool, 20, K_NO_WAIT);
	zassert_not_null(buf1, "Failed to get buffer");

	buf2 = net_buf_alloc_len(&var_pool, 200, K_NO_WAIT);
	zassert_not_null(buf2, "Failed to get buffer");

	buf3 = net_buf_clone(buf2, K_NO_WAIT);
	zassert_not_null(buf3, "Failed to clone buffer");
	zassert_equal(buf3->data, buf2->data, "Cloned data doesn't match");

	net_buf_unref(buf1);
	net_buf_unref(buf2);
	net_buf_unref(buf3);

	zassert_equal(destroy_called, 3, "Incorrect destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_byte_order)
{
	struct net_buf *buf;
	uint8_t le16[2] = { 0x02, 0x01 };
	uint8_t be16[2] = { 0x01, 0x02 };
	uint8_t le24[3] = { 0x03, 0x02, 0x01 };
	uint8_t be24[3] = { 0x01, 0x02, 0x03 };
	uint8_t le32[4] = { 0x04, 0x03, 0x02, 0x01 };
	uint8_t be32[4] = { 0x01, 0x02, 0x03, 0x04 };
	uint8_t le40[5] = { 0x05, 0x04, 0x03, 0x02, 0x01 };
	uint8_t be40[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
	uint8_t le48[6] = { 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 };
	uint8_t be48[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint8_t le64[8] = { 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 };
	uint8_t be64[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
	uint16_t u16;
	uint32_t u32;
	uint64_t u64;

	buf = net_buf_alloc_len(&fixed_pool, 16, K_FOREVER);
	zassert_not_null(buf, "Failed to get buffer");

	/* add/pull byte order */
	net_buf_add_mem(buf, &le16, sizeof(le16));
	net_buf_add_mem(buf, &be16, sizeof(be16));

	u16 = net_buf_pull_le16(buf);
	zassert_equal(u16, net_buf_pull_be16(buf),
		      "Invalid 16 bits byte order");

	net_buf_reset(buf);

	net_buf_add_le16(buf, u16);
	net_buf_add_be16(buf, u16);

	zassert_mem_equal(le16, net_buf_pull_mem(buf, sizeof(le16)),
			  sizeof(le16), "Invalid 16 bits byte order");
	zassert_mem_equal(be16, net_buf_pull_mem(buf, sizeof(be16)),
			  sizeof(be16), "Invalid 16 bits byte order");

	net_buf_reset(buf);

	net_buf_add_mem(buf, &le24, sizeof(le24));
	net_buf_add_mem(buf, &be24, sizeof(be24));

	u32 = net_buf_pull_le24(buf);
	zassert_equal(u32, net_buf_pull_be24(buf),
		      "Invalid 24 bits byte order");

	net_buf_reset(buf);

	net_buf_add_le24(buf, u32);
	net_buf_add_be24(buf, u32);

	zassert_mem_equal(le24, net_buf_pull_mem(buf, sizeof(le24)),
			  sizeof(le24), "Invalid 24 bits byte order");
	zassert_mem_equal(be24, net_buf_pull_mem(buf, sizeof(be24)),
			  sizeof(be24), "Invalid 24 bits byte order");

	net_buf_reset(buf);

	net_buf_add_mem(buf, &le32, sizeof(le32));
	net_buf_add_mem(buf, &be32, sizeof(be32));

	u32 = net_buf_pull_le32(buf);
	zassert_equal(u32, net_buf_pull_be32(buf),
		      "Invalid 32 bits byte order");

	net_buf_reset(buf);

	net_buf_add_le32(buf, u32);
	net_buf_add_be32(buf, u32);

	zassert_mem_equal(le32, net_buf_pull_mem(buf, sizeof(le32)),
			  sizeof(le32), "Invalid 32 bits byte order");
	zassert_mem_equal(be32, net_buf_pull_mem(buf, sizeof(be32)),
			  sizeof(be32), "Invalid 32 bits byte order");

	net_buf_reset(buf);

	net_buf_add_mem(buf, &le40, sizeof(le40));
	net_buf_add_mem(buf, &be40, sizeof(be40));

	u64 = net_buf_pull_le40(buf);
	zassert_equal(u64, net_buf_pull_be40(buf), "Invalid 40 bits byte order");

	net_buf_reset(buf);

	net_buf_add_le40(buf, u64);
	net_buf_add_be40(buf, u64);

	zassert_mem_equal(le40, net_buf_pull_mem(buf, sizeof(le40)), sizeof(le40),
			  "Invalid 40 bits byte order");
	zassert_mem_equal(be40, net_buf_pull_mem(buf, sizeof(be40)), sizeof(be40),
			  "Invalid 40 bits byte order");

	net_buf_reset(buf);

	net_buf_add_mem(buf, &le48, sizeof(le48));
	net_buf_add_mem(buf, &be48, sizeof(be48));

	u64 = net_buf_pull_le48(buf);
	zassert_equal(u64, net_buf_pull_be48(buf),
		      "Invalid 48 bits byte order");

	net_buf_reset(buf);

	net_buf_add_le48(buf, u64);
	net_buf_add_be48(buf, u64);

	zassert_mem_equal(le48, net_buf_pull_mem(buf, sizeof(le48)),
			  sizeof(le48), "Invalid 48 bits byte order");
	zassert_mem_equal(be48, net_buf_pull_mem(buf, sizeof(be48)),
			  sizeof(be48), "Invalid 48 bits byte order");

	net_buf_reset(buf);

	net_buf_add_mem(buf, &le64, sizeof(le64));
	net_buf_add_mem(buf, &be64, sizeof(be64));

	u64 = net_buf_pull_le64(buf);
	zassert_equal(u64, net_buf_pull_be64(buf),
		      "Invalid 64 bits byte order");

	net_buf_reset(buf);

	net_buf_add_le64(buf, u64);
	net_buf_add_be64(buf, u64);

	zassert_mem_equal(le64, net_buf_pull_mem(buf, sizeof(le64)),
			  sizeof(le64), "Invalid 64 bits byte order");
	zassert_mem_equal(be64, net_buf_pull_mem(buf, sizeof(be64)),
			  sizeof(be64), "Invalid 64 bits byte order");

	/* push/remove byte order */
	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_mem(buf, &le16, sizeof(le16));
	net_buf_push_mem(buf, &be16, sizeof(be16));

	u16 = net_buf_remove_le16(buf);
	zassert_equal(u16, net_buf_remove_be16(buf),
		      "Invalid 16 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_le16(buf, u16);
	net_buf_push_be16(buf, u16);

	zassert_mem_equal(le16, net_buf_remove_mem(buf, sizeof(le16)),
			  sizeof(le16), "Invalid 16 bits byte order");
	zassert_mem_equal(be16, net_buf_remove_mem(buf, sizeof(be16)),
			  sizeof(be16), "Invalid 16 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_mem(buf, &le24, sizeof(le24));
	net_buf_push_mem(buf, &be24, sizeof(be24));

	u32 = net_buf_remove_le24(buf);
	zassert_equal(u32, net_buf_remove_be24(buf),
		      "Invalid 24 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_le24(buf, u32);
	net_buf_push_be24(buf, u32);

	zassert_mem_equal(le24, net_buf_remove_mem(buf, sizeof(le24)),
			  sizeof(le24), "Invalid 24 bits byte order");
	zassert_mem_equal(be24, net_buf_remove_mem(buf, sizeof(be24)),
			  sizeof(be24), "Invalid 24 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_mem(buf, &le32, sizeof(le32));
	net_buf_push_mem(buf, &be32, sizeof(be32));

	u32 = net_buf_remove_le32(buf);
	zassert_equal(u32, net_buf_remove_be32(buf),
		      "Invalid 32 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_le32(buf, u32);
	net_buf_push_be32(buf, u32);

	zassert_mem_equal(le32, net_buf_remove_mem(buf, sizeof(le32)),
			  sizeof(le32), "Invalid 32 bits byte order");
	zassert_mem_equal(be32, net_buf_remove_mem(buf, sizeof(be32)),
			  sizeof(be32), "Invalid 32 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_mem(buf, &le40, sizeof(le40));
	net_buf_push_mem(buf, &be40, sizeof(be40));

	u64 = net_buf_remove_le40(buf);
	zassert_equal(u64, net_buf_remove_be40(buf), "Invalid 40 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_le40(buf, u64);
	net_buf_push_be40(buf, u64);

	zassert_mem_equal(le40, net_buf_remove_mem(buf, sizeof(le40)), sizeof(le40),
			  "Invalid 40 bits byte order");
	zassert_mem_equal(be40, net_buf_remove_mem(buf, sizeof(be40)), sizeof(be40),
			  "Invalid 40 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_mem(buf, &le48, sizeof(le48));
	net_buf_push_mem(buf, &be48, sizeof(be48));

	u64 = net_buf_remove_le48(buf);
	zassert_equal(u64, net_buf_remove_be48(buf),
		      "Invalid 48 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_le48(buf, u64);
	net_buf_push_be48(buf, u64);

	zassert_mem_equal(le48, net_buf_remove_mem(buf, sizeof(le48)),
			  sizeof(le48), "Invalid 48 bits byte order");
	zassert_mem_equal(be48, net_buf_remove_mem(buf, sizeof(be48)),
			  sizeof(be48), "Invalid 48 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_mem(buf, &le64, sizeof(le64));
	net_buf_push_mem(buf, &be64, sizeof(be64));

	u64 = net_buf_remove_le64(buf);
	zassert_equal(u64, net_buf_remove_be64(buf),
		      "Invalid 64 bits byte order");

	net_buf_reset(buf);
	net_buf_reserve(buf, 16);

	net_buf_push_le64(buf, u64);
	net_buf_push_be64(buf, u64);

	zassert_mem_equal(le64, net_buf_remove_mem(buf, sizeof(le64)),
			  sizeof(le64), "Invalid 64 bits byte order");
	zassert_mem_equal(be64, net_buf_remove_mem(buf, sizeof(be64)),
			  sizeof(be64), "Invalid 64 bits byte order");

	net_buf_unref(buf);
}

ZTEST(net_buf_tests, test_net_buf_user_data)
{
	struct net_buf *buf;

	/* Fixed Pool */
	buf = net_buf_alloc(&fixed_pool, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");

	zassert_equal(USER_DATA_FIXED, fixed_pool.user_data_size,
		"Bad user_data_size");
	zassert_equal(USER_DATA_FIXED, buf->user_data_size,
		"Bad user_data_size");

	net_buf_unref(buf);

	/* Heap Pool */
	buf = net_buf_alloc_len(&bufs_pool, 20, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");

	zassert_equal(USER_DATA_HEAP, bufs_pool.user_data_size,
		"Bad user_data_size");
	zassert_equal(USER_DATA_HEAP, buf->user_data_size,
		"Bad user_data_size");

	net_buf_unref(buf);

	/* Var Pool */
	buf = net_buf_alloc_len(&var_pool, 20, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");

	zassert_equal(USER_DATA_VAR, var_pool.user_data_size,
		"Bad user_data_size");
	zassert_equal(USER_DATA_VAR, buf->user_data_size,
		"Bad user_data_size");

	net_buf_unref(buf);
}

ZTEST(net_buf_tests, test_net_buf_user_data_copy)
{
	struct net_buf *buf_user_data_small, *buf_user_data_big;
	uint32_t *src_user_data, *dst_user_data;

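	/* Source buffer comes from the heap pool, which has the smaller user data area. */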
	buf_user_data_small = net_buf_alloc_len(&bufs_pool, 1, K_NO_WAIT);
	zassert_not_null(buf_user_data_small, "Failed to get buffer");
	src_user_data = net_buf_user_data(buf_user_data_small);
	*src_user_data = 0xAABBCCDD;

	/* Happy case: the destination buf's user data area is bigger than the source buf's */
	buf_user_data_big = net_buf_alloc_len(&var_pool, 1, K_NO_WAIT);
	zassert_not_null(buf_user_data_big, "Failed to get buffer");
	dst_user_data = net_buf_user_data(buf_user_data_big);
	*dst_user_data = 0x11223344;

	zassert_ok(net_buf_user_data_copy(buf_user_data_big, buf_user_data_small));
	zassert_equal(*src_user_data, 0xAABBCCDD);

	/* Error case: User data size of destination buffer is too small */
	zassert_not_ok(net_buf_user_data_copy(buf_user_data_small, buf_user_data_big),
		       "User data size in destination buffer too small");

	net_buf_unref(buf_user_data_big);

	/* Corner case: Same buffer used as source and target */
	zassert_ok(net_buf_user_data_copy(buf_user_data_small, buf_user_data_small),
		   "No-op is tolerated");
	zassert_equal(*src_user_data, 0xAABBCCDD, "User data remains the same");

	net_buf_unref(buf_user_data_small);
}

ZTEST(net_buf_tests, test_net_buf_comparison)
{
	struct net_buf *buf;
	size_t written;
	size_t offset;
	size_t to_compare;
	size_t res;
	uint8_t data[FIXED_BUFFER_SIZE * 2];

	/* Fill data buffer */
	for (int i = 0; i < sizeof(data); ++i) {
		data[i] = (uint8_t)i;
	}

	/* Allocate a single net_buf */
	buf = net_buf_alloc(&fixed_pool, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get buffer");

	written = net_buf_append_bytes(buf, buf->size, data, K_NO_WAIT, NULL, NULL);
	zassert_equal(written, buf->size, "Failed to fill the buffer");
	zassert_equal(buf->frags, NULL, "Additional buffer allocated");

	/* Compare the whole buffer */
	res = net_buf_data_match(buf, 0, data, buf->size);
	zassert_equal(res, buf->size, "Whole net_buf comparison failed");

	/* Compare from the offset */
	offset = buf->size / 2;
	to_compare = written - offset;

	res = net_buf_data_match(buf, offset, &data[offset], to_compare);
	zassert_equal(res, to_compare, "Comparison with offset failed");

	/* Write more data (it allocates more buffers) */
	written = net_buf_append_bytes(buf, sizeof(data) - written, &data[buf->size], K_NO_WAIT,
				       NULL, NULL);
	zassert_true(buf->frags, "Failed to allocate an additional net_buf");

	/* Compare whole data with buffers' content */
	res = net_buf_data_match(buf, 0, data, sizeof(data));
	zassert_equal(res, sizeof(data), "Failed to compare data with multiple buffers");

	/* Compare data with offset at the edge between two fragments */
	offset = buf->size - (buf->size / 2);
	res = net_buf_data_match(buf, offset, &data[offset], buf->size);
	zassert_equal(res, buf->size, "Failed to compare bytes within two buffers with offset");

	/* Compare data with partial matching - change the data in the middle */
	data[sizeof(data) / 2] += 1;
	res = net_buf_data_match(buf, 0, data, sizeof(data));
	zassert_equal(res, sizeof(data) / 2, "Partial matching failed");

	/* No buffer - expect 0 matching bytes */
	res = net_buf_data_match(NULL, 0, data, sizeof(data));
	zassert_equal(res, 0, "Matching without a buffer must fail");

	/* No data - expect 0 matching bytes */
	res = net_buf_data_match(buf, 0, NULL, sizeof(data));
	zassert_equal(res, 0, "Matching without data must fail");

	/* Too high offset - expect 0 matching bytes */
	res = net_buf_data_match(buf, FIXED_BUFFER_SIZE * 2, data, sizeof(data));
	zassert_equal(res, 0, "Matching with too high offset must fail");

	/* Try to match more bytes than are in buffers - expect only partial match */
	offset = (FIXED_BUFFER_SIZE * 2) - 8;
	res = net_buf_data_match(buf, offset, &data[offset], 16);
	zassert_equal(res, 8, "Reaching out of bounds must return a partial match");

	net_buf_unref(buf);
}

ZTEST(net_buf_tests, test_net_buf_fixed_append)
{
	struct net_buf *buf;
	uint8_t data[FIXED_BUFFER_SIZE * 2];

	/* Fill data buffer */
	for (int i = 0; i < sizeof(data); ++i) {
		data[i] = (uint8_t)i;
	}

	/* Fixed Pool */
	buf = net_buf_alloc(&fixed_pool, K_NO_WAIT);
	zassert_not_null(buf, "Failed to get fixed buffer");
	zassert_equal(buf->size, FIXED_BUFFER_SIZE, "Invalid fixed buffer size");

	/* For a fixed pool, appending fewer bytes than the buffer's free space
	 * must not add a new fragment.
	 */
	net_buf_append_bytes(buf, buf->size - 8, data, K_NO_WAIT, NULL, NULL);
	zassert_equal(buf->len, buf->size - 8, "Invalid buffer len");
	zassert_is_null(buf->frags, "Unexpected buffer fragment");

	/* Filling rest of the space should not add an additional buffer */
	net_buf_append_bytes(buf, 8, data, K_NO_WAIT, NULL, NULL);
	zassert_equal(buf->len, buf->size, "Invalid buffer len");
	zassert_is_null(buf->frags, "Unexpected buffer fragment");

	/* Appending any number of bytes allocates an additional fragment */
	net_buf_append_bytes(buf, 1, data, K_NO_WAIT, NULL, NULL);
	zassert_not_null(buf->frags, "Lack of expected buffer fragment");
	zassert_equal(buf->frags->len, 1, "Expected single byte in the new fragment");
	zassert_equal(buf->frags->size, buf->size, "Different size of the fragment");

	/* Remove 1-byte buffer */
	net_buf_frag_del(buf, buf->frags);

	/* Appending size bigger than single buffer's size will allocate multiple fragments */
	net_buf_append_bytes(buf, sizeof(data), data, K_NO_WAIT, NULL, NULL);
	zassert_not_null(buf->frags, "Missing first buffer fragment");
	zassert_not_null(buf->frags->frags, "Missing second buffer fragment");
	zassert_is_null(buf->frags->frags->frags, "Unexpected buffer fragment");

	net_buf_unref(buf);
}

ZTEST(net_buf_tests, test_net_buf_linearize)
{
	struct net_buf *buf, *frag;
	uint8_t linear_buffer[256];
	uint8_t expected_data[256];
	size_t copied;

	static const char fragment1_data[] = "Hello World! This is fragment 1";
	static const char fragment2_data[] = "Fragment 2 data here";
	static const char fragment3_data[] = "Final fragment data";
	const size_t fragment1_len = sizeof(fragment1_data) - 1;
	const size_t fragment2_len = sizeof(fragment2_data) - 1;
	const size_t fragment3_len = sizeof(fragment3_data) - 1;
	const size_t total_len = fragment1_len + fragment2_len + fragment3_len;

	destroy_called = 0;

	/* Create a buf that does not have any data to store; it just
	 * links to the fragments.
	 */
	buf = net_buf_alloc_len(&bufs_pool, 0, K_FOREVER);
	zassert_not_null(buf, "Failed to get buffer");

	/* Add first fragment with some data */
	frag = net_buf_alloc_len(&bufs_pool, 50, K_FOREVER);
	zassert_not_null(frag, "Failed to get fragment");
	net_buf_add_mem(frag, fragment1_data, fragment1_len);
	net_buf_frag_add(buf, frag);

	/* Add second fragment with more data */
	frag = net_buf_alloc_len(&bufs_pool, 50, K_FOREVER);
	zassert_not_null(frag, "Failed to get fragment");
	net_buf_add_mem(frag, fragment2_data, fragment2_len);
	net_buf_frag_add(buf, frag);

	/* Add third fragment */
	frag = net_buf_alloc_len(&bufs_pool, 50, K_FOREVER);
	zassert_not_null(frag, "Failed to get fragment");
	net_buf_add_mem(frag, fragment3_data, fragment3_len);
	net_buf_frag_add(buf, frag);

	/* Prepare expected data (all fragments concatenated) */
	memset(expected_data, 0, sizeof(expected_data));
	memcpy(expected_data, fragment1_data, fragment1_len);
	memcpy(expected_data + fragment1_len, fragment2_data, fragment2_len);
	memcpy(expected_data + fragment1_len + fragment2_len, fragment3_data, fragment3_len);

	/* Test 1: Linearize entire buffer */
	memset(linear_buffer, 0, sizeof(linear_buffer));
	copied = net_buf_linearize(linear_buffer, sizeof(linear_buffer), buf->frags, 0, total_len);
	zassert_equal(copied, total_len, "Incorrect number of bytes copied");
	zassert_mem_equal(linear_buffer, expected_data, total_len, "Linearized data doesn't match");

	/* Test 2: Linearize with offset */
	memset(linear_buffer, 0, sizeof(linear_buffer));
	copied = net_buf_linearize(linear_buffer, sizeof(linear_buffer), buf->frags, 10, 10);
	zassert_equal(copied, 10, "Incorrect number of bytes copied with offset");
	zassert_mem_equal(linear_buffer, expected_data + 10, 10,
			  "Linearized data with offset doesn't match");

	/* Test 3: Linearize across fragment boundary */
	memset(linear_buffer, 0, sizeof(linear_buffer));
	copied = net_buf_linearize(linear_buffer, sizeof(linear_buffer), buf->frags,
				   fragment1_len - 5, 20);
	zassert_equal(copied, 20, "Incorrect number of bytes copied across boundary");
	zassert_mem_equal(linear_buffer, expected_data + fragment1_len - 5, 20,
			  "Linearized data across boundary doesn't match");

	/* Test 4: Linearize with destination buffer too small */
	memset(linear_buffer, 0, sizeof(linear_buffer));
	copied = net_buf_linearize(linear_buffer, 10, buf->frags, 0, total_len);
	zassert_equal(copied, 10, "Should copy only up to destination buffer size");
	zassert_mem_equal(linear_buffer, expected_data, 10,
			  "Partial linearized data doesn't match");

	/* Test 5: Linearize with offset beyond available data */
	memset(linear_buffer, 0, sizeof(linear_buffer));
	copied = net_buf_linearize(linear_buffer, sizeof(linear_buffer), buf->frags, total_len + 10,
				   20);
	zassert_equal(copied, 0, "Should copy 0 bytes when offset is beyond data");

	/* Test 6: Linearize with len beyond available data */
	memset(linear_buffer, 0, sizeof(linear_buffer));
	copied = net_buf_linearize(linear_buffer, sizeof(linear_buffer), buf->frags, 0,
				   total_len + 10);
	zassert_equal(copied, total_len, "Should copy only available data when len exceeds data");

	/* Test 7: Linearize with NULL source */
	copied = net_buf_linearize(linear_buffer, sizeof(linear_buffer), NULL, 0, 20);
	zassert_equal(copied, 0, "Should return 0 for NULL source");

	/* Test 8: Linearize with zero length */
	memset(linear_buffer, 0, sizeof(linear_buffer));
	copied = net_buf_linearize(linear_buffer, sizeof(linear_buffer), buf->frags, 0, 0);
	zassert_equal(copied, 0, "Should copy 0 bytes when len is 0");

	/* Test 9: Linearize with zero destination length */
	copied = net_buf_linearize(linear_buffer, 0, buf->frags, 0, 20);
	zassert_equal(copied, 0, "Should copy 0 bytes when destination length is 0");

	net_buf_unref(buf);
	zassert_equal(destroy_called, 4, "Incorrect destroy callback count");
}

ZTEST(net_buf_tests, test_net_buf_var_pool_aligned)
{
	struct net_buf *buf1, *buf2, *buf3;

	destroy_called = 0;

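	/* The pool allocator must report the alignment the pool was defined with. */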
	zassert_equal(var_pool_aligned.alloc->alignment, VAR_POOL_ALIGN,
		      "Expected %d-byte alignment for variable pool",
		      VAR_POOL_ALIGN);

	buf1 = net_buf_alloc_len(&var_pool_aligned, 20, K_NO_WAIT);
	zassert_not_null(buf1, "Failed to get buffer");

	zassert_true(IS_ALIGNED((uintptr_t)buf1->data, VAR_POOL_ALIGN),
		     "Buffer data pointer is not aligned to %d bytes",
		     VAR_POOL_ALIGN);

	buf2 = net_buf_alloc_len(&var_pool_aligned_small, 29, K_NO_WAIT);
	zassert_not_null(buf2, "Failed to get buffer");

	zassert_true(IS_ALIGNED((uintptr_t)buf2->data, VAR_POOL_ALIGN_SMALL),
		     "Buffer data pointer is not aligned to %d bytes",
		     VAR_POOL_ALIGN_SMALL);

	buf3 = net_buf_alloc_len(&var_pool_aligned, VAR_POOL_ALIGN_SMALL, K_NO_WAIT);
	zassert_is_null(buf3,
			"Managed to get buffer even if alignment %d is larger than size %d",
			VAR_POOL_ALIGN, VAR_POOL_ALIGN_SMALL);

	buf3 = net_buf_alloc_len(&var_pool_aligned, VAR_POOL_ALIGN, K_NO_WAIT);
	zassert_not_null(buf3, "Failed to get buffer");

	net_buf_unref(buf1);
	net_buf_unref(buf2);
	net_buf_unref(buf3);

	zassert_equal(destroy_called, 3, "Incorrect destroy callback count");
}

ZTEST_SUITE(net_buf_tests, NULL, NULL, NULL, NULL, NULL);