1 /*
2  * Copyright (c) 2018 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <zephyr/ztest_assert.h>
9 #include <zephyr/types.h>
10 #include <stddef.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <zephyr/net/net_pkt.h>
14 #include <zephyr/net/net_if.h>
15 #include <zephyr/net/net_ip.h>
16 #include <zephyr/net/ethernet.h>
17 #include <zephyr/random/random.h>
18 
19 #include <zephyr/ztest.h>
20 
/* MAC address of the fake interface; filled in once by fake_dev_iface_init() */
static uint8_t mac_addr[sizeof(struct net_eth_addr)];
/* The fake interface, captured at iface init time and used by all tests */
static struct net_if *eth_if;
/* Scratch buffer shared by the read/write tests */
static uint8_t small_buffer[512];
24 
25 /************************\
26  * FAKE ETHERNET DEVICE *
27 \************************/
28 
fake_dev_iface_init(struct net_if * iface)29 static void fake_dev_iface_init(struct net_if *iface)
30 {
31 	if (mac_addr[2] == 0U) {
32 		/* 00-00-5E-00-53-xx Documentation RFC 7042 */
33 		mac_addr[0] = 0x00;
34 		mac_addr[1] = 0x00;
35 		mac_addr[2] = 0x5E;
36 		mac_addr[3] = 0x00;
37 		mac_addr[4] = 0x53;
38 		mac_addr[5] = sys_rand8_get();
39 	}
40 
41 	net_if_set_link_addr(iface, mac_addr, 6, NET_LINK_ETHERNET);
42 
43 	eth_if = iface;
44 }
45 
/* Fake send handler: drop the packet and report success.
 * Both parameters are intentionally unused; mark them as such for
 * consistency with fake_dev_init() and to silence -Wunused warnings.
 */
static int fake_dev_send(const struct device *dev, struct net_pkt *pkt)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(pkt);

	return 0;
}
50 
/* Device init hook for the fake device; nothing to set up.
 * Non-static because NET_DEVICE_INIT() below references it.
 */
int fake_dev_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}
57 
#if defined(CONFIG_NET_L2_ETHERNET)
/* Ethernet L2 build: expose the fake driver through the ethernet API */
static const struct ethernet_api fake_dev_api = {
	.iface_api.init = fake_dev_iface_init,
	.send = fake_dev_send,
};

#define _ETH_L2_LAYER ETHERNET_L2
#define _ETH_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(ETHERNET_L2)
/* Ethernet frames carry an L2 header in front of the payload */
#define L2_HDR_SIZE sizeof(struct net_eth_hdr)
#else
/* No Ethernet L2 in this build: fall back to the dummy L2 */
static const struct dummy_api fake_dev_api = {
	.iface_api.init = fake_dev_iface_init,
	.send = fake_dev_send,
};

#define _ETH_L2_LAYER DUMMY_L2
#define _ETH_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(DUMMY_L2)
/* Dummy L2 has no link-layer header */
#define L2_HDR_SIZE 0
#endif

/* Register the fake network device all tests run against */
NET_DEVICE_INIT(fake_dev, "fake_dev",
		fake_dev_init, NULL, NULL, NULL,
		CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,
		&fake_dev_api, _ETH_L2_LAYER, _ETH_L2_CTX_TYPE,
		NET_ETH_MTU);
83 
84 /*********************\
85  * UTILITY FUNCTIONS *
86 \*********************/
87 
/* Return true when the packet's remaining (available) buffer space is
 * exactly @size bytes.
 */
static bool pkt_is_of_size(struct net_pkt *pkt, size_t size)
{
	size_t available = net_pkt_available_buffer(pkt);

	return available == size;
}
92 
pkt_print_cursor(struct net_pkt * pkt)93 static void pkt_print_cursor(struct net_pkt *pkt)
94 {
95 	if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
96 		printk("Unknown position\n");
97 	} else {
98 		printk("Position %zu (%p) in net_buf %p (data %p)\n",
99 		       pkt->cursor.pos - pkt->cursor.buf->data,
100 		       pkt->cursor.pos, pkt->cursor.buf,
101 		       pkt->cursor.buf->data);
102 	}
103 }
104 
105 
106 /*****************************\
107  * HOW TO ALLOCATE - 2 TESTS *
108 \*****************************/
/* Demonstrate packet allocation without any buffer attached:
 * net_pkt_alloc() and net_pkt_alloc_on_iface().
 */
ZTEST(net_pkt_test_suite, test_net_pkt_allocate_wo_buffer)
{
	struct net_pkt *pkt;

	/* How to allocate a packet, with no buffer */
	pkt = net_pkt_alloc(K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	/* NOTE(review): reading pkt->atomic_ref after the final unref is
	 * white-box checking that only a test should do — the pkt slot is
	 * conceptually free at this point.
	 */
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/* Note that, if you already know the iface to which the packet
	 * belongs to, you will be able to use net_pkt_alloc_on_iface().
	 */
	pkt = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}
132 
/* Demonstrate packet allocation with a buffer, and how the requested
 * payload size interacts with the interface MTU and the header space
 * reserved for the given address family/protocol.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_allocate_with_buffer)
{
	struct net_pkt *pkt;

	/* How to allocate a packet, with buffer
	 * a) - with a size that will fit MTU, let's say 512 bytes
	 * Note: we don't care of the family/protocol for now
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Did we get the requested size? */
	zassert_true(pkt_is_of_size(pkt, 512), "Pkt size is not right");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/*
	 * b) - with a size that will not fit MTU, let's say 1800 bytes
	 * Note: again we don't care of family/protocol for now.
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 1800,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* The allocator clamps the buffer to MTU + L2 header size */
	zassert_false(pkt_is_of_size(pkt, 1800), "Pkt size is not right");
	zassert_true(pkt_is_of_size(pkt, net_if_get_mtu(eth_if) + L2_HDR_SIZE),
		     "Pkt size is not right");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/*
	 * c) - Now with 512 bytes but on IPv4/UDP
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 512, AF_INET,
					IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Because 512 + NET_IPV4UDPH_LEN fits MTU, total must be that one */
	zassert_true(pkt_is_of_size(pkt, 512 + NET_IPV4UDPH_LEN),
		     "Pkt overall size does not match");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/*
	 * d) - Now with 1800 bytes but on IPv4/UDP
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 1800, AF_INET,
					IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Because 1800 + NET_IPV4UDPH_LEN won't fit MTU, payload size
	 * should be MTU
	 */
	zassert_true(net_pkt_available_buffer(pkt) ==
		     net_if_get_mtu(eth_if),
		     "Payload buf size does not match for ipv4/udp");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/* e) - with a zero payload but AF_INET family
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 0,
					AF_INET, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Even with no payload, room for the IPv4 header is reserved */
	zassert_true(pkt_is_of_size(pkt, NET_IPV4H_LEN),
		     "Pkt size is not right");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/* f) - with a zero payload but AF_PACKET family
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 0,
					AF_PACKET, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* AF_PACKET reserves no header space, so size stays 0 */
	zassert_true(pkt_is_of_size(pkt, 0), "Pkt size is not right");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}
234 
235 /********************************\
236  * HOW TO R/W A PACKET -  TESTS *
237 \********************************/
238 
/* Walk through the basic net_pkt read/write API: how writes grow the
 * packet length, how the embedded cursor tracks the current position,
 * and how the overwrite flag switches between appending new data and
 * accessing existing data in place.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_basics_of_rw)
{
	struct net_pkt_cursor backup;
	struct net_pkt *pkt;
	uint16_t value16;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Once newly allocated with buffer,
	 * a packet has no data accounted for in its buffer
	 */
	zassert_true(net_pkt_get_len(pkt) == 0,
		     "Pkt initial length should be 0");

	/* This is done through net_buf which can distinguish
	 * the size of a buffer from the length of the data in it.
	 */

	/* Let's subsequently write 1 byte, then 2 bytes and 4 bytes
	 * We write values made of 0s
	 */
	ret = net_pkt_write_u8(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 1 now */
	zassert_true(net_pkt_get_len(pkt) == 1, "Pkt length mismatch");

	ret = net_pkt_write_be16(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 3 now */
	zassert_true(net_pkt_get_len(pkt) == 3, "Pkt length mismatch");

	/* Verify that the data is properly written to net_buf */
	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);
	net_pkt_skip(pkt, 1);
	net_pkt_read_be16(pkt, &value16);
	zassert_equal(value16, 0, "Invalid value %d read, expected %d",
		      value16, 0);

	/* Then write new value, overwriting the old one */
	net_pkt_cursor_init(pkt);
	net_pkt_skip(pkt, 1);
	ret = net_pkt_write_be16(pkt, 42);
	zassert_true(ret == 0, "Pkt write failed");

	/* And re-read the value again */
	net_pkt_cursor_init(pkt);
	net_pkt_skip(pkt, 1);
	ret = net_pkt_read_be16(pkt, &value16);
	zassert_true(ret == 0, "Pkt read failed");
	zassert_equal(value16, 42, "Invalid value %d read, expected %d",
		      value16, 42);

	/* Leave overwrite mode and return to the saved append position */
	net_pkt_set_overwrite(pkt, false);
	net_pkt_cursor_restore(pkt, &backup);

	ret = net_pkt_write_be32(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 7 now */
	zassert_true(net_pkt_get_len(pkt) == 7, "Pkt length mismatch");

	/* All these writing functions use net_pkt_write(), which works
	 * this way:
	 */
	ret = net_pkt_write(pkt, small_buffer, 9);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 16 now */
	zassert_true(net_pkt_get_len(pkt) == 16, "Pkt length mismatch");

	/* Now let's say you want to memset some data */
	ret = net_pkt_memset(pkt, 0, 4);
	zassert_true(ret == 0, "Pkt memset failed");

	/* Length should be 20 now */
	zassert_true(net_pkt_get_len(pkt) == 20, "Pkt length mismatch");

	/* So memset affects the length exactly as write does */

	/* Sometimes you might want to advance in the buffer without caring
	 * what's written there since you'll eventually come back for that.
	 * net_pkt_skip() is used for it.
	 * Note: usually you will not have to use that function a lot yourself.
	 */
	ret = net_pkt_skip(pkt, 20);
	zassert_true(ret == 0, "Pkt skip failed");

	/* Length should be 40 now */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* Again, skip affected the length also, like a write
	 * But wait a minute: how to get back then, in order to write at
	 * the position we just skipped?
	 *
	 * So let's introduce the concept of buffer cursor. Basically, each
	 * net_pkt embeds such 'cursor': it's like a head of a tape
	 * recorder/reader, it holds the current position in the buffer where
	 * you can r/w. All operations use and update it below.
	 * There is, however, a catch: buffer is described through net_buf
	 * and these are like a simple linked-list.
	 * Which means that unlike a tape recorder/reader: you are not
	 * able to go backward. Only back from starting point and forward.
	 * Thus why there is a net_pkt_cursor_init(pkt) which will let you going
	 * back from the start. We could hold more info in order to avoid that,
	 * but that would mean growing each an every net_buf.
	 */
	net_pkt_cursor_init(pkt);

	/* But isn't it so that if I want to go at the previous position I
	 * skipped, I'll use skip again but then won't it affect again the
	 * length?
	 * Answer is yes. Hopefully there is a mean to avoid that. Basically
	 * for data that already "exists" in the buffer (aka: data accounted
	 * for in the buffer, through the length) you'll need to set the packet
	 * to overwrite: all subsequent operations will then work on existing
	 * data and will not affect the length (it won't add more data)
	 */
	net_pkt_set_overwrite(pkt, true);

	zassert_true(net_pkt_is_being_overwritten(pkt),
		     "Pkt is not set to overwrite");

	/* Ok so previous skipped position was at offset 20 */
	ret = net_pkt_skip(pkt, 20);
	zassert_true(ret == 0, "Pkt skip failed");

	/* Length should _still_ be 40 */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* And you can write stuff */
	ret = net_pkt_write_le32(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Again, length should _still_ be 40 */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* Let's memset the rest */
	ret = net_pkt_memset(pkt, 0, 16);
	zassert_true(ret == 0, "Pkt memset failed");

	/* Again, length should _still_ be 40 */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* We are now back at the end of the existing data in the buffer
	 * Since overwrite is still on, we should not be able to r/w
	 * anything.
	 * This is completely nominal, as being set, overwrite allows r/w only
	 * on existing data in the buffer:
	 */
	ret = net_pkt_write_be32(pkt, 0);
	zassert_true(ret != 0, "Pkt write succeeded where it shouldn't have");

	/* Logically, in order to be able to add new data in the buffer,
	 * overwrite should be disabled:
	 */
	net_pkt_set_overwrite(pkt, false);

	/* But it will fail: */
	ret = net_pkt_write_le32(pkt, 0);
	zassert_true(ret != 0, "Pkt write succeeded?");

	/* Why is that?
	 * This is because in case of r/w error: the iterator is invalidated.
	 * This a design choice, once you get a r/w error it means your code
	 * messed up requesting smaller buffer than you actually needed, or
	 * writing too much data than it should have been etc...).
	 * So you must drop your packet entirely.
	 */

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}
421 
/* Demonstrate cursor navigation patterns (overwrite+skip vs
 * backup/restore) and direct data access via a contiguity check plus
 * a raw cursor-position cast.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_advanced_basics)
{
	struct net_pkt_cursor backup;
	struct net_pkt *pkt;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_INET, IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	pkt_print_cursor(pkt);

	/* As stated earlier, initializing the cursor, is the way to go
	 * back from the start in the buffer (either header or payload then).
	 * We also showed that using net_pkt_skip() could be used to move
	 * forward in the buffer.
	 * But what if you are far in the buffer, you need to go backward,
	 * and back again to your previous position?
	 * You could certainly do:
	 */
	ret = net_pkt_write(pkt, small_buffer, 20);
	zassert_true(ret == 0, "Pkt write failed");

	pkt_print_cursor(pkt);

	net_pkt_cursor_init(pkt);

	pkt_print_cursor(pkt);

	/* ... do something here ... */

	/* And finally go back with overwrite/skip: */
	net_pkt_set_overwrite(pkt, true);
	ret = net_pkt_skip(pkt, 20);
	zassert_true(ret == 0, "Pkt skip failed");
	net_pkt_set_overwrite(pkt, false);

	pkt_print_cursor(pkt);

	/* In this example, do not focus on the 20 bytes. It is just for
	 * the sake of the example.
	 * The other method is backup/restore the packet cursor.
	 */
	net_pkt_cursor_backup(pkt, &backup);

	net_pkt_cursor_init(pkt);

	/* ... do something here ... */

	/* and restore: */
	net_pkt_cursor_restore(pkt, &backup);

	pkt_print_cursor(pkt);

	/* Another feature, is how you access your data. Earlier was
	 * presented basic r/w functions. But sometime you might want to
	 * access your data directly through a structure/type etc...
	 * Due to the "fragmented" possible nature of your buffer, you
	 * need to know if the data you are trying to access is in
	 * contiguous area.
	 * For this, you'll use:
	 */
	ret = (int) net_pkt_is_contiguous(pkt, 4);
	zassert_true(ret == 1, "Pkt contiguity check failed");

	/* If that's successful you should be able to get the actual
	 * position in the buffer and cast it to the type you want.
	 */
	{
		uint32_t *val = (uint32_t *)net_pkt_cursor_get_pos(pkt);

		*val = 0U;
		/* etc... */
	}

	/* However, to advance your cursor, since none of the usual r/w
	 * functions got used: net_pkt_skip() should be called relevantly:
	 */
	net_pkt_skip(pkt, 4);

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/* Obviously one will very rarely use these 2 last low level functions
	 * - net_pkt_is_contiguous()
	 * - net_pkt_cursor_get_pos()
	 *
	 * Let's see why next.
	 */
}
514 
/* Demonstrate the data-access API (NET_PKT_DATA_ACCESS_DEFINE with
 * net_pkt_get_data()/net_pkt_set_data()), the preferred way to read
 * and write protocol headers.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_easier_rw_usage)
{
	struct net_pkt *pkt;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_INET, IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* In net core, all goes down in fine to header manipulation.
	 * Either it's an IP header, UDP, ICMP, TCP one etc...
	 * One would then prefer to access those directly via their
	 * descriptors (struct net_udp_hdr, struct net_icmp_hdr, ...)
	 * rather than building it byte by bytes etc...
	 *
	 * As seen earlier, it is possible to cast on current position.
	 * However, due to the "fragmented" possible nature of the buffer,
	 * it should also be possible to handle the case the data being
	 * accessed is scattered on 1+ net_buf.
	 *
	 * To avoid redoing the contiguity check, cast or copy on failure,
	 * an accessor type defined via NET_PKT_DATA_ACCESS_DEFINE() exists.
	 * It solves both cases (accessing data contiguous or not), without
	 * the need for runtime allocation (all is on stack)
	 */
	{
		NET_PKT_DATA_ACCESS_DEFINE(ip_access, struct net_ipv4_hdr);
		struct net_ipv4_hdr *ip_hdr;

		/* get returns a pointer to the header (direct or copied) */
		ip_hdr = (struct net_ipv4_hdr *)
			net_pkt_get_data(pkt, &ip_access);
		zassert_not_null(ip_hdr, "Accessor failed");

		ip_hdr->tos = 0x00;

		/* set commits the modification back into the buffer */
		ret = net_pkt_set_data(pkt, &ip_access);
		zassert_true(ret == 0, "Accessor failed");

		zassert_true(net_pkt_get_len(pkt) == NET_IPV4H_LEN,
			     "Pkt length mismatch");
	}

	/* As you can notice: get/set take also care of handling the cursor
	 * and updating the packet length relevantly thus why packet length
	 * has properly grown.
	 */

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}
567 
/* Hand-crafted net_buf fragment chain used by test_net_pkt_copy:
 * b1 -> b2 -> b3 -> b4 -> b5.
 * The fields are chosen so the chain accounts for 4 bytes of data
 * ("ab" in b1, "mn" in b4) and 12 bytes of available space
 * (2 in b1, 8 in b2, 2 in b4).
 */

/* b5: backing storage present but size 0, so it contributes nothing */
uint8_t b5_data[10] = "qrstuvwxyz";
struct net_buf b5 = {
	.ref   = 1,
	.data  = b5_data,
	.len   = 0,
	.size  = 0,
	.__buf  = b5_data,
};

/* b4: 2 bytes of data ("mn"), 2 bytes still available */
uint8_t b4_data[4] = "mnop";
struct net_buf b4 = {
	.frags = &b5,
	.ref   = 1,
	.data  = b4_data,
	.len   = sizeof(b4_data) - 2,
	.size  = sizeof(b4_data),
	.__buf  = b4_data,
};

/* b3: a fragment with no buffer at all */
struct net_buf b3 = {
	.frags = &b4,
	.ref   = 1,
	.data  = NULL,
	.__buf  = NULL,
};

/* b2: empty but with 8 bytes of available space */
uint8_t b2_data[8] = "efghijkl";
struct net_buf b2 = {
	.frags = &b3,
	.ref   = 1,
	.data  = b2_data,
	.len   = 0,
	.size  = sizeof(b2_data),
	.__buf  = b2_data,
};

/* b1: head of the chain, 2 bytes of data ("ab"), 2 bytes available */
uint8_t b1_data[4] = "abcd";
struct net_buf b1 = {
	.frags = &b2,
	.ref   = 1,
	.data  = b1_data,
	.len   = sizeof(b1_data) - 2,
	.size  = sizeof(b1_data),
	.__buf  = b1_data,
};
613 
/* Exercise net_pkt_clone() (and thereby net_pkt_copy_new()) against
 * the hand-crafted b1..b5 fragment chain defined above.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_copy)
{
	struct net_pkt *pkt_src;
	struct net_pkt *pkt_dst;

	pkt_src = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
	zassert_true(pkt_src != NULL, "Pkt not allocated");

	pkt_print_cursor(pkt_src);

	/* Let's append the buffers */
	net_pkt_append_buffer(pkt_src, &b1);

	net_pkt_set_overwrite(pkt_src, true);

	/* There should be some space left */
	zassert_true(net_pkt_available_buffer(pkt_src) != 0, "No space left?");
	/* Length should be 4 */
	zassert_true(net_pkt_get_len(pkt_src) == 4, "Wrong length");

	/* Actual space left is 12 (in b1, b2 and b4) */
	zassert_true(net_pkt_available_buffer(pkt_src) == 12,
		     "Wrong space left?");

	pkt_print_cursor(pkt_src);

	/* Now let's clone the pkt
	 * This will test net_pkt_copy_new() as it uses it for the buffers
	 */
	pkt_dst = net_pkt_clone(pkt_src, K_NO_WAIT);
	zassert_true(pkt_dst != NULL, "Pkt not clone");

	/* Cloning does not take into account left space,
	 * but only occupied one
	 */
	zassert_true(net_pkt_available_buffer(pkt_dst) == 0, "Space left");
	zassert_true(net_pkt_get_len(pkt_src) == net_pkt_get_len(pkt_dst),
		     "Not same amount?");

	/* It also did not care to copy the net_buf itself, only the content
	 * so, knowing that the base buffer size is bigger than necessary,
	 * pkt_dst has only one net_buf
	 */
	zassert_true(pkt_dst->buffer->frags == NULL, "Not only one buffer?");

	/* Freeing the packet.
	 * b1..b5 are statically defined, not pool-allocated: detach them
	 * first so net_pkt_unref() does not try to release them to a pool.
	 */
	pkt_src->buffer = NULL;
	net_pkt_unref(pkt_src);
	zassert_true(atomic_get(&pkt_src->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
	net_pkt_unref(pkt_dst);
	zassert_true(atomic_get(&pkt_dst->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}
668 
669 #define PULL_TEST_PKT_DATA_SIZE 600
670 
/* Exercise net_pkt_pull(): partial pulls, a full pull, pulling past
 * the end (must return -ENOBUFS), and byte-by-byte pulls until the
 * buffer list is completely drained.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_pull)
{
	const int PULL_AMOUNT = 8;
	const int LARGE_PULL_AMOUNT = 200;
	struct net_pkt *dummy_pkt;
	static uint8_t pkt_data[PULL_TEST_PKT_DATA_SIZE];
	static uint8_t pkt_data_readback[PULL_TEST_PKT_DATA_SIZE];
	size_t len;
	int i, ret;

	/* Fill the reference pattern: byte i holds i modulo 256 */
	for (i = 0; i < PULL_TEST_PKT_DATA_SIZE; ++i) {
		pkt_data[i] = i & 0xff;
	}

	dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
					      PULL_TEST_PKT_DATA_SIZE,
					      AF_UNSPEC,
					      0,
					      K_NO_WAIT);
	zassert_true(dummy_pkt != NULL, "Pkt not allocated");

	zassert_true(net_pkt_write(dummy_pkt,
				   pkt_data,
				   PULL_TEST_PKT_DATA_SIZE) == 0,
		     "Write packet failed");

	/* A small pull must shrink the length and leave the remaining
	 * data intact (shifted to the front).
	 */
	net_pkt_cursor_init(dummy_pkt);
	net_pkt_pull(dummy_pkt, PULL_AMOUNT);
	zassert_equal(net_pkt_get_len(dummy_pkt),
		      PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT,
		      "Pull failed to set new size");
	zassert_true(net_pkt_read(dummy_pkt,
				  pkt_data_readback,
				  PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT) == 0,
		     "Read packet failed");
	zassert_mem_equal(pkt_data_readback,
			  &pkt_data[PULL_AMOUNT],
			  PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT,
			  "Packet data changed");

	/* A pull larger than a single fragment must also work */
	net_pkt_cursor_init(dummy_pkt);
	net_pkt_pull(dummy_pkt, LARGE_PULL_AMOUNT);
	zassert_equal(net_pkt_get_len(dummy_pkt),
		      PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT -
		      LARGE_PULL_AMOUNT,
		      "Large pull failed to set new size (%d vs %d)",
		      net_pkt_get_len(dummy_pkt),
		      PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT -
		      LARGE_PULL_AMOUNT);

	/* Pulling the whole remaining length empties the packet */
	net_pkt_cursor_init(dummy_pkt);
	net_pkt_pull(dummy_pkt, net_pkt_get_len(dummy_pkt));
	zassert_equal(net_pkt_get_len(dummy_pkt), 0,
		      "Full pull failed to set new size (%d)",
		      net_pkt_get_len(dummy_pkt));

	/* Pulling from an empty packet must fail with -ENOBUFS */
	net_pkt_cursor_init(dummy_pkt);
	ret = net_pkt_pull(dummy_pkt, 1);
	zassert_equal(ret, -ENOBUFS, "Did not return error");
	zassert_equal(net_pkt_get_len(dummy_pkt), 0,
		      "Empty pull set new size (%d)",
		      net_pkt_get_len(dummy_pkt));

	net_pkt_unref(dummy_pkt);

	/* Pulling more than the packet holds also fails with -ENOBUFS,
	 * but still drains everything that was there.
	 */
	dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
					      PULL_TEST_PKT_DATA_SIZE,
					      AF_UNSPEC,
					      0,
					      K_NO_WAIT);
	zassert_true(dummy_pkt != NULL, "Pkt not allocated");

	zassert_true(net_pkt_write(dummy_pkt,
				   pkt_data,
				   PULL_TEST_PKT_DATA_SIZE) == 0,
		     "Write packet failed");

	net_pkt_cursor_init(dummy_pkt);
	ret = net_pkt_pull(dummy_pkt, net_pkt_get_len(dummy_pkt) + 1);
	zassert_equal(ret, -ENOBUFS, "Did not return error");
	zassert_equal(net_pkt_get_len(dummy_pkt), 0,
		      "Not empty after full pull (%d)",
		      net_pkt_get_len(dummy_pkt));

	net_pkt_unref(dummy_pkt);

	/* Byte-by-byte pulls must succeed until the data is gone and
	 * leave an empty buffer list behind.
	 */
	dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
					      PULL_TEST_PKT_DATA_SIZE,
					      AF_UNSPEC,
					      0,
					      K_NO_WAIT);
	zassert_true(dummy_pkt != NULL, "Pkt not allocated");

	zassert_true(net_pkt_write(dummy_pkt,
				   pkt_data,
				   PULL_TEST_PKT_DATA_SIZE) == 0,
		     "Write packet failed");

	net_pkt_cursor_init(dummy_pkt);
	len = net_pkt_get_len(dummy_pkt);

	for (i = 0; i < len; i++) {
		ret = net_pkt_pull(dummy_pkt, 1);
		zassert_equal(ret, 0, "Did return error");
	}

	ret = net_pkt_pull(dummy_pkt, 1);
	zassert_equal(ret, -ENOBUFS, "Did not return error");

	zassert_equal(dummy_pkt->buffer, NULL, "buffer list not empty");

	net_pkt_unref(dummy_pkt);
}
784 
/* Verify net_pkt_clone() deep-copies the buffer content and carries
 * over the cursor position, link-layer addresses, address family and
 * the various per-packet flags.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_clone)
{
	uint8_t buf[26] = {"abcdefghijklmnopqrstuvwxyz"};
	struct net_pkt *pkt;
	struct net_pkt *cloned_pkt;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 64,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	ret = net_pkt_write(pkt, buf, sizeof(buf));
	zassert_true(ret == 0, "Pkt write failed");

	zassert_true(net_pkt_get_len(pkt) == sizeof(buf),
		     "Pkt length mismatch");

	/* Park the cursor 6 bytes in; the clone must preserve this offset */
	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);
	net_pkt_skip(pkt, 6);
	zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(pkt),
		     "Pkt remaining data mismatch");

	/* Point the ll addresses into the packet's own buffer so we can
	 * check the clone remaps them into its own copy.
	 */
	net_pkt_lladdr_src(pkt)->addr = pkt->buffer->data;
	net_pkt_lladdr_src(pkt)->len = NET_LINK_ADDR_MAX_LENGTH;
	net_pkt_lladdr_src(pkt)->type = NET_LINK_ETHERNET;
	zassert_mem_equal(net_pkt_lladdr_src(pkt)->addr, buf, NET_LINK_ADDR_MAX_LENGTH);
	net_pkt_lladdr_dst(pkt)->addr = net_pkt_cursor_get_pos(pkt);
	net_pkt_lladdr_dst(pkt)->len = NET_LINK_ADDR_MAX_LENGTH;
	net_pkt_lladdr_dst(pkt)->type = NET_LINK_ETHERNET;
	zassert_mem_equal(net_pkt_lladdr_dst(pkt)->addr, &buf[6], NET_LINK_ADDR_MAX_LENGTH);

	/* Set every attribute/flag the clone is expected to carry over */
	net_pkt_set_family(pkt, AF_INET6);
	net_pkt_set_captured(pkt, true);
	net_pkt_set_eof(pkt, true);
	net_pkt_set_ptp(pkt, true);
	net_pkt_set_tx_timestamping(pkt, true);
	net_pkt_set_rx_timestamping(pkt, true);
	net_pkt_set_forwarding(pkt, true);

	net_pkt_set_l2_bridged(pkt, true);
	net_pkt_set_l2_processed(pkt, true);
	net_pkt_set_ll_proto_type(pkt, ETH_P_IEEE802154);

	net_pkt_set_overwrite(pkt, false);
	cloned_pkt = net_pkt_clone(pkt, K_NO_WAIT);
	zassert_true(cloned_pkt != NULL, "Pkt not cloned");

	zassert_true(net_pkt_get_len(cloned_pkt) == sizeof(buf),
		     "Cloned pkt length mismatch");

	zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(pkt),
		     "Pkt remaining data mismatch");

	zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(cloned_pkt),
		     "Cloned pkt remaining data mismatch");

	zassert_false(net_pkt_is_being_overwritten(cloned_pkt),
		     "Cloned pkt overwrite flag not restored");

	zassert_false(net_pkt_is_being_overwritten(pkt),
		     "Pkt overwrite flag not restored");

	zassert_equal(net_pkt_family(cloned_pkt), AF_INET6,
		     "Address family value mismatch");

	zassert_true(net_pkt_is_captured(cloned_pkt),
		     "Cloned pkt captured flag mismatch");

	zassert_true(net_pkt_eof(cloned_pkt),
		     "Cloned pkt eof flag mismatch");

	zassert_true(net_pkt_is_ptp(cloned_pkt),
		     "Cloned pkt ptp_pkt flag mismatch");

#if CONFIG_NET_PKT_TIMESTAMP
	zassert_true(net_pkt_is_tx_timestamping(cloned_pkt),
		     "Cloned pkt tx_timestamping flag mismatch");

	zassert_true(net_pkt_is_rx_timestamping(cloned_pkt),
		     "Cloned pkt rx_timestamping flag mismatch");
#endif

	zassert_true(net_pkt_forwarding(cloned_pkt),
		     "Cloned pkt forwarding flag mismatch");

	zassert_true(net_pkt_is_l2_bridged(cloned_pkt),
		     "Cloned pkt l2_bridged flag mismatch");

	zassert_true(net_pkt_is_l2_processed(cloned_pkt),
		     "Cloned pkt l2_processed flag mismatch");

	/* ll addresses must point into the CLONE's buffer, not the original */
	zassert_mem_equal(net_pkt_lladdr_src(cloned_pkt)->addr, buf, NET_LINK_ADDR_MAX_LENGTH);
	zassert_true(net_pkt_lladdr_src(cloned_pkt)->addr == cloned_pkt->buffer->data,
		     "Cloned pkt ll src addr mismatch");

	zassert_mem_equal(net_pkt_lladdr_dst(cloned_pkt)->addr, &buf[6], NET_LINK_ADDR_MAX_LENGTH);
	zassert_true(net_pkt_lladdr_dst(cloned_pkt)->addr == net_pkt_cursor_get_pos(cloned_pkt),
		     "Cloned pkt ll dst addr mismatch");

	zassert_equal(net_pkt_ll_proto_type(cloned_pkt), ETH_P_IEEE802154,
		     "Address ll_proto_type value mismatch");

	net_pkt_unref(pkt);
	net_pkt_unref(cloned_pkt);
}
891 
/* Buffer pool for the headroom test below — small fixed-size buffers so
 * the test can build a multi-fragment packet with reserved headroom.
 * (Args: name, count, data size, user-data size, destroy callback.)
 */
NET_BUF_POOL_FIXED_DEFINE(test_net_pkt_headroom_pool, 4, 2, 4, NULL);
893 
/* Verify that cursor-based read/write skip reserved headroom bytes in
 * each fragment, and that net_buf_push_u8() can later fill that
 * headroom and becomes visible to subsequent reads.
 */
ZTEST(net_pkt_test_suite, test_net_pkt_headroom)
{
	struct net_pkt *pkt;
	struct net_buf *frag1;
	struct net_buf *frag2;
	struct net_buf *frag3;
	struct net_buf *frag4;

	/*
	 * Create a net_pkt; append net_bufs with reserved bytes (headroom).
	 *
	 * Layout to be crafted before writing to the net_buf: "HA|HH|HA|AA"
	 *  H: Headroom
	 *  |: net_buf/fragment delimiter
	 *  A: available byte
	 */
	pkt = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* 1st fragment has 1 byte headroom and one byte available: "HA" */
	frag1 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_buf_reserve(frag1, 1);
	net_pkt_append_buffer(pkt, frag1);
	zassert_equal(net_pkt_available_buffer(pkt), 1, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* 2nd fragment affecting neither size nor length: "HH" */
	frag2 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_buf_reserve(frag2, 2);
	net_pkt_append_buffer(pkt, frag2);
	zassert_equal(net_pkt_available_buffer(pkt), 1, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* 3rd fragment has 1 byte headroom and one byte available: "HA" */
	frag3 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_buf_reserve(frag3, 1);
	net_pkt_append_buffer(pkt, frag3);
	zassert_equal(net_pkt_available_buffer(pkt), 2, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* 4th fragment has no headroom and two available bytes: "AA" */
	frag4 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_pkt_append_buffer(pkt, frag4);
	zassert_equal(net_pkt_available_buffer(pkt), 4, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* Writing net_pkt via cursor, spanning all 4 fragments */
	net_pkt_cursor_init(pkt);
	zassert_true(net_pkt_write(pkt, "1234", 4) == 0, "Pkt write failed");

	/* Expected layout across all four fragments: "H1|HH|H2|34" */
	zassert_equal(frag1->size, 2, "Size mismatch");
	zassert_equal(frag1->len, 1, "Length mismatch");
	zassert_equal(frag2->size, 2, "Size mismatch");
	zassert_equal(frag2->len, 0, "Length mismatch");
	zassert_equal(frag3->size, 2, "Size mismatch");
	zassert_equal(frag3->len, 1, "Length mismatch");
	zassert_equal(frag4->size, 2, "Size mismatch");
	zassert_equal(frag4->len, 2, "Length mismatch");
	net_pkt_cursor_init(pkt);
	zassert_true(net_pkt_read(pkt, small_buffer, 4) == 0, "Read failed");
	zassert_mem_equal(small_buffer, "1234", 4, "Data mismatch");

	/* Making use of the headrooms: push fills each fragment's
	 * reserved bytes front-to-back, giving "A1|BC|D2|34".
	 */
	net_buf_push_u8(frag3, 'D');
	net_buf_push_u8(frag2, 'C');
	net_buf_push_u8(frag2, 'B');
	net_buf_push_u8(frag1, 'A');
	net_pkt_cursor_init(pkt);
	zassert_true(net_pkt_read(pkt, small_buffer, 8) == 0, "Read failed");
	zassert_mem_equal(small_buffer, "A1BCD234", 8, "Data mismatch");

	net_pkt_unref(pkt);
}
968 
/* Variable-size pool for the headroom-copy test: 2 buffers drawing from a
 * shared 128-byte data area, 4 bytes of user data, no destroy callback.
 */
NET_BUF_POOL_VAR_DEFINE(test_net_pkt_headroom_copy_pool, 2, 128, 4, NULL);
970 
ZTEST(net_pkt_test_suite,test_net_pkt_headroom_copy)971 ZTEST(net_pkt_test_suite, test_net_pkt_headroom_copy)
972 {
973 	struct net_pkt *pkt_src;
974 	struct net_pkt *pkt_dst;
975 	struct net_buf *frag1_dst;
976 	struct net_buf *frag2_dst;
977 	int res;
978 
979 	/* Create et_pkt containing the bytes "0123" */
980 	pkt_src = net_pkt_alloc_with_buffer(eth_if, 4,
981 					AF_UNSPEC, 0, K_NO_WAIT);
982 	zassert_true(pkt_src != NULL, "Pkt not allocated");
983 	res = net_pkt_write(pkt_src, "0123", 4);
984 	zassert_equal(res, 0, "Pkt write failed");
985 
986 	/* Create net_pkt consisting of net_buf fragments with reserved bytes */
987 	pkt_dst = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
988 	zassert_true(pkt_src != NULL, "Pkt not allocated");
989 
990 	frag1_dst = net_buf_alloc_len(&test_net_pkt_headroom_copy_pool, 2,
991 				      K_NO_WAIT);
992 	net_buf_reserve(frag1_dst, 1);
993 	net_pkt_append_buffer(pkt_dst, frag1_dst);
994 	frag2_dst = net_buf_alloc_len(&test_net_pkt_headroom_copy_pool, 4,
995 				      K_NO_WAIT);
996 	net_buf_reserve(frag2_dst, 1);
997 	net_pkt_append_buffer(pkt_dst, frag2_dst);
998 	zassert_equal(net_pkt_available_buffer(pkt_dst), 4, "Wrong space left");
999 	zassert_equal(net_pkt_get_len(pkt_dst), 0, "Length missmatch");
1000 
1001 	/* Copy to net_pkt which contains fragments with reserved bytes */
1002 	net_pkt_cursor_init(pkt_src);
1003 	net_pkt_cursor_init(pkt_dst);
1004 	res = net_pkt_copy(pkt_dst, pkt_src, 4);
1005 	zassert_equal(res, 0, "Pkt copy failed");
1006 	zassert_equal(net_pkt_available_buffer(pkt_dst), 0, "Wrong space left");
1007 	zassert_equal(net_pkt_get_len(pkt_dst), 4, "Length missmatch");
1008 
1009 	net_pkt_cursor_init(pkt_dst);
1010 	zassert_true(net_pkt_read(pkt_dst, small_buffer, 4) == 0,
1011 		     "Pkt read failed");
1012 	zassert_mem_equal(small_buffer, "0123", 4, "Data mismatch");
1013 
1014 	net_pkt_unref(pkt_dst);
1015 	net_pkt_unref(pkt_src);
1016 }
1017 
ZTEST(net_pkt_test_suite,test_net_pkt_get_contiguous_len)1018 ZTEST(net_pkt_test_suite, test_net_pkt_get_contiguous_len)
1019 {
1020 	size_t cont_len;
1021 	int res;
1022 	/* Allocate pkt with 2 fragments */
1023 	struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer(
1024 					   NULL, CONFIG_NET_BUF_DATA_SIZE * 2,
1025 					   AF_UNSPEC, 0, K_NO_WAIT);
1026 
1027 	zassert_not_null(pkt, "Pkt not allocated");
1028 
1029 	net_pkt_cursor_init(pkt);
1030 
1031 	cont_len = net_pkt_get_contiguous_len(pkt);
1032 	zassert_equal(CONFIG_NET_BUF_DATA_SIZE, cont_len,
1033 		      "Expected one complete available net_buf");
1034 
1035 	net_pkt_set_overwrite(pkt, false);
1036 
1037 	/* now write 3 byte into the pkt */
1038 	for (int i = 0; i < 3; ++i) {
1039 		res = net_pkt_write_u8(pkt, 0xAA);
1040 		zassert_equal(0, res, "Write packet failed");
1041 	}
1042 
1043 	cont_len = net_pkt_get_contiguous_len(pkt);
1044 	zassert_equal(CONFIG_NET_BUF_DATA_SIZE - 3, cont_len,
1045 		      "Expected a three byte reduction");
1046 
1047 	/* Fill the first fragment up until only 3 bytes are free */
1048 	for (int i = 0; i < CONFIG_NET_BUF_DATA_SIZE - 6; ++i) {
1049 		res = net_pkt_write_u8(pkt, 0xAA);
1050 		zassert_equal(0, res, "Write packet failed");
1051 	}
1052 
1053 	cont_len = net_pkt_get_contiguous_len(pkt);
1054 	zassert_equal(3, cont_len, "Expected only three bytes are available");
1055 
1056 	/* Fill the complete first fragment, so the cursor points to the second
1057 	 * fragment.
1058 	 */
1059 	for (int i = 0; i < 3; ++i) {
1060 		res = net_pkt_write_u8(pkt, 0xAA);
1061 		zassert_equal(0, res, "Write packet failed");
1062 	}
1063 
1064 	cont_len = net_pkt_get_contiguous_len(pkt);
1065 	zassert_equal(CONFIG_NET_BUF_DATA_SIZE, cont_len,
1066 		      "Expected next full net_buf is available");
1067 
1068 	/* Fill the last fragment */
1069 	for (int i = 0; i < CONFIG_NET_BUF_DATA_SIZE; ++i) {
1070 		res = net_pkt_write_u8(pkt, 0xAA);
1071 		zassert_equal(0, res, "Write packet failed");
1072 	}
1073 
1074 	cont_len = net_pkt_get_contiguous_len(pkt);
1075 	zassert_equal(0, cont_len, "Expected no available space");
1076 
1077 	net_pkt_unref(pkt);
1078 }
1079 
ZTEST(net_pkt_test_suite,test_net_pkt_remove_tail)1080 ZTEST(net_pkt_test_suite, test_net_pkt_remove_tail)
1081 {
1082 	struct net_pkt *pkt;
1083 	int err;
1084 
1085 	pkt = net_pkt_alloc_with_buffer(NULL,
1086 					CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1087 					AF_UNSPEC, 0, K_NO_WAIT);
1088 	zassert_true(pkt != NULL, "Pkt not allocated");
1089 
1090 	net_pkt_cursor_init(pkt);
1091 	net_pkt_write(pkt, small_buffer, CONFIG_NET_BUF_DATA_SIZE * 2 + 3);
1092 
1093 	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1094 		      "Pkt length is invalid");
1095 	zassert_equal(pkt->frags->frags->frags->len, 3,
1096 		      "3rd buffer length is invalid");
1097 
1098 	/* Remove some bytes from last buffer */
1099 	err = net_pkt_remove_tail(pkt, 2);
1100 	zassert_equal(err, 0, "Failed to remove tail");
1101 
1102 	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 1,
1103 		      "Pkt length is invalid");
1104 	zassert_not_equal(pkt->frags->frags->frags, NULL,
1105 			  "3rd buffer was removed");
1106 	zassert_equal(pkt->frags->frags->frags->len, 1,
1107 		      "3rd buffer length is invalid");
1108 
1109 	/* Remove last byte from last buffer */
1110 	err = net_pkt_remove_tail(pkt, 1);
1111 	zassert_equal(err, 0, "Failed to remove tail");
1112 
1113 	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2,
1114 		      "Pkt length is invalid");
1115 	zassert_equal(pkt->frags->frags->frags, NULL,
1116 		      "3rd buffer was not removed");
1117 	zassert_equal(pkt->frags->frags->len, CONFIG_NET_BUF_DATA_SIZE,
1118 		      "2nd buffer length is invalid");
1119 
1120 	/* Remove 2nd buffer and one byte from 1st buffer */
1121 	err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE + 1);
1122 	zassert_equal(err, 0, "Failed to remove tail");
1123 
1124 	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE - 1,
1125 		      "Pkt length is invalid");
1126 	zassert_equal(pkt->frags->frags, NULL,
1127 		      "2nd buffer was not removed");
1128 	zassert_equal(pkt->frags->len, CONFIG_NET_BUF_DATA_SIZE - 1,
1129 		      "1st buffer length is invalid");
1130 
1131 	net_pkt_unref(pkt);
1132 
1133 	pkt = net_pkt_rx_alloc_with_buffer(NULL,
1134 					   CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1135 					   AF_UNSPEC, 0, K_NO_WAIT);
1136 
1137 	net_pkt_cursor_init(pkt);
1138 	net_pkt_write(pkt, small_buffer, CONFIG_NET_BUF_DATA_SIZE * 2 + 3);
1139 
1140 	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1141 		      "Pkt length is invalid");
1142 	zassert_equal(pkt->frags->frags->frags->len, 3,
1143 		      "3rd buffer length is invalid");
1144 
1145 	/* Remove bytes spanning 3 buffers */
1146 	err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE + 5);
1147 	zassert_equal(err, 0, "Failed to remove tail");
1148 
1149 	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE - 2,
1150 		      "Pkt length is invalid");
1151 	zassert_equal(pkt->frags->frags, NULL,
1152 		      "2nd buffer was not removed");
1153 	zassert_equal(pkt->frags->len, CONFIG_NET_BUF_DATA_SIZE - 2,
1154 		      "1st buffer length is invalid");
1155 
1156 	/* Try to remove more bytes than packet has */
1157 	err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE);
1158 	zassert_equal(err, -EINVAL,
1159 		      "Removing more bytes than available should fail");
1160 
1161 	net_pkt_unref(pkt);
1162 }
1163 
ZTEST(net_pkt_test_suite,test_net_pkt_shallow_clone_noleak_buf)1164 ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_noleak_buf)
1165 {
1166 	const int bufs_to_allocate = 3;
1167 	const size_t pkt_size = CONFIG_NET_BUF_DATA_SIZE * bufs_to_allocate;
1168 	struct net_pkt *pkt, *shallow_pkt;
1169 	struct net_buf_pool *tx_data;
1170 
1171 	pkt = net_pkt_alloc_with_buffer(NULL, pkt_size,
1172 					AF_UNSPEC, 0, K_NO_WAIT);
1173 
1174 	zassert_true(pkt != NULL, "Pkt not allocated");
1175 
1176 	net_pkt_get_info(NULL, NULL, NULL, &tx_data);
1177 	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count - bufs_to_allocate,
1178 		      "Incorrect net buf allocation");
1179 
1180 	shallow_pkt = net_pkt_shallow_clone(pkt, K_NO_WAIT);
1181 	zassert_true(shallow_pkt != NULL, "Pkt not allocated");
1182 	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count - bufs_to_allocate,
1183 		      "Incorrect available net buf count");
1184 
1185 	net_pkt_unref(pkt);
1186 	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count - bufs_to_allocate,
1187 		      "Incorrect available net buf count");
1188 
1189 	net_pkt_unref(shallow_pkt);
1190 	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count,
1191 		      "Leak detected");
1192 
1193 }
1194 
/* Helper for the shallow-clone-with-appended-fragments tests.
 *
 * Allocates a packet, shallow-clones it, then appends extra fragments
 * to the ORIGINAL packet only, and checks the pool's available-buffer
 * accounting as references are dropped in sequence.
 *
 * @param extra_frag_refcounts Number of additional net_buf_ref() calls
 *        taken on the first appended fragment before the packets are
 *        released; the same number of net_buf_unref() calls is made at
 *        the end to release them.
 */
void test_net_pkt_shallow_clone_append_buf(int extra_frag_refcounts)
{
	const int bufs_to_allocate = 3;
	const int bufs_frag = 2;

	/* NOTE(review): this compares buffer *counts* against the per-buffer
	 * *data size* config — presumably just a loose sanity guard that the
	 * constants stay small; confirm the intended bound.
	 */
	zassert_true(bufs_frag + bufs_to_allocate < CONFIG_NET_BUF_DATA_SIZE,
		     "Total bufs to allocate must less than available space");

	const size_t pkt_size = CONFIG_NET_BUF_DATA_SIZE * bufs_to_allocate;

	struct net_pkt *pkt, *shallow_pkt;
	struct net_buf *frag_head;
	struct net_buf *frag;
	struct net_buf_pool *tx_data;

	pkt = net_pkt_alloc_with_buffer(NULL, pkt_size, AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Fetch the TX data pool so avail_count can be tracked below */
	net_pkt_get_info(NULL, NULL, NULL, &tx_data);
	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count
		      - bufs_to_allocate, "Incorrect net buf allocation");

	shallow_pkt = net_pkt_shallow_clone(pkt, K_NO_WAIT);
	zassert_true(shallow_pkt != NULL, "Pkt not allocated");

	/* allocate buffers for the frag; keep a handle on the first one so
	 * extra ref counts can be taken/released on it later
	 */
	for (int i = 0; i < bufs_frag; i++) {
		frag = net_buf_alloc_len(tx_data, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
		zassert_true(frag != NULL, "Frag not allocated");
		net_pkt_append_buffer(pkt, frag);
		if (i == 0) {
			frag_head = frag;
		}
	}

	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count
		      - bufs_to_allocate - bufs_frag, "Incorrect net buf allocation");

	/* Note: if the frag is appended to a net buf, then the nut buf */
	/* takes ownership of one ref count. Otherwise net_buf_unref() must */
	/* be called on the frag to free the buffers. */

	for (int i = 0; i < extra_frag_refcounts; i++) {
		frag_head = net_buf_ref(frag_head);
	}

	net_pkt_unref(pkt);

	/* we shouldn't have freed any buffers yet: the shallow clone still
	 * references the whole fragment chain
	 */
	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count
		      - bufs_to_allocate - bufs_frag,
		      "Incorrect net buf allocation");

	net_pkt_unref(shallow_pkt);

	if (extra_frag_refcounts == 0) {
		/* if no extra ref counts to frag were added then we should free */
		/* all the buffers at this point */
		zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count,
			      "Leak detected");
	} else {
		/* otherwise only bufs_frag should be available, and frag could */
		/* still used at this point */
		zassert_equal(atomic_get(&tx_data->avail_count),
			      tx_data->buf_count - bufs_frag, "Leak detected");
	}

	/* Drop the extra references taken above */
	for (int i = 0; i < extra_frag_refcounts; i++) {
		net_buf_unref(frag_head);
	}

	/* all the buffers should be freed now */
	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count,
		      "Leak detected");
}
1270 
/* Shallow clone + appended fragments, no extra fragment ref counts. */
ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_append_buf_0)
{
	test_net_pkt_shallow_clone_append_buf(0);
}
1275 
/* Shallow clone + appended fragments, one extra fragment ref count. */
ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_append_buf_1)
{
	test_net_pkt_shallow_clone_append_buf(1);
}
1280 
/* Shallow clone + appended fragments, two extra fragment ref counts. */
ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_append_buf_2)
{
	test_net_pkt_shallow_clone_append_buf(2);
}
1285 
/* Register the suite; no predicate or suite/test setup/teardown hooks. */
ZTEST_SUITE(net_pkt_test_suite, NULL, NULL, NULL, NULL, NULL);
1287