1 /*
2 * Copyright (c) 2018 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <zephyr/ztest_assert.h>
9 #include <zephyr/types.h>
10 #include <stddef.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <zephyr/net/net_pkt.h>
14 #include <zephyr/net/net_if.h>
15 #include <zephyr/net/net_ip.h>
16 #include <zephyr/net/ethernet.h>
17 #include <zephyr/random/random.h>
18
19 #include <zephyr/ztest.h>
20
/* MAC address used by the fake Ethernet interface; filled in lazily
 * by fake_dev_iface_init() with a documentation-range address.
 */
static uint8_t mac_addr[sizeof(struct net_eth_addr)];
/* The interface registered via NET_DEVICE_INIT() below; tests use it
 * to allocate packets on a real iface.
 */
static struct net_if *eth_if;
/* Scratch buffer shared by the r/w tests */
static uint8_t small_buffer[512];
24
25 /************************\
26 * FAKE ETHERNET DEVICE *
27 \************************/
28
fake_dev_iface_init(struct net_if * iface)29 static void fake_dev_iface_init(struct net_if *iface)
30 {
31 if (mac_addr[2] == 0U) {
32 /* 00-00-5E-00-53-xx Documentation RFC 7042 */
33 mac_addr[0] = 0x00;
34 mac_addr[1] = 0x00;
35 mac_addr[2] = 0x5E;
36 mac_addr[3] = 0x00;
37 mac_addr[4] = 0x53;
38 mac_addr[5] = sys_rand8_get();
39 }
40
41 net_if_set_link_addr(iface, mac_addr, 6, NET_LINK_ETHERNET);
42
43 eth_if = iface;
44 }
45
/* Fake transmit function: drop the packet and report success.
 * Both parameters are intentionally unused; mark them so (consistent
 * with fake_dev_init()) to keep compilers and static analysis quiet.
 */
static int fake_dev_send(const struct device *dev, struct net_pkt *pkt)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(pkt);

	return 0;
}
50
/* Device init function passed to NET_DEVICE_INIT(); the fake device
 * needs no hardware setup, so this always succeeds.
 */
int fake_dev_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}
57
#if defined(CONFIG_NET_L2_ETHERNET)
/* Ethernet L2 available: the fake device presents itself as an
 * Ethernet NIC, so packets carry an Ethernet L2 header.
 */
static const struct ethernet_api fake_dev_api = {
	.iface_api.init = fake_dev_iface_init,
	.send = fake_dev_send,
};

#define _ETH_L2_LAYER ETHERNET_L2
#define _ETH_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(ETHERNET_L2)
#define L2_HDR_SIZE sizeof(struct net_eth_hdr)
#else
/* No Ethernet L2 in this build: fall back to the dummy L2, which has
 * no L2 header at all (L2_HDR_SIZE is 0).
 */
static const struct dummy_api fake_dev_api = {
	.iface_api.init = fake_dev_iface_init,
	.send = fake_dev_send,
};

#define _ETH_L2_LAYER DUMMY_L2
#define _ETH_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(DUMMY_L2)
#define L2_HDR_SIZE 0
#endif

/* Register the fake device and bind it to the selected L2 */
NET_DEVICE_INIT(fake_dev, "fake_dev",
		fake_dev_init, NULL, NULL, NULL,
		CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,
		&fake_dev_api, _ETH_L2_LAYER, _ETH_L2_CTX_TYPE,
		NET_ETH_MTU);
83
84 /*********************\
85 * UTILITY FUNCTIONS *
86 \*********************/
87
pkt_is_of_size(struct net_pkt * pkt,size_t size)88 static bool pkt_is_of_size(struct net_pkt *pkt, size_t size)
89 {
90 return (net_pkt_available_buffer(pkt) == size);
91 }
92
pkt_print_cursor(struct net_pkt * pkt)93 static void pkt_print_cursor(struct net_pkt *pkt)
94 {
95 if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
96 printk("Unknown position\n");
97 } else {
98 printk("Position %zu (%p) in net_buf %p (data %p)\n",
99 pkt->cursor.pos - pkt->cursor.buf->data,
100 pkt->cursor.pos, pkt->cursor.buf,
101 pkt->cursor.buf->data);
102 }
103 }
104
105
106 /*****************************\
107 * HOW TO ALLOCATE - 2 TESTS *
108 \*****************************/
ZTEST(net_pkt_test_suite,test_net_pkt_allocate_wo_buffer)109 ZTEST(net_pkt_test_suite, test_net_pkt_allocate_wo_buffer)
110 {
111 struct net_pkt *pkt;
112
113 /* How to allocate a packet, with no buffer */
114 pkt = net_pkt_alloc(K_NO_WAIT);
115 zassert_true(pkt != NULL, "Pkt not allocated");
116
117 /* Freeing the packet */
118 net_pkt_unref(pkt);
119 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
120 "Pkt not properly unreferenced");
121
122 /* Note that, if you already know the iface to which the packet
123 * belongs to, you will be able to use net_pkt_alloc_on_iface().
124 */
125 pkt = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
126 zassert_true(pkt != NULL, "Pkt not allocated");
127
128 net_pkt_unref(pkt);
129 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
130 "Pkt not properly unreferenced");
131 }
132
ZTEST(net_pkt_test_suite,test_net_pkt_allocate_with_buffer)133 ZTEST(net_pkt_test_suite, test_net_pkt_allocate_with_buffer)
134 {
135 struct net_pkt *pkt;
136
137 /* How to allocate a packet, with buffer
138 * a) - with a size that will fit MTU, let's say 512 bytes
139 * Note: we don't care of the family/protocol for now
140 */
141 pkt = net_pkt_alloc_with_buffer(eth_if, 512,
142 AF_UNSPEC, 0, K_NO_WAIT);
143 zassert_true(pkt != NULL, "Pkt not allocated");
144
145 /* Did we get the requested size? */
146 zassert_true(pkt_is_of_size(pkt, 512), "Pkt size is not right");
147
148 /* Freeing the packet */
149 net_pkt_unref(pkt);
150 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
151 "Pkt not properly unreferenced");
152
153 /*
154 * b) - with a size that will not fit MTU, let's say 1800 bytes
155 * Note: again we don't care of family/protocol for now.
156 */
157 pkt = net_pkt_alloc_with_buffer(eth_if, 1800,
158 AF_UNSPEC, 0, K_NO_WAIT);
159 zassert_true(pkt != NULL, "Pkt not allocated");
160
161 zassert_false(pkt_is_of_size(pkt, 1800), "Pkt size is not right");
162 zassert_true(pkt_is_of_size(pkt, net_if_get_mtu(eth_if) + L2_HDR_SIZE),
163 "Pkt size is not right");
164
165 /* Freeing the packet */
166 net_pkt_unref(pkt);
167 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
168 "Pkt not properly unreferenced");
169
170 /*
171 * c) - Now with 512 bytes but on IPv4/UDP
172 */
173 pkt = net_pkt_alloc_with_buffer(eth_if, 512, AF_INET,
174 IPPROTO_UDP, K_NO_WAIT);
175 zassert_true(pkt != NULL, "Pkt not allocated");
176
177 /* Because 512 + NET_IPV4UDPH_LEN fits MTU, total must be that one */
178 zassert_true(pkt_is_of_size(pkt, 512 + NET_IPV4UDPH_LEN),
179 "Pkt overall size does not match");
180
181 /* Freeing the packet */
182 net_pkt_unref(pkt);
183 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
184 "Pkt not properly unreferenced");
185
186 /*
187 * c) - Now with 1800 bytes but on IPv4/UDP
188 */
189 pkt = net_pkt_alloc_with_buffer(eth_if, 1800, AF_INET,
190 IPPROTO_UDP, K_NO_WAIT);
191 zassert_true(pkt != NULL, "Pkt not allocated");
192
193 /* Because 1800 + NET_IPV4UDPH_LEN won't fit MTU, payload size
194 * should be MTU
195 */
196 zassert_true(net_pkt_available_buffer(pkt) ==
197 net_if_get_mtu(eth_if),
198 "Payload buf size does not match for ipv4/udp");
199
200 /* Freeing the packet */
201 net_pkt_unref(pkt);
202 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
203 "Pkt not properly unreferenced");
204
205 /* d) - with a zero payload but AF_INET family
206 */
207 pkt = net_pkt_alloc_with_buffer(eth_if, 0,
208 AF_INET, 0, K_NO_WAIT);
209 zassert_true(pkt != NULL, "Pkt not allocated");
210
211 /* Did we get the requested size? */
212 zassert_true(pkt_is_of_size(pkt, NET_IPV4H_LEN),
213 "Pkt size is not right");
214
215 /* Freeing the packet */
216 net_pkt_unref(pkt);
217 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
218 "Pkt not properly unreferenced");
219
220 /* e) - with a zero payload but AF_PACKET family
221 */
222 pkt = net_pkt_alloc_with_buffer(eth_if, 0,
223 AF_PACKET, 0, K_NO_WAIT);
224 zassert_true(pkt != NULL, "Pkt not allocated");
225
226 /* Did we get the requested size? */
227 zassert_true(pkt_is_of_size(pkt, 0), "Pkt size is not right");
228
229 /* Freeing the packet */
230 net_pkt_unref(pkt);
231 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
232 "Pkt not properly unreferenced");
233 }
234
235 /********************************\
236 * HOW TO R/W A PACKET - TESTS *
237 \********************************/
238
ZTEST(net_pkt_test_suite,test_net_pkt_basics_of_rw)239 ZTEST(net_pkt_test_suite, test_net_pkt_basics_of_rw)
240 {
241 struct net_pkt_cursor backup;
242 struct net_pkt *pkt;
243 uint16_t value16;
244 int ret;
245
246 pkt = net_pkt_alloc_with_buffer(eth_if, 512,
247 AF_UNSPEC, 0, K_NO_WAIT);
248 zassert_true(pkt != NULL, "Pkt not allocated");
249
250 /* Once newly allocated with buffer,
251 * a packet has no data accounted for in its buffer
252 */
253 zassert_true(net_pkt_get_len(pkt) == 0,
254 "Pkt initial length should be 0");
255
256 /* This is done through net_buf which can distinguish
257 * the size of a buffer from the length of the data in it.
258 */
259
260 /* Let's subsequently write 1 byte, then 2 bytes and 4 bytes
261 * We write values made of 0s
262 */
263 ret = net_pkt_write_u8(pkt, 0);
264 zassert_true(ret == 0, "Pkt write failed");
265
266 /* Length should be 1 now */
267 zassert_true(net_pkt_get_len(pkt) == 1, "Pkt length mismatch");
268
269 ret = net_pkt_write_be16(pkt, 0);
270 zassert_true(ret == 0, "Pkt write failed");
271
272 /* Length should be 3 now */
273 zassert_true(net_pkt_get_len(pkt) == 3, "Pkt length mismatch");
274
275 /* Verify that the data is properly written to net_buf */
276 net_pkt_cursor_backup(pkt, &backup);
277 net_pkt_cursor_init(pkt);
278 net_pkt_set_overwrite(pkt, true);
279 net_pkt_skip(pkt, 1);
280 net_pkt_read_be16(pkt, &value16);
281 zassert_equal(value16, 0, "Invalid value %d read, expected %d",
282 value16, 0);
283
284 /* Then write new value, overwriting the old one */
285 net_pkt_cursor_init(pkt);
286 net_pkt_skip(pkt, 1);
287 ret = net_pkt_write_be16(pkt, 42);
288 zassert_true(ret == 0, "Pkt write failed");
289
290 /* And re-read the value again */
291 net_pkt_cursor_init(pkt);
292 net_pkt_skip(pkt, 1);
293 ret = net_pkt_read_be16(pkt, &value16);
294 zassert_true(ret == 0, "Pkt read failed");
295 zassert_equal(value16, 42, "Invalid value %d read, expected %d",
296 value16, 42);
297
298 net_pkt_set_overwrite(pkt, false);
299 net_pkt_cursor_restore(pkt, &backup);
300
301 ret = net_pkt_write_be32(pkt, 0);
302 zassert_true(ret == 0, "Pkt write failed");
303
304 /* Length should be 7 now */
305 zassert_true(net_pkt_get_len(pkt) == 7, "Pkt length mismatch");
306
307 /* All these writing functions use net_ptk_write(), which works
308 * this way:
309 */
310 ret = net_pkt_write(pkt, small_buffer, 9);
311 zassert_true(ret == 0, "Pkt write failed");
312
313 /* Length should be 16 now */
314 zassert_true(net_pkt_get_len(pkt) == 16, "Pkt length mismatch");
315
316 /* Now let's say you want to memset some data */
317 ret = net_pkt_memset(pkt, 0, 4);
318 zassert_true(ret == 0, "Pkt memset failed");
319
320 /* Length should be 20 now */
321 zassert_true(net_pkt_get_len(pkt) == 20, "Pkt length mismatch");
322
323 /* So memset affects the length exactly as write does */
324
325 /* Sometimes you might want to advance in the buffer without caring
326 * what's written there since you'll eventually come back for that.
327 * net_pkt_skip() is used for it.
328 * Note: usually you will not have to use that function a lot yourself.
329 */
330 ret = net_pkt_skip(pkt, 20);
331 zassert_true(ret == 0, "Pkt skip failed");
332
333 /* Length should be 40 now */
334 zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");
335
336 /* Again, skip affected the length also, like a write
337 * But wait a minute: how to get back then, in order to write at
338 * the position we just skipped?
339 *
340 * So let's introduce the concept of buffer cursor. (which could
341 * be named 'cursor' if such name has more relevancy. Basically, each
342 * net_pkt embeds such 'cursor': it's like a head of a tape
343 * recorder/reader, it holds the current position in the buffer where
344 * you can r/w. All operations use and update it below.
345 * There is, however, a catch: buffer is described through net_buf
346 * and these are like a simple linked-list.
347 * Which means that unlike a tape recorder/reader: you are not
348 * able to go backward. Only back from starting point and forward.
349 * Thus why there is a net_pkt_cursor_init(pkt) which will let you going
350 * back from the start. We could hold more info in order to avoid that,
351 * but that would mean growing each an every net_buf.
352 */
353 net_pkt_cursor_init(pkt);
354
355 /* But isn't it so that if I want to go at the previous position I
356 * skipped, I'll use skip again but then won't it affect again the
357 * length?
358 * Answer is yes. Hopefully there is a mean to avoid that. Basically
359 * for data that already "exists" in the buffer (aka: data accounted
360 * for in the buffer, through the length) you'll need to set the packet
361 * to overwrite: all subsequent operations will then work on existing
362 * data and will not affect the length (it won't add more data)
363 */
364 net_pkt_set_overwrite(pkt, true);
365
366 zassert_true(net_pkt_is_being_overwritten(pkt),
367 "Pkt is not set to overwrite");
368
369 /* Ok so previous skipped position was at offset 20 */
370 ret = net_pkt_skip(pkt, 20);
371 zassert_true(ret == 0, "Pkt skip failed");
372
373 /* Length should _still_ be 40 */
374 zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");
375
376 /* And you can write stuff */
377 ret = net_pkt_write_le32(pkt, 0);
378 zassert_true(ret == 0, "Pkt write failed");
379
380 /* Again, length should _still_ be 40 */
381 zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");
382
383 /* Let's memset the rest */
384 ret = net_pkt_memset(pkt, 0, 16);
385 zassert_true(ret == 0, "Pkt memset failed");
386
387 /* Again, length should _still_ be 40 */
388 zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");
389
390 /* We are now back at the end of the existing data in the buffer
391 * Since overwrite is still on, we should not be able to r/w
392 * anything.
393 * This is completely nominal, as being set, overwrite allows r/w only
394 * on existing data in the buffer:
395 */
396 ret = net_pkt_write_be32(pkt, 0);
397 zassert_true(ret != 0, "Pkt write succeeded where it shouldn't have");
398
399 /* Logically, in order to be able to add new data in the buffer,
400 * overwrite should be disabled:
401 */
402 net_pkt_set_overwrite(pkt, false);
403
404 /* But it will fail: */
405 ret = net_pkt_write_le32(pkt, 0);
406 zassert_true(ret != 0, "Pkt write succeeded?");
407
408 /* Why is that?
409 * This is because in case of r/w error: the iterator is invalidated.
410 * This a design choice, once you get a r/w error it means your code
411 * messed up requesting smaller buffer than you actually needed, or
412 * writing too much data than it should have been etc...).
413 * So you must drop your packet entirely.
414 */
415
416 /* Freeing the packet */
417 net_pkt_unref(pkt);
418 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
419 "Pkt not properly unreferenced");
420 }
421
ZTEST(net_pkt_test_suite,test_net_pkt_advanced_basics)422 ZTEST(net_pkt_test_suite, test_net_pkt_advanced_basics)
423 {
424 struct net_pkt_cursor backup;
425 struct net_pkt *pkt;
426 int ret;
427
428 pkt = net_pkt_alloc_with_buffer(eth_if, 512,
429 AF_INET, IPPROTO_UDP, K_NO_WAIT);
430 zassert_true(pkt != NULL, "Pkt not allocated");
431
432 pkt_print_cursor(pkt);
433
434 /* As stated earlier, initializing the cursor, is the way to go
435 * back from the start in the buffer (either header or payload then).
436 * We also showed that using net_pkt_skip() could be used to move
437 * forward in the buffer.
438 * But what if you are far in the buffer, you need to go backward,
439 * and back again to your previous position?
440 * You could certainly do:
441 */
442 ret = net_pkt_write(pkt, small_buffer, 20);
443 zassert_true(ret == 0, "Pkt write failed");
444
445 pkt_print_cursor(pkt);
446
447 net_pkt_cursor_init(pkt);
448
449 pkt_print_cursor(pkt);
450
451 /* ... do something here ... */
452
453 /* And finally go back with overwrite/skip: */
454 net_pkt_set_overwrite(pkt, true);
455 ret = net_pkt_skip(pkt, 20);
456 zassert_true(ret == 0, "Pkt skip failed");
457 net_pkt_set_overwrite(pkt, false);
458
459 pkt_print_cursor(pkt);
460
461 /* In this example, do not focus on the 20 bytes. It is just for
462 * the sake of the example.
463 * The other method is backup/restore the packet cursor.
464 */
465 net_pkt_cursor_backup(pkt, &backup);
466
467 net_pkt_cursor_init(pkt);
468
469 /* ... do something here ... */
470
471 /* and restore: */
472 net_pkt_cursor_restore(pkt, &backup);
473
474 pkt_print_cursor(pkt);
475
476 /* Another feature, is how you access your data. Earlier was
477 * presented basic r/w functions. But sometime you might want to
478 * access your data directly through a structure/type etc...
479 * Due to the "fragmented" possible nature of your buffer, you
480 * need to know if the data you are trying to access is in
481 * contiguous area.
482 * For this, you'll use:
483 */
484 ret = (int) net_pkt_is_contiguous(pkt, 4);
485 zassert_true(ret == 1, "Pkt contiguity check failed");
486
487 /* If that's successful you should be able to get the actual
488 * position in the buffer and cast it to the type you want.
489 */
490 {
491 uint32_t *val = (uint32_t *)net_pkt_cursor_get_pos(pkt);
492
493 *val = 0U;
494 /* etc... */
495 }
496
497 /* However, to advance your cursor, since none of the usual r/w
498 * functions got used: net_pkt_skip() should be called relevantly:
499 */
500 net_pkt_skip(pkt, 4);
501
502 /* Freeing the packet */
503 net_pkt_unref(pkt);
504 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
505 "Pkt not properly unreferenced");
506
507 /* Obviously one will very rarely use these 2 last low level functions
508 * - net_pkt_is_contiguous()
509 * - net_pkt_cursor_update()
510 *
511 * Let's see why next.
512 */
513 }
514
ZTEST(net_pkt_test_suite,test_net_pkt_easier_rw_usage)515 ZTEST(net_pkt_test_suite, test_net_pkt_easier_rw_usage)
516 {
517 struct net_pkt *pkt;
518 int ret;
519
520 pkt = net_pkt_alloc_with_buffer(eth_if, 512,
521 AF_INET, IPPROTO_UDP, K_NO_WAIT);
522 zassert_true(pkt != NULL, "Pkt not allocated");
523
524 /* In net core, all goes down in fine to header manipulation.
525 * Either it's an IP header, UDP, ICMP, TCP one etc...
526 * One would then prefer to access those directly via there
527 * descriptors (struct net_udp_hdr, struct net_icmp_hdr, ...)
528 * rather than building it byte by bytes etc...
529 *
530 * As seen earlier, it is possible to cast on current position.
531 * However, due to the "fragmented" possible nature of the buffer,
532 * it should also be possible to handle the case the data being
533 * accessed is scattered on 1+ net_buf.
534 *
535 * To avoid redoing the contiguity check, cast or copy on failure,
536 * a complex type named struct net_pkt_header_access exists.
537 * It solves both cases (accessing data contiguous or not), without
538 * the need for runtime allocation (all is on stack)
539 */
540 {
541 NET_PKT_DATA_ACCESS_DEFINE(ip_access, struct net_ipv4_hdr);
542 struct net_ipv4_hdr *ip_hdr;
543
544 ip_hdr = (struct net_ipv4_hdr *)
545 net_pkt_get_data(pkt, &ip_access);
546 zassert_not_null(ip_hdr, "Accessor failed");
547
548 ip_hdr->tos = 0x00;
549
550 ret = net_pkt_set_data(pkt, &ip_access);
551 zassert_true(ret == 0, "Accessor failed");
552
553 zassert_true(net_pkt_get_len(pkt) == NET_IPV4H_LEN,
554 "Pkt length mismatch");
555 }
556
557 /* As you can notice: get/set take also care of handling the cursor
558 * and updating the packet length relevantly thus why packet length
559 * has properly grown.
560 */
561
562 /* Freeing the packet */
563 net_pkt_unref(pkt);
564 zassert_true(atomic_get(&pkt->atomic_ref) == 0,
565 "Pkt not properly unreferenced");
566 }
567
/* Hand-crafted net_buf fragment chain used by test_net_pkt_copy:
 * b1 -> b2 -> b3 -> b4 -> b5. The buffers mix accounted data, free
 * space, a data-less buffer (b3) and a zero-size buffer (b5).
 */

/* b5: 10 bytes of storage, but zero size and no data accounted */
uint8_t b5_data[10] = "qrstuvwxyz";
struct net_buf b5 = {
	.ref = 1,
	.data = b5_data,
	.len = 0,
	.size = 0,
	.__buf = b5_data,
};

/* b4: 4-byte buffer, 2 bytes of data accounted and 2 bytes free */
uint8_t b4_data[4] = "mnop";
struct net_buf b4 = {
	.frags = &b5,
	.ref = 1,
	.data = b4_data,
	.len = sizeof(b4_data) - 2,
	.size = sizeof(b4_data),
	.__buf = b4_data,
};

/* b3: buffer with no data storage at all */
struct net_buf b3 = {
	.frags = &b4,
	.ref = 1,
	.data = NULL,
	.__buf = NULL,
};

/* b2: 8-byte buffer, empty, all 8 bytes available */
uint8_t b2_data[8] = "efghijkl";
struct net_buf b2 = {
	.frags = &b3,
	.ref = 1,
	.data = b2_data,
	.len = 0,
	.size = sizeof(b2_data),
	.__buf = b2_data,
};

/* b1: head of the chain, 2 bytes of data accounted and 2 bytes free */
uint8_t b1_data[4] = "abcd";
struct net_buf b1 = {
	.frags = &b2,
	.ref = 1,
	.data = b1_data,
	.len = sizeof(b1_data) - 2,
	.size = sizeof(b1_data),
	.__buf = b1_data,
};
613
ZTEST(net_pkt_test_suite,test_net_pkt_copy)614 ZTEST(net_pkt_test_suite, test_net_pkt_copy)
615 {
616 struct net_pkt *pkt_src;
617 struct net_pkt *pkt_dst;
618
619 pkt_src = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
620 zassert_true(pkt_src != NULL, "Pkt not allocated");
621
622 pkt_print_cursor(pkt_src);
623
624 /* Let's append the buffers */
625 net_pkt_append_buffer(pkt_src, &b1);
626
627 net_pkt_set_overwrite(pkt_src, true);
628
629 /* There should be some space left */
630 zassert_true(net_pkt_available_buffer(pkt_src) != 0, "No space left?");
631 /* Length should be 4 */
632 zassert_true(net_pkt_get_len(pkt_src) == 4, "Wrong length");
633
634 /* Actual space left is 12 (in b1, b2 and b4) */
635 zassert_true(net_pkt_available_buffer(pkt_src) == 12,
636 "Wrong space left?");
637
638 pkt_print_cursor(pkt_src);
639
640 /* Now let's clone the pkt
641 * This will test net_pkt_copy_new() as it uses it for the buffers
642 */
643 pkt_dst = net_pkt_clone(pkt_src, K_NO_WAIT);
644 zassert_true(pkt_dst != NULL, "Pkt not clone");
645
646 /* Cloning does not take into account left space,
647 * but only occupied one
648 */
649 zassert_true(net_pkt_available_buffer(pkt_dst) == 0, "Space left");
650 zassert_true(net_pkt_get_len(pkt_src) == net_pkt_get_len(pkt_dst),
651 "Not same amount?");
652
653 /* It also did not care to copy the net_buf itself, only the content
654 * so, knowing that the base buffer size is bigger than necessary,
655 * pkt_dst has only one net_buf
656 */
657 zassert_true(pkt_dst->buffer->frags == NULL, "Not only one buffer?");
658
659 /* Freeing the packet */
660 pkt_src->buffer = NULL;
661 net_pkt_unref(pkt_src);
662 zassert_true(atomic_get(&pkt_src->atomic_ref) == 0,
663 "Pkt not properly unreferenced");
664 net_pkt_unref(pkt_dst);
665 zassert_true(atomic_get(&pkt_dst->atomic_ref) == 0,
666 "Pkt not properly unreferenced");
667 }
668
669 #define PULL_TEST_PKT_DATA_SIZE 600
670
ZTEST(net_pkt_test_suite,test_net_pkt_pull)671 ZTEST(net_pkt_test_suite, test_net_pkt_pull)
672 {
673 const int PULL_AMOUNT = 8;
674 const int LARGE_PULL_AMOUNT = 200;
675 struct net_pkt *dummy_pkt;
676 static uint8_t pkt_data[PULL_TEST_PKT_DATA_SIZE];
677 static uint8_t pkt_data_readback[PULL_TEST_PKT_DATA_SIZE];
678 size_t len;
679 int i, ret;
680
681 for (i = 0; i < PULL_TEST_PKT_DATA_SIZE; ++i) {
682 pkt_data[i] = i & 0xff;
683 }
684
685 dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
686 PULL_TEST_PKT_DATA_SIZE,
687 AF_UNSPEC,
688 0,
689 K_NO_WAIT);
690 zassert_true(dummy_pkt != NULL, "Pkt not allocated");
691
692 zassert_true(net_pkt_write(dummy_pkt,
693 pkt_data,
694 PULL_TEST_PKT_DATA_SIZE) == 0,
695 "Write packet failed");
696
697 net_pkt_cursor_init(dummy_pkt);
698 net_pkt_pull(dummy_pkt, PULL_AMOUNT);
699 zassert_equal(net_pkt_get_len(dummy_pkt),
700 PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT,
701 "Pull failed to set new size");
702 zassert_true(net_pkt_read(dummy_pkt,
703 pkt_data_readback,
704 PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT) == 0,
705 "Read packet failed");
706 zassert_mem_equal(pkt_data_readback,
707 &pkt_data[PULL_AMOUNT],
708 PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT,
709 "Packet data changed");
710
711 net_pkt_cursor_init(dummy_pkt);
712 net_pkt_pull(dummy_pkt, LARGE_PULL_AMOUNT);
713 zassert_equal(net_pkt_get_len(dummy_pkt),
714 PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT -
715 LARGE_PULL_AMOUNT,
716 "Large pull failed to set new size (%d vs %d)",
717 net_pkt_get_len(dummy_pkt),
718 PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT -
719 LARGE_PULL_AMOUNT);
720
721 net_pkt_cursor_init(dummy_pkt);
722 net_pkt_pull(dummy_pkt, net_pkt_get_len(dummy_pkt));
723 zassert_equal(net_pkt_get_len(dummy_pkt), 0,
724 "Full pull failed to set new size (%d)",
725 net_pkt_get_len(dummy_pkt));
726
727 net_pkt_cursor_init(dummy_pkt);
728 ret = net_pkt_pull(dummy_pkt, 1);
729 zassert_equal(ret, -ENOBUFS, "Did not return error");
730 zassert_equal(net_pkt_get_len(dummy_pkt), 0,
731 "Empty pull set new size (%d)",
732 net_pkt_get_len(dummy_pkt));
733
734 net_pkt_unref(dummy_pkt);
735
736 dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
737 PULL_TEST_PKT_DATA_SIZE,
738 AF_UNSPEC,
739 0,
740 K_NO_WAIT);
741 zassert_true(dummy_pkt != NULL, "Pkt not allocated");
742
743 zassert_true(net_pkt_write(dummy_pkt,
744 pkt_data,
745 PULL_TEST_PKT_DATA_SIZE) == 0,
746 "Write packet failed");
747
748 net_pkt_cursor_init(dummy_pkt);
749 ret = net_pkt_pull(dummy_pkt, net_pkt_get_len(dummy_pkt) + 1);
750 zassert_equal(ret, -ENOBUFS, "Did not return error");
751 zassert_equal(net_pkt_get_len(dummy_pkt), 0,
752 "Not empty after full pull (%d)",
753 net_pkt_get_len(dummy_pkt));
754
755 net_pkt_unref(dummy_pkt);
756
757 dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
758 PULL_TEST_PKT_DATA_SIZE,
759 AF_UNSPEC,
760 0,
761 K_NO_WAIT);
762 zassert_true(dummy_pkt != NULL, "Pkt not allocated");
763
764 zassert_true(net_pkt_write(dummy_pkt,
765 pkt_data,
766 PULL_TEST_PKT_DATA_SIZE) == 0,
767 "Write packet failed");
768
769 net_pkt_cursor_init(dummy_pkt);
770 len = net_pkt_get_len(dummy_pkt);
771
772 for (i = 0; i < len; i++) {
773 ret = net_pkt_pull(dummy_pkt, 1);
774 zassert_equal(ret, 0, "Did return error");
775 }
776
777 ret = net_pkt_pull(dummy_pkt, 1);
778 zassert_equal(ret, -ENOBUFS, "Did not return error");
779
780 zassert_equal(dummy_pkt->buffer, NULL, "buffer list not empty");
781
782 net_pkt_unref(dummy_pkt);
783 }
784
ZTEST(net_pkt_test_suite,test_net_pkt_clone)785 ZTEST(net_pkt_test_suite, test_net_pkt_clone)
786 {
787 uint8_t buf[26] = {"abcdefghijklmnopqrstuvwxyz"};
788 struct net_pkt *pkt;
789 struct net_pkt *cloned_pkt;
790 int ret;
791
792 pkt = net_pkt_alloc_with_buffer(eth_if, 64,
793 AF_UNSPEC, 0, K_NO_WAIT);
794 zassert_true(pkt != NULL, "Pkt not allocated");
795
796 ret = net_pkt_write(pkt, buf, sizeof(buf));
797 zassert_true(ret == 0, "Pkt write failed");
798
799 zassert_true(net_pkt_get_len(pkt) == sizeof(buf),
800 "Pkt length mismatch");
801
802 net_pkt_cursor_init(pkt);
803 net_pkt_set_overwrite(pkt, true);
804 net_pkt_skip(pkt, 6);
805 zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(pkt),
806 "Pkt remaining data mismatch");
807
808 memcpy(net_pkt_lladdr_src(pkt)->addr,
809 pkt->buffer->data,
810 NET_LINK_ADDR_MAX_LENGTH);
811 net_pkt_lladdr_src(pkt)->len = NET_LINK_ADDR_MAX_LENGTH;
812 net_pkt_lladdr_src(pkt)->type = NET_LINK_ETHERNET;
813 zassert_mem_equal(net_pkt_lladdr_src(pkt)->addr, buf, NET_LINK_ADDR_MAX_LENGTH);
814 memcpy(net_pkt_lladdr_dst(pkt)->addr,
815 net_pkt_cursor_get_pos(pkt),
816 NET_LINK_ADDR_MAX_LENGTH);
817 net_pkt_lladdr_dst(pkt)->len = NET_LINK_ADDR_MAX_LENGTH;
818 net_pkt_lladdr_dst(pkt)->type = NET_LINK_ETHERNET;
819 zassert_mem_equal(net_pkt_lladdr_dst(pkt)->addr, &buf[6], NET_LINK_ADDR_MAX_LENGTH);
820
821 net_pkt_set_family(pkt, AF_INET6);
822 net_pkt_set_captured(pkt, true);
823 net_pkt_set_eof(pkt, true);
824 net_pkt_set_ptp(pkt, true);
825 net_pkt_set_tx_timestamping(pkt, true);
826 net_pkt_set_rx_timestamping(pkt, true);
827 net_pkt_set_forwarding(pkt, true);
828
829 net_pkt_set_l2_bridged(pkt, true);
830 net_pkt_set_l2_processed(pkt, true);
831 net_pkt_set_ll_proto_type(pkt, ETH_P_IEEE802154);
832
833 net_pkt_set_overwrite(pkt, false);
834 cloned_pkt = net_pkt_clone(pkt, K_NO_WAIT);
835 zassert_true(cloned_pkt != NULL, "Pkt not cloned");
836
837 zassert_true(net_pkt_get_len(cloned_pkt) == sizeof(buf),
838 "Cloned pkt length mismatch");
839
840 zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(pkt),
841 "Pkt remaining data mismatch");
842
843 zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(cloned_pkt),
844 "Cloned pkt remaining data mismatch");
845
846 zassert_false(net_pkt_is_being_overwritten(cloned_pkt),
847 "Cloned pkt overwrite flag not restored");
848
849 zassert_false(net_pkt_is_being_overwritten(pkt),
850 "Pkt overwrite flag not restored");
851
852 zassert_equal(net_pkt_family(cloned_pkt), AF_INET6,
853 "Address family value mismatch");
854
855 zassert_true(net_pkt_is_captured(cloned_pkt),
856 "Cloned pkt captured flag mismatch");
857
858 zassert_true(net_pkt_eof(cloned_pkt),
859 "Cloned pkt eof flag mismatch");
860
861 zassert_true(net_pkt_is_ptp(cloned_pkt),
862 "Cloned pkt ptp_pkt flag mismatch");
863
864 #if CONFIG_NET_PKT_TIMESTAMP
865 zassert_true(net_pkt_is_tx_timestamping(cloned_pkt),
866 "Cloned pkt tx_timestamping flag mismatch");
867
868 zassert_true(net_pkt_is_rx_timestamping(cloned_pkt),
869 "Cloned pkt rx_timestamping flag mismatch");
870 #endif
871
872 zassert_true(net_pkt_forwarding(cloned_pkt),
873 "Cloned pkt forwarding flag mismatch");
874
875 zassert_true(net_pkt_is_l2_bridged(cloned_pkt),
876 "Cloned pkt l2_bridged flag mismatch");
877
878 zassert_true(net_pkt_is_l2_processed(cloned_pkt),
879 "Cloned pkt l2_processed flag mismatch");
880
881 zassert_mem_equal(net_pkt_lladdr_src(cloned_pkt)->addr, buf, NET_LINK_ADDR_MAX_LENGTH);
882 zassert_true(memcmp(net_pkt_lladdr_src(cloned_pkt)->addr,
883 cloned_pkt->buffer->data,
884 NET_LINK_ADDR_MAX_LENGTH) == 0,
885 "Cloned pkt ll src addr mismatch");
886
887 zassert_mem_equal(net_pkt_lladdr_dst(cloned_pkt)->addr, &buf[6], NET_LINK_ADDR_MAX_LENGTH);
888 zassert_true(memcmp(net_pkt_lladdr_dst(cloned_pkt)->addr,
889 net_pkt_cursor_get_pos(cloned_pkt),
890 NET_LINK_ADDR_MAX_LENGTH) == 0,
891 "Cloned pkt ll dst addr mismatch");
892
893 zassert_equal(net_pkt_ll_proto_type(cloned_pkt), ETH_P_IEEE802154,
894 "Address ll_proto_type value mismatch");
895
896 net_pkt_unref(pkt);
897 net_pkt_unref(cloned_pkt);
898 }
899
900 NET_BUF_POOL_FIXED_DEFINE(test_net_pkt_headroom_pool, 4, 2, 4, NULL);
901
ZTEST(net_pkt_test_suite,test_net_pkt_headroom)902 ZTEST(net_pkt_test_suite, test_net_pkt_headroom)
903 {
904 struct net_pkt *pkt;
905 struct net_buf *frag1;
906 struct net_buf *frag2;
907 struct net_buf *frag3;
908 struct net_buf *frag4;
909
910 /*
911 * Create a net_pkt; append net_bufs with reserved bytes (headroom).
912 *
913 * Layout to be crafted before writing to the net_buf: "HA|HH|HA|AA"
914 * H: Headroom
915 * |: net_buf/fragment delimiter
916 * A: available byte
917 */
918 pkt = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
919 zassert_true(pkt != NULL, "Pkt not allocated");
920
921 /* 1st fragment has 1 byte headroom and one byte available: "HA" */
922 frag1 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
923 net_buf_reserve(frag1, 1);
924 net_pkt_append_buffer(pkt, frag1);
925 zassert_equal(net_pkt_available_buffer(pkt), 1, "Wrong space left");
926 zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");
927
928 /* 2nd fragment affecting neither size nor length: "HH" */
929 frag2 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
930 net_buf_reserve(frag2, 2);
931 net_pkt_append_buffer(pkt, frag2);
932 zassert_equal(net_pkt_available_buffer(pkt), 1, "Wrong space left");
933 zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");
934
935 /* 3rd fragment has 1 byte headroom and one byte available: "HA" */
936 frag3 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
937 net_buf_reserve(frag3, 1);
938 net_pkt_append_buffer(pkt, frag3);
939 zassert_equal(net_pkt_available_buffer(pkt), 2, "Wrong space left");
940 zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");
941
942 /* 4th fragment has no headroom and two available bytes: "AA" */
943 frag4 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
944 net_pkt_append_buffer(pkt, frag4);
945 zassert_equal(net_pkt_available_buffer(pkt), 4, "Wrong space left");
946 zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");
947
948 /* Writing net_pkt via cursor, spanning all 4 fragments */
949 net_pkt_cursor_init(pkt);
950 zassert_true(net_pkt_write(pkt, "1234", 4) == 0, "Pkt write failed");
951
952 /* Expected layout across all four fragments: "H1|HH|H2|34" */
953 zassert_equal(frag1->size, 2, "Size mismatch");
954 zassert_equal(frag1->len, 1, "Length mismatch");
955 zassert_equal(frag2->size, 2, "Size mismatch");
956 zassert_equal(frag2->len, 0, "Length mismatch");
957 zassert_equal(frag3->size, 2, "Size mismatch");
958 zassert_equal(frag3->len, 1, "Length mismatch");
959 zassert_equal(frag4->size, 2, "Size mismatch");
960 zassert_equal(frag4->len, 2, "Length mismatch");
961 net_pkt_cursor_init(pkt);
962 zassert_true(net_pkt_read(pkt, small_buffer, 4) == 0, "Read failed");
963 zassert_mem_equal(small_buffer, "1234", 4, "Data mismatch");
964
965 /* Making use of the headrooms */
966 net_buf_push_u8(frag3, 'D');
967 net_buf_push_u8(frag2, 'C');
968 net_buf_push_u8(frag2, 'B');
969 net_buf_push_u8(frag1, 'A');
970 net_pkt_cursor_init(pkt);
971 zassert_true(net_pkt_read(pkt, small_buffer, 8) == 0, "Read failed");
972 zassert_mem_equal(small_buffer, "A1BCD234", 8, "Data mismatch");
973
974 net_pkt_unref(pkt);
975 }
976
/* Variable-size buffer pool backing the headroom-copy test below:
 * up to 2 buffers carved out of a shared 128-byte data area.
 */
NET_BUF_POOL_VAR_DEFINE(test_net_pkt_headroom_copy_pool, 2, 128, 4, NULL);
978
ZTEST(net_pkt_test_suite,test_net_pkt_headroom_copy)979 ZTEST(net_pkt_test_suite, test_net_pkt_headroom_copy)
980 {
981 struct net_pkt *pkt_src;
982 struct net_pkt *pkt_dst;
983 struct net_buf *frag1_dst;
984 struct net_buf *frag2_dst;
985 int res;
986
987 /* Create et_pkt containing the bytes "0123" */
988 pkt_src = net_pkt_alloc_with_buffer(eth_if, 4,
989 AF_UNSPEC, 0, K_NO_WAIT);
990 zassert_true(pkt_src != NULL, "Pkt not allocated");
991 res = net_pkt_write(pkt_src, "0123", 4);
992 zassert_equal(res, 0, "Pkt write failed");
993
994 /* Create net_pkt consisting of net_buf fragments with reserved bytes */
995 pkt_dst = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
996 zassert_true(pkt_src != NULL, "Pkt not allocated");
997
998 frag1_dst = net_buf_alloc_len(&test_net_pkt_headroom_copy_pool, 2,
999 K_NO_WAIT);
1000 net_buf_reserve(frag1_dst, 1);
1001 net_pkt_append_buffer(pkt_dst, frag1_dst);
1002 frag2_dst = net_buf_alloc_len(&test_net_pkt_headroom_copy_pool, 4,
1003 K_NO_WAIT);
1004 net_buf_reserve(frag2_dst, 1);
1005 net_pkt_append_buffer(pkt_dst, frag2_dst);
1006 zassert_equal(net_pkt_available_buffer(pkt_dst), 4, "Wrong space left");
1007 zassert_equal(net_pkt_get_len(pkt_dst), 0, "Length missmatch");
1008
1009 /* Copy to net_pkt which contains fragments with reserved bytes */
1010 net_pkt_cursor_init(pkt_src);
1011 net_pkt_cursor_init(pkt_dst);
1012 res = net_pkt_copy(pkt_dst, pkt_src, 4);
1013 zassert_equal(res, 0, "Pkt copy failed");
1014 zassert_equal(net_pkt_available_buffer(pkt_dst), 0, "Wrong space left");
1015 zassert_equal(net_pkt_get_len(pkt_dst), 4, "Length missmatch");
1016
1017 net_pkt_cursor_init(pkt_dst);
1018 zassert_true(net_pkt_read(pkt_dst, small_buffer, 4) == 0,
1019 "Pkt read failed");
1020 zassert_mem_equal(small_buffer, "0123", 4, "Data mismatch");
1021
1022 net_pkt_unref(pkt_dst);
1023 net_pkt_unref(pkt_src);
1024 }
1025
ZTEST(net_pkt_test_suite,test_net_pkt_get_contiguous_len)1026 ZTEST(net_pkt_test_suite, test_net_pkt_get_contiguous_len)
1027 {
1028 size_t cont_len;
1029 int res;
1030 /* Allocate pkt with 2 fragments */
1031 struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer(
1032 NULL, CONFIG_NET_BUF_DATA_SIZE * 2,
1033 AF_UNSPEC, 0, K_NO_WAIT);
1034
1035 zassert_not_null(pkt, "Pkt not allocated");
1036
1037 net_pkt_cursor_init(pkt);
1038
1039 cont_len = net_pkt_get_contiguous_len(pkt);
1040 zassert_equal(CONFIG_NET_BUF_DATA_SIZE, cont_len,
1041 "Expected one complete available net_buf");
1042
1043 net_pkt_set_overwrite(pkt, false);
1044
1045 /* now write 3 byte into the pkt */
1046 for (int i = 0; i < 3; ++i) {
1047 res = net_pkt_write_u8(pkt, 0xAA);
1048 zassert_equal(0, res, "Write packet failed");
1049 }
1050
1051 cont_len = net_pkt_get_contiguous_len(pkt);
1052 zassert_equal(CONFIG_NET_BUF_DATA_SIZE - 3, cont_len,
1053 "Expected a three byte reduction");
1054
1055 /* Fill the first fragment up until only 3 bytes are free */
1056 for (int i = 0; i < CONFIG_NET_BUF_DATA_SIZE - 6; ++i) {
1057 res = net_pkt_write_u8(pkt, 0xAA);
1058 zassert_equal(0, res, "Write packet failed");
1059 }
1060
1061 cont_len = net_pkt_get_contiguous_len(pkt);
1062 zassert_equal(3, cont_len, "Expected only three bytes are available");
1063
1064 /* Fill the complete first fragment, so the cursor points to the second
1065 * fragment.
1066 */
1067 for (int i = 0; i < 3; ++i) {
1068 res = net_pkt_write_u8(pkt, 0xAA);
1069 zassert_equal(0, res, "Write packet failed");
1070 }
1071
1072 cont_len = net_pkt_get_contiguous_len(pkt);
1073 zassert_equal(CONFIG_NET_BUF_DATA_SIZE, cont_len,
1074 "Expected next full net_buf is available");
1075
1076 /* Fill the last fragment */
1077 for (int i = 0; i < CONFIG_NET_BUF_DATA_SIZE; ++i) {
1078 res = net_pkt_write_u8(pkt, 0xAA);
1079 zassert_equal(0, res, "Write packet failed");
1080 }
1081
1082 cont_len = net_pkt_get_contiguous_len(pkt);
1083 zassert_equal(0, cont_len, "Expected no available space");
1084
1085 net_pkt_unref(pkt);
1086 }
1087
ZTEST(net_pkt_test_suite,test_net_pkt_remove_tail)1088 ZTEST(net_pkt_test_suite, test_net_pkt_remove_tail)
1089 {
1090 struct net_pkt *pkt;
1091 int err;
1092
1093 pkt = net_pkt_alloc_with_buffer(NULL,
1094 CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1095 AF_UNSPEC, 0, K_NO_WAIT);
1096 zassert_true(pkt != NULL, "Pkt not allocated");
1097
1098 net_pkt_cursor_init(pkt);
1099 net_pkt_write(pkt, small_buffer, CONFIG_NET_BUF_DATA_SIZE * 2 + 3);
1100
1101 zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1102 "Pkt length is invalid");
1103 zassert_equal(pkt->frags->frags->frags->len, 3,
1104 "3rd buffer length is invalid");
1105
1106 /* Remove some bytes from last buffer */
1107 err = net_pkt_remove_tail(pkt, 2);
1108 zassert_equal(err, 0, "Failed to remove tail");
1109
1110 zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 1,
1111 "Pkt length is invalid");
1112 zassert_not_equal(pkt->frags->frags->frags, NULL,
1113 "3rd buffer was removed");
1114 zassert_equal(pkt->frags->frags->frags->len, 1,
1115 "3rd buffer length is invalid");
1116
1117 /* Remove last byte from last buffer */
1118 err = net_pkt_remove_tail(pkt, 1);
1119 zassert_equal(err, 0, "Failed to remove tail");
1120
1121 zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2,
1122 "Pkt length is invalid");
1123 zassert_equal(pkt->frags->frags->frags, NULL,
1124 "3rd buffer was not removed");
1125 zassert_equal(pkt->frags->frags->len, CONFIG_NET_BUF_DATA_SIZE,
1126 "2nd buffer length is invalid");
1127
1128 /* Remove 2nd buffer and one byte from 1st buffer */
1129 err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE + 1);
1130 zassert_equal(err, 0, "Failed to remove tail");
1131
1132 zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE - 1,
1133 "Pkt length is invalid");
1134 zassert_equal(pkt->frags->frags, NULL,
1135 "2nd buffer was not removed");
1136 zassert_equal(pkt->frags->len, CONFIG_NET_BUF_DATA_SIZE - 1,
1137 "1st buffer length is invalid");
1138
1139 net_pkt_unref(pkt);
1140
1141 pkt = net_pkt_rx_alloc_with_buffer(NULL,
1142 CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1143 AF_UNSPEC, 0, K_NO_WAIT);
1144
1145 net_pkt_cursor_init(pkt);
1146 net_pkt_write(pkt, small_buffer, CONFIG_NET_BUF_DATA_SIZE * 2 + 3);
1147
1148 zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
1149 "Pkt length is invalid");
1150 zassert_equal(pkt->frags->frags->frags->len, 3,
1151 "3rd buffer length is invalid");
1152
1153 /* Remove bytes spanning 3 buffers */
1154 err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE + 5);
1155 zassert_equal(err, 0, "Failed to remove tail");
1156
1157 zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE - 2,
1158 "Pkt length is invalid");
1159 zassert_equal(pkt->frags->frags, NULL,
1160 "2nd buffer was not removed");
1161 zassert_equal(pkt->frags->len, CONFIG_NET_BUF_DATA_SIZE - 2,
1162 "1st buffer length is invalid");
1163
1164 /* Try to remove more bytes than packet has */
1165 err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE);
1166 zassert_equal(err, -EINVAL,
1167 "Removing more bytes than available should fail");
1168
1169 net_pkt_unref(pkt);
1170 }
1171
ZTEST(net_pkt_test_suite,test_net_pkt_shallow_clone_noleak_buf)1172 ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_noleak_buf)
1173 {
1174 const int bufs_to_allocate = 3;
1175 const size_t pkt_size = CONFIG_NET_BUF_DATA_SIZE * bufs_to_allocate;
1176 struct net_pkt *pkt, *shallow_pkt;
1177 struct net_buf_pool *tx_data;
1178
1179 pkt = net_pkt_alloc_with_buffer(NULL, pkt_size,
1180 AF_UNSPEC, 0, K_NO_WAIT);
1181
1182 zassert_true(pkt != NULL, "Pkt not allocated");
1183
1184 net_pkt_get_info(NULL, NULL, NULL, &tx_data);
1185 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count - bufs_to_allocate,
1186 "Incorrect net buf allocation");
1187
1188 shallow_pkt = net_pkt_shallow_clone(pkt, K_NO_WAIT);
1189 zassert_true(shallow_pkt != NULL, "Pkt not allocated");
1190 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count - bufs_to_allocate,
1191 "Incorrect available net buf count");
1192
1193 net_pkt_unref(pkt);
1194 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count - bufs_to_allocate,
1195 "Incorrect available net buf count");
1196
1197 net_pkt_unref(shallow_pkt);
1198 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count,
1199 "Leak detected");
1200
1201 }
1202
test_net_pkt_shallow_clone_append_buf(int extra_frag_refcounts)1203 void test_net_pkt_shallow_clone_append_buf(int extra_frag_refcounts)
1204 {
1205 const int bufs_to_allocate = 3;
1206 const int bufs_frag = 2;
1207
1208 zassert_true(bufs_frag + bufs_to_allocate < CONFIG_NET_BUF_DATA_SIZE,
1209 "Total bufs to allocate must less than available space");
1210
1211 const size_t pkt_size = CONFIG_NET_BUF_DATA_SIZE * bufs_to_allocate;
1212
1213 struct net_pkt *pkt, *shallow_pkt;
1214 struct net_buf *frag_head;
1215 struct net_buf *frag;
1216 struct net_buf_pool *tx_data;
1217
1218 pkt = net_pkt_alloc_with_buffer(NULL, pkt_size, AF_UNSPEC, 0, K_NO_WAIT);
1219 zassert_true(pkt != NULL, "Pkt not allocated");
1220
1221 net_pkt_get_info(NULL, NULL, NULL, &tx_data);
1222 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count
1223 - bufs_to_allocate, "Incorrect net buf allocation");
1224
1225 shallow_pkt = net_pkt_shallow_clone(pkt, K_NO_WAIT);
1226 zassert_true(shallow_pkt != NULL, "Pkt not allocated");
1227
1228 /* allocate buffers for the frag */
1229 for (int i = 0; i < bufs_frag; i++) {
1230 frag = net_buf_alloc_len(tx_data, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
1231 zassert_true(frag != NULL, "Frag not allocated");
1232 net_pkt_append_buffer(pkt, frag);
1233 if (i == 0) {
1234 frag_head = frag;
1235 }
1236 }
1237
1238 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count
1239 - bufs_to_allocate - bufs_frag, "Incorrect net buf allocation");
1240
1241 /* Note: if the frag is appended to a net buf, then the nut buf */
1242 /* takes ownership of one ref count. Otherwise net_buf_unref() must */
1243 /* be called on the frag to free the buffers. */
1244
1245 for (int i = 0; i < extra_frag_refcounts; i++) {
1246 frag_head = net_buf_ref(frag_head);
1247 }
1248
1249 net_pkt_unref(pkt);
1250
1251 /* we shouldn't have freed any buffers yet */
1252 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count
1253 - bufs_to_allocate - bufs_frag,
1254 "Incorrect net buf allocation");
1255
1256 net_pkt_unref(shallow_pkt);
1257
1258 if (extra_frag_refcounts == 0) {
1259 /* if no extra ref counts to frag were added then we should free */
1260 /* all the buffers at this point */
1261 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count,
1262 "Leak detected");
1263 } else {
1264 /* otherwise only bufs_frag should be available, and frag could */
1265 /* still used at this point */
1266 zassert_equal(atomic_get(&tx_data->avail_count),
1267 tx_data->buf_count - bufs_frag, "Leak detected");
1268 }
1269
1270 for (int i = 0; i < extra_frag_refcounts; i++) {
1271 net_buf_unref(frag_head);
1272 }
1273
1274 /* all the buffers should be freed now */
1275 zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count,
1276 "Leak detected");
1277 }
1278
/* Shallow clone scenario: no extra references held on the appended frags */
ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_append_buf_0)
{
	test_net_pkt_shallow_clone_append_buf(0);
}
1283
/* Shallow clone scenario: one extra reference held on the appended frags */
ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_append_buf_1)
{
	test_net_pkt_shallow_clone_append_buf(1);
}
1288
/* Shallow clone scenario: two extra references held on the appended frags */
ZTEST(net_pkt_test_suite, test_net_pkt_shallow_clone_append_buf_2)
{
	test_net_pkt_shallow_clone_append_buf(2);
}
1293
/* Register the suite; no predicate or per-suite/per-test hooks are used */
ZTEST_SUITE(net_pkt_test_suite, NULL, NULL, NULL, NULL, NULL);
1295