/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/types.h>
#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <net/net_pkt.h>
#include <net/net_if.h>
#include <net/net_ip.h>
#include <net/ethernet.h>
#include <random/rand32.h>

#include <ztest.h>

static uint8_t mac_addr[sizeof(struct net_eth_addr)];
static struct net_if *eth_if;
static uint8_t small_buffer[512];

/************************\
 * FAKE ETHERNET DEVICE *
\************************/

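/* The fake device below only exists so that the tests have a network
 * interface (and thus an MTU) to allocate packets against: its send()
 * callback accepts and drops every packet. Without CONFIG_NET_L2_ETHERNET
 * it falls back to the dummy L2, which has no L2 header.
 */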
static void fake_dev_iface_init(struct net_if *iface)
{
	if (mac_addr[2] == 0U) {
		/* 00-00-5E-00-53-xx Documentation RFC 7042 */
		mac_addr[0] = 0x00;
		mac_addr[1] = 0x00;
		mac_addr[2] = 0x5E;
		mac_addr[3] = 0x00;
		mac_addr[4] = 0x53;
		mac_addr[5] = sys_rand32_get();
	}

	net_if_set_link_addr(iface, mac_addr, 6, NET_LINK_ETHERNET);

	eth_if = iface;
}

static int fake_dev_send(const struct device *dev, struct net_pkt *pkt)
{
	return 0;
}

int fake_dev_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}

#if defined(CONFIG_NET_L2_ETHERNET)
static const struct ethernet_api fake_dev_api = {
	.iface_api.init = fake_dev_iface_init,
	.send = fake_dev_send,
};

#define _ETH_L2_LAYER ETHERNET_L2
#define _ETH_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(ETHERNET_L2)
#define L2_HDR_SIZE sizeof(struct net_eth_hdr)
#else
static const struct dummy_api fake_dev_api = {
	.iface_api.init = fake_dev_iface_init,
	.send = fake_dev_send,
};

#define _ETH_L2_LAYER DUMMY_L2
#define _ETH_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(DUMMY_L2)
#define L2_HDR_SIZE 0
#endif

NET_DEVICE_INIT(fake_dev, "fake_dev",
		fake_dev_init, NULL, NULL, NULL,
		CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,
		&fake_dev_api, _ETH_L2_LAYER, _ETH_L2_CTX_TYPE,
		NET_ETH_MTU);

/*********************\
 * UTILITY FUNCTIONS *
\*********************/

static bool pkt_is_of_size(struct net_pkt *pkt, size_t size)
{
	return (net_pkt_available_buffer(pkt) == size);
}

static void pkt_print_cursor(struct net_pkt *pkt)
{
	if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
		printk("Unknown position\n");
	} else {
		printk("Position %zu (%p) in net_buf %p (data %p)\n",
		       pkt->cursor.pos - pkt->cursor.buf->data,
		       pkt->cursor.pos, pkt->cursor.buf,
		       pkt->cursor.buf->data);
	}
}


/*****************************\
 * HOW TO ALLOCATE - 2 TESTS *
\*****************************/

static void test_net_pkt_allocate_wo_buffer(void)
{
	struct net_pkt *pkt;

	/* How to allocate a packet, with no buffer */
	pkt = net_pkt_alloc(K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/* Note that, if you already know the iface to which the packet
	 * belongs, you can use net_pkt_alloc_on_iface().
	 */
	pkt = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}

static void test_net_pkt_allocate_with_buffer(void)
{
	struct net_pkt *pkt;

	/* How to allocate a packet, with buffer:
	 * a) - with a size that fits the MTU, let's say 512 bytes
	 * Note: we don't care about the family/protocol for now
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Did we get the requested size? */
	zassert_true(pkt_is_of_size(pkt, 512), "Pkt size is not right");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/*
	 * b) - with a size that will not fit the MTU, let's say 1800 bytes
	 * Note: again we don't care about family/protocol for now.
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 1800,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	zassert_false(pkt_is_of_size(pkt, 1800), "Pkt size is not right");
	zassert_true(pkt_is_of_size(pkt, net_if_get_mtu(eth_if) + L2_HDR_SIZE),
		     "Pkt size is not right");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/*
	 * c) - Now with 512 bytes but on IPv4/UDP
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 512, AF_INET,
					IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Because 512 + NET_IPV4UDPH_LEN fits the MTU, the available buffer
	 * must be exactly that size
	 */
	zassert_true(pkt_is_of_size(pkt, 512 + NET_IPV4UDPH_LEN),
		     "Pkt overall size does not match");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/*
	 * d) - Now with 1800 bytes but on IPv4/UDP
	 */
	pkt = net_pkt_alloc_with_buffer(eth_if, 1800, AF_INET,
					IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Because 1800 + NET_IPV4UDPH_LEN won't fit the MTU, the payload
	 * size should be the MTU
	 */
	zassert_true(net_pkt_available_buffer(pkt) ==
		     net_if_get_mtu(eth_if),
		     "Payload buf size does not match for ipv4/udp");

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}

/********************************\
 * HOW TO R/W A PACKET - TESTS  *
\********************************/

static void test_net_pkt_basics_of_rw(void)
{
	struct net_pkt_cursor backup;
	struct net_pkt *pkt;
	uint16_t value16;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* Once newly allocated with buffer,
	 * a packet has no data accounted for in its buffer
	 */
	zassert_true(net_pkt_get_len(pkt) == 0,
		     "Pkt initial length should be 0");

	/* This works thanks to net_buf, which distinguishes the size of
	 * a buffer from the length of the data in it.
	 */

	/* Let's write 1 byte, then 2 bytes and finally 4 bytes.
	 * We write values made of 0s.
	 */
	ret = net_pkt_write_u8(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 1 now */
	zassert_true(net_pkt_get_len(pkt) == 1, "Pkt length mismatch");

	ret = net_pkt_write_be16(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 3 now */
	zassert_true(net_pkt_get_len(pkt) == 3, "Pkt length mismatch");

	/* Verify that the data is properly written to net_buf */
	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);
	net_pkt_skip(pkt, 1);
	net_pkt_read_be16(pkt, &value16);
	zassert_equal(value16, 0, "Invalid value %d read, expected %d",
		      value16, 0);

	/* Then write a new value, overwriting the old one */
	net_pkt_cursor_init(pkt);
	net_pkt_skip(pkt, 1);
	ret = net_pkt_write_be16(pkt, 42);
	zassert_true(ret == 0, "Pkt write failed");

	/* And re-read the value again */
	net_pkt_cursor_init(pkt);
	net_pkt_skip(pkt, 1);
	ret = net_pkt_read_be16(pkt, &value16);
	zassert_true(ret == 0, "Pkt read failed");
	zassert_equal(value16, 42, "Invalid value %d read, expected %d",
		      value16, 42);

	net_pkt_set_overwrite(pkt, false);
	net_pkt_cursor_restore(pkt, &backup);

	ret = net_pkt_write_be32(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 7 now */
	zassert_true(net_pkt_get_len(pkt) == 7, "Pkt length mismatch");

	/* All these writing functions use net_pkt_write(), which works
	 * this way:
	 */
	ret = net_pkt_write(pkt, small_buffer, 9);
	zassert_true(ret == 0, "Pkt write failed");

	/* Length should be 16 now */
	zassert_true(net_pkt_get_len(pkt) == 16, "Pkt length mismatch");

	/* Now let's say you want to memset some data */
	ret = net_pkt_memset(pkt, 0, 4);
	zassert_true(ret == 0, "Pkt memset failed");

	/* Length should be 20 now */
	zassert_true(net_pkt_get_len(pkt) == 20, "Pkt length mismatch");

	/* So memset affects the length exactly as write does */

	/* Sometimes you might want to advance in the buffer without caring
	 * about what is written there, since you will come back to it later.
	 * net_pkt_skip() is there for that.
	 * Note: you will usually not need this function much yourself.
	 */
	ret = net_pkt_skip(pkt, 20);
	zassert_true(ret == 0, "Pkt skip failed");

	/* Length should be 40 now */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* Again, skip affects the length, just like a write.
	 * But wait a minute: how do we get back, then, in order to write at
	 * the position we just skipped over?
	 *
	 * Enter the buffer cursor. Each net_pkt embeds such a cursor: like
	 * the head of a tape recorder/reader, it holds the current position
	 * in the buffer where you can r/w. All operations below use and
	 * update it.
	 * There is, however, a catch: the buffer is described through
	 * net_buf structures, which form a simple linked list.
	 * Unlike with a tape recorder/reader, you therefore cannot move
	 * backward, only restart from the beginning and move forward.
	 * That is why net_pkt_cursor_init(pkt) exists: it puts the cursor
	 * back at the start of the buffer. We could hold more information
	 * to avoid this, but that would mean growing each and every net_buf.
	 */
	net_pkt_cursor_init(pkt);

	/* But if I skip again to reach the position I skipped over earlier,
	 * won't that grow the length once more?
	 * The answer is yes. Fortunately, there is a way to avoid that:
	 * for data that already "exists" in the buffer (i.e. data accounted
	 * for in the length), you set the packet to overwrite mode. All
	 * subsequent operations then work on the existing data and do not
	 * affect the length (nothing new gets added).
	 */
	net_pkt_set_overwrite(pkt, true);

	zassert_true(net_pkt_is_being_overwritten(pkt),
		     "Pkt is not set to overwrite");

	/* Ok, so the previously skipped position was at offset 20 */
	ret = net_pkt_skip(pkt, 20);
	zassert_true(ret == 0, "Pkt skip failed");

	/* Length should _still_ be 40 */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* And you can write stuff */
	ret = net_pkt_write_le32(pkt, 0);
	zassert_true(ret == 0, "Pkt write failed");

	/* Again, length should _still_ be 40 */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* Let's memset the rest */
	ret = net_pkt_memset(pkt, 0, 16);
	zassert_true(ret == 0, "Pkt memset failed");

	/* Again, length should _still_ be 40 */
	zassert_true(net_pkt_get_len(pkt) == 40, "Pkt length mismatch");

	/* We are now back at the end of the existing data in the buffer.
	 * Since overwrite is still on, we should not be able to r/w
	 * anything more.
	 * This is the expected behavior: while set, overwrite allows r/w
	 * only on data that already exists in the buffer:
	 */
	ret = net_pkt_write_be32(pkt, 0);
	zassert_true(ret != 0, "Pkt write succeeded where it shouldn't have");

	/* Logically, in order to add new data to the buffer,
	 * overwrite should be disabled:
	 */
	net_pkt_set_overwrite(pkt, false);

	/* But this will still fail: */
	ret = net_pkt_write_le32(pkt, 0);
	zassert_true(ret != 0, "Pkt write succeeded?");

	/* Why is that?
	 * Because on a r/w error the cursor is invalidated.
	 * This is a design choice: a r/w error means your code messed up,
	 * e.g. it requested a smaller buffer than it actually needed, or it
	 * wrote more data than it should have.
	 * So you must drop your packet entirely.
	 */
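
	/* A minimal sketch of the resulting application-side pattern (not
	 * part of this test; the value and error code are illustrative):
	 *
	 *	if (net_pkt_write_be32(pkt, value) < 0) {
	 *		net_pkt_unref(pkt);
	 *		return -ENOBUFS;
	 *	}
	 */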

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}

void test_net_pkt_advanced_basics(void)
{
	struct net_pkt_cursor backup;
	struct net_pkt *pkt;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_INET, IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	pkt_print_cursor(pkt);

	/* As stated earlier, initializing the cursor is the way to go back
	 * to the start of the buffer (be it the header or the payload, then).
	 * We also saw that net_pkt_skip() can be used to move forward in the
	 * buffer.
	 * But what if you are far into the buffer, need to go backward, and
	 * then come back to your previous position?
	 * You could certainly do:
	 */
	ret = net_pkt_write(pkt, small_buffer, 20);
	zassert_true(ret == 0, "Pkt write failed");

	pkt_print_cursor(pkt);

	net_pkt_cursor_init(pkt);

	pkt_print_cursor(pkt);

	/* ... do something here ... */

	/* And finally go back with overwrite/skip: */
	net_pkt_set_overwrite(pkt, true);
	ret = net_pkt_skip(pkt, 20);
	zassert_true(ret == 0, "Pkt skip failed");
	net_pkt_set_overwrite(pkt, false);

	pkt_print_cursor(pkt);

	/* In this example, do not focus on the 20 bytes; they are just for
	 * the sake of the example.
	 * The other method is to backup and restore the packet cursor.
	 */
	net_pkt_cursor_backup(pkt, &backup);

	net_pkt_cursor_init(pkt);

	/* ... do something here ... */

	/* and restore: */
	net_pkt_cursor_restore(pkt, &backup);

	pkt_print_cursor(pkt);

	/* Another point is how you access your data. Basic r/w functions
	 * were presented earlier, but sometimes you may want to access your
	 * data directly through a structure or type.
	 * Because your buffer may be fragmented, you first need to know
	 * whether the data you are trying to access lies in a contiguous
	 * area.
	 * For this, you'll use:
	 */
	ret = (int) net_pkt_is_contiguous(pkt, 4);
	zassert_true(ret == 1, "Pkt contiguity check failed");

	/* If that's successful, you can get the actual position in the
	 * buffer and cast it to the type you want.
	 */
	{
		uint32_t *val = (uint32_t *)net_pkt_cursor_get_pos(pkt);

		*val = 0U;
		/* etc... */
	}

	/* However, since none of the usual r/w functions were used, the
	 * cursor must be advanced by hand with net_pkt_skip():
	 */
	net_pkt_skip(pkt, 4);

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");

	/* One will very rarely use these last two low-level functions
	 * directly:
	 * - net_pkt_is_contiguous()
	 * - net_pkt_cursor_get_pos()
	 *
	 * Let's see why next.
	 */
}

void test_net_pkt_easier_rw_usage(void)
{
	struct net_pkt *pkt;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 512,
					AF_INET, IPPROTO_UDP, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* In the net core, it all comes down to header manipulation in the
	 * end, be it an IP, UDP, ICMP or TCP header, etc.
	 * One would then prefer to access those directly via their
	 * descriptors (struct net_udp_hdr, struct net_icmp_hdr, ...)
	 * rather than building them byte by byte.
	 *
	 * As seen earlier, it is possible to cast the current position.
	 * However, since the buffer may be fragmented, the case where the
	 * data being accessed is scattered over more than one net_buf must
	 * also be handled.
	 *
	 * To avoid redoing the contiguity check, the cast, or a copy on
	 * failure by hand each time, a helper type named
	 * struct net_pkt_data_access exists. It handles both cases
	 * (contiguous data or not) without any runtime allocation
	 * (everything is on the stack).
	 */
	{
		NET_PKT_DATA_ACCESS_DEFINE(ip_access, struct net_ipv4_hdr);
		struct net_ipv4_hdr *ip_hdr;

		ip_hdr = (struct net_ipv4_hdr *)
					net_pkt_get_data(pkt, &ip_access);
		zassert_not_null(ip_hdr, "Accessor failed");

		ip_hdr->tos = 0x00;

		ret = net_pkt_set_data(pkt, &ip_access);
		zassert_true(ret == 0, "Accessor failed");

		zassert_true(net_pkt_get_len(pkt) == NET_IPV4H_LEN,
			     "Pkt length mismatch");
	}

	/* As you can see, get/set also take care of handling the cursor and
	 * of updating the packet length accordingly, which is why the packet
	 * length has grown.
	 */

	/* Freeing the packet */
	net_pkt_unref(pkt);
	zassert_true(atomic_get(&pkt->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}

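/* A hand-crafted fragment chain used by test_net_pkt_copy() below:
 * b1 ("ab" used out of 4 bytes) -> b2 (empty, 8 bytes free) ->
 * b3 (no data buffer at all) -> b4 ("mn" used out of 4 bytes) ->
 * b5 (data present but len/size set to 0).
 * The point is to exercise copying/cloning over partially filled,
 * empty and data-less fragments.
 */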
uint8_t b5_data[10] = "qrstuvwxyz";
struct net_buf b5 = {
	.ref = 1,
	.data = b5_data,
	.len = 0,
	.size = 0,
	.__buf = b5_data,
};

uint8_t b4_data[4] = "mnop";
struct net_buf b4 = {
	.frags = &b5,
	.ref = 1,
	.data = b4_data,
	.len = sizeof(b4_data) - 2,
	.size = sizeof(b4_data),
	.__buf = b4_data,
};

struct net_buf b3 = {
	.frags = &b4,
	.ref = 1,
	.data = NULL,
	.__buf = NULL,
};

uint8_t b2_data[8] = "efghijkl";
struct net_buf b2 = {
	.frags = &b3,
	.ref = 1,
	.data = b2_data,
	.len = 0,
	.size = sizeof(b2_data),
	.__buf = b2_data,
};

uint8_t b1_data[4] = "abcd";
struct net_buf b1 = {
	.frags = &b2,
	.ref = 1,
	.data = b1_data,
	.len = sizeof(b1_data) - 2,
	.size = sizeof(b1_data),
	.__buf = b1_data,
};

void test_net_pkt_copy(void)
{
	struct net_pkt *pkt_src;
	struct net_pkt *pkt_dst;

	pkt_src = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
	zassert_true(pkt_src != NULL, "Pkt not allocated");

	pkt_print_cursor(pkt_src);

	/* Let's append the buffers */
	net_pkt_append_buffer(pkt_src, &b1);

	net_pkt_set_overwrite(pkt_src, true);

	/* There should be some space left */
	zassert_true(net_pkt_available_buffer(pkt_src) != 0, "No space left?");
	/* Length should be 4 */
	zassert_true(net_pkt_get_len(pkt_src) == 4, "Wrong length");

	/* Actual space left is 12 (in b1, b2 and b4) */
	zassert_true(net_pkt_available_buffer(pkt_src) == 12,
		     "Wrong space left?");

	pkt_print_cursor(pkt_src);

	/* Now let's clone the pkt.
	 * This also exercises net_pkt_copy(), which cloning uses for the
	 * buffers.
	 */
	pkt_dst = net_pkt_clone(pkt_src, K_NO_WAIT);
	zassert_true(pkt_dst != NULL, "Pkt not cloned");

	/* Cloning does not take the free space into account,
	 * only the occupied space
	 */
	zassert_true(net_pkt_available_buffer(pkt_dst) == 0, "Space left");
	zassert_true(net_pkt_get_len(pkt_src) == net_pkt_get_len(pkt_dst),
		     "Not same amount?");

	/* It also did not copy the net_buf structures themselves, only their
	 * content, so, since the base buffer size is bigger than necessary,
	 * pkt_dst has only one net_buf
	 */
	zassert_true(pkt_dst->buffer->frags == NULL, "Not only one buffer?");

	/* Freeing the packets: detach the static buffers from pkt_src first */
	pkt_src->buffer = NULL;
	net_pkt_unref(pkt_src);
	zassert_true(atomic_get(&pkt_src->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
	net_pkt_unref(pkt_dst);
	zassert_true(atomic_get(&pkt_dst->atomic_ref) == 0,
		     "Pkt not properly unreferenced");
}

#define PULL_TEST_PKT_DATA_SIZE 600

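/* net_pkt_pull() removes data from the head of the packet, reducing its
 * overall length; fragments that end up empty are released, as the last
 * checks below rely on.
 */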
void test_net_pkt_pull(void)
{
	const int PULL_AMOUNT = 8;
	const int LARGE_PULL_AMOUNT = 200;
	struct net_pkt *dummy_pkt;
	static uint8_t pkt_data[PULL_TEST_PKT_DATA_SIZE];
	static uint8_t pkt_data_readback[PULL_TEST_PKT_DATA_SIZE];
	size_t len;
	int i, ret;

	for (i = 0; i < PULL_TEST_PKT_DATA_SIZE; ++i) {
		pkt_data[i] = i & 0xff;
	}

	dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
					      PULL_TEST_PKT_DATA_SIZE,
					      AF_UNSPEC,
					      0,
					      K_NO_WAIT);
	zassert_true(dummy_pkt != NULL, "Pkt not allocated");

	zassert_true(net_pkt_write(dummy_pkt,
				   pkt_data,
				   PULL_TEST_PKT_DATA_SIZE) == 0,
		     "Write packet failed");

	net_pkt_cursor_init(dummy_pkt);
	net_pkt_pull(dummy_pkt, PULL_AMOUNT);
	zassert_equal(net_pkt_get_len(dummy_pkt),
		      PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT,
		      "Pull failed to set new size");
	zassert_true(net_pkt_read(dummy_pkt,
				  pkt_data_readback,
				  PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT) == 0,
		     "Read packet failed");
	zassert_mem_equal(pkt_data_readback,
			  &pkt_data[PULL_AMOUNT],
			  PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT,
			  "Packet data changed");

	net_pkt_cursor_init(dummy_pkt);
	net_pkt_pull(dummy_pkt, LARGE_PULL_AMOUNT);
	zassert_equal(net_pkt_get_len(dummy_pkt),
		      PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT -
		      LARGE_PULL_AMOUNT,
		      "Large pull failed to set new size (%d vs %d)",
		      net_pkt_get_len(dummy_pkt),
		      PULL_TEST_PKT_DATA_SIZE - PULL_AMOUNT -
		      LARGE_PULL_AMOUNT);

	net_pkt_cursor_init(dummy_pkt);
	net_pkt_pull(dummy_pkt, net_pkt_get_len(dummy_pkt));
	zassert_equal(net_pkt_get_len(dummy_pkt), 0,
		      "Full pull failed to set new size (%d)",
		      net_pkt_get_len(dummy_pkt));

	net_pkt_cursor_init(dummy_pkt);
	ret = net_pkt_pull(dummy_pkt, 1);
	zassert_equal(ret, -ENOBUFS, "Did not return error");
	zassert_equal(net_pkt_get_len(dummy_pkt), 0,
		      "Empty pull set new size (%d)",
		      net_pkt_get_len(dummy_pkt));

	net_pkt_unref(dummy_pkt);

	dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
					      PULL_TEST_PKT_DATA_SIZE,
					      AF_UNSPEC,
					      0,
					      K_NO_WAIT);
	zassert_true(dummy_pkt != NULL, "Pkt not allocated");

	zassert_true(net_pkt_write(dummy_pkt,
				   pkt_data,
				   PULL_TEST_PKT_DATA_SIZE) == 0,
		     "Write packet failed");

	net_pkt_cursor_init(dummy_pkt);
	ret = net_pkt_pull(dummy_pkt, net_pkt_get_len(dummy_pkt) + 1);
	zassert_equal(ret, -ENOBUFS, "Did not return error");
	zassert_equal(net_pkt_get_len(dummy_pkt), 0,
		      "Not empty after full pull (%d)",
		      net_pkt_get_len(dummy_pkt));

	net_pkt_unref(dummy_pkt);

	dummy_pkt = net_pkt_alloc_with_buffer(eth_if,
					      PULL_TEST_PKT_DATA_SIZE,
					      AF_UNSPEC,
					      0,
					      K_NO_WAIT);
	zassert_true(dummy_pkt != NULL, "Pkt not allocated");

	zassert_true(net_pkt_write(dummy_pkt,
				   pkt_data,
				   PULL_TEST_PKT_DATA_SIZE) == 0,
		     "Write packet failed");

	net_pkt_cursor_init(dummy_pkt);
	len = net_pkt_get_len(dummy_pkt);

	for (i = 0; i < len; i++) {
		ret = net_pkt_pull(dummy_pkt, 1);
		zassert_equal(ret, 0, "Did return error");
	}

	ret = net_pkt_pull(dummy_pkt, 1);
	zassert_equal(ret, -ENOBUFS, "Did not return error");

	zassert_equal(dummy_pkt->buffer, NULL, "buffer list not empty");

	net_pkt_unref(dummy_pkt);
}

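/* net_pkt_clone() is expected to duplicate not only the buffer content but
 * also the packet cursor: after skipping 6 bytes in the original, the clone
 * must report the same remaining data.
 */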
void test_net_pkt_clone(void)
{
	uint8_t buf[26] = {"abcdefghijklmnopqrstuvwxyz"};
	struct net_pkt *pkt;
	struct net_pkt *cloned_pkt;
	int ret;

	pkt = net_pkt_alloc_with_buffer(eth_if, 64,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	ret = net_pkt_write(pkt, buf, sizeof(buf));
	zassert_true(ret == 0, "Pkt write failed");

	zassert_true(net_pkt_get_len(pkt) == sizeof(buf),
		     "Pkt length mismatch");

	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);
	net_pkt_skip(pkt, 6);
	zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(pkt),
		     "Pkt remaining data mismatch");

	cloned_pkt = net_pkt_clone(pkt, K_NO_WAIT);
	zassert_true(cloned_pkt != NULL, "Pkt not cloned");

	zassert_true(net_pkt_get_len(cloned_pkt) == sizeof(buf),
		     "Cloned pkt length mismatch");

	zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(pkt),
		     "Pkt remaining data mismatch");

	zassert_true(sizeof(buf) - 6 == net_pkt_remaining_data(cloned_pkt),
		     "Cloned pkt remaining data mismatch");

	net_pkt_unref(pkt);
	net_pkt_unref(cloned_pkt);
}

NET_BUF_POOL_FIXED_DEFINE(test_net_pkt_headroom_pool, 4, 2, NULL);

void test_net_pkt_headroom(void)
{
	struct net_pkt *pkt;
	struct net_buf *frag1;
	struct net_buf *frag2;
	struct net_buf *frag3;
	struct net_buf *frag4;

	/*
	 * Create a net_pkt; append net_bufs with reserved bytes (headroom).
	 *
	 * Layout to be crafted before writing to the net_buf: "HA|HH|HA|AA"
	 * H: Headroom
	 * |: net_buf/fragment delimiter
	 * A: available byte
	 */
	pkt = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	/* 1st fragment has 1 byte headroom and one byte available: "HA" */
	frag1 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_buf_reserve(frag1, 1);
	net_pkt_append_buffer(pkt, frag1);
	zassert_equal(net_pkt_available_buffer(pkt), 1, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* 2nd fragment affecting neither size nor length: "HH" */
	frag2 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_buf_reserve(frag2, 2);
	net_pkt_append_buffer(pkt, frag2);
	zassert_equal(net_pkt_available_buffer(pkt), 1, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* 3rd fragment has 1 byte headroom and one byte available: "HA" */
	frag3 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_buf_reserve(frag3, 1);
	net_pkt_append_buffer(pkt, frag3);
	zassert_equal(net_pkt_available_buffer(pkt), 2, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* 4th fragment has no headroom and two available bytes: "AA" */
	frag4 = net_buf_alloc_len(&test_net_pkt_headroom_pool, 2, K_NO_WAIT);
	net_pkt_append_buffer(pkt, frag4);
	zassert_equal(net_pkt_available_buffer(pkt), 4, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt), 0, "Length mismatch");

	/* Writing net_pkt via cursor, spanning all 4 fragments */
	net_pkt_cursor_init(pkt);
	zassert_true(net_pkt_write(pkt, "1234", 4) == 0, "Pkt write failed");

	/* Expected layout across all four fragments: "H1|HH|H2|34" */
	zassert_equal(frag1->size, 2, "Size mismatch");
	zassert_equal(frag1->len, 1, "Length mismatch");
	zassert_equal(frag2->size, 2, "Size mismatch");
	zassert_equal(frag2->len, 0, "Length mismatch");
	zassert_equal(frag3->size, 2, "Size mismatch");
	zassert_equal(frag3->len, 1, "Length mismatch");
	zassert_equal(frag4->size, 2, "Size mismatch");
	zassert_equal(frag4->len, 2, "Length mismatch");
	net_pkt_cursor_init(pkt);
	zassert_true(net_pkt_read(pkt, small_buffer, 4) == 0, "Read failed");
	zassert_mem_equal(small_buffer, "1234", 4, "Data mismatch");

	/* Making use of the headrooms */
	net_buf_push_u8(frag3, 'D');
	net_buf_push_u8(frag2, 'C');
	net_buf_push_u8(frag2, 'B');
	net_buf_push_u8(frag1, 'A');
	net_pkt_cursor_init(pkt);
	zassert_true(net_pkt_read(pkt, small_buffer, 8) == 0, "Read failed");
	zassert_mem_equal(small_buffer, "A1BCD234", 8, "Data mismatch");

	net_pkt_unref(pkt);
}

NET_BUF_POOL_FIXED_DEFINE(test_net_pkt_headroom_copy_pool, 2, 4, NULL);

void test_net_pkt_headroom_copy(void)
{
	struct net_pkt *pkt_src;
	struct net_pkt *pkt_dst;
	struct net_buf *frag1_dst;
	struct net_buf *frag2_dst;

	/* Create a net_pkt containing the bytes "0123" */
	pkt_src = net_pkt_alloc_with_buffer(eth_if, 4,
					    AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt_src != NULL, "Pkt not allocated");
	net_pkt_write(pkt_src, "0123", 4);

	/* Create a net_pkt consisting of net_buf fragments with reserved
	 * bytes (headroom)
	 */
	pkt_dst = net_pkt_alloc_on_iface(eth_if, K_NO_WAIT);
	zassert_true(pkt_dst != NULL, "Pkt not allocated");

	frag1_dst = net_buf_alloc_len(&test_net_pkt_headroom_copy_pool, 2,
				      K_NO_WAIT);
	net_buf_reserve(frag1_dst, 1);
	net_pkt_append_buffer(pkt_dst, frag1_dst);
	frag2_dst = net_buf_alloc_len(&test_net_pkt_headroom_copy_pool, 4,
				      K_NO_WAIT);
	net_buf_reserve(frag2_dst, 1);
	net_pkt_append_buffer(pkt_dst, frag2_dst);
	zassert_equal(net_pkt_available_buffer(pkt_dst), 4, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt_dst), 0, "Length mismatch");

	/* Copy to the net_pkt whose fragments have reserved bytes */
	net_pkt_cursor_init(pkt_src);
	net_pkt_cursor_init(pkt_dst);
	net_pkt_copy(pkt_dst, pkt_src, 4);
	zassert_equal(net_pkt_available_buffer(pkt_dst), 0, "Wrong space left");
	zassert_equal(net_pkt_get_len(pkt_dst), 4, "Length mismatch");

	net_pkt_cursor_init(pkt_dst);
	zassert_true(net_pkt_read(pkt_dst, small_buffer, 4) == 0,
		     "Pkt read failed");
	zassert_mem_equal(small_buffer, "0123", 4, "Data mismatch");

	net_pkt_unref(pkt_dst);
	net_pkt_unref(pkt_src);
}

static void test_net_pkt_get_contiguous_len(void)
{
	size_t cont_len;
	int res;
	/* Allocate a pkt with 2 fragments */
	struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer(
		NULL, CONFIG_NET_BUF_DATA_SIZE * 2,
		AF_UNSPEC, 0, K_NO_WAIT);

	zassert_not_null(pkt, "Pkt not allocated");

	net_pkt_cursor_init(pkt);

	cont_len = net_pkt_get_contiguous_len(pkt);
	zassert_equal(CONFIG_NET_BUF_DATA_SIZE, cont_len,
		      "Expected one complete available net_buf");

	net_pkt_set_overwrite(pkt, false);

	/* Now write 3 bytes into the pkt */
	for (int i = 0; i < 3; ++i) {
		res = net_pkt_write_u8(pkt, 0xAA);
		zassert_equal(0, res, "Write packet failed");
	}

	cont_len = net_pkt_get_contiguous_len(pkt);
	zassert_equal(CONFIG_NET_BUF_DATA_SIZE - 3, cont_len,
		      "Expected a three byte reduction");

	/* Fill the first fragment up until only 3 bytes are free */
	for (int i = 0; i < CONFIG_NET_BUF_DATA_SIZE - 6; ++i) {
		res = net_pkt_write_u8(pkt, 0xAA);
		zassert_equal(0, res, "Write packet failed");
	}

	cont_len = net_pkt_get_contiguous_len(pkt);
	zassert_equal(3, cont_len, "Expected only three bytes are available");

	/* Fill the complete first fragment, so the cursor points to the
	 * second fragment.
	 */
	for (int i = 0; i < 3; ++i) {
		res = net_pkt_write_u8(pkt, 0xAA);
		zassert_equal(0, res, "Write packet failed");
	}

	cont_len = net_pkt_get_contiguous_len(pkt);
	zassert_equal(CONFIG_NET_BUF_DATA_SIZE, cont_len,
		      "Expected next full net_buf is available");

	/* Fill the last fragment */
	for (int i = 0; i < CONFIG_NET_BUF_DATA_SIZE; ++i) {
		res = net_pkt_write_u8(pkt, 0xAA);
		zassert_equal(0, res, "Write packet failed");
	}

	cont_len = net_pkt_get_contiguous_len(pkt);
	zassert_equal(0, cont_len, "Expected no available space");

	net_pkt_unref(pkt);
}

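/* net_pkt_remove_tail() trims bytes from the end of the packet, dropping
 * trailing buffers that become empty, as the checks below verify.
 */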
void test_net_pkt_remove_tail(void)
{
	struct net_pkt *pkt;
	int err;

	pkt = net_pkt_alloc_with_buffer(NULL,
					CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
					AF_UNSPEC, 0, K_NO_WAIT);
	zassert_true(pkt != NULL, "Pkt not allocated");

	net_pkt_cursor_init(pkt);
	net_pkt_write(pkt, small_buffer, CONFIG_NET_BUF_DATA_SIZE * 2 + 3);

	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
		      "Pkt length is invalid");
	zassert_equal(pkt->frags->frags->frags->len, 3,
		      "3rd buffer length is invalid");

	/* Remove some bytes from last buffer */
	err = net_pkt_remove_tail(pkt, 2);
	zassert_equal(err, 0, "Failed to remove tail");

	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 1,
		      "Pkt length is invalid");
	zassert_not_equal(pkt->frags->frags->frags, NULL,
			  "3rd buffer was removed");
	zassert_equal(pkt->frags->frags->frags->len, 1,
		      "3rd buffer length is invalid");

	/* Remove last byte from last buffer */
	err = net_pkt_remove_tail(pkt, 1);
	zassert_equal(err, 0, "Failed to remove tail");

	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2,
		      "Pkt length is invalid");
	zassert_equal(pkt->frags->frags->frags, NULL,
		      "3rd buffer was not removed");
	zassert_equal(pkt->frags->frags->len, CONFIG_NET_BUF_DATA_SIZE,
		      "2nd buffer length is invalid");

	/* Remove 2nd buffer and one byte from 1st buffer */
	err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE + 1);
	zassert_equal(err, 0, "Failed to remove tail");

	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE - 1,
		      "Pkt length is invalid");
	zassert_equal(pkt->frags->frags, NULL,
		      "2nd buffer was not removed");
	zassert_equal(pkt->frags->len, CONFIG_NET_BUF_DATA_SIZE - 1,
		      "1st buffer length is invalid");

	net_pkt_unref(pkt);

	pkt = net_pkt_rx_alloc_with_buffer(NULL,
					   CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
					   AF_UNSPEC, 0, K_NO_WAIT);

	net_pkt_cursor_init(pkt);
	net_pkt_write(pkt, small_buffer, CONFIG_NET_BUF_DATA_SIZE * 2 + 3);

	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE * 2 + 3,
		      "Pkt length is invalid");
	zassert_equal(pkt->frags->frags->frags->len, 3,
		      "3rd buffer length is invalid");

	/* Remove bytes spanning 3 buffers */
	err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE + 5);
	zassert_equal(err, 0, "Failed to remove tail");

	zassert_equal(net_pkt_get_len(pkt), CONFIG_NET_BUF_DATA_SIZE - 2,
		      "Pkt length is invalid");
	zassert_equal(pkt->frags->frags, NULL,
		      "2nd buffer was not removed");
	zassert_equal(pkt->frags->len, CONFIG_NET_BUF_DATA_SIZE - 2,
		      "1st buffer length is invalid");

	/* Try to remove more bytes than packet has */
	err = net_pkt_remove_tail(pkt, CONFIG_NET_BUF_DATA_SIZE);
	zassert_equal(err, -EINVAL,
		      "Removing more bytes than available should fail");

	net_pkt_unref(pkt);
}

void test_main(void)
{
	eth_if = net_if_get_default();

	ztest_test_suite(net_pkt_tests,
			 ztest_unit_test(test_net_pkt_allocate_wo_buffer),
			 ztest_unit_test(test_net_pkt_allocate_with_buffer),
			 ztest_unit_test(test_net_pkt_basics_of_rw),
			 ztest_unit_test(test_net_pkt_advanced_basics),
			 ztest_unit_test(test_net_pkt_easier_rw_usage),
			 ztest_unit_test(test_net_pkt_copy),
			 ztest_unit_test(test_net_pkt_pull),
			 ztest_unit_test(test_net_pkt_clone),
			 ztest_unit_test(test_net_pkt_headroom),
			 ztest_unit_test(test_net_pkt_headroom_copy),
			 ztest_unit_test(test_net_pkt_get_contiguous_len),
			 ztest_unit_test(test_net_pkt_remove_tail)
			 );

	ztest_run_test_suite(net_pkt_tests);
}