1 /*
2 * Copyright (c) 2021 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/logging/log.h>
8 LOG_MODULE_REGISTER(net_capture, CONFIG_NET_CAPTURE_LOG_LEVEL);
9
10 #include <zephyr/kernel.h>
11 #include <stdlib.h>
12 #include <zephyr/sys/slist.h>
13 #include <zephyr/net/net_core.h>
14 #include <zephyr/net/net_ip.h>
15 #include <zephyr/net/net_if.h>
16 #include <zephyr/net/net_pkt.h>
17 #include <zephyr/net/virtual.h>
18 #include <zephyr/net/virtual_mgmt.h>
19 #include <zephyr/net/capture.h>
20 #include <zephyr/net/ethernet.h>
21
22 #include "net_private.h"
23 #include "ipv4.h"
24 #include "ipv6.h"
25 #include "udp_internal.h"
26 #include "net_stats.h"
27
/* Maximum time to wait when allocating a header net_pkt in capture_send() */
#define PKT_ALLOC_TIME K_MSEC(50)

/* UDP port used for the tunnel when the given address contains no port */
#define DEFAULT_PORT 4242

#if defined(CONFIG_NET_CAPTURE_TX_DEBUG)
#define DEBUG_TX 1
#else
#define DEBUG_TX 0
#endif

/* Protects net_capture_devlist and the per-context in_use flag */
static K_MUTEX_DEFINE(lock);

/* Dedicated packet/buffer pools so that cloning captured packets does
 * not deplete the normal RX/TX pools.
 */
NET_PKT_SLAB_DEFINE(capture_pkts, CONFIG_NET_CAPTURE_PKT_COUNT);

#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
NET_BUF_POOL_FIXED_DEFINE(capture_bufs, CONFIG_NET_CAPTURE_BUF_COUNT,
			  CONFIG_NET_BUF_DATA_SIZE, 4, NULL);
#else
#define DATA_POOL_SIZE MAX(CONFIG_NET_PKT_BUF_RX_DATA_POOL_SIZE, \
			   CONFIG_NET_PKT_BUF_TX_DATA_POOL_SIZE)

NET_BUF_POOL_VAR_DEFINE(capture_bufs, CONFIG_NET_CAPTURE_BUF_COUNT,
			DATA_POOL_SIZE, 4, NULL);
#endif

/* All statically defined capture contexts, linked in by capture_dev_init() */
static sys_slist_t net_capture_devlist;
53
/**
 * State of one capture device instance. Instances are statically
 * defined (DEFINE_NET_CAPTURE_DEV_DATA below) and linked into
 * net_capture_devlist at init time.
 */
struct net_capture {
	/** Node for membership in net_capture_devlist */
	sys_snode_t node;

	/** The capture device */
	const struct device *dev;

	/**
	 * Network interface where we are capturing network packets.
	 */
	struct net_if *capture_iface;

	/**
	 * IPIP tunnel network interface where the capture API sends the
	 * captured network packets.
	 */
	struct net_if *tunnel_iface;

	/**
	 * Network context that is used to store net_buf pool information.
	 */
	struct net_context *context;

	/**
	 * Peer (inner) tunnel IP address.
	 */
	struct sockaddr peer;

	/**
	 * Local (inner) tunnel IP address. This will be set
	 * as a local address to tunnel network interface.
	 */
	struct sockaddr local;

	/**
	 * Is this context setup already (reserved by net_capture_setup())
	 */
	bool in_use : 1;

	/**
	 * Is this active or not?
	 */
	bool is_enabled : 1;

	/**
	 * Is this context initialized yet
	 */
	bool init_done : 1;
};
102
get_net_pkt(void)103 static struct k_mem_slab *get_net_pkt(void)
104 {
105 return &capture_pkts;
106 }
107
get_net_buf(void)108 static struct net_buf_pool *get_net_buf(void)
109 {
110 return &capture_bufs;
111 }
112
/**
 * Invoke @p cb once for every capture context currently in use.
 *
 * @param cb        Callback receiving a filled net_capture_info.
 * @param user_data Opaque pointer passed through to @p cb.
 *
 * The info struct points into the live context (peer/local addresses),
 * so it is only valid for the duration of the callback.
 */
void net_capture_foreach(net_capture_cb_t cb, void *user_data)
{
	struct net_capture *ctx = NULL;
	sys_snode_t *sn, *sns;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_NODE_SAFE(&net_capture_devlist, sn, sns) {
		struct net_capture_info info;

		ctx = CONTAINER_OF(sn, struct net_capture, node);
		if (!ctx->in_use) {
			continue;
		}

		info.capture_dev = ctx->dev;
		info.capture_iface = ctx->capture_iface;
		info.tunnel_iface = ctx->tunnel_iface;
		info.peer = &ctx->peer;
		info.local = &ctx->local;
		info.is_enabled = ctx->is_enabled;

		/* Drop the lock while the callback runs so it may call
		 * other capture APIs that take the same lock.
		 * NOTE(review): the cached next pointer (sns) could go
		 * stale if the list were modified while unlocked; nodes
		 * are static and only (re-)linked in capture_dev_init(),
		 * but confirm if dynamic registration is ever added.
		 */
		k_mutex_unlock(&lock);
		cb(&info, user_data);
		k_mutex_lock(&lock, K_FOREVER);
	}

	k_mutex_unlock(&lock);
}
142
alloc_capture_dev(void)143 static struct net_capture *alloc_capture_dev(void)
144 {
145 struct net_capture *ctx = NULL;
146 sys_snode_t *sn, *sns;
147
148 k_mutex_lock(&lock, K_FOREVER);
149
150 SYS_SLIST_FOR_EACH_NODE_SAFE(&net_capture_devlist, sn, sns) {
151 ctx = CONTAINER_OF(sn, struct net_capture, node);
152 if (ctx->in_use) {
153 ctx = NULL;
154 continue;
155 }
156
157 ctx->in_use = true;
158 goto out;
159 }
160
161 out:
162 k_mutex_unlock(&lock);
163
164 return ctx;
165 }
166
is_ipip_interface(struct net_if * iface)167 static bool is_ipip_interface(struct net_if *iface)
168 {
169 return net_virtual_get_iface_capabilities(iface) &
170 VIRTUAL_INTERFACE_IPIP;
171 }
172
is_ipip_tunnel(struct net_if * iface)173 static bool is_ipip_tunnel(struct net_if *iface)
174 {
175 if ((net_if_l2(iface) == &NET_L2_GET_NAME(VIRTUAL)) &&
176 is_ipip_interface(iface)) {
177 return true;
178 }
179
180 return false;
181 }
182
/* net_if_foreach() helper: remember an IPIP tunnel interface in
 * *user_data. If several exist, the last one visited wins.
 */
static void iface_cb(struct net_if *iface, void *user_data)
{
	struct net_if **result = user_data;

	if (is_ipip_tunnel(iface)) {
		*result = iface;
	}
}
193
setup_iface(struct net_if * iface,const char * ipaddr,struct sockaddr * addr,int * addr_len)194 static int setup_iface(struct net_if *iface, const char *ipaddr,
195 struct sockaddr *addr, int *addr_len)
196 {
197 struct net_if_addr *ifaddr;
198
199 if (!net_ipaddr_parse(ipaddr, strlen(ipaddr), addr)) {
200 NET_ERR("Tunnel local address \"%s\" invalid.",
201 ipaddr);
202 return -EINVAL;
203 }
204
205 if (IS_ENABLED(CONFIG_NET_IPV6) && addr->sa_family == AF_INET6) {
206 /* No need to have dual address for IPIP tunnel interface */
207 net_if_flag_clear(iface, NET_IF_IPV4);
208 net_if_flag_set(iface, NET_IF_IPV6);
209
210 ifaddr = net_if_ipv6_addr_add(iface, &net_sin6(addr)->sin6_addr,
211 NET_ADDR_MANUAL, 0);
212 if (!ifaddr) {
213 NET_ERR("Cannot add %s to interface %d",
214 ipaddr, net_if_get_by_iface(iface));
215 return -EINVAL;
216 }
217
218 *addr_len = sizeof(struct sockaddr_in6);
219
220 } else if (IS_ENABLED(CONFIG_NET_IPV4) && addr->sa_family == AF_INET) {
221 struct in_addr netmask = { { { 255, 255, 255, 255 } } };
222
223 net_if_flag_clear(iface, NET_IF_IPV6);
224 net_if_flag_set(iface, NET_IF_IPV4);
225
226 ifaddr = net_if_ipv4_addr_add(iface, &net_sin(addr)->sin_addr,
227 NET_ADDR_MANUAL, 0);
228 if (!ifaddr) {
229 NET_ERR("Cannot add %s to interface %d",
230 ipaddr, net_if_get_by_iface(iface));
231 return -EINVAL;
232 }
233
234 /* Set the netmask so that we do not get IPv4 traffic routed
235 * into this interface.
236 */
237 net_if_ipv4_set_netmask_by_addr(iface,
238 &net_sin(addr)->sin_addr,
239 &netmask);
240
241 *addr_len = sizeof(struct sockaddr_in);
242 } else {
243 return -EINVAL;
244 }
245
246 return 0;
247 }
248
cleanup_iface(struct net_if * iface,struct sockaddr * addr)249 static int cleanup_iface(struct net_if *iface, struct sockaddr *addr)
250 {
251 int ret = -EINVAL;
252
253 if (IS_ENABLED(CONFIG_NET_IPV6) && addr->sa_family == AF_INET6) {
254 ret = net_if_ipv6_addr_rm(iface, &net_sin6(addr)->sin6_addr);
255 if (!ret) {
256 NET_ERR("Cannot remove %s from interface %d",
257 net_sprint_ipv6_addr(&net_sin6(addr)->sin6_addr),
258 net_if_get_by_iface(iface));
259 ret = -EINVAL;
260 }
261
262 net_if_flag_clear(iface, NET_IF_IPV6);
263
264 } else if (IS_ENABLED(CONFIG_NET_IPV4) && addr->sa_family == AF_INET) {
265 ret = net_if_ipv4_addr_rm(iface, &net_sin(addr)->sin_addr);
266 if (!ret) {
267 NET_ERR("Cannot remove %s from interface %d",
268 net_sprint_ipv4_addr(&net_sin(addr)->sin_addr),
269 net_if_get_by_iface(iface));
270 }
271
272 net_if_flag_clear(iface, NET_IF_IPV4);
273 }
274
275 return ret;
276 }
277
/**
 * Create and configure an IPIP tunnel for transporting captured packets.
 *
 * @param remote_addr   Outer tunnel endpoint address; used to select the
 *                      real network interface the tunnel attaches to.
 * @param my_local_addr Inner (local) tunnel address installed on the
 *                      tunnel interface; may embed a port, otherwise
 *                      DEFAULT_PORT is used.
 * @param peer_addr     Inner (peer) tunnel address; must be of the same
 *                      address family as @p my_local_addr.
 * @param dev           Output: the capture device bound to the tunnel.
 *
 * @return 0 on success, negative errno otherwise.
 */
int net_capture_setup(const char *remote_addr, const char *my_local_addr,
		      const char *peer_addr, const struct device **dev)
{
	struct virtual_interface_req_params params = { 0 };
	struct net_context *context = NULL;
	struct net_if *ipip_iface = NULL;
	struct sockaddr remote = { 0 };
	struct sockaddr local = { 0 };
	struct sockaddr peer = { 0 };
	struct net_if *remote_iface;
	struct net_capture *ctx;
	int local_addr_len;
	int orig_mtu;
	int ret;
	int mtu;

	if (dev == NULL || remote_addr == NULL || my_local_addr == NULL ||
	    peer_addr == NULL) {
		ret = -EINVAL;
		goto fail;
	}

	if (!net_ipaddr_parse(remote_addr, strlen(remote_addr), &remote)) {
		NET_ERR("IPIP tunnel %s address \"%s\" invalid.",
			"remote", remote_addr);
		ret = -EINVAL;
		goto fail;
	}

	if (!net_ipaddr_parse(peer_addr, strlen(peer_addr), &peer)) {
		NET_ERR("IPIP tunnel %s address \"%s\" invalid.",
			"peer", peer_addr);
		ret = -EINVAL;
		goto fail;
	}

	/* Pick the real interface that can reach the remote endpoint and
	 * compute the inner MTU by subtracting the outer IP + UDP
	 * encapsulation overhead.
	 */
	if (IS_ENABLED(CONFIG_NET_IPV6) && remote.sa_family == AF_INET6) {
		remote_iface = net_if_ipv6_select_src_iface(
						&net_sin6(&remote)->sin6_addr);
		params.family = AF_INET6;
		net_ipaddr_copy(&params.peer6addr,
				&net_sin6(&remote)->sin6_addr);
		orig_mtu = net_if_get_mtu(remote_iface);
		mtu = orig_mtu - sizeof(struct net_ipv6_hdr) -
			sizeof(struct net_udp_hdr);
	} else if (IS_ENABLED(CONFIG_NET_IPV4) && remote.sa_family == AF_INET) {
		remote_iface = net_if_ipv4_select_src_iface(
						&net_sin(&remote)->sin_addr);
		params.family = AF_INET;
		net_ipaddr_copy(&params.peer4addr,
				&net_sin(&remote)->sin_addr);
		orig_mtu = net_if_get_mtu(remote_iface);
		mtu = orig_mtu - sizeof(struct net_ipv4_hdr) -
			sizeof(struct net_udp_hdr);
	} else {
		NET_ERR("Invalid address family %d", remote.sa_family);
		ret = -EINVAL;
		goto fail;
	}

	if (remote_iface == NULL) {
		NET_ERR("Remote address %s unreachable", remote_addr);
		ret = -ENETUNREACH;
		goto fail;
	}

	/* We only get net_context so that net_pkt allocation routines
	 * can allocate net_buf's from our net_buf pool.
	 */
	ret = net_context_get(params.family, SOCK_DGRAM, IPPROTO_UDP,
			      &context);
	if (ret < 0) {
		NET_ERR("Cannot allocate net_context (%d)", ret);
		return ret;
	}

	/* Then select the IPIP tunnel. The capture device is hooked to it.
	 */
	net_if_foreach(iface_cb, &ipip_iface);

	if (ipip_iface == NULL) {
		NET_ERR("Cannot find available %s interface", "ipip");
		ret = -ENOENT;
		goto fail;
	}

	ret = net_mgmt(NET_REQUEST_VIRTUAL_INTERFACE_SET_PEER_ADDRESS,
		       ipip_iface, &params, sizeof(params));
	if (ret < 0) {
		NET_ERR("Cannot set remote address %s to interface %d (%d)",
			remote_addr, net_if_get_by_iface(ipip_iface), ret);
		goto fail;
	}

	params.mtu = orig_mtu;

	ret = net_mgmt(NET_REQUEST_VIRTUAL_INTERFACE_SET_MTU,
		       ipip_iface, &params, sizeof(params));
	if (ret < 0) {
		NET_ERR("Cannot set interface %d MTU to %d (%d)",
			net_if_get_by_iface(ipip_iface), params.mtu, ret);
		goto fail;
	}

	ret = setup_iface(ipip_iface, my_local_addr, &local, &local_addr_len);
	if (ret < 0) {
		NET_ERR("Cannot set IP address %s to tunnel interface",
			my_local_addr);
		goto fail;
	}

	if (peer.sa_family != local.sa_family) {
		NET_ERR("Peer and local address are not the same family "
			"(%d vs %d)", peer.sa_family, local.sa_family);
		ret = -EINVAL;
		goto fail;
	}

	ctx = alloc_capture_dev();
	if (ctx == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Lower the remote interface MTU so that our packets can fit to it */
	/* NOTE(review): the lowered MTU is never restored by
	 * net_capture_cleanup() / capture_cleanup() — confirm whether
	 * that is intentional.
	 */
	net_if_set_mtu(remote_iface, mtu);

	ctx->context = context;
	net_context_setup_pools(ctx->context, get_net_pkt, get_net_buf);

	ctx->tunnel_iface = ipip_iface;
	*dev = ctx->dev;

	memcpy(&ctx->peer, &peer, local_addr_len);
	memcpy(&ctx->local, &local, local_addr_len);

	/* sin_port and sin6_port are at the same offset, so this default
	 * port fixup works for both families.
	 */
	if (net_sin(&ctx->peer)->sin_port == 0) {
		net_sin(&ctx->peer)->sin_port = htons(DEFAULT_PORT);
	}

	if (net_sin(&ctx->local)->sin_port == 0) {
		net_sin(&ctx->local)->sin_port = htons(DEFAULT_PORT);
	}

	ret = net_virtual_interface_attach(ctx->tunnel_iface, remote_iface);
	if (ret < 0 && ret != -EALREADY) {
		NET_ERR("Cannot attach IPIP interface %d to interface %d",
			net_if_get_by_iface(ipip_iface),
			net_if_get_by_iface(remote_iface));
		(void)net_capture_cleanup(ctx->dev);

		/* net_context is cleared by the cleanup so no need to goto
		 * the fail label.
		 */
		return ret;
	}

	net_virtual_set_name(ipip_iface, "Capture tunnel");

	return 0;

fail:
	if (context) {
		net_context_unref(context);
	}

	return ret;
}
446
capture_cleanup(const struct device * dev)447 static int capture_cleanup(const struct device *dev)
448 {
449 struct net_capture *ctx = dev->data;
450
451 (void)net_capture_disable(dev);
452 (void)net_virtual_interface_attach(ctx->tunnel_iface, NULL);
453
454 if (ctx->context) {
455 net_context_put(ctx->context);
456 }
457
458 (void)cleanup_iface(ctx->tunnel_iface, &ctx->local);
459
460 ctx->tunnel_iface = NULL;
461 ctx->in_use = false;
462
463 return 0;
464 }
465
capture_is_enabled(const struct device * dev)466 static bool capture_is_enabled(const struct device *dev)
467 {
468 struct net_capture *ctx = dev->data;
469
470 return ctx->is_enabled ? true : false;
471 }
472
capture_enable(const struct device * dev,struct net_if * iface)473 static int capture_enable(const struct device *dev, struct net_if *iface)
474 {
475 struct net_capture *ctx = dev->data;
476
477 if (ctx->is_enabled) {
478 return -EALREADY;
479 }
480
481 /* We cannot capture the tunnel interface as that would cause
482 * recursion.
483 */
484 if (ctx->tunnel_iface == iface) {
485 return -EINVAL;
486 }
487
488 ctx->capture_iface = iface;
489 ctx->is_enabled = true;
490
491 net_mgmt_event_notify(NET_EVENT_CAPTURE_STARTED, iface);
492
493 net_if_up(ctx->tunnel_iface);
494
495 return 0;
496 }
497
capture_disable(const struct device * dev)498 static int capture_disable(const struct device *dev)
499 {
500 struct net_capture *ctx = dev->data;
501 struct net_if *iface = ctx->capture_iface;
502
503 ctx->capture_iface = NULL;
504 ctx->is_enabled = false;
505
506 net_if_down(ctx->tunnel_iface);
507
508 net_mgmt_event_notify(NET_EVENT_CAPTURE_STOPPED, iface);
509
510 return 0;
511 }
512
/**
 * Capture @p pkt if an enabled capture context exists for @p iface.
 *
 * Unless the packet is already in cooked mode, a clone is allocated
 * from the dedicated capture pools and sent through the tunnel; the
 * original packet continues on its normal path.
 *
 * @return 0 on successful capture, -EALREADY if the packet was already
 *         captured, -ENOENT when no enabled context matches @p iface,
 *         -ENOMEM if cloning failed, or the net_capture_send() error.
 */
int net_capture_pkt_with_status(struct net_if *iface, struct net_pkt *pkt)
{
	struct k_mem_slab *orig_slab;
	struct net_pkt *captured;
	sys_snode_t *sn, *sns;
	bool skip_clone = false;
	int ret = -ENOENT;

	/* We must prevent to capture network packet that is already captured
	 * in order to avoid recursion.
	 */
	if (net_pkt_is_captured(pkt)) {
		return -EALREADY;
	}

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_NODE_SAFE(&net_capture_devlist, sn, sns) {
		struct net_capture *ctx = CONTAINER_OF(sn, struct net_capture,
						       node);

		if (!ctx->in_use || !ctx->is_enabled ||
		    ctx->capture_iface != iface) {
			continue;
		}

		/* If the packet is marked as "cooked", then it means that the
		 * packet was directed here by "any" interface and was already
		 * cooked mode captured. So no need to clone it here.
		 */
		if (net_pkt_is_cooked_mode(pkt)) {
			skip_clone = true;
		}

		if (skip_clone) {
			captured = pkt;
		} else {
			/* Temporarily swap the slab so that the clone is
			 * allocated from the dedicated capture pool and
			 * does not deplete the normal pools.
			 */
			orig_slab = pkt->slab;
			pkt->slab = get_net_pkt();

			captured = net_pkt_clone(pkt, K_NO_WAIT);

			pkt->slab = orig_slab;

			if (captured == NULL) {
				NET_DBG("Captured pkt %s", "dropped");
				net_stats_update_processing_error(ctx->tunnel_iface);
				ret = -ENOMEM;
				goto out;
			}
		}

		net_pkt_set_orig_iface(captured, iface);
		net_pkt_set_iface(captured, ctx->tunnel_iface);
		/* Mark the original too so it cannot be captured again */
		net_pkt_set_captured(pkt, true);

		/* On send failure we still own the clone and must drop it;
		 * in the skip_clone case the caller keeps ownership of pkt.
		 */
		ret = net_capture_send(ctx->dev, ctx->tunnel_iface, captured);
		if (ret < 0) {
			if (!skip_clone) {
				net_pkt_unref(captured);
			}
		}

		net_pkt_set_cooked_mode(pkt, false);

		/* Only the first matching context handles the packet */
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
586
/* Fire-and-forget wrapper: the capture status is deliberately ignored */
void net_capture_pkt(struct net_if *iface, struct net_pkt *pkt)
{
	(void)net_capture_pkt_with_status(iface, pkt);
}
591
capture_dev_init(const struct device * dev)592 static int capture_dev_init(const struct device *dev)
593 {
594 struct net_capture *ctx = dev->data;
595
596 k_mutex_lock(&lock, K_FOREVER);
597
598 sys_slist_find_and_remove(&net_capture_devlist, &ctx->node);
599 sys_slist_prepend(&net_capture_devlist, &ctx->node);
600
601 ctx->dev = dev;
602 ctx->init_done = true;
603
604 k_mutex_unlock(&lock);
605
606 return 0;
607 }
608
/**
 * Encapsulate the captured @p pkt into an outer IP + UDP header and
 * transmit it via the tunnel interface.
 *
 * @param dev   Capture device owning the tunnel context.
 * @param iface Unused; the context's tunnel_iface is used instead.
 * @param pkt   Captured packet; its buffer chain is modified in place
 *              (outer headers are spliced in front of it).
 *
 * @return 0 on success, negative errno on failure.
 */
static int capture_send(const struct device *dev, struct net_if *iface,
			struct net_pkt *pkt)
{
	struct net_capture *ctx = dev->data;
	enum net_verdict verdict;
	struct net_pkt *ip;
	int ret;
	int len;

	if (!ctx->in_use) {
		return -ENOENT;
	}

	/* Outer header size depends on the tunnel address family */
	if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.sa_family == AF_INET) {
		len = sizeof(struct net_ipv4_hdr);
	} else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.sa_family == AF_INET6) {
		len = sizeof(struct net_ipv6_hdr);
	} else {
		return -EINVAL;
	}

	len += sizeof(struct net_udp_hdr);

	/* Add IP and UDP header */
	ip = net_pkt_alloc_from_slab(ctx->context->tx_slab(), PKT_ALLOC_TIME);
	if (!ip) {
		return -ENOMEM;
	}

	net_pkt_set_context(ip, ctx->context);
	net_pkt_set_family(ip, ctx->local.sa_family);
	net_pkt_set_iface(ip, ctx->tunnel_iface);

	ret = net_pkt_alloc_buffer(ip, len, IPPROTO_UDP, PKT_ALLOC_TIME);
	if (ret < 0) {
		net_pkt_unref(ip);
		return ret;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.sa_family == AF_INET) {
		net_pkt_set_ipv4_ttl(ip,
				     net_if_ipv4_get_ttl(ctx->tunnel_iface));

		ret = net_ipv4_create(ip, &net_sin(&ctx->local)->sin_addr,
				      &net_sin(&ctx->peer)->sin_addr);
	} else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.sa_family == AF_INET6) {
		ret = net_ipv6_create(ip, &net_sin6(&ctx->local)->sin6_addr,
				      &net_sin6(&ctx->peer)->sin6_addr);
	} else {
		CODE_UNREACHABLE;
	}

	if (ret < 0) {
		net_pkt_unref(ip);
		return ret;
	}

	(void)net_udp_create(ip, net_sin(&ctx->local)->sin_port,
			     net_sin(&ctx->peer)->sin_port);

	/* Splice the freshly built header buffers in front of the captured
	 * data, then release the now buffer-less header pkt.
	 */
	net_buf_frag_add(ip->buffer, pkt->buffer);
	pkt->buffer = ip->buffer;
	ip->buffer = NULL;
	net_pkt_unref(ip);

	/* Clear the context if it was set as the pkt was cloned and we
	 * do not want to affect the original pkt.
	 */
	net_pkt_set_context(pkt, NULL);
	net_pkt_set_captured(pkt, true);
	net_pkt_set_iface(pkt, ctx->tunnel_iface);
	net_pkt_set_family(pkt, ctx->local.sa_family);
	net_pkt_set_ipv6_ext_len(pkt, 0);

	net_pkt_cursor_init(pkt);

	/* Finalize the outer header (lengths, checksums) */
	if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.sa_family == AF_INET) {
		net_pkt_set_ip_hdr_len(pkt, sizeof(struct net_ipv4_hdr));
		net_pkt_set_ipv4_opts_len(pkt, 0);

		net_ipv4_finalize(pkt, IPPROTO_UDP);
	} else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.sa_family == AF_INET6) {
		net_pkt_set_ip_hdr_len(pkt, sizeof(struct net_ipv6_hdr));
		net_pkt_set_ipv6_ext_opt_len(pkt, 0);

		net_ipv6_finalize(pkt, IPPROTO_UDP);
	} else {
		CODE_UNREACHABLE;
	}

	if (DEBUG_TX) {
		char str[sizeof("TX iface xx")];

		snprintk(str, sizeof(str), "TX iface %d",
			 net_if_get_by_iface(net_pkt_iface(pkt)));

		net_pkt_hexdump(pkt, str);
	}

	net_pkt_cursor_init(pkt);

	/* ret is >= 0 here (header creation succeeded above) */
	verdict = net_if_send_data(ctx->tunnel_iface, pkt);
	if (verdict == NET_DROP) {
		ret = -EIO;
	}

	return ret;
}
717
/* Capture device driver API hooks (contract in <zephyr/net/capture.h>) */
static const struct net_capture_interface_api capture_interface_api = {
	.cleanup = capture_cleanup,
	.enable = capture_enable,
	.disable = capture_disable,
	.is_enabled = capture_is_enabled,
	.send = capture_send,
};

/* Per-instance context storage */
#define DEFINE_NET_CAPTURE_DEV_DATA(x, _)		\
	static struct net_capture capture_dev_data_##x

/* One device object per instance, named "NET_CAPTURE<x>" */
#define DEFINE_NET_CAPTURE_DEVICE(x, _)			\
	DEVICE_DEFINE(net_capture_##x,			\
		      "NET_CAPTURE" #x,			\
		      &capture_dev_init,		\
		      NULL,				\
		      &capture_dev_data_##x,		\
		      NULL,				\
		      POST_KERNEL,			\
		      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
		      &capture_interface_api)

/* Instantiate CONFIG_NET_CAPTURE_DEVICE_COUNT capture devices */
LISTIFY(CONFIG_NET_CAPTURE_DEVICE_COUNT, DEFINE_NET_CAPTURE_DEV_DATA, (;), _);
LISTIFY(CONFIG_NET_CAPTURE_DEVICE_COUNT, DEFINE_NET_CAPTURE_DEVICE, (;), _);
742