1 /*
2 * Copyright (c) 2016 Intel Corporation.
3 * Copyright (c) 2023 Nordic Semiconductor ASA
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <zephyr/logging/log.h>
9 LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
10
11 #include <zephyr/init.h>
12 #include <zephyr/kernel.h>
13 #include <zephyr/linker/sections.h>
14 #include <zephyr/random/random.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <zephyr/net/igmp.h>
19 #include <zephyr/net/ipv4_autoconf.h>
20 #include <zephyr/net/mld.h>
21 #include <zephyr/net/net_core.h>
22 #include <zephyr/net/net_event.h>
23 #include <zephyr/net/net_pkt.h>
24 #include <zephyr/net/net_if.h>
25 #include <zephyr/net/net_mgmt.h>
26 #include <zephyr/net/ethernet.h>
27 #ifdef CONFIG_WIFI_NM
28 #include <zephyr/net/wifi_nm.h>
29 #endif
30 #include <zephyr/net/offloaded_netdev.h>
31 #include <zephyr/net/virtual.h>
32 #include <zephyr/net/socket.h>
33 #include <zephyr/sys/iterable_sections.h>
34
35 #include "net_private.h"
36 #include "ipv4.h"
37 #include "ipv6.h"
38
39 #include "net_stats.h"
40
41 #define REACHABLE_TIME (MSEC_PER_SEC * 30) /* in ms */
42 /*
43 * split the min/max random reachable factors into numerator/denominator
44 * so that integer-based math works better
45 */
46 #define MIN_RANDOM_NUMER (1)
47 #define MIN_RANDOM_DENOM (2)
48 #define MAX_RANDOM_NUMER (3)
49 #define MAX_RANDOM_DENOM (2)
50
51 static K_MUTEX_DEFINE(lock);
52
53 /* net_if dedicated section limiters */
54 extern struct net_if _net_if_list_start[];
55 extern struct net_if _net_if_list_end[];
56
57 static struct net_if *default_iface;
58
59 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
60 static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
61 static struct k_work_delayable router_timer;
62 static sys_slist_t active_router_timers;
63 #endif
64
65 #if defined(CONFIG_NET_NATIVE_IPV6)
66 /* Timer that triggers network address renewal */
67 static struct k_work_delayable address_lifetime_timer;
68
69 /* Track currently active address lifetime timers */
70 static sys_slist_t active_address_lifetime_timers;
71
72 /* Timer that triggers IPv6 prefix lifetime */
73 static struct k_work_delayable prefix_lifetime_timer;
74
75 /* Track currently active IPv6 prefix lifetime timers */
76 static sys_slist_t active_prefix_lifetime_timers;
77
78 #if defined(CONFIG_NET_IPV6_DAD)
79 /** Duplicate address detection (DAD) timer */
80 static struct k_work_delayable dad_timer;
81 static sys_slist_t active_dad_timers;
82 #endif
83
84 #if defined(CONFIG_NET_IPV6_ND)
85 static struct k_work_delayable rs_timer;
86 static sys_slist_t active_rs_timers;
87 #endif
88 #endif /* CONFIG_NET_NATIVE_IPV6 */
89
90 #if defined(CONFIG_NET_IPV6)
91 static struct {
92 struct net_if_ipv6 ipv6;
93 struct net_if *iface;
94 } ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
#endif /* CONFIG_NET_IPV6 */
96
97 #if defined(CONFIG_NET_IPV4)
98 static struct {
99 struct net_if_ipv4 ipv4;
100 struct net_if *iface;
101 } ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
#endif /* CONFIG_NET_IPV4 */
103
104 /* We keep track of the link callbacks in this list.
105 */
106 static sys_slist_t link_callbacks;
107
108 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
109 /* Multicast join/leave tracking.
110 */
111 static sys_slist_t mcast_monitor_callbacks;
112 #endif
113
114 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
115 #if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
116 #define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
117 #endif
118
119 K_KERNEL_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
120 K_FIFO_DEFINE(tx_ts_queue);
121
122 static struct k_thread tx_thread_ts;
123
124 /* We keep track of the timestamp callbacks in this list.
125 */
126 static sys_slist_t timestamp_callbacks;
127 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
128
129 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
130 #define debug_check_packet(pkt) \
131 do { \
132 NET_DBG("Processing (pkt %p, prio %d) network packet " \
133 "iface %d (%p)", \
134 pkt, net_pkt_priority(pkt), \
135 net_if_get_by_iface(net_pkt_iface(pkt)), \
136 net_pkt_iface(pkt)); \
137 \
138 NET_ASSERT(pkt->frags); \
139 } while (false)
140 #else
141 #define debug_check_packet(...)
142 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
143
z_impl_net_if_get_by_index(int index)144 struct net_if *z_impl_net_if_get_by_index(int index)
145 {
146 if (index <= 0) {
147 return NULL;
148 }
149
150 if (&_net_if_list_start[index - 1] >= _net_if_list_end) {
151 NET_DBG("Index %d is too large", index);
152 return NULL;
153 }
154
155 return &_net_if_list_start[index - 1];
156 }
157
158 #ifdef CONFIG_USERSPACE
/* Syscall verification handler for net_if_get_by_index().
 *
 * Resolves the index via the kernel implementation, then additionally
 * checks that the interface is a valid kernel object the calling
 * userspace thread may access; returns NULL on either failure.
 */
struct net_if *z_vrfy_net_if_get_by_index(int index)
{
	struct net_if *iface;

	iface = net_if_get_by_index(index);
	if (!iface) {
		return NULL;
	}

	/* Userspace may only see interfaces it was granted access to. */
	if (!k_object_is_valid(iface, K_OBJ_NET_IF)) {
		return NULL;
	}

	return iface;
}
174
175 #include <zephyr/syscalls/net_if_get_by_index_mrsh.c>
176 #endif
177
178 #if defined(CONFIG_NET_NATIVE)
net_context_send_cb(struct net_context * context,int status)179 static inline void net_context_send_cb(struct net_context *context,
180 int status)
181 {
182 if (!context) {
183 return;
184 }
185
186 if (context->send_cb) {
187 context->send_cb(context, status, context->user_data);
188 }
189
190 if (IS_ENABLED(CONFIG_NET_UDP) &&
191 net_context_get_proto(context) == IPPROTO_UDP) {
192 net_stats_update_udp_sent(net_context_get_iface(context));
193 } else if (IS_ENABLED(CONFIG_NET_TCP) &&
194 net_context_get_proto(context) == IPPROTO_TCP) {
195 net_stats_update_tcp_seg_sent(net_context_get_iface(context));
196 }
197 }
198
/* Convert the absolute per-stage timestamp ticks stored in the packet
 * into per-stage deltas: after this runs, tick[i] holds the time spent
 * in stage i (the first stage is measured relative to start_time).
 * A zero tick marks the end of the stamped entries.
 *
 * @param pkt        Packet carrying the detail tick array.
 * @param start_time Cycle count when the packet was created.
 * @param stop_time  Unused; kept so the call sites stay unchanged.
 */
static void update_txtime_stats_detail(struct net_pkt *pkt,
				       uint32_t start_time, uint32_t stop_time)
{
	uint32_t val, prev = start_time;
	int i;

	/* Fix -Wunused-parameter: stop_time is intentionally ignored. */
	(void)stop_time;

	for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
		if (!net_pkt_stats_tick(pkt)[i]) {
			/* Remaining slots were never stamped. */
			break;
		}

		val = net_pkt_stats_tick(pkt)[i] - prev;
		prev = net_pkt_stats_tick(pkt)[i];
		net_pkt_stats_tick(pkt)[i] = val;
	}
}
215
/* Hand one packet to the interface driver through its L2 send() hook.
 *
 * Collects TX timing statistics when enabled, drops the packet with
 * -ENETDOWN if the carrier is down, and finally reports the status to
 * the owning net_context callback and any registered link callbacks.
 *
 * Returns false only when pkt is NULL or the destination link address
 * could not be copied; the packet is consumed in every other case.
 */
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_linkaddr ll_dst = { 0 };
	struct net_context *context;
	uint32_t create_time;
	int status;

	/* We collect send statistics for each socket priority if enabled */
	uint8_t pkt_priority;

	if (!pkt) {
		return false;
	}

	create_time = net_pkt_create_time(pkt);

	debug_check_packet(pkt);

	/* If there're any link callbacks, with such a callback receiving
	 * a destination address, copy that address out of packet, just in
	 * case packet is freed before callback is called.
	 */
	if (!sys_slist_is_empty(&link_callbacks)) {
		if (net_linkaddr_set(&ll_dst,
				     net_pkt_lladdr_dst(pkt)->addr,
				     net_pkt_lladdr_dst(pkt)->len) < 0) {
			return false;
		}
	}

	context = net_pkt_context(pkt);

	if (net_if_flag_is_set(iface, NET_IF_LOWER_UP)) {
		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
		    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
			pkt_priority = net_pkt_priority(pkt);

			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
				/* Make sure the statistics information is not
				 * lost by keeping the net_pkt over L2 send.
				 */
				net_pkt_ref(pkt);
			}
		}

		/* Serialize driver access; send() consumes the packet. */
		net_if_tx_lock(iface);
		status = net_if_l2(iface)->send(iface, pkt);
		net_if_tx_unlock(iface);

		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
		    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
			uint32_t end_tick = k_cycle_get_32();

			net_pkt_set_tx_stats_tick(pkt, end_tick);

			net_stats_update_tc_tx_time(iface,
						    pkt_priority,
						    create_time,
						    end_tick);

			SYS_PORT_TRACING_FUNC(net, tx_time, pkt, end_tick);

			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
				update_txtime_stats_detail(
					pkt,
					create_time,
					end_tick);

				net_stats_update_tc_tx_time_detail(
					iface, pkt_priority,
					net_pkt_stats_tick(pkt));

				/* For TCP connections, we might keep the pkt
				 * longer so that we can resend it if needed.
				 * Because of that we need to clear the
				 * statistics here.
				 */
				net_pkt_stats_tick_reset(pkt);

				/* Drop the extra reference taken above. */
				net_pkt_unref(pkt);
			}
		}

	} else {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		status = -ENETDOWN;
	}

	if (status < 0) {
		net_pkt_unref(pkt);
	} else {
		/* A non-negative status is the byte count the driver sent. */
		net_stats_update_bytes_sent(iface, status);
	}

	if (context) {
		NET_DBG("Calling context send cb %p status %d",
			context, status);

		net_context_send_cb(context, status);
	}

	/* ll_dst.len is non-zero only when it was copied above. */
	if (ll_dst.len > 0) {
		net_if_call_link_cb(iface, &ll_dst, status);
	}

	return true;
}
324
/* TX queue worker entry: stamp the packet with the time it left the
 * queue and push it to the driver via net_if_tx().
 */
void net_process_tx_packet(struct net_pkt *pkt)
{
	struct net_if *iface = net_pkt_iface(pkt);

	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

	net_if_tx(iface, pkt);

#if defined(CONFIG_NET_POWER_MANAGEMENT)
	/* Balances the increment done when the packet was queued. */
	iface->tx_pending--;
#endif
}
339
/* Queue a packet for transmission on @p iface.
 *
 * The packet first passes the packet-filter hook (dropped silently on
 * rejection). Highest-priority packets, or builds without TX queues,
 * bypass the traffic-class queues and go straight to the driver;
 * otherwise the packet is submitted to its traffic-class TX queue,
 * waiting at most @p timeout for room.
 */
void net_if_try_queue_tx(struct net_if *iface, struct net_pkt *pkt, k_timeout_t timeout)
{
	if (!net_pkt_filter_send_ok(pkt)) {
		/* silently drop the packet */
		net_pkt_unref(pkt);
		return;
	}

	/* Cache these now: the pkt may be consumed (freed) below. */
	size_t len = net_pkt_get_len(pkt);
	uint8_t prio = net_pkt_priority(pkt);
	uint8_t tc = net_tx_priority2tc(prio);

#if NET_TC_TX_COUNT > 1
	NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
#endif

	/* For highest priority packet, skip the TX queue and push directly to
	 * the driver. Also if there are no TX queue/thread, push the packet
	 * directly to the driver.
	 */
	if ((IS_ENABLED(CONFIG_NET_TC_TX_SKIP_FOR_HIGH_PRIO) &&
	     prio >= NET_PRIORITY_CA) || NET_TC_TX_COUNT == 0) {
		net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

		net_if_tx(net_pkt_iface(pkt), pkt);
	} else {
		if (net_tc_try_submit_to_tx_queue(tc, pkt, timeout) != NET_OK) {
			goto drop;
		}
#if defined(CONFIG_NET_POWER_MANAGEMENT)
		/* Balanced by the decrement in net_process_tx_packet(). */
		iface->tx_pending++;
#endif
	}

	net_stats_update_tc_sent_pkt(iface, tc);
	net_stats_update_tc_sent_bytes(iface, tc, len);
	net_stats_update_tc_sent_priority(iface, tc, prio);
	return;

drop:
	net_pkt_unref(pkt);
	net_stats_update_tc_sent_dropped(iface, tc);
	return;
}
384 #endif /* CONFIG_NET_NATIVE */
385
/* Zero the statistics of a single interface.
 *
 * The loop over all interfaces doubles as validation that @p iface is
 * a registered interface; unknown pointers are silently ignored.
 * No-op unless CONFIG_NET_STATISTICS_PER_INTERFACE is enabled.
 */
void net_if_stats_reset(struct net_if *iface)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	STRUCT_SECTION_FOREACH(net_if, tmp) {
		if (iface == tmp) {
			net_if_lock(iface);
			memset(&iface->stats, 0, sizeof(iface->stats));
			net_if_unlock(iface);
			return;
		}
	}
#else
	ARG_UNUSED(iface);
#endif
}
401
/* Zero the statistics of every registered interface.
 * No-op unless CONFIG_NET_STATISTICS_PER_INTERFACE is enabled.
 */
void net_if_stats_reset_all(void)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	STRUCT_SECTION_FOREACH(net_if, iface) {
		net_if_lock(iface);
		memset(&iface->stats, 0, sizeof(iface->stats));
		net_if_unlock(iface);
	}
#endif
}
412
/* One-time initialization of a single interface: default IP flags,
 * locks, kernel-object registration, then the driver's own init hook.
 * Bails out (with an error log) if the device has no init API.
 */
static inline void init_iface(struct net_if *iface)
{
	const struct net_if_api *api = net_if_get_device(iface)->api;

	if (!api || !api->init) {
		NET_ERR("Iface %p driver API init NULL", iface);
		return;
	}

	/* By default IPv4 and IPv6 are enabled for a given network interface.
	 * These can be turned off later if needed.
	 */
#if defined(CONFIG_NET_NATIVE_IPV4)
	net_if_flag_set(iface, NET_IF_IPV4);
#endif
#if defined(CONFIG_NET_NATIVE_IPV6)
	net_if_flag_set(iface, NET_IF_IPV6);
#endif

	net_virtual_init(iface);

	NET_DBG("On iface %p", iface);

#ifdef CONFIG_USERSPACE
	/* Make the interface usable as a kernel object from userspace. */
	k_object_init(iface);
#endif

	k_mutex_init(&iface->lock);
	k_mutex_init(&iface->tx_lock);

	/* Driver-specific setup, e.g. setting the link address. */
	api->init(iface);

	net_ipv6_pe_init(iface);
}
447
448 #if defined(CONFIG_NET_NATIVE)
/* Validate and prepare a packet for sending on @p iface, then queue it.
 *
 * Checks interface state, fills in a missing source link address,
 * lets loopback and SOCK_RAW/IPPROTO_RAW traffic bypass the IP stack,
 * and runs the per-family prepare step (e.g. neighbor resolution).
 *
 * Returns NET_OK when queued, NET_DROP on error (the net_context
 * callback is invoked with the error status), or NET_CONTINUE when
 * sending is deferred (e.g. pending IPv6 ND).
 */
enum net_verdict net_if_try_send_data(struct net_if *iface, struct net_pkt *pkt,
				      k_timeout_t timeout)
{
	const struct net_l2 *l2;
	struct net_context *context = net_pkt_context(pkt);
	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
	enum net_verdict verdict = NET_OK;
	int status = -EIO;

	if (!net_if_flag_is_set(iface, NET_IF_LOWER_UP) ||
	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		verdict = NET_DROP;
		status = -ENETDOWN;
		goto done;
	}

	/* The check for CONFIG_NET_*_OFFLOAD here is an optimization;
	 * This is currently the only way for net_if_l2 to be NULL or missing send().
	 */
	if (IS_ENABLED(CONFIG_NET_OFFLOAD) || IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD)) {
		l2 = net_if_l2(iface);
		if (l2 == NULL) {
			/* Offloaded ifaces may choose not to use an L2 at all. */
			NET_WARN("no l2 for iface %p, discard pkt", iface);
			verdict = NET_DROP;
			goto done;
		} else if (l2->send == NULL) {
			/* Or, their chosen L2 (for example, OFFLOADED_NETDEV_L2)
			 * might simply not implement send.
			 */
			NET_WARN("l2 for iface %p cannot send, discard pkt", iface);
			verdict = NET_DROP;
			goto done;
		}
	}

	/* If the ll address is not set at all, then we must set
	 * it here.
	 * Workaround Linux bug, see:
	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
	 */
	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
	    net_pkt_lladdr_src(pkt)->len == 0) {
		(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
				       net_pkt_lladdr_if(pkt)->addr,
				       net_pkt_lladdr_if(pkt)->len);
	}

#if defined(CONFIG_NET_LOOPBACK)
	/* If the packet is destined back to us, then there is no need to do
	 * additional checks, so let the packet through.
	 */
	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
		goto done;
	}
#endif

	/* Bypass the IP stack with SOCK_RAW/IPPROTO_RAW sockets */
	if (IS_ENABLED(CONFIG_NET_SOCKETS_PACKET) &&
	    context && net_context_get_type(context) == SOCK_RAW &&
	    net_context_get_proto(context) == IPPROTO_RAW) {
		goto done;
	}

	/* If the ll dst address is not set check if it is present in the nbr
	 * cache.
	 */
	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		verdict = net_ipv6_prepare_for_send(pkt);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		verdict = net_ipv4_prepare_for_send(pkt);
	}

done:
	/* NET_OK in which case packet has checked successfully. In this case
	 * the net_context callback is called after successful delivery in
	 * net_if_tx_thread().
	 *
	 * NET_DROP in which case we call net_context callback that will
	 * give the status to user application.
	 *
	 * NET_CONTINUE in which case the sending of the packet is delayed.
	 * This can happen for example if we need to do IPv6 ND to figure
	 * out link layer address.
	 */
	if (verdict == NET_DROP) {
		if (context) {
			NET_DBG("Calling ctx send cb %p verdict %d",
				context, verdict);
			net_context_send_cb(context, status);
		}

		if (dst->len > 0) {
			net_if_call_link_cb(iface, dst, status);
		}
	} else if (verdict == NET_OK) {
		/* Packet is ready to be sent by L2, let's queue */
		net_if_try_queue_tx(iface, pkt, timeout);
	}

	return verdict;
}
555 #endif /* CONFIG_NET_NATIVE */
556
/* Set the link (L2) address of @p iface while holding the interface
 * lock; thin locking wrapper around net_if_set_link_addr_unlocked().
 *
 * Returns the status propagated from the unlocked setter.
 */
int net_if_set_link_addr_locked(struct net_if *iface,
				uint8_t *addr, uint8_t len,
				enum net_link_type type)
{
	int ret;

	net_if_lock(iface);

	ret = net_if_set_link_addr_unlocked(iface, addr, len, type);

	net_if_unlock(iface);

	return ret;
}
571
net_if_get_by_link_addr(struct net_linkaddr * ll_addr)572 struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
573 {
574 STRUCT_SECTION_FOREACH(net_if, iface) {
575 net_if_lock(iface);
576 if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
577 ll_addr->len)) {
578 net_if_unlock(iface);
579 return iface;
580 }
581 net_if_unlock(iface);
582 }
583
584 return NULL;
585 }
586
net_if_lookup_by_dev(const struct device * dev)587 struct net_if *net_if_lookup_by_dev(const struct device *dev)
588 {
589 STRUCT_SECTION_FOREACH(net_if, iface) {
590 if (net_if_get_device(iface) == dev) {
591 return iface;
592 }
593 }
594
595 return NULL;
596 }
597
/* Override the interface returned by net_if_get_default().
 *
 * NOTE(review): the write is not guarded by any lock; presumably this
 * is called during init or the race is tolerated — confirm.
 */
void net_if_set_default(struct net_if *iface)
{
	default_iface = iface;
}
602
/* Return the default network interface.
 *
 * An explicit net_if_set_default() override wins. Otherwise the
 * CONFIG_NET_DEFAULT_IF_* selection below is applied; note that if
 * several of those options are enabled, the last #if block executed
 * wins. Falls back to the first interface in the section, or NULL
 * when no interface exists at all.
 */
struct net_if *net_if_get_default(void)
{
	struct net_if *iface = NULL;

	if (&_net_if_list_start[0] == &_net_if_list_end[0]) {
		NET_WARN("No default interface found!");
		return NULL;
	}

	if (default_iface != NULL) {
		return default_iface;
	}

#if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
	iface = net_if_get_first_by_type(NULL);
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_PPP)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_OFFLOADED_NETDEV)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(OFFLOADED_NETDEV));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_UP)
	iface = net_if_get_first_up();
#endif
#if defined(CONFIG_NET_DEFAULT_IF_WIFI)
	iface = net_if_get_first_wifi();
#endif
	return iface ? iface : _net_if_list_start;
}
645
net_if_get_first_by_type(const struct net_l2 * l2)646 struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
647 {
648 STRUCT_SECTION_FOREACH(net_if, iface) {
649 if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
650 !l2 && net_if_offload(iface)) {
651 return iface;
652 }
653
654 if (net_if_l2(iface) == l2) {
655 return iface;
656 }
657 }
658
659 return NULL;
660 }
661
net_if_get_first_up(void)662 struct net_if *net_if_get_first_up(void)
663 {
664 STRUCT_SECTION_FOREACH(net_if, iface) {
665 if (net_if_flag_is_set(iface, NET_IF_UP)) {
666 return iface;
667 }
668 }
669
670 return NULL;
671 }
672
l2_flags_get(struct net_if * iface)673 static enum net_l2_flags l2_flags_get(struct net_if *iface)
674 {
675 enum net_l2_flags flags = 0;
676
677 if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
678 flags = net_if_l2(iface)->get_flags(iface);
679 }
680
681 return flags;
682 }
683
684 #if defined(CONFIG_NET_IP)
685 /* Return how many bits are shared between two IP addresses */
/* Return how many bits are shared between two IP addresses */
static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len)
{
	uint8_t prefix = 0U;

	for (int i = 0; i < addr_len; i++) {
		uint8_t diff = src[i] ^ dst[i];

		if (diff == 0U) {
			/* Whole octet matches, keep going. */
			prefix += 8U;
			continue;
		}

		/* Count leading zero bits of the first differing octet. */
		for (uint8_t mask = 0x80; mask != 0U && (diff & mask) == 0U;
		     mask >>= 1) {
			prefix++;
		}
		break;
	}

	return prefix;
}
710 #endif /* CONFIG_NET_IP */
711
712 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
/* Find the router table entry matching the given interface, address
 * family and address; NULL when no such entry exists.
 */
static struct net_if_router *iface_router_lookup(struct net_if *iface,
						 uint8_t family, void *addr)
{
	struct net_if_router *found = NULL;

	k_mutex_lock(&lock, K_FOREVER);

	for (int i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		struct net_if_router *r = &routers[i];

		if (!r->is_used || r->address.family != family ||
		    r->iface != iface) {
			continue;
		}

		if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
		     net_ipv6_addr_cmp(net_if_router_ipv6(r),
				       (struct in6_addr *)addr)) ||
		    (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
		     net_ipv4_addr_cmp(net_if_router_ipv4(r),
				       (struct in_addr *)addr))) {
			found = r;
			break;
		}
	}

	k_mutex_unlock(&lock);

	return found;
}
744
iface_router_notify_deletion(struct net_if_router * router,const char * delete_reason)745 static void iface_router_notify_deletion(struct net_if_router *router,
746 const char *delete_reason)
747 {
748 if (IS_ENABLED(CONFIG_NET_IPV6) &&
749 router->address.family == AF_INET6) {
750 NET_DBG("IPv6 router %s %s",
751 net_sprint_ipv6_addr(net_if_router_ipv6(router)),
752 delete_reason);
753
754 net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
755 router->iface,
756 &router->address.in6_addr,
757 sizeof(struct in6_addr));
758 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
759 router->address.family == AF_INET) {
760 NET_DBG("IPv4 router %s %s",
761 net_sprint_ipv4_addr(net_if_router_ipv4(router)),
762 delete_reason);
763
764 net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
765 router->iface,
766 &router->address.in_addr,
767 sizeof(struct in6_addr));
768 }
769 }
770
iface_router_ends(const struct net_if_router * router,uint32_t now)771 static inline int32_t iface_router_ends(const struct net_if_router *router,
772 uint32_t now)
773 {
774 uint32_t ends = router->life_start;
775
776 ends += MSEC_PER_SEC * router->lifetime;
777
778 /* Signed number of ms until router lifetime ends */
779 return (int32_t)(ends - now);
780 }
781
/* Reprogram the shared router timer to fire at the earliest pending
 * expiry; cancel it when no time-limited router is active.
 */
static void iface_router_update_timer(uint32_t now)
{
	struct net_if_router *entry, *tmp;
	uint32_t next = UINT32_MAX;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  entry, tmp, node) {
		int32_t remaining = iface_router_ends(entry, now);

		if (remaining <= 0) {
			/* Already expired, fire immediately. */
			next = 0;
			break;
		}

		if ((uint32_t)remaining < next) {
			next = (uint32_t)remaining;
		}
	}

	if (next == UINT32_MAX) {
		k_work_cancel_delayable(&router_timer);
	} else {
		k_work_reschedule(&router_timer, K_MSEC(next));
	}

	k_mutex_unlock(&lock);
}
809
/* Delayable-work handler: release every router whose lifetime has
 * passed, then reprogram the timer for the next pending expiry.
 */
static void iface_router_expired(struct k_work *work)
{
	uint32_t current_time = k_uptime_get_32();
	struct net_if_router *router, *next;
	/* Last kept node, needed for O(1) removal from the slist. */
	sys_snode_t *prev_node = NULL;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  router, next, node) {
		int32_t ends = iface_router_ends(router, current_time);

		if (ends > 0) {
			/* We have to loop on all active routers as their
			 * lifetime differ from each other.
			 */
			prev_node = &router->node;
			continue;
		}

		iface_router_notify_deletion(router, "has expired");
		sys_slist_remove(&active_router_timers,
				 prev_node, &router->node);
		router->is_used = false;
	}

	iface_router_update_timer(current_time);

	k_mutex_unlock(&lock);
}
842
/* Claim a free slot in the router table and record a new router.
 *
 * A non-zero @p lifetime (seconds) marks the entry time-limited and
 * arms the shared router timer; lifetime 0 creates an infinite,
 * non-default entry. A ROUTER_ADD management event is emitted.
 *
 * Returns the new entry, or NULL when the table is full.
 */
static struct net_if_router *iface_router_add(struct net_if *iface,
					      uint8_t family, void *addr,
					      bool is_default,
					      uint16_t lifetime)
{
	struct net_if_router *router = NULL;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		if (routers[i].is_used) {
			continue;
		}

		routers[i].is_used = true;
		routers[i].iface = iface;
		routers[i].address.family = family;

		if (lifetime) {
			/* NOTE(review): a time-limited router is forced to
			 * default here; for IPv4 this is later overridden
			 * by the is_default argument — confirm intended.
			 */
			routers[i].is_default = true;
			routers[i].is_infinite = false;
			routers[i].lifetime = lifetime;
			routers[i].life_start = k_uptime_get_32();

			sys_slist_append(&active_router_timers,
					 &routers[i].node);

			iface_router_update_timer(routers[i].life_start);
		} else {
			routers[i].is_default = false;
			routers[i].is_infinite = true;
			routers[i].lifetime = 0;
		}

		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
			memcpy(net_if_router_ipv6(&routers[i]), addr,
			       sizeof(struct in6_addr));
			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV6_ROUTER_ADD, iface,
					&routers[i].address.in6_addr,
					sizeof(struct in6_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				net_sprint_ipv6_addr((struct in6_addr *)addr),
				lifetime, routers[i].is_default);
		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
			memcpy(net_if_router_ipv4(&routers[i]), addr,
			       sizeof(struct in_addr));
			routers[i].is_default = is_default;

			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV4_ROUTER_ADD, iface,
					&routers[i].address.in_addr,
					sizeof(struct in_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				net_sprint_ipv4_addr((struct in_addr *)addr),
				lifetime, is_default);
		}

		router = &routers[i];
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return router;
}
915
iface_router_rm(struct net_if_router * router)916 static bool iface_router_rm(struct net_if_router *router)
917 {
918 bool ret = false;
919
920 k_mutex_lock(&lock, K_FOREVER);
921
922 if (!router->is_used) {
923 goto out;
924 }
925
926 iface_router_notify_deletion(router, "has been removed");
927
928 /* We recompute the timer if only the router was time limited */
929 if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
930 iface_router_update_timer(k_uptime_get_32());
931 }
932
933 router->is_used = false;
934 ret = true;
935
936 out:
937 k_mutex_unlock(&lock);
938
939 return ret;
940 }
941
net_if_router_rm(struct net_if_router * router)942 void net_if_router_rm(struct net_if_router *router)
943 {
944 k_mutex_lock(&lock, K_FOREVER);
945
946 router->is_used = false;
947
948 /* FIXME - remove timer */
949
950 k_mutex_unlock(&lock);
951 }
952
/* Return the first default router for the given family, optionally
 * restricted to @p iface (NULL matches any interface).
 */
static struct net_if_router *iface_router_find_default(struct net_if *iface,
							uint8_t family, void *addr)
{
	struct net_if_router *found = NULL;

	/* Todo: addr will need to be handled */
	ARG_UNUSED(addr);

	k_mutex_lock(&lock, K_FOREVER);

	for (int i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		struct net_if_router *r = &routers[i];

		if (!r->is_used || !r->is_default ||
		    r->address.family != family) {
			continue;
		}

		if (iface != NULL && iface != r->iface) {
			continue;
		}

		found = r;
		break;
	}

	k_mutex_unlock(&lock);

	return found;
}
984
/* One-time setup of the shared router lifetime machinery. */
static void iface_router_init(void)
{
	k_work_init_delayable(&router_timer, iface_router_expired);
	sys_slist_init(&active_router_timers);
}
990 #else
991 #define iface_router_init(...)
992 #endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */
993
994 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
/* Register a multicast join/leave monitor for @p iface (NULL means
 * monitor all interfaces). Re-registering an already listed monitor
 * simply moves it to the head of the list.
 */
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
			       struct net_if *iface,
			       net_if_mcast_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	mon->iface = iface;
	mon->cb = cb;

	/* Remove first so a re-registration cannot list the node twice. */
	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
	sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);

	k_mutex_unlock(&lock);
}
1009
/* Remove a previously registered multicast monitor; harmless to call
 * for a monitor that was never (or already un-) registered.
 */
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);

	k_mutex_unlock(&lock);
}
1018
/* Notify every registered multicast monitor that @p addr was joined
 * (is_joined == true) or left on @p iface.
 */
void net_if_mcast_monitor(struct net_if *iface,
			  const struct net_addr *addr,
			  bool is_joined)
{
	struct net_if_mcast_monitor *entry, *next;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
					  entry, next, node) {
		/* A NULL iface in the monitor means "all interfaces". */
		if (entry->iface == NULL || entry->iface == iface) {
			entry->cb(iface, addr, is_joined);
		}
	}

	k_mutex_unlock(&lock);
}
1036 #else
1037 #define net_if_mcast_mon_register(...)
1038 #define net_if_mcast_mon_unregister(...)
1039 #define net_if_mcast_monitor(...)
1040 #endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */
1041
1042 #if defined(CONFIG_NET_IPV6)
/* Return (lazily allocating) the IPv6 configuration of @p iface.
 *
 * When the interface already has IPv6 config it is returned through
 * @p ipv6 (which may be NULL if the caller only needs the binding to
 * exist). Otherwise a free slot from the global ipv6_addresses[] pool
 * is bound to the interface.
 *
 * Returns 0 on success, -ENOTSUP if IPv6 is disabled on the interface,
 * or -ESRCH when the pool is exhausted.
 */
int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
{
	int ret = 0;
	int i;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
		ret = -ENOTSUP;
		goto out;
	}

	if (iface->config.ip.ipv6) {
		if (ipv6) {
			*ipv6 = iface->config.ip.ipv6;
		}

		goto out;
	}

	/* Global pool is protected by the module mutex, taken while the
	 * interface lock is still held.
	 */
	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
		if (ipv6_addresses[i].iface) {
			continue;
		}

		iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
		ipv6_addresses[i].iface = iface;

		if (ipv6) {
			*ipv6 = &ipv6_addresses[i].ipv6;
		}

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
1089
/* Release the IPv6 configuration slot bound to @p iface back to the
 * global pool.
 *
 * Returns 0 on success, -ENOTSUP if IPv6 is disabled on the interface,
 * -EALREADY if no config was bound, or -ESRCH if the bound config was
 * not found in the pool.
 */
int net_if_config_ipv6_put(struct net_if *iface)
{
	int ret = 0;
	int i;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
		ret = -ENOTSUP;
		goto out;
	}

	if (!iface->config.ip.ipv6) {
		ret = -EALREADY;
		goto out;
	}

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
		if (ipv6_addresses[i].iface != iface) {
			continue;
		}

		iface->config.ip.ipv6 = NULL;
		ipv6_addresses[i].iface = NULL;

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
1129
1130 #if defined(CONFIG_NET_NATIVE_IPV6)
1131 #if defined(CONFIG_NET_IPV6_MLD)
join_mcast_allnodes(struct net_if * iface)1132 static void join_mcast_allnodes(struct net_if *iface)
1133 {
1134 struct in6_addr addr;
1135 int ret;
1136
1137 if (iface->config.ip.ipv6 == NULL) {
1138 return;
1139 }
1140
1141 net_ipv6_addr_create_ll_allnodes_mcast(&addr);
1142
1143 ret = net_ipv6_mld_join(iface, &addr);
1144 if (ret < 0 && ret != -EALREADY && ret != -ENETDOWN) {
1145 NET_ERR("Cannot join all nodes address %s for %d (%d)",
1146 net_sprint_ipv6_addr(&addr),
1147 net_if_get_by_iface(iface), ret);
1148 }
1149 }
1150
join_mcast_solicit_node(struct net_if * iface,struct in6_addr * my_addr)1151 static void join_mcast_solicit_node(struct net_if *iface,
1152 struct in6_addr *my_addr)
1153 {
1154 struct in6_addr addr;
1155 int ret;
1156
1157 if (iface->config.ip.ipv6 == NULL) {
1158 return;
1159 }
1160
1161 /* Join to needed multicast groups, RFC 4291 ch 2.8 */
1162 net_ipv6_addr_create_solicited_node(my_addr, &addr);
1163
1164 ret = net_ipv6_mld_join(iface, &addr);
1165 if (ret < 0) {
1166 if (ret != -EALREADY && ret != -ENETDOWN) {
1167 NET_ERR("Cannot join solicit node address %s for %d (%d)",
1168 net_sprint_ipv6_addr(&addr),
1169 net_if_get_by_iface(iface), ret);
1170 }
1171 } else {
1172 NET_DBG("Join solicit node address %s (ifindex %d)",
1173 net_sprint_ipv6_addr(&addr),
1174 net_if_get_by_iface(iface));
1175 }
1176 }
1177
leave_mcast_all(struct net_if * iface)1178 static void leave_mcast_all(struct net_if *iface)
1179 {
1180 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1181 int i;
1182
1183 if (!ipv6) {
1184 return;
1185 }
1186
1187 for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1188 if (!ipv6->mcast[i].is_used ||
1189 !ipv6->mcast[i].is_joined) {
1190 continue;
1191 }
1192
1193 net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
1194 }
1195 }
1196
join_mcast_nodes(struct net_if * iface,struct in6_addr * addr)1197 static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
1198 {
1199 enum net_l2_flags flags = 0;
1200
1201 if (iface->config.ip.ipv6 == NULL) {
1202 return;
1203 }
1204
1205 flags = l2_flags_get(iface);
1206 if (flags & NET_L2_MULTICAST) {
1207 join_mcast_allnodes(iface);
1208
1209 if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
1210 join_mcast_solicit_node(iface, addr);
1211 }
1212 }
1213 }
#else
/* IPv6 MLD support disabled: the multicast join/leave helpers above
 * compile away to no-ops.
 */
#define join_mcast_allnodes(...)
#define join_mcast_solicit_node(...)
#define leave_mcast_all(...)
#define join_mcast_nodes(...)
#endif /* CONFIG_NET_IPV6_MLD */
1220
1221 #if defined(CONFIG_NET_IPV6_DAD)
1222 #define DAD_TIMEOUT 100U /* ms */
1223
/* Work handler for the IPv6 Duplicate Address Detection timer.
 *
 * Drains expired entries from active_dad_timers into a local list,
 * reschedules the timer for the first still-pending entry, and then
 * finalizes each expired address (marks it preferred, notifies
 * listeners) outside of the global lock.
 */
static void dad_timeout(struct k_work *work)
{
	uint32_t current_time = k_uptime_get_32();
	struct net_if_addr *ifaddr, *next;
	int32_t delay = -1;
	sys_slist_t expired_list;

	ARG_UNUSED(work);

	sys_slist_init(&expired_list);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
					  ifaddr, next, dad_node) {
		/* DAD entries are ordered by construction. Stop when
		 * we find one that hasn't expired.
		 */
		delay = (int32_t)(ifaddr->dad_start +
				  DAD_TIMEOUT - current_time);
		if (delay > 0) {
			break;
		}

		/* Removing the ifaddr from active_dad_timers list */
		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);
		sys_slist_append(&expired_list, &ifaddr->dad_node);

		/* NULL marks that the list was fully drained; the
		 * reschedule below only happens when the loop stopped
		 * at a still-pending entry (break above).
		 */
		ifaddr = NULL;
	}

	if ((ifaddr != NULL) && (delay > 0)) {
		k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
	}

	k_mutex_unlock(&lock);

	/* Finalize expired entries without holding the global lock. */
	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ifaddr, dad_node) {
		struct net_if *iface;

		NET_DBG("DAD succeeded for %s at interface %d",
			net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
			ifaddr->ifindex);

		ifaddr->addr_state = NET_ADDR_PREFERRED;
		iface = net_if_get_by_index(ifaddr->ifindex);

		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_SUCCEED,
						iface,
						&ifaddr->address.in6_addr,
						sizeof(struct in6_addr));

		/* The address gets added to neighbor cache which is not
		 * needed in this case as the address is our own one.
		 */
		net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
	}
}
1282
/* Begin Duplicate Address Detection for @p ifaddr on @p iface.
 *
 * Marks the address tentative. If the interface is up, the first DAD
 * probe is sent, the start time recorded and the shared DAD timer
 * armed. If the interface is down, DAD is deferred (net_if_start_dad()
 * picks the address up once the interface comes up).
 */
void net_if_ipv6_start_dad(struct net_if *iface,
			   struct net_if_addr *ifaddr)
{
	ifaddr->addr_state = NET_ADDR_TENTATIVE;

	if (net_if_is_up(iface)) {
		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
			iface,
			net_sprint_ll_addr(
				net_if_get_link_addr(iface)->addr,
				net_if_get_link_addr(iface)->len),
			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));

		ifaddr->dad_count = 1U;

		if (net_ipv6_start_dad(iface, ifaddr) != 0) {
			NET_ERR("Interface %p failed to send DAD query for %s",
				iface,
				net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
		}

		ifaddr->dad_start = k_uptime_get_32();
		ifaddr->ifindex = net_if_get_by_iface(iface);

		/* Remove first so the node can never be linked twice
		 * into the active timer list.
		 */
		k_mutex_lock(&lock, K_FOREVER);
		sys_slist_find_and_remove(&active_dad_timers,
					  &ifaddr->dad_node);
		sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
		k_mutex_unlock(&lock);

		/* FUTURE: use schedule, not reschedule. */
		if (!k_work_delayable_remaining_get(&dad_timer)) {
			k_work_reschedule(&dad_timer,
					  K_MSEC(DAD_TIMEOUT));
		}
	} else {
		NET_DBG("Interface %p is down, starting DAD for %s later.",
			iface,
			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
	}
}
1324
/* Run DAD for an interface that is (or has just come) up.
 *
 * Generates an IID-based autoconf address, adds it (which starts DAD
 * for it), then starts DAD for the other unicast addresses that were
 * added while the interface was down. The per-address DAD start
 * happens after dropping the iface lock to avoid deadlocks.
 */
void net_if_start_dad(struct net_if *iface)
{
	struct net_if_addr *ifaddr, *next;
	struct net_if_ipv6 *ipv6;
	sys_slist_t dad_needed;
	struct in6_addr addr = { };
	int ret;

	net_if_lock(iface);

	NET_DBG("Starting DAD for iface %p", iface);

	ret = net_if_config_ipv6_get(iface, &ipv6);
	if (ret < 0) {
		if (ret != -ENOTSUP) {
			NET_WARN("Cannot do DAD IPv6 config is not valid.");
		}

		goto out;
	}

	if (!ipv6) {
		goto out;
	}

	/* The stable-IID inputs (network counter, previous DAD count)
	 * are only passed when CONFIG_NET_IPV6_IID_STABLE is enabled;
	 * otherwise the IID is generated from the link address alone.
	 */
	ret = net_ipv6_addr_generate_iid(iface, NULL,
					 COND_CODE_1(CONFIG_NET_IPV6_IID_STABLE,
						     ((uint8_t *)&ipv6->network_counter),
						     (NULL)),
					 COND_CODE_1(CONFIG_NET_IPV6_IID_STABLE,
						     (sizeof(ipv6->network_counter)),
						     (0U)),
					 COND_CODE_1(CONFIG_NET_IPV6_IID_STABLE,
						     (ipv6->iid ? ipv6->iid->dad_count : 0U),
						     (0U)),
					 &addr,
					 net_if_get_link_addr(iface));
	if (ret < 0) {
		NET_WARN("IPv6 IID generation issue (%d)", ret);
		goto out;
	}

	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
	if (!ifaddr) {
		NET_ERR("Cannot add %s address to interface %p, DAD fails",
			net_sprint_ipv6_addr(&addr), iface);
		goto out;
	}

	IF_ENABLED(CONFIG_NET_IPV6_IID_STABLE, (ipv6->iid = ifaddr));

	/* Start DAD for all the addresses that were added earlier when
	 * the interface was down.
	 */
	sys_slist_init(&dad_needed);

	ARRAY_FOR_EACH(ipv6->unicast, i) {
		if (!ipv6->unicast[i].is_used ||
		    ipv6->unicast[i].address.family != AF_INET6 ||
		    &ipv6->unicast[i] == ifaddr ||
		    net_ipv6_is_addr_loopback(
			    &ipv6->unicast[i].address.in6_addr)) {
			continue;
		}

		sys_slist_prepend(&dad_needed, &ipv6->unicast[i].dad_need_node);
	}

	net_if_unlock(iface);

	/* Start DAD for all the addresses without holding the iface lock
	 * to avoid any possible mutex deadlock issues.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&dad_needed,
					  ifaddr, next, dad_need_node) {
		net_if_ipv6_start_dad(iface, ifaddr);
	}

	return;

out:
	net_if_unlock(iface);
}
1408
/* Handle a failed Duplicate Address Detection for @p addr.
 *
 * Notifies listeners, removes the conflicting address from the
 * interface and, when IPv6 Privacy Extensions are enabled on the
 * interface, starts generation of a replacement PE address.
 */
void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
{
	struct net_if_addr *ifaddr;
	uint32_t timeout, preferred_lifetime;

	net_if_lock(iface);

	ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
	if (!ifaddr) {
		NET_ERR("Cannot find %s address in interface %p",
			net_sprint_ipv6_addr(addr), iface);
		goto out;
	}

	if (IS_ENABLED(CONFIG_NET_IPV6_IID_STABLE) || IS_ENABLED(CONFIG_NET_IPV6_PE)) {
		ifaddr->dad_count++;
	}

	if (IS_ENABLED(CONFIG_NET_IPV6_PE)) {
		/* COND_CODE_1 keeps this compilable - presumably because
		 * the PE-only fields exist in struct net_if_addr only when
		 * CONFIG_NET_IPV6_PE is set; TODO confirm.
		 */
		timeout = COND_CODE_1(CONFIG_NET_IPV6_PE,
				      (ifaddr->addr_timeout), (0));
		preferred_lifetime = COND_CODE_1(CONFIG_NET_IPV6_PE,
						 (ifaddr->addr_preferred_lifetime), (0U));

		if (!net_ipv6_pe_check_dad(ifaddr->dad_count)) {
			NET_ERR("Cannot generate PE address for interface %p",
				iface);
			iface->pe_enabled = false;
			net_mgmt_event_notify(NET_EVENT_IPV6_PE_DISABLED, iface);
		}
	}

	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
					&ifaddr->address.in6_addr,
					sizeof(struct in6_addr));

	/* The old address needs to be removed from the interface before we can
	 * start new DAD for the new PE address as the amount of address slots
	 * is limited.
	 */
	net_if_ipv6_addr_rm(iface, addr);

	if (IS_ENABLED(CONFIG_NET_IPV6_PE) && iface->pe_enabled) {
		net_if_unlock(iface);

		net_ipv6_pe_start(iface, addr, timeout, preferred_lifetime);
		return;
	}

out:
	net_if_unlock(iface);
}
1461
iface_ipv6_dad_init(void)1462 static inline void iface_ipv6_dad_init(void)
1463 {
1464 k_work_init_delayable(&dad_timer, dad_timeout);
1465 sys_slist_init(&active_dad_timers);
1466 }
1467
#else
/* DAD disabled: the start/init hooks compile away to no-ops. */
#define net_if_ipv6_start_dad(...)
#define iface_ipv6_dad_init(...)
#endif /* CONFIG_NET_IPV6_DAD */
1472
1473 #if defined(CONFIG_NET_IPV6_ND)
1474 #define RS_TIMEOUT (CONFIG_NET_IPV6_RS_TIMEOUT * MSEC_PER_SEC)
1475 #define RS_COUNT 3
1476
rs_timeout(struct k_work * work)1477 static void rs_timeout(struct k_work *work)
1478 {
1479 uint32_t current_time = k_uptime_get_32();
1480 struct net_if_ipv6 *ipv6, *next;
1481 int32_t delay = -1;
1482 sys_slist_t expired_list;
1483
1484 ARG_UNUSED(work);
1485
1486 sys_slist_init(&expired_list);
1487
1488 k_mutex_lock(&lock, K_FOREVER);
1489
1490 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
1491 ipv6, next, rs_node) {
1492 /* RS entries are ordered by construction. Stop when
1493 * we find one that hasn't expired.
1494 */
1495 delay = (int32_t)(ipv6->rs_start + RS_TIMEOUT - current_time);
1496 if (delay > 0) {
1497 break;
1498 }
1499
1500 /* Removing the ipv6 from active_rs_timers list */
1501 sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);
1502 sys_slist_append(&expired_list, &ipv6->rs_node);
1503
1504 ipv6 = NULL;
1505 }
1506
1507 if ((ipv6 != NULL) && (delay > 0)) {
1508 k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
1509 RS_TIMEOUT - current_time));
1510 }
1511
1512 k_mutex_unlock(&lock);
1513
1514 SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ipv6, rs_node) {
1515 struct net_if *iface = NULL;
1516
1517 /* Did not receive RA yet. */
1518 ipv6->rs_count++;
1519
1520 STRUCT_SECTION_FOREACH(net_if, tmp) {
1521 if (tmp->config.ip.ipv6 == ipv6) {
1522 iface = tmp;
1523 break;
1524 }
1525 }
1526
1527 if (iface) {
1528 NET_DBG("RS no respond iface %p count %d",
1529 iface, ipv6->rs_count);
1530 if (ipv6->rs_count < RS_COUNT) {
1531 net_if_start_rs(iface);
1532 }
1533 } else {
1534 NET_DBG("Interface IPv6 config %p not found", ipv6);
1535 }
1536 }
1537 }
1538
/* Start sending IPv6 Router Solicitations on @p iface.
 *
 * Skipped when ND is disabled for the interface or it has no IPv6
 * config. On a successful send, records the start time, links the
 * config into the RS retransmit timer list and arms the timer.
 */
void net_if_start_rs(struct net_if *iface)
{
	struct net_if_ipv6 *ipv6;

	net_if_lock(iface);

	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
		goto out;
	}

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	/* NOTE(review): the iface lock is released before ipv6 is used
	 * below - presumably safe because the config block is not freed
	 * meanwhile; confirm against net_if_config_ipv6_put().
	 */
	net_if_unlock(iface);

	NET_DBG("Starting ND/RS for iface %p", iface);

	if (!net_ipv6_start_rs(iface)) {
		ipv6->rs_start = k_uptime_get_32();

		k_mutex_lock(&lock, K_FOREVER);
		sys_slist_append(&active_rs_timers, &ipv6->rs_node);
		k_mutex_unlock(&lock);

		/* FUTURE: use schedule, not reschedule. */
		if (!k_work_delayable_remaining_get(&rs_timer)) {
			k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
		}
	}

	return;
out:
	net_if_unlock(iface);
}
1575
net_if_stop_rs(struct net_if * iface)1576 void net_if_stop_rs(struct net_if *iface)
1577 {
1578 struct net_if_ipv6 *ipv6;
1579
1580 net_if_lock(iface);
1581
1582 ipv6 = iface->config.ip.ipv6;
1583 if (!ipv6) {
1584 goto out;
1585 }
1586
1587 NET_DBG("Stopping ND/RS for iface %p", iface);
1588
1589 k_mutex_lock(&lock, K_FOREVER);
1590 sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
1591 k_mutex_unlock(&lock);
1592
1593 out:
1594 net_if_unlock(iface);
1595 }
1596
iface_ipv6_nd_init(void)1597 static inline void iface_ipv6_nd_init(void)
1598 {
1599 k_work_init_delayable(&rs_timer, rs_timeout);
1600 sys_slist_init(&active_rs_timers);
1601 }
1602
#else
/* IPv6 ND disabled: the RS hooks compile away to no-ops. */
#define net_if_start_rs(...)
#define net_if_stop_rs(...)
#define iface_ipv6_nd_init(...)
#endif /* CONFIG_NET_IPV6_ND */
1608
1609 #if defined(CONFIG_NET_IPV6_ND) && defined(CONFIG_NET_NATIVE_IPV6)
1610
net_if_nbr_reachability_hint(struct net_if * iface,const struct in6_addr * ipv6_addr)1611 void net_if_nbr_reachability_hint(struct net_if *iface, const struct in6_addr *ipv6_addr)
1612 {
1613 net_if_lock(iface);
1614
1615 if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1616 goto out;
1617 }
1618
1619 if (!iface->config.ip.ipv6) {
1620 goto out;
1621 }
1622
1623 net_ipv6_nbr_reachability_hint(iface, ipv6_addr);
1624
1625 out:
1626 net_if_unlock(iface);
1627 }
1628
1629 #endif
1630
1631 /* To be called when interface comes up so that all the non-joined multicast
1632 * groups are joined.
1633 */
static void rejoin_ipv6_mcast_groups(struct net_if *iface)
{
	struct net_if_mcast_addr *ifaddr, *next;
	struct net_if_ipv6 *ipv6;
	sys_slist_t rejoin_needed;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
		goto out;
	}

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		goto out;
	}

	/* Rejoin solicited node multicasts. */
	ARRAY_FOR_EACH(ipv6->unicast, i) {
		if (!ipv6->unicast[i].is_used) {
			continue;
		}

		join_mcast_nodes(iface, &ipv6->unicast[i].address.in6_addr);
	}

	sys_slist_init(&rejoin_needed);

	/* Rejoin any mcast address present on the interface, but marked as not joined. */
	ARRAY_FOR_EACH(ipv6->mcast, i) {
		if (!ipv6->mcast[i].is_used ||
		    net_if_ipv6_maddr_is_joined(&ipv6->mcast[i])) {
			continue;
		}

		sys_slist_prepend(&rejoin_needed, &ipv6->mcast[i].rejoin_node);
	}

	net_if_unlock(iface);

	/* Rejoin the collected multicast groups without holding the iface
	 * lock to avoid any possible mutex deadlock issues.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&rejoin_needed,
					  ifaddr, next, rejoin_node) {
		int ret;

		ret = net_ipv6_mld_join(iface, &ifaddr->address.in6_addr);
		if (ret < 0) {
			NET_ERR("Cannot join mcast address %s for %d (%d)",
				net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
				net_if_get_by_iface(iface), ret);
		} else {
			NET_DBG("Rejoined mcast address %s for %d",
				net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
				net_if_get_by_iface(iface));
		}
	}

	return;

out:
	net_if_unlock(iface);
}
1698
1699 /* To be called when interface comes operational down so that multicast
1700 * groups are rejoined when back up.
1701 */
clear_joined_ipv6_mcast_groups(struct net_if * iface)1702 static void clear_joined_ipv6_mcast_groups(struct net_if *iface)
1703 {
1704 struct net_if_ipv6 *ipv6;
1705
1706 net_if_lock(iface);
1707
1708 if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1709 goto out;
1710 }
1711
1712 if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1713 goto out;
1714 }
1715
1716 ARRAY_FOR_EACH(ipv6->mcast, i) {
1717 if (!ipv6->mcast[i].is_used) {
1718 continue;
1719 }
1720
1721 net_if_ipv6_maddr_leave(iface, &ipv6->mcast[i]);
1722 }
1723
1724 out:
1725 net_if_unlock(iface);
1726 }
1727
address_expired(struct net_if_addr * ifaddr)1728 static void address_expired(struct net_if_addr *ifaddr)
1729 {
1730 NET_DBG("IPv6 address %s is expired",
1731 net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1732
1733 sys_slist_find_and_remove(&active_address_lifetime_timers,
1734 &ifaddr->lifetime.node);
1735
1736 net_timeout_set(&ifaddr->lifetime, 0, 0);
1737
1738 STRUCT_SECTION_FOREACH(net_if, iface) {
1739 ARRAY_FOR_EACH(iface->config.ip.ipv6->unicast, i) {
1740 if (&iface->config.ip.ipv6->unicast[i] == ifaddr) {
1741 net_if_ipv6_addr_rm(iface,
1742 &iface->config.ip.ipv6->unicast[i].address.in6_addr);
1743 return;
1744 }
1745 }
1746 }
1747 }
1748
/* Work handler for IPv6 address lifetime expiry.
 *
 * Evaluates every tracked lifetime, removes expired addresses and
 * reschedules itself for the earliest remaining deadline.
 */
static void address_lifetime_timeout(struct k_work *work)
{
	uint32_t next_update = UINT32_MAX;
	uint32_t current_time = k_uptime_get_32();
	struct net_if_addr *current, *next;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
					  current, next, lifetime.node) {
		struct net_timeout *timeout = &current->lifetime;
		uint32_t this_update = net_timeout_evaluate(timeout,
							    current_time);

		if (this_update == 0U) {
			/* Lifetime ran out: remove the address entirely. */
			address_expired(current);
			continue;
		}

		if (this_update < next_update) {
			next_update = this_update;
		}

		if (current == next) {
			/* NOTE(review): stops when the saved next pointer
			 * equals the current node - appears to guard against
			 * list manipulation during iteration; confirm.
			 */
			break;
		}
	}

	if (next_update != UINT32_MAX) {
		NET_DBG("Waiting for %d ms", (int32_t)next_update);

		k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
	}

	k_mutex_unlock(&lock);
}
1787
1788 #if defined(CONFIG_NET_TEST)
/* Test-only hook: run the address lifetime work handler synchronously. */
void net_address_lifetime_timeout(void)
{
	address_lifetime_timeout(NULL);
}
1793 #endif
1794
address_start_timer(struct net_if_addr * ifaddr,uint32_t vlifetime)1795 static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
1796 {
1797 /* Make sure that we do not insert the address twice to
1798 * the lifetime timer list.
1799 */
1800 sys_slist_find_and_remove(&active_address_lifetime_timers,
1801 &ifaddr->lifetime.node);
1802
1803 sys_slist_append(&active_address_lifetime_timers,
1804 &ifaddr->lifetime.node);
1805
1806 net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
1807 k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
1808 }
#else /* CONFIG_NET_NATIVE_IPV6 */
/* Native IPv6 disabled: these helpers compile away to no-ops. */
#define address_start_timer(...)
#define net_if_ipv6_start_dad(...)
#define join_mcast_nodes(...)
#endif /* CONFIG_NET_NATIVE_IPV6 */
1814
net_if_ipv6_addr_lookup(const struct in6_addr * addr,struct net_if ** ret)1815 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
1816 struct net_if **ret)
1817 {
1818 struct net_if_addr *ifaddr = NULL;
1819
1820 STRUCT_SECTION_FOREACH(net_if, iface) {
1821 struct net_if_ipv6 *ipv6;
1822
1823 net_if_lock(iface);
1824
1825 ipv6 = iface->config.ip.ipv6;
1826 if (!ipv6) {
1827 net_if_unlock(iface);
1828 continue;
1829 }
1830
1831 ARRAY_FOR_EACH(ipv6->unicast, i) {
1832 if (!ipv6->unicast[i].is_used ||
1833 ipv6->unicast[i].address.family != AF_INET6) {
1834 continue;
1835 }
1836
1837 if (net_ipv6_is_prefix(
1838 addr->s6_addr,
1839 ipv6->unicast[i].address.in6_addr.s6_addr,
1840 128)) {
1841
1842 if (ret) {
1843 *ret = iface;
1844 }
1845
1846 ifaddr = &ipv6->unicast[i];
1847 net_if_unlock(iface);
1848 goto out;
1849 }
1850 }
1851
1852 net_if_unlock(iface);
1853 }
1854
1855 out:
1856 return ifaddr;
1857 }
1858
net_if_ipv6_addr_lookup_by_iface(struct net_if * iface,struct in6_addr * addr)1859 struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
1860 struct in6_addr *addr)
1861 {
1862 struct net_if_addr *ifaddr = NULL;
1863 struct net_if_ipv6 *ipv6;
1864
1865 net_if_lock(iface);
1866
1867 ipv6 = iface->config.ip.ipv6;
1868 if (!ipv6) {
1869 goto out;
1870 }
1871
1872 ARRAY_FOR_EACH(ipv6->unicast, i) {
1873 if (!ipv6->unicast[i].is_used ||
1874 ipv6->unicast[i].address.family != AF_INET6) {
1875 continue;
1876 }
1877
1878 if (net_ipv6_is_prefix(
1879 addr->s6_addr,
1880 ipv6->unicast[i].address.in6_addr.s6_addr,
1881 128)) {
1882 ifaddr = &ipv6->unicast[i];
1883 goto out;
1884 }
1885 }
1886
1887 out:
1888 net_if_unlock(iface);
1889
1890 return ifaddr;
1891 }
1892
z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr * addr)1893 int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
1894 {
1895 struct net_if *iface = NULL;
1896 struct net_if_addr *if_addr;
1897
1898 if_addr = net_if_ipv6_addr_lookup(addr, &iface);
1899 if (!if_addr) {
1900 return 0;
1901 }
1902
1903 return net_if_get_by_iface(iface);
1904 }
1905
1906 #ifdef CONFIG_USERSPACE
/* Syscall verifier: copy the address out of user memory before calling
 * the kernel-side implementation.
 */
static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
					  const struct in6_addr *addr)
{
	struct in6_addr addr_v6;

	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
}
1916 #include <zephyr/syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
1917 #endif
1918
/* Refresh the valid lifetime of @p ifaddr and mark it preferred.
 *
 * @param vlifetime New valid lifetime in seconds.
 */
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
				      uint32_t vlifetime)
{
	k_mutex_lock(&lock, K_FOREVER);

	NET_DBG("Updating expire time of %s by %u secs",
		net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
		vlifetime);

	ifaddr->addr_state = NET_ADDR_PREFERRED;

	address_start_timer(ifaddr, vlifetime);

	k_mutex_unlock(&lock);
}
1934
ipv6_addr_find(struct net_if * iface,struct in6_addr * addr)1935 static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
1936 struct in6_addr *addr)
1937 {
1938 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1939
1940 ARRAY_FOR_EACH(ipv6->unicast, i) {
1941 if (!ipv6->unicast[i].is_used) {
1942 continue;
1943 }
1944
1945 if (net_ipv6_addr_cmp(
1946 addr, &ipv6->unicast[i].address.in6_addr)) {
1947
1948 return &ipv6->unicast[i];
1949 }
1950 }
1951
1952 return NULL;
1953 }
1954
net_if_addr_init(struct net_if_addr * ifaddr,struct in6_addr * addr,enum net_addr_type addr_type,uint32_t vlifetime)1955 static inline void net_if_addr_init(struct net_if_addr *ifaddr,
1956 struct in6_addr *addr,
1957 enum net_addr_type addr_type,
1958 uint32_t vlifetime)
1959 {
1960 ifaddr->is_used = true;
1961 ifaddr->is_added = true;
1962 ifaddr->is_temporary = false;
1963 ifaddr->address.family = AF_INET6;
1964 ifaddr->addr_type = addr_type;
1965 ifaddr->atomic_ref = ATOMIC_INIT(1);
1966
1967 net_ipaddr_copy(&ifaddr->address.in6_addr, addr);
1968
1969 /* FIXME - set the mcast addr for this node */
1970
1971 if (vlifetime) {
1972 ifaddr->is_infinite = false;
1973
1974 NET_DBG("Expiring %s in %u secs",
1975 net_sprint_ipv6_addr(addr),
1976 vlifetime);
1977
1978 net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
1979 } else {
1980 ifaddr->is_infinite = true;
1981 }
1982 }
1983
/* Add a unicast IPv6 address to an interface.
 *
 * If the address already exists, the existing entry is returned (its
 * reference count restored if needed). Otherwise the first free slot
 * is initialized. Unless DAD is skipped (point-to-point link, loopback
 * address or ND disabled), the required multicast groups are joined
 * and DAD is started after the iface lock has been released.
 *
 * @return the address entry, or NULL if no slot was free or the
 *         interface has no IPv6 config.
 */
struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
					 struct in6_addr *addr,
					 enum net_addr_type addr_type,
					 uint32_t vlifetime)
{
	struct net_if_addr *ifaddr = NULL;
	struct net_if_ipv6 *ipv6;
	bool do_dad = false;

	net_if_lock(iface);

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		goto out;
	}

	ifaddr = ipv6_addr_find(iface, addr);
	if (ifaddr) {
		/* Address already exists, just return it but update ref count
		 * if it was not updated. This could happen if the address was
		 * added and then removed but for example an active connection
		 * was still using it. In this case we must update the ref count
		 * so that the address is not removed if the connection is closed.
		 */
		if (!ifaddr->is_added) {
			atomic_inc(&ifaddr->atomic_ref);
			ifaddr->is_added = true;
		}

		goto out;
	}

	ARRAY_FOR_EACH(ipv6->unicast, i) {
		if (ipv6->unicast[i].is_used) {
			continue;
		}

		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
				 vlifetime);

		NET_DBG("[%zu] interface %d (%p) address %s type %s added", i,
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv6_addr(addr),
			net_addr_type2str(addr_type));

		if (IS_ENABLED(CONFIG_NET_IPV6_DAD) &&
		    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
		    !net_ipv6_is_addr_loopback(addr) &&
		    !net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
			/* The groups are joined without locks held */
			do_dad = true;
		} else {
			/* If DAD is not done for point-to-point links, then
			 * the address is usable immediately.
			 */
			ipv6->unicast[i].addr_state = NET_ADDR_PREFERRED;
		}

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_ADDR_ADD, iface,
			&ipv6->unicast[i].address.in6_addr,
			sizeof(struct in6_addr));

		ifaddr = &ipv6->unicast[i];
		break;
	}

	net_if_unlock(iface);

	if (ifaddr != NULL && do_dad) {
		/* RFC 4862 5.4.2
		 * Before sending a Neighbor Solicitation, an interface
		 * MUST join the all-nodes multicast address and the
		 * solicited-node multicast address of the tentative
		 * address.
		 */
		/* The allnodes multicast group is only joined once as
		 * net_ipv6_mld_join() checks if we have already
		 * joined.
		 */
		join_mcast_nodes(iface, &ifaddr->address.in6_addr);

		net_if_ipv6_start_dad(iface, ifaddr);
	}

	return ifaddr;

out:
	net_if_unlock(iface);

	return ifaddr;
}
2075
/* Remove (dereference) an IPv6 address from an interface.
 *
 * Drops one reference via net_if_addr_unref(); the address only goes
 * away when its reference count reaches zero. Returns false on NULL
 * arguments, missing IPv6 config, or when the address is still in use.
 * NOTE(review): when the address is not found (unref returns < 0) the
 * function still returns true - confirm this is intentional.
 */
bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
{
	struct net_if_addr *ifaddr;
	struct net_if_ipv6 *ipv6;
	bool result = true;
	int ret;

	if (iface == NULL || addr == NULL) {
		return false;
	}

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		result = false;
		goto out;
	}

	ret = net_if_addr_unref(iface, AF_INET6, addr, &ifaddr);
	if (ret > 0) {
		NET_DBG("Address %s still in use (ref %d)",
			net_sprint_ipv6_addr(addr), ret);
		result = false;
		/* Keep the entry but remember it is no longer "added". */
		ifaddr->is_added = false;
		goto out;
	} else if (ret < 0) {
		NET_DBG("Address %s not found (%d)",
			net_sprint_ipv6_addr(addr), ret);
	}

out:
	net_if_unlock(iface);

	return result;
}
2112
z_impl_net_if_ipv6_addr_add_by_index(int index,struct in6_addr * addr,enum net_addr_type addr_type,uint32_t vlifetime)2113 bool z_impl_net_if_ipv6_addr_add_by_index(int index,
2114 struct in6_addr *addr,
2115 enum net_addr_type addr_type,
2116 uint32_t vlifetime)
2117 {
2118 struct net_if *iface;
2119
2120 iface = net_if_get_by_index(index);
2121 if (!iface) {
2122 return false;
2123 }
2124
2125 return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
2126 true : false;
2127 }
2128
2129 #ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the interface index and copy the address
 * out of user memory before calling the kernel-side implementation.
 */
bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
					  struct in6_addr *addr,
					  enum net_addr_type addr_type,
					  uint32_t vlifetime)
{
	struct in6_addr addr_v6;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_add_by_index(index,
						    &addr_v6,
						    addr_type,
						    vlifetime);
}
2150
2151 #include <zephyr/syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
2152 #endif /* CONFIG_USERSPACE */
2153
z_impl_net_if_ipv6_addr_rm_by_index(int index,const struct in6_addr * addr)2154 bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
2155 const struct in6_addr *addr)
2156 {
2157 struct net_if *iface;
2158
2159 iface = net_if_get_by_index(index);
2160 if (!iface) {
2161 return false;
2162 }
2163
2164 return net_if_ipv6_addr_rm(iface, addr);
2165 }
2166
2167 #ifdef CONFIG_USERSPACE
/* Syscall verifier: validate the interface index and copy the address
 * out of user memory before calling the kernel-side implementation.
 */
bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
					 const struct in6_addr *addr)
{
	struct in6_addr addr_v6;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
}
2183
2184 #include <zephyr/syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
2185 #endif /* CONFIG_USERSPACE */
2186
/* Invoke @p cb for every in-use unicast IPv6 address on @p iface,
 * passing @p user_data through. No-op on NULL arguments or when the
 * interface has no IPv6 config. The iface lock is held across calls.
 */
void net_if_ipv6_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
			      void *user_data)
{
	struct net_if_ipv6 *ipv6;

	/* Guard both parameters, consistent with
	 * net_if_ipv6_maddr_foreach(); previously a NULL callback would
	 * have been dereferenced in the loop below.
	 */
	if (iface == NULL || cb == NULL) {
		return;
	}

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (ipv6 == NULL) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv6->unicast, i) {
		struct net_if_addr *if_addr = &ipv6->unicast[i];

		if (!if_addr->is_used) {
			continue;
		}

		cb(iface, if_addr, user_data);
	}

out:
	net_if_unlock(iface);
}
2216
net_if_ipv6_maddr_add(struct net_if * iface,const struct in6_addr * addr)2217 struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
2218 const struct in6_addr *addr)
2219 {
2220 struct net_if_mcast_addr *ifmaddr = NULL;
2221 struct net_if_ipv6 *ipv6;
2222
2223 net_if_lock(iface);
2224
2225 if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2226 goto out;
2227 }
2228
2229 if (!net_ipv6_is_addr_mcast(addr)) {
2230 NET_DBG("Address %s is not a multicast address.",
2231 net_sprint_ipv6_addr(addr));
2232 goto out;
2233 }
2234
2235 if (net_if_ipv6_maddr_lookup(addr, &iface)) {
2236 NET_WARN("Multicast address %s is already registered.",
2237 net_sprint_ipv6_addr(addr));
2238 goto out;
2239 }
2240
2241 ARRAY_FOR_EACH(ipv6->mcast, i) {
2242 if (ipv6->mcast[i].is_used) {
2243 continue;
2244 }
2245
2246 ipv6->mcast[i].is_used = true;
2247 ipv6->mcast[i].address.family = AF_INET6;
2248 memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
2249
2250 NET_DBG("[%zu] interface %d (%p) address %s added", i,
2251 net_if_get_by_iface(iface), iface,
2252 net_sprint_ipv6_addr(addr));
2253
2254 net_mgmt_event_notify_with_info(
2255 NET_EVENT_IPV6_MADDR_ADD, iface,
2256 &ipv6->mcast[i].address.in6_addr,
2257 sizeof(struct in6_addr));
2258
2259 ifmaddr = &ipv6->mcast[i];
2260 goto out;
2261 }
2262
2263 out:
2264 net_if_unlock(iface);
2265
2266 return ifmaddr;
2267 }
2268
net_if_ipv6_maddr_rm(struct net_if * iface,const struct in6_addr * addr)2269 bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
2270 {
2271 bool ret = false;
2272 struct net_if_ipv6 *ipv6;
2273
2274 net_if_lock(iface);
2275
2276 ipv6 = iface->config.ip.ipv6;
2277 if (!ipv6) {
2278 goto out;
2279 }
2280
2281 ARRAY_FOR_EACH(ipv6->mcast, i) {
2282 if (!ipv6->mcast[i].is_used) {
2283 continue;
2284 }
2285
2286 if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
2287 addr)) {
2288 continue;
2289 }
2290
2291 ipv6->mcast[i].is_used = false;
2292
2293 NET_DBG("[%zu] interface %d (%p) address %s removed",
2294 i, net_if_get_by_iface(iface), iface,
2295 net_sprint_ipv6_addr(addr));
2296
2297 net_mgmt_event_notify_with_info(
2298 NET_EVENT_IPV6_MADDR_DEL, iface,
2299 &ipv6->mcast[i].address.in6_addr,
2300 sizeof(struct in6_addr));
2301
2302 ret = true;
2303 goto out;
2304 }
2305
2306 out:
2307 net_if_unlock(iface);
2308
2309 return ret;
2310 }
2311
/* Invoke @p cb for every in-use multicast IPv6 address on @p iface,
 * passing @p user_data through. No-op on NULL arguments or when the
 * interface has no IPv6 config.
 */
void net_if_ipv6_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
			       void *user_data)
{
	struct net_if_ipv6 *ipv6;

	if (iface == NULL || cb == NULL) {
		return;
	}

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (ipv6 != NULL) {
		for (int idx = 0; idx < NET_IF_MAX_IPV6_MADDR; idx++) {
			if (ipv6->mcast[idx].is_used) {
				cb(iface, &ipv6->mcast[idx], user_data);
			}
		}
	}

	net_if_unlock(iface);
}
2339
/* Find which interface has registered the given IPv6 multicast address.
 * If *ret is preset to an interface, only that interface is searched.
 * On a match, *ret (when non-NULL) is set to the owning interface and the
 * multicast address entry is returned; NULL when not found.
 */
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
						   struct net_if **ret)
{
	struct net_if_mcast_addr *ifmaddr = NULL;

	STRUCT_SECTION_FOREACH(net_if, iface) {
		struct net_if_ipv6 *ipv6;

		/* Caller restricted the search to a specific interface. */
		if (ret && *ret && iface != *ret) {
			continue;
		}

		net_if_lock(iface);

		ipv6 = iface->config.ip.ipv6;
		if (!ipv6) {
			net_if_unlock(iface);
			continue;
		}

		ARRAY_FOR_EACH(ipv6->mcast, i) {
			if (!ipv6->mcast[i].is_used ||
			    ipv6->mcast[i].address.family != AF_INET6) {
				continue;
			}

			/* 128-bit prefix comparison == exact address match */
			if (net_ipv6_is_prefix(
				    maddr->s6_addr,
				    ipv6->mcast[i].address.in6_addr.s6_addr,
				    128)) {
				if (ret) {
					*ret = iface;
				}

				ifmaddr = &ipv6->mcast[i];
				net_if_unlock(iface);
				goto out;
			}
		}

		net_if_unlock(iface);
	}

out:
	return ifmaddr;
}
2386
net_if_ipv6_maddr_leave(struct net_if * iface,struct net_if_mcast_addr * addr)2387 void net_if_ipv6_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
2388 {
2389 if (iface == NULL || addr == NULL) {
2390 return;
2391 }
2392
2393 net_if_lock(iface);
2394 addr->is_joined = false;
2395 net_if_unlock(iface);
2396 }
2397
net_if_ipv6_maddr_join(struct net_if * iface,struct net_if_mcast_addr * addr)2398 void net_if_ipv6_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
2399 {
2400 if (iface == NULL || addr == NULL) {
2401 return;
2402 }
2403
2404 net_if_lock(iface);
2405 addr->is_joined = true;
2406 net_if_unlock(iface);
2407 }
2408
net_if_ipv6_get_ll(struct net_if * iface,enum net_addr_state addr_state)2409 struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
2410 enum net_addr_state addr_state)
2411 {
2412 struct in6_addr *addr = NULL;
2413 struct net_if_ipv6 *ipv6;
2414
2415 net_if_lock(iface);
2416
2417 ipv6 = iface->config.ip.ipv6;
2418 if (!ipv6) {
2419 goto out;
2420 }
2421
2422 ARRAY_FOR_EACH(ipv6->unicast, i) {
2423 if (!ipv6->unicast[i].is_used ||
2424 (addr_state != NET_ADDR_ANY_STATE &&
2425 ipv6->unicast[i].addr_state != addr_state) ||
2426 ipv6->unicast[i].address.family != AF_INET6) {
2427 continue;
2428 }
2429
2430 if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2431 addr = &ipv6->unicast[i].address.in6_addr;
2432 goto out;
2433 }
2434 }
2435
2436 out:
2437 net_if_unlock(iface);
2438
2439 return addr;
2440 }
2441
/* Return the first link-local IPv6 address in the given state found on any
 * interface.  On success *iface (when non-NULL) is set to the owner.
 */
struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
					 struct net_if **iface)
{
	struct in6_addr *addr = NULL;

	STRUCT_SECTION_FOREACH(net_if, tmp) {
		net_if_lock(tmp);

		/* NOTE(review): net_if_ipv6_get_ll() takes the same
		 * per-interface lock again; this assumes the lock is
		 * recursive for the calling thread — confirm.
		 */
		addr = net_if_ipv6_get_ll(tmp, state);
		if (addr) {
			if (iface) {
				*iface = tmp;
			}

			net_if_unlock(tmp);
			goto out;
		}

		net_if_unlock(tmp);
	}

out:
	return addr;
}
2466
check_global_addr(struct net_if * iface,enum net_addr_state state)2467 static inline struct in6_addr *check_global_addr(struct net_if *iface,
2468 enum net_addr_state state)
2469 {
2470 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2471
2472 if (!ipv6) {
2473 return NULL;
2474 }
2475
2476 ARRAY_FOR_EACH(ipv6->unicast, i) {
2477 if (!ipv6->unicast[i].is_used ||
2478 (ipv6->unicast[i].addr_state != state) ||
2479 ipv6->unicast[i].address.family != AF_INET6) {
2480 continue;
2481 }
2482
2483 if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2484 return &ipv6->unicast[i].address.in6_addr;
2485 }
2486 }
2487
2488 return NULL;
2489 }
2490
/* Return the first global (non link-local) IPv6 address in the given state.
 * If *iface is preset, only that interface is considered; on success *iface
 * (when non-NULL) is set to the owning interface.
 */
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
					     struct net_if **iface)
{
	struct in6_addr *addr = NULL;

	STRUCT_SECTION_FOREACH(net_if, tmp) {
		/* Caller restricted the search to a specific interface. */
		if (iface && *iface && tmp != *iface) {
			continue;
		}

		net_if_lock(tmp);
		addr = check_global_addr(tmp, state);
		if (addr) {
			if (iface) {
				*iface = tmp;
			}

			net_if_unlock(tmp);
			goto out;
		}

		net_if_unlock(tmp);
	}

out:

	return addr;
}
2519
2520 #if defined(CONFIG_NET_NATIVE_IPV6)
/* Remove every autoconfigured (SLAAC) IPv6 address on the interface that
 * falls inside the given prefix.  Called when a prefix expires or is
 * removed.
 */
static void remove_prefix_addresses(struct net_if *iface,
				    struct net_if_ipv6 *ipv6,
				    struct in6_addr *addr,
				    uint8_t len)
{
	ARRAY_FOR_EACH(ipv6->unicast, i) {
		struct net_if_addr *ifaddr = &ipv6->unicast[i];

		/* Only autoconf IPv6 addresses are tied to a prefix. */
		if (!ifaddr->is_used ||
		    ifaddr->address.family != AF_INET6 ||
		    ifaddr->addr_type != NET_ADDR_AUTOCONF) {
			continue;
		}

		if (net_ipv6_is_prefix(addr->s6_addr,
				       ifaddr->address.in6_addr.s6_addr,
				       len)) {
			net_if_ipv6_addr_rm(iface, &ifaddr->address.in6_addr);
		}
	}
}
2542
prefix_lifetime_expired(struct net_if_ipv6_prefix * ifprefix)2543 static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
2544 {
2545 struct net_if_ipv6 *ipv6;
2546
2547 net_if_lock(ifprefix->iface);
2548
2549 NET_DBG("Prefix %s/%d expired",
2550 net_sprint_ipv6_addr(&ifprefix->prefix),
2551 ifprefix->len);
2552
2553 ifprefix->is_used = false;
2554
2555 if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
2556 return;
2557 }
2558
2559 /* Remove also all auto addresses if the they have the same prefix.
2560 */
2561 remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
2562 ifprefix->len);
2563
2564 if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2565 struct net_event_ipv6_prefix info;
2566
2567 net_ipaddr_copy(&info.addr, &ifprefix->prefix);
2568 info.len = ifprefix->len;
2569 info.lifetime = 0;
2570
2571 net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2572 ifprefix->iface,
2573 (const void *) &info,
2574 sizeof(struct net_event_ipv6_prefix));
2575 } else {
2576 net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface);
2577 }
2578
2579 net_if_unlock(ifprefix->iface);
2580 }
2581
prefix_timer_remove(struct net_if_ipv6_prefix * ifprefix)2582 static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
2583 {
2584 k_mutex_lock(&lock, K_FOREVER);
2585
2586 NET_DBG("IPv6 prefix %s/%d removed",
2587 net_sprint_ipv6_addr(&ifprefix->prefix),
2588 ifprefix->len);
2589
2590 sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2591 &ifprefix->lifetime.node);
2592
2593 net_timeout_set(&ifprefix->lifetime, 0, 0);
2594
2595 k_mutex_unlock(&lock);
2596 }
2597
prefix_lifetime_timeout(struct k_work * work)2598 static void prefix_lifetime_timeout(struct k_work *work)
2599 {
2600 uint32_t next_update = UINT32_MAX;
2601 uint32_t current_time = k_uptime_get_32();
2602 struct net_if_ipv6_prefix *current, *next;
2603 sys_slist_t expired_list;
2604
2605 ARG_UNUSED(work);
2606
2607 sys_slist_init(&expired_list);
2608
2609 k_mutex_lock(&lock, K_FOREVER);
2610
2611 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
2612 current, next, lifetime.node) {
2613 struct net_timeout *timeout = ¤t->lifetime;
2614 uint32_t this_update = net_timeout_evaluate(timeout,
2615 current_time);
2616
2617 if (this_update == 0U) {
2618 sys_slist_find_and_remove(
2619 &active_prefix_lifetime_timers,
2620 ¤t->lifetime.node);
2621 sys_slist_append(&expired_list,
2622 ¤t->lifetime.node);
2623 continue;
2624 }
2625
2626 if (this_update < next_update) {
2627 next_update = this_update;
2628 }
2629
2630 if (current == next) {
2631 break;
2632 }
2633 }
2634
2635 if (next_update != UINT32_MAX) {
2636 k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
2637 }
2638
2639 k_mutex_unlock(&lock);
2640
2641 SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, current, lifetime.node) {
2642 prefix_lifetime_expired(current);
2643 }
2644 }
2645
/* Arm (or re-arm) the lifetime timer for an IPv6 prefix. */
static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
			       uint32_t lifetime)
{
	k_mutex_lock(&lock, K_FOREVER);

	/* Remove-then-append keeps the entry unique in the active list. */
	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
					&ifprefix->lifetime.node);
	sys_slist_append(&active_prefix_lifetime_timers,
			 &ifprefix->lifetime.node);

	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());

	/* Run the worker right away so the new deadline is evaluated. */
	k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);

	k_mutex_unlock(&lock);
}
2661
/* Find a prefix entry that matches both the prefix value and its length
 * exactly.  Returns NULL when the interface has no IPv6 config or no such
 * entry exists.
 */
static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
						   struct in6_addr *prefix,
						   uint8_t prefix_len)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;

	if (ipv6 == NULL) {
		return NULL;
	}

	ARRAY_FOR_EACH(ipv6->prefix, i) {
		struct net_if_ipv6_prefix *entry = &ipv6->prefix[i];

		if (entry->is_used && entry->len == prefix_len &&
		    net_ipv6_addr_cmp(prefix, &entry->prefix)) {
			return entry;
		}
	}

	return NULL;
}
2685
/* Populate a free prefix slot with the given prefix/length/owner. */
static void net_if_ipv6_prefix_init(struct net_if *iface,
				    struct net_if_ipv6_prefix *ifprefix,
				    struct in6_addr *addr, uint8_t len,
				    uint32_t lifetime)
{
	ifprefix->is_used = true;
	ifprefix->len = len;
	ifprefix->iface = iface;
	net_ipaddr_copy(&ifprefix->prefix, addr);

	/* Infinite-lifetime prefixes never get an expiry timer. */
	ifprefix->is_infinite = (lifetime == NET_IPV6_ND_INFINITE_LIFETIME);
}
2702
/* Register an IPv6 prefix on an interface.
 *
 * Returns the existing entry if the same prefix/len is already present,
 * a newly initialized entry on success (NET_EVENT_IPV6_PREFIX_ADD is
 * emitted), or NULL when the interface has no IPv6 config or the prefix
 * table is full.  Note: no timer is started here; callers arm it via
 * net_if_ipv6_prefix_set_timer().
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
						  struct in6_addr *prefix,
						  uint8_t len,
						  uint32_t lifetime)
{
	struct net_if_ipv6_prefix *ifprefix = NULL;
	struct net_if_ipv6 *ipv6;

	net_if_lock(iface);

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		goto out;
	}

	/* Duplicate add: hand back the already registered entry. */
	ifprefix = ipv6_prefix_find(iface, prefix, len);
	if (ifprefix) {
		goto out;
	}

	if (!ipv6) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv6->prefix, i) {
		if (ipv6->prefix[i].is_used) {
			continue;
		}

		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
					len, lifetime);

		NET_DBG("[%zu] interface %p prefix %s/%d added", i, iface,
			net_sprint_ipv6_addr(prefix), len);

		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
			struct net_event_ipv6_prefix info;

			net_ipaddr_copy(&info.addr, prefix);
			info.len = len;
			info.lifetime = lifetime;

			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_ADD,
							iface, (const void *) &info,
							sizeof(struct net_event_ipv6_prefix));
		} else {
			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_ADD, iface);
		}

		ifprefix = &ipv6->prefix[i];
		goto out;
	}

out:
	net_if_unlock(iface);

	return ifprefix;
}
2760
/* Remove a registered IPv6 prefix (exact prefix + length match).
 *
 * Cancels its lifetime timer, drops all autoconf addresses derived from
 * it and emits NET_EVENT_IPV6_PREFIX_DEL.  Returns true if an entry was
 * removed.
 */
bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
			   uint8_t len)
{
	bool ret = false;
	struct net_if_ipv6 *ipv6;

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv6->prefix, i) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
		    ipv6->prefix[i].len != len) {
			continue;
		}

		/* Stop the lifetime timer before releasing the slot. */
		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);

		ipv6->prefix[i].is_used = false;

		/* Remove also all auto addresses if the they have the same
		 * prefix.
		 */
		remove_prefix_addresses(iface, ipv6, addr, len);

		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
			struct net_event_ipv6_prefix info;

			net_ipaddr_copy(&info.addr, addr);
			info.len = len;
			info.lifetime = 0;

			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
							iface, (const void *) &info,
							sizeof(struct net_event_ipv6_prefix));
		} else {
			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, iface);
		}

		ret = true;
		goto out;
	}

out:
	net_if_unlock(iface);

	return ret;
}
2816
/* Return a prefix entry on @p iface (default interface when NULL) that
 * covers @p addr, or NULL when none matches.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
						  const struct in6_addr *addr)
{
	struct net_if_ipv6_prefix *prefix = NULL;
	struct net_if_ipv6 *ipv6;

	if (!iface) {
		iface = net_if_get_default();
	}

	if (!iface) {
		return NULL;
	}

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv6->prefix, i) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
				       addr->s6_addr,
				       ipv6->prefix[i].len)) {
			/* NOTE(review): this keeps the entry with the
			 * SMALLEST len among matches (shortest prefix);
			 * longest-match would compare with '<'.  Confirm
			 * the shortest-match policy is intended.
			 */
			if (!prefix || prefix->len > ipv6->prefix[i].len) {
				prefix = &ipv6->prefix[i];
			}
		}
	}

out:
	net_if_unlock(iface);

	return prefix;
}
2857
/* Return the first registered prefix entry on @p iface whose first
 * @p len bits cover @p addr, or NULL when there is none.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
						     struct in6_addr *addr,
						     uint8_t len)
{
	struct net_if_ipv6_prefix *match = NULL;
	struct net_if_ipv6 *ipv6;

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (ipv6 == NULL) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv6->prefix, i) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
				       addr->s6_addr, len)) {
			match = &ipv6->prefix[i];
			break;
		}
	}

out:
	net_if_unlock(iface);

	return match;
}
2889
/* Check whether @p addr is covered by any registered on-link prefix.
 * If *iface is preset, only that interface is checked; on success *iface
 * (when non-NULL) is set to the interface that owns the matching prefix.
 */
bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
{
	bool ret = false;

	STRUCT_SECTION_FOREACH(net_if, tmp) {
		struct net_if_ipv6 *ipv6;

		/* Caller restricted the check to a specific interface. */
		if (iface && *iface && *iface != tmp) {
			continue;
		}

		net_if_lock(tmp);

		ipv6 = tmp->config.ip.ipv6;
		if (!ipv6) {
			net_if_unlock(tmp);
			continue;
		}

		ARRAY_FOR_EACH(ipv6->prefix, i) {
			if (ipv6->prefix[i].is_used &&
			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
					       addr->s6_addr,
					       ipv6->prefix[i].len)) {
				if (iface) {
					*iface = tmp;
				}

				ret = true;
				net_if_unlock(tmp);
				goto out;
			}
		}

		net_if_unlock(tmp);
	}

out:
	return ret;
}
2930
/* Arm the lifetime timer for a prefix; @p lifetime is in seconds. */
void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
				  uint32_t lifetime)
{
	/* No need to set a timer for infinite timeout.  Use the named ND
	 * constant instead of a magic 0xffffffff so this check stays in
	 * sync with net_if_ipv6_prefix_init().
	 */
	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
		return;
	}

	NET_DBG("Prefix lifetime %u sec", lifetime);

	prefix_start_timer(prefix, lifetime);
}
2943
net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix * prefix)2944 void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
2945 {
2946 if (!prefix->is_used) {
2947 return;
2948 }
2949
2950 prefix_timer_remove(prefix);
2951 }
2952
net_if_ipv6_router_lookup(struct net_if * iface,struct in6_addr * addr)2953 struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
2954 struct in6_addr *addr)
2955 {
2956 return iface_router_lookup(iface, AF_INET6, addr);
2957 }
2958
net_if_ipv6_router_find_default(struct net_if * iface,struct in6_addr * addr)2959 struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
2960 struct in6_addr *addr)
2961 {
2962 return iface_router_find_default(iface, AF_INET6, addr);
2963 }
2964
/* Restart a router's lifetime window from "now" and re-arm the shared
 * router timer.  @p lifetime is in seconds.
 */
void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
					uint16_t lifetime)
{
	NET_DBG("Updating expire time of %s by %u secs",
		net_sprint_ipv6_addr(&router->address.in6_addr),
		lifetime);

	router->life_start = k_uptime_get_32();
	router->lifetime = lifetime;

	iface_router_update_timer(router->life_start);
}
2977
/* Add an IPv6 (non-default) router entry with the given lifetime. */
struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
					     struct in6_addr *addr,
					     uint16_t lifetime)
{
	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
}
2984
net_if_ipv6_router_rm(struct net_if_router * router)2985 bool net_if_ipv6_router_rm(struct net_if_router *router)
2986 {
2987 return iface_router_rm(router);
2988 }
2989
net_if_ipv6_get_mcast_hop_limit(struct net_if * iface)2990 uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface)
2991 {
2992 int ret = 0;
2993
2994 net_if_lock(iface);
2995
2996 if (net_if_config_ipv6_get(iface, NULL) < 0) {
2997 goto out;
2998 }
2999
3000 if (!iface->config.ip.ipv6) {
3001 goto out;
3002 }
3003
3004 ret = iface->config.ip.ipv6->mcast_hop_limit;
3005 out:
3006 net_if_unlock(iface);
3007
3008 return ret;
3009 }
3010
/* Set the interface's IPv6 multicast hop limit (no-op without config). */
void net_if_ipv6_set_mcast_hop_limit(struct net_if *iface, uint8_t hop_limit)
{
	net_if_lock(iface);

	if (net_if_config_ipv6_get(iface, NULL) < 0 ||
	    iface->config.ip.ipv6 == NULL) {
		goto out;
	}

	iface->config.ip.ipv6->mcast_hop_limit = hop_limit;
out:
	net_if_unlock(iface);
}
3027
net_if_ipv6_get_hop_limit(struct net_if * iface)3028 uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface)
3029 {
3030 int ret = 0;
3031
3032 net_if_lock(iface);
3033
3034 if (net_if_config_ipv6_get(iface, NULL) < 0) {
3035 goto out;
3036 }
3037
3038 if (!iface->config.ip.ipv6) {
3039 goto out;
3040 }
3041
3042 ret = iface->config.ip.ipv6->hop_limit;
3043 out:
3044 net_if_unlock(iface);
3045
3046 return ret;
3047 }
3048
/* Set the interface's IPv6 unicast hop limit (no-op without config). */
void net_if_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit)
{
	net_if_lock(iface);

	if (net_if_config_ipv6_get(iface, NULL) < 0 ||
	    iface->config.ip.ipv6 == NULL) {
		goto out;
	}

	iface->config.ip.ipv6->hop_limit = hop_limit;
out:
	net_if_unlock(iface);
}
3065
3066 #endif /* CONFIG_NET_NATIVE_IPV6 */
3067
get_diff_ipv6(const struct in6_addr * src,const struct in6_addr * dst)3068 static uint8_t get_diff_ipv6(const struct in6_addr *src,
3069 const struct in6_addr *dst)
3070 {
3071 return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 16);
3072 }
3073
is_proper_ipv6_address(struct net_if_addr * addr)3074 static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
3075 {
3076 if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3077 addr->address.family == AF_INET6 &&
3078 !net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
3079 return true;
3080 }
3081
3082 return false;
3083 }
3084
/* Decide whether a public (non-temporary) source address should be used,
 * combining the interface's privacy-extension preference with per-socket
 * IPV6_PREFER_SRC_* flags (which override the Kconfig default).
 */
static bool use_public_address(bool prefer_public, bool is_temporary,
			       int flags)
{
	if (IS_ENABLED(CONFIG_NET_IPV6_PE) &&
	    !prefer_public && is_temporary) {
		/* Socket flag can override the Kconfig preference. */
		return (flags & IPV6_PREFER_SRC_PUBLIC) ? true : false;
	}

	return (flags & IPV6_PREFER_SRC_TMP) ? false : true;
}
3106
/* Pick the interface's best source address for @p dst: longest common
 * prefix wins (capped at the on-link prefix length), with IPv6 privacy
 * extension and IPV6_PREFER_SRC_* flag handling layered on top.
 * *best_so_far carries the best match length across interfaces so the
 * caller can chain calls over several interfaces.
 */
static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
						   const struct in6_addr *dst,
						   uint8_t prefix_len,
						   uint8_t *best_so_far,
						   int flags)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	struct net_if_addr *public_addr = NULL;
	struct in6_addr *src = NULL;
	uint8_t public_addr_len = 0;
	struct in6_addr *temp_addr = NULL;
	uint8_t len, temp_addr_len = 0;
	bool ret;

	net_if_lock(iface);

	/* Re-read under the lock (the initializer above is unlocked). */
	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv6->unicast, i) {
		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
			continue;
		}

		/* Cap the match at the on-link prefix length. */
		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
		if (len >= prefix_len) {
			len = prefix_len;
		}

		if (len >= *best_so_far) {
			/* Mesh local address can only be selected for the same
			 * subnet.
			 */
			if (ipv6->unicast[i].is_mesh_local && len < 64 &&
			    !net_ipv6_is_addr_mcast_mesh(dst)) {
				continue;
			}

			ret = use_public_address(iface->pe_prefer_public,
						 ipv6->unicast[i].is_temporary,
						 flags);
			if (!ret) {
				/* Temporary address preferred: remember it
				 * separately for the PE post-processing.
				 */
				temp_addr = &ipv6->unicast[i].address.in6_addr;
				temp_addr_len = len;

				*best_so_far = len;
				src = &ipv6->unicast[i].address.in6_addr;
				continue;
			}

			if (!ipv6->unicast[i].is_temporary) {
				public_addr = &ipv6->unicast[i];
				public_addr_len = len;
			}

			*best_so_far = len;
			src = &ipv6->unicast[i].address.in6_addr;
		}
	}

	if (IS_ENABLED(CONFIG_NET_IPV6_PE) && !iface->pe_prefer_public && temp_addr) {
		/* Privacy extensions: let the temporary address win ties. */
		if (temp_addr_len >= *best_so_far) {
			*best_so_far = temp_addr_len;
			src = temp_addr;
		}
	} else {
		/* By default prefer always public address if found */
		if (flags & IPV6_PREFER_SRC_PUBLIC) {
		use_public:
			if (public_addr &&
			    !net_ipv6_addr_cmp(&public_addr->address.in6_addr, src)) {
				src = &public_addr->address.in6_addr;
				*best_so_far = public_addr_len;
			}
		} else if (flags & IPV6_PREFER_SRC_TMP) {
			if (temp_addr && !net_ipv6_addr_cmp(temp_addr, src)) {
				src = temp_addr;
				*best_so_far = temp_addr_len;
			}
		} else if (flags & IPV6_PREFER_SRC_PUBTMP_DEFAULT) {
			/* Default policy falls back to the public branch. */
			goto use_public;
		}
	}

out:
	net_if_unlock(iface);

	return src;
}
3198
/* Select the IPv6 source address for @p dst with IPV6_PREFER_SRC_* hints.
 *
 * For global destinations, the best-prefix-match address across the given
 * interface (or all interfaces) is chosen; for link-local destinations a
 * link-local source is chosen.  Never returns NULL: falls back to the
 * unspecified address (::) when no candidate exists.
 */
const struct in6_addr *net_if_ipv6_select_src_addr_hint(struct net_if *dst_iface,
							const struct in6_addr *dst,
							int flags)
{
	const struct in6_addr *src = NULL;
	uint8_t best_match = 0U;

	if (dst == NULL) {
		return NULL;
	}

	if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast_link(dst)) {
		struct net_if_ipv6_prefix *prefix;
		uint8_t prefix_len = 128;

		/* Cap the match length at the covering on-link prefix. */
		prefix = net_if_ipv6_prefix_get(dst_iface, dst);
		if (prefix) {
			prefix_len = prefix->len;
		}

		/* If caller has supplied interface, then use that */
		if (dst_iface) {
			src = net_if_ipv6_get_best_match(dst_iface, dst,
							 prefix_len,
							 &best_match,
							 flags);
		} else {
			/* best_match carries over, so the best candidate
			 * across all interfaces wins.
			 */
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in6_addr *addr;

				addr = net_if_ipv6_get_best_match(iface, dst,
								  prefix_len,
								  &best_match,
								  flags);
				if (addr) {
					src = addr;
				}
			}
		}

	} else {
		/* Link-local destination: need a link-local source. */
		if (dst_iface) {
			src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
		} else {
			struct in6_addr *addr;

			/* Try the default interface first. */
			addr = net_if_ipv6_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
			if (addr) {
				src = addr;
				goto out;
			}

			STRUCT_SECTION_FOREACH(net_if, iface) {
				addr = net_if_ipv6_get_ll(iface,
							  NET_ADDR_PREFERRED);
				if (addr) {
					src = addr;
					break;
				}
			}
		}
	}

	if (!src) {
		src = net_ipv6_unspecified_address();
	}

out:
	return src;
}
3269
net_if_ipv6_select_src_addr(struct net_if * dst_iface,const struct in6_addr * dst)3270 const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
3271 const struct in6_addr *dst)
3272 {
3273 return net_if_ipv6_select_src_addr_hint(dst_iface,
3274 dst,
3275 IPV6_PREFER_SRC_PUBTMP_DEFAULT);
3276 }
3277
net_if_ipv6_select_src_iface(const struct in6_addr * dst)3278 struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
3279 {
3280 struct net_if *iface = NULL;
3281 const struct in6_addr *src;
3282
3283 src = net_if_ipv6_select_src_addr(NULL, dst);
3284 if (src != net_ipv6_unspecified_address()) {
3285 net_if_ipv6_addr_lookup(src, &iface);
3286 }
3287
3288 if (iface == NULL) {
3289 iface = net_if_get_default();
3290 }
3291
3292 return iface;
3293 }
3294
3295 #if defined(CONFIG_NET_NATIVE_IPV6)
3296
net_if_ipv6_calc_reachable_time(struct net_if_ipv6 * ipv6)3297 uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
3298 {
3299 uint32_t min_reachable, max_reachable;
3300
3301 min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
3302 / MIN_RANDOM_DENOM;
3303 max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
3304 / MAX_RANDOM_DENOM;
3305
3306 NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
3307 max_reachable);
3308
3309 return min_reachable +
3310 sys_rand32_get() % (max_reachable - min_reachable);
3311 }
3312
/* Bring up IPv6 on an interface: start duplicate-address detection (or,
 * when DAD is disabled, join the multicast node groups directly) and send
 * router solicitations.  No-op when IPv6 or ND is disabled on the iface.
 */
static void iface_ipv6_start(struct net_if *iface)
{
	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
		return;
	}

	if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
		net_if_start_dad(iface);
	} else {
		struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;

		if (ipv6 != NULL) {
			join_mcast_nodes(iface,
					 &ipv6->mcast[0].address.in6_addr);
		}
	}

	net_if_start_rs(iface);
}
3333
/* Tear down IPv6 state when the interface goes down: bump the stable-IID
 * network counter and reset the IID (when CONFIG_NET_IPV6_IID_STABLE is
 * on), then drop all autoconf addresses so they are re-created on the
 * next start.
 */
static void iface_ipv6_stop(struct net_if *iface)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;

	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
		return;
	}

	if (ipv6 == NULL) {
		return;
	}

	IF_ENABLED(CONFIG_NET_IPV6_IID_STABLE, (ipv6->network_counter++));
	IF_ENABLED(CONFIG_NET_IPV6_IID_STABLE, (ipv6->iid = NULL));

	/* Remove all autoconf addresses */
	ARRAY_FOR_EACH(ipv6->unicast, i) {
		if (ipv6->unicast[i].is_used &&
		    ipv6->unicast[i].address.family == AF_INET6 &&
		    ipv6->unicast[i].addr_type == NET_ADDR_AUTOCONF) {
			(void)net_if_ipv6_addr_rm(iface,
						  &ipv6->unicast[i].address.in6_addr);
		}
	}
}
3360
/* One-time IPv6 subsystem init: set up DAD and ND, the address/prefix
 * lifetime work items, and seed every pooled IPv6 config with default hop
 * limits and a randomized reachable time.
 */
static void iface_ipv6_init(int if_count)
{
	iface_ipv6_dad_init();
	iface_ipv6_nd_init();

	k_work_init_delayable(&address_lifetime_timer,
			      address_lifetime_timeout);
	k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);

	/* The config pool is statically sized; warn when there are more
	 * interfaces than pooled IPv6 configs.
	 */
	if (if_count > ARRAY_SIZE(ipv6_addresses)) {
		NET_WARN("You have %zu IPv6 net_if addresses but %d "
			 "network interfaces", ARRAY_SIZE(ipv6_addresses),
			 if_count);
		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
			 "value.");
	}

	ARRAY_FOR_EACH(ipv6_addresses, i) {
		ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
		ipv6_addresses[i].ipv6.mcast_hop_limit = CONFIG_NET_INITIAL_MCAST_HOP_LIMIT;
		ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;

		net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
	}
}
3386 #endif /* CONFIG_NET_NATIVE_IPV6 */
3387 #else /* CONFIG_NET_IPV6 */
net_if_ipv6_maddr_lookup(const struct in6_addr * addr,struct net_if ** iface)3388 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
3389 struct net_if **iface)
3390 {
3391 ARG_UNUSED(addr);
3392 ARG_UNUSED(iface);
3393
3394 return NULL;
3395 }
3396
net_if_ipv6_addr_lookup(const struct in6_addr * addr,struct net_if ** ret)3397 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
3398 struct net_if **ret)
3399 {
3400 ARG_UNUSED(addr);
3401 ARG_UNUSED(ret);
3402
3403 return NULL;
3404 }
3405
net_if_ipv6_get_global_addr(enum net_addr_state state,struct net_if ** iface)3406 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
3407 struct net_if **iface)
3408 {
3409 ARG_UNUSED(state);
3410 ARG_UNUSED(iface);
3411
3412 return NULL;
3413 }
3414 #endif /* CONFIG_NET_IPV6 */
3415
3416 #if !defined(CONFIG_NET_NATIVE_IPV6)
3417 #define join_mcast_allnodes(...)
3418 #define leave_mcast_all(...)
3419 #define clear_joined_ipv6_mcast_groups(...)
3420 #define iface_ipv6_start(...)
3421 #define iface_ipv6_stop(...)
3422 #define iface_ipv6_init(...)
#endif /* !CONFIG_NET_NATIVE_IPV6 */
3424
3425 #if defined(CONFIG_NET_IPV4)
/* Get (lazily allocating) the IPv4 configuration of an interface.
 *
 * @return 0 on success with *ipv4 (if non-NULL) pointing at the config,
 *         -ENOTSUP when the interface has IPv4 disabled, or -ESRCH when
 *         the static ipv4_addresses pool is exhausted.
 */
int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
{
	int ret = 0;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		ret = -ENOTSUP;
		goto out;
	}

	/* Already bound: just hand back the existing config. */
	if (iface->config.ip.ipv4) {
		if (ipv4) {
			*ipv4 = iface->config.ip.ipv4;
		}

		goto out;
	}

	k_mutex_lock(&lock, K_FOREVER);

	/* Claim the first free slot in the shared pool. */
	ARRAY_FOR_EACH(ipv4_addresses, i) {
		if (ipv4_addresses[i].iface) {
			continue;
		}

		iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
		ipv4_addresses[i].iface = iface;

		if (ipv4) {
			*ipv4 = &ipv4_addresses[i].ipv4;
		}

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
3471
/* Release the interface's IPv4 configuration back to the shared pool.
 *
 * @return 0 on success, -ENOTSUP when IPv4 is disabled on the interface,
 *         -EALREADY when it holds no config, -ESRCH when the config is
 *         not found in the pool.
 */
int net_if_config_ipv4_put(struct net_if *iface)
{
	int ret = 0;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		ret = -ENOTSUP;
		goto out;
	}

	if (!iface->config.ip.ipv4) {
		ret = -EALREADY;
		goto out;
	}

	k_mutex_lock(&lock, K_FOREVER);

	/* Find the pool slot owned by this interface and free it. */
	ARRAY_FOR_EACH(ipv4_addresses, i) {
		if (ipv4_addresses[i].iface != iface) {
			continue;
		}

		iface->config.ip.ipv4 = NULL;
		ipv4_addresses[i].iface = NULL;

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
3510
/* Check whether @p addr falls into one of the IPv4 subnets configured on
 * @p iface (address ANDed with each entry's netmask).
 */
bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
			       const struct in_addr *addr)
{
	bool ret = false;
	struct net_if_ipv4 *ipv4;
	uint32_t subnet;

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv4->unicast, i) {
		if (!ipv4->unicast[i].ipv4.is_used ||
		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
			continue;
		}

		/* UNALIGNED_GET: addr may not be naturally aligned. */
		subnet = UNALIGNED_GET(&addr->s_addr) &
			 ipv4->unicast[i].netmask.s_addr;

		if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
		     ipv4->unicast[i].netmask.s_addr) == subnet) {
			ret = true;
			goto out;
		}
	}

out:
	net_if_unlock(iface);

	return ret;
}
3546
ipv4_is_broadcast_address(struct net_if * iface,const struct in_addr * addr)3547 static bool ipv4_is_broadcast_address(struct net_if *iface,
3548 const struct in_addr *addr)
3549 {
3550 struct net_if_ipv4 *ipv4;
3551 bool ret = false;
3552 struct in_addr bcast;
3553
3554 net_if_lock(iface);
3555
3556 ipv4 = iface->config.ip.ipv4;
3557 if (!ipv4) {
3558 ret = false;
3559 goto out;
3560 }
3561
3562 ARRAY_FOR_EACH(ipv4->unicast, i) {
3563 if (!ipv4->unicast[i].ipv4.is_used ||
3564 ipv4->unicast[i].ipv4.address.family != AF_INET) {
3565 continue;
3566 }
3567
3568 bcast.s_addr = ipv4->unicast[i].ipv4.address.in_addr.s_addr |
3569 ~ipv4->unicast[i].netmask.s_addr;
3570
3571 if (bcast.s_addr == UNALIGNED_GET(&addr->s_addr)) {
3572 ret = true;
3573 goto out;
3574 }
3575 }
3576
3577 out:
3578 net_if_unlock(iface);
3579 return ret;
3580 }
3581
net_if_ipv4_is_addr_bcast(struct net_if * iface,const struct in_addr * addr)3582 bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
3583 const struct in_addr *addr)
3584 {
3585 bool ret = false;
3586
3587 if (iface) {
3588 ret = ipv4_is_broadcast_address(iface, addr);
3589 goto out;
3590 }
3591
3592 STRUCT_SECTION_FOREACH(net_if, one_iface) {
3593 ret = ipv4_is_broadcast_address(one_iface, addr);
3594 if (ret) {
3595 goto out;
3596 }
3597 }
3598
3599 out:
3600 return ret;
3601 }
3602
net_if_ipv4_select_src_iface(const struct in_addr * dst)3603 struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
3604 {
3605 struct net_if *selected = NULL;
3606 const struct in_addr *src;
3607
3608 src = net_if_ipv4_select_src_addr(NULL, dst);
3609 if (src != net_ipv4_unspecified_address()) {
3610 net_if_ipv4_addr_lookup(src, &selected);
3611 }
3612
3613 if (selected == NULL) {
3614 selected = net_if_get_default();
3615 }
3616
3617 return selected;
3618 }
3619
get_diff_ipv4(const struct in_addr * src,const struct in_addr * dst)3620 static uint8_t get_diff_ipv4(const struct in_addr *src,
3621 const struct in_addr *dst)
3622 {
3623 return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 4);
3624 }
3625
is_proper_ipv4_address(struct net_if_addr * addr)3626 static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
3627 {
3628 if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3629 addr->address.family == AF_INET) {
3630 return true;
3631 }
3632
3633 return false;
3634 }
3635
/* Find the unicast address on iface that shares the longest prefix with
 * dst, considering only link-local or only non-link-local addresses
 * depending on ll. best_so_far carries the running best across calls.
 */
static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
						  const struct in_addr *dst,
						  uint8_t *best_so_far, bool ll)
{
	struct in_addr *best = NULL;
	struct net_if_ipv4 *ipv4;

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (ipv4 == NULL) {
		goto unlock;
	}

	ARRAY_FOR_EACH(ipv4->unicast, i) {
		struct in_addr masked;
		uint8_t prefix;

		if (!is_proper_ipv4_address(&ipv4->unicast[i].ipv4)) {
			continue;
		}

		/* Only consider addresses of the requested kind. */
		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].ipv4.address.in_addr) != ll) {
			continue;
		}

		masked.s_addr = ipv4->unicast[i].ipv4.address.in_addr.s_addr &
				ipv4->unicast[i].netmask.s_addr;

		/* NOTE(review): dst is compared unmasked against the local
		 * subnet, and >= lets an equally-good later candidate win -
		 * both preserved from the original.
		 */
		prefix = get_diff_ipv4(dst, &masked);
		if (prefix >= *best_so_far) {
			*best_so_far = prefix;
			best = &ipv4->unicast[i].ipv4.address.in_addr;
		}
	}

unlock:
	net_if_unlock(iface);

	return best;
}
3676
/* Return the first unicast IPv4 address in the requested state
 * (NET_ADDR_ANY_STATE matches any), selecting link-local or global
 * addresses depending on ll. NULL if none found.
 */
static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
					enum net_addr_state addr_state, bool ll)
{
	struct in_addr *found = NULL;
	struct net_if_ipv4 *ipv4;

	if (iface == NULL) {
		return NULL;
	}

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (ipv4 == NULL) {
		goto unlock;
	}

	ARRAY_FOR_EACH(ipv4->unicast, i) {
		if (!ipv4->unicast[i].ipv4.is_used ||
		    (addr_state != NET_ADDR_ANY_STATE &&
		     ipv4->unicast[i].ipv4.addr_state != addr_state) ||
		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
			continue;
		}

		/* The address kind (link-local or not) must match ll. */
		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].ipv4.address.in_addr) != ll) {
			continue;
		}

		found = &ipv4->unicast[i].ipv4.address.in_addr;
		break;
	}

unlock:
	net_if_unlock(iface);

	return found;
}
3721
/* Return the first link-local IPv4 address in the given state, or NULL. */
struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
				   enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, true);
}
3727
/* Return the first non-link-local IPv4 address in the given state, or NULL. */
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
					    enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, false);
}
3733
/* Select the most suitable source IPv4 address for sending to dst.
 *
 * For a non-link-local destination, the unicast address with the longest
 * matching prefix wins (searched on dst_iface if given, otherwise on all
 * interfaces). For a link-local destination a link-local source is
 * searched instead. If nothing matched, fall back to a preferred global
 * address, then (with IPv4 autoconf) a preferred link-local address, and
 * finally INADDR_ANY. Returns NULL only when dst is NULL.
 */
const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
						  const struct in_addr *dst)
{
	const struct in_addr *src = NULL;
	uint8_t best_match = 0U;

	if (dst == NULL) {
		return NULL;
	}

	if (!net_ipv4_is_ll_addr(dst)) {

		/* If caller has supplied interface, then use that */
		if (dst_iface) {
			src = net_if_ipv4_get_best_match(dst_iface, dst,
							 &best_match, false);
		} else {
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in_addr *addr;

				addr = net_if_ipv4_get_best_match(iface, dst,
								  &best_match,
								  false);
				if (addr) {
					src = addr;
				}
			}
		}

	} else {
		if (dst_iface) {
			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
		} else {
			struct in_addr *addr;

			STRUCT_SECTION_FOREACH(net_if, iface) {
				addr = net_if_ipv4_get_best_match(iface, dst,
								  &best_match,
								  true);
				if (addr) {
					src = addr;
				}
			}

			/* Check the default interface again. It will only
			 * be used if it has a valid LL address, and there was
			 * no better match on any other interface.
			 */
			addr = net_if_ipv4_get_best_match(net_if_get_default(),
							  dst, &best_match,
							  true);
			if (addr) {
				src = addr;
			}
		}
	}

	if (!src) {
		src = net_if_ipv4_get_global_addr(dst_iface,
						  NET_ADDR_PREFERRED);

		if (IS_ENABLED(CONFIG_NET_IPV4_AUTO) && !src) {
			/* Try to use LL address if there's really no other
			 * address available.
			 */
			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
		}

		if (!src) {
			src = net_ipv4_unspecified_address();
		}
	}

	return src;
}
3809
3810 /* Internal function to get the first IPv4 address of the interface */
net_if_ipv4_addr_get_first_by_index(int ifindex)3811 struct net_if_addr *net_if_ipv4_addr_get_first_by_index(int ifindex)
3812 {
3813 struct net_if *iface = net_if_get_by_index(ifindex);
3814 struct net_if_addr *ifaddr = NULL;
3815 struct net_if_ipv4 *ipv4;
3816
3817 if (!iface) {
3818 return NULL;
3819 }
3820
3821 net_if_lock(iface);
3822
3823 ipv4 = iface->config.ip.ipv4;
3824 if (!ipv4) {
3825 goto out;
3826 }
3827
3828 ARRAY_FOR_EACH(ipv4->unicast, i) {
3829 if (!ipv4->unicast[i].ipv4.is_used ||
3830 ipv4->unicast[i].ipv4.address.family != AF_INET) {
3831 continue;
3832 }
3833
3834 ifaddr = &ipv4->unicast[i].ipv4;
3835 break;
3836 }
3837
3838 out:
3839 net_if_unlock(iface);
3840
3841 return ifaddr;
3842 }
3843
net_if_ipv4_addr_lookup(const struct in_addr * addr,struct net_if ** ret)3844 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
3845 struct net_if **ret)
3846 {
3847 struct net_if_addr *ifaddr = NULL;
3848
3849 STRUCT_SECTION_FOREACH(net_if, iface) {
3850 struct net_if_ipv4 *ipv4;
3851
3852 net_if_lock(iface);
3853
3854 ipv4 = iface->config.ip.ipv4;
3855 if (!ipv4) {
3856 net_if_unlock(iface);
3857 continue;
3858 }
3859
3860 ARRAY_FOR_EACH(ipv4->unicast, i) {
3861 if (!ipv4->unicast[i].ipv4.is_used ||
3862 ipv4->unicast[i].ipv4.address.family != AF_INET) {
3863 continue;
3864 }
3865
3866 if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
3867 ipv4->unicast[i].ipv4.address.in_addr.s_addr) {
3868
3869 if (ret) {
3870 *ret = iface;
3871 }
3872
3873 ifaddr = &ipv4->unicast[i].ipv4;
3874 net_if_unlock(iface);
3875 goto out;
3876 }
3877 }
3878
3879 net_if_unlock(iface);
3880 }
3881
3882 out:
3883 return ifaddr;
3884 }
3885
z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr * addr)3886 int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
3887 {
3888 struct net_if_addr *if_addr;
3889 struct net_if *iface = NULL;
3890
3891 if_addr = net_if_ipv4_addr_lookup(addr, &iface);
3892 if (!if_addr) {
3893 return 0;
3894 }
3895
3896 return net_if_get_by_iface(iface);
3897 }
3898
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: copy the address out of user space before lookup. */
static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
					  const struct in_addr *addr)
{
	struct in_addr addr_v4;

	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));

	return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
}
#include <zephyr/syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
#endif
3911
net_if_ipv4_get_netmask_by_addr(struct net_if * iface,const struct in_addr * addr)3912 struct in_addr net_if_ipv4_get_netmask_by_addr(struct net_if *iface,
3913 const struct in_addr *addr)
3914 {
3915 struct in_addr netmask = { 0 };
3916 struct net_if_ipv4 *ipv4;
3917 uint32_t subnet;
3918
3919 net_if_lock(iface);
3920
3921 if (net_if_config_ipv4_get(iface, NULL) < 0) {
3922 goto out;
3923 }
3924
3925 ipv4 = iface->config.ip.ipv4;
3926 if (ipv4 == NULL) {
3927 goto out;
3928 }
3929
3930 ARRAY_FOR_EACH(ipv4->unicast, i) {
3931 if (!ipv4->unicast[i].ipv4.is_used ||
3932 ipv4->unicast[i].ipv4.address.family != AF_INET) {
3933 continue;
3934 }
3935
3936 subnet = UNALIGNED_GET(&addr->s_addr) &
3937 ipv4->unicast[i].netmask.s_addr;
3938
3939 if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3940 ipv4->unicast[i].netmask.s_addr) == subnet) {
3941 netmask = ipv4->unicast[i].netmask;
3942 goto out;
3943 }
3944 }
3945
3946 out:
3947 net_if_unlock(iface);
3948
3949 return netmask;
3950 }
3951
net_if_ipv4_set_netmask_by_addr(struct net_if * iface,const struct in_addr * addr,const struct in_addr * netmask)3952 bool net_if_ipv4_set_netmask_by_addr(struct net_if *iface,
3953 const struct in_addr *addr,
3954 const struct in_addr *netmask)
3955 {
3956 struct net_if_ipv4 *ipv4;
3957 uint32_t subnet;
3958 bool ret = false;
3959
3960 net_if_lock(iface);
3961
3962 if (net_if_config_ipv4_get(iface, NULL) < 0) {
3963 goto out;
3964 }
3965
3966 ipv4 = iface->config.ip.ipv4;
3967 if (ipv4 == NULL) {
3968 goto out;
3969 }
3970
3971 ARRAY_FOR_EACH(ipv4->unicast, i) {
3972 if (!ipv4->unicast[i].ipv4.is_used ||
3973 ipv4->unicast[i].ipv4.address.family != AF_INET) {
3974 continue;
3975 }
3976
3977 subnet = UNALIGNED_GET(&addr->s_addr) &
3978 ipv4->unicast[i].netmask.s_addr;
3979
3980 if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3981 ipv4->unicast[i].netmask.s_addr) == subnet) {
3982 ipv4->unicast[i].netmask = *netmask;
3983 ret = true;
3984 goto out;
3985 }
3986 }
3987
3988 out:
3989 net_if_unlock(iface);
3990
3991 return ret;
3992 }
3993
3994 /* Using this function is problematic as if we have multiple
3995 * addresses configured, which one to return. Use heuristic
3996 * in this case and return the first one found. Please use
3997 * net_if_ipv4_get_netmask_by_addr() instead.
3998 */
net_if_ipv4_get_netmask(struct net_if * iface)3999 struct in_addr net_if_ipv4_get_netmask(struct net_if *iface)
4000 {
4001 struct in_addr netmask = { 0 };
4002 struct net_if_ipv4 *ipv4;
4003
4004 net_if_lock(iface);
4005
4006 if (net_if_config_ipv4_get(iface, NULL) < 0) {
4007 goto out;
4008 }
4009
4010 ipv4 = iface->config.ip.ipv4;
4011 if (ipv4 == NULL) {
4012 goto out;
4013 }
4014
4015 ARRAY_FOR_EACH(ipv4->unicast, i) {
4016 if (!ipv4->unicast[i].ipv4.is_used ||
4017 ipv4->unicast[i].ipv4.address.family != AF_INET) {
4018 continue;
4019 }
4020
4021 netmask = iface->config.ip.ipv4->unicast[i].netmask;
4022 break;
4023 }
4024
4025 out:
4026 net_if_unlock(iface);
4027
4028 return netmask;
4029 }
4030
4031 /* Using this function is problematic as if we have multiple
4032 * addresses configured, which one to set. Use heuristic
4033 * in this case and set the first one found. Please use
4034 * net_if_ipv4_set_netmask_by_addr() instead.
4035 */
net_if_ipv4_set_netmask_deprecated(struct net_if * iface,const struct in_addr * netmask)4036 static void net_if_ipv4_set_netmask_deprecated(struct net_if *iface,
4037 const struct in_addr *netmask)
4038 {
4039 struct net_if_ipv4 *ipv4;
4040
4041 net_if_lock(iface);
4042
4043 if (net_if_config_ipv4_get(iface, NULL) < 0) {
4044 goto out;
4045 }
4046
4047 ipv4 = iface->config.ip.ipv4;
4048 if (ipv4 == NULL) {
4049 goto out;
4050 }
4051
4052 ARRAY_FOR_EACH(ipv4->unicast, i) {
4053 if (!ipv4->unicast[i].ipv4.is_used ||
4054 ipv4->unicast[i].ipv4.address.family != AF_INET) {
4055 continue;
4056 }
4057
4058 net_ipaddr_copy(&ipv4->unicast[i].netmask, netmask);
4059 break;
4060 }
4061
4062 out:
4063 net_if_unlock(iface);
4064 }
4065
/* Deprecated: sets the netmask of the first IPv4 address only.
 * Use net_if_ipv4_set_netmask_by_addr() instead.
 */
void net_if_ipv4_set_netmask(struct net_if *iface,
			     const struct in_addr *netmask)
{
	net_if_ipv4_set_netmask_deprecated(iface, netmask);
}
4071
z_impl_net_if_ipv4_set_netmask_by_index(int index,const struct in_addr * netmask)4072 bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
4073 const struct in_addr *netmask)
4074 {
4075 struct net_if *iface;
4076
4077 iface = net_if_get_by_index(index);
4078 if (!iface) {
4079 return false;
4080 }
4081
4082 net_if_ipv4_set_netmask_deprecated(iface, netmask);
4083
4084 return true;
4085 }
4086
z_impl_net_if_ipv4_set_netmask_by_addr_by_index(int index,const struct in_addr * addr,const struct in_addr * netmask)4087 bool z_impl_net_if_ipv4_set_netmask_by_addr_by_index(int index,
4088 const struct in_addr *addr,
4089 const struct in_addr *netmask)
4090 {
4091 struct net_if *iface;
4092
4093 iface = net_if_get_by_index(index);
4094 if (!iface) {
4095 return false;
4096 }
4097
4098 net_if_ipv4_set_netmask_by_addr(iface, addr, netmask);
4099
4100 return true;
4101 }
4102
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: validate the interface index and copy the netmask
 * out of user space before applying it.
 */
bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
					     const struct in_addr *netmask)
{
	struct in_addr netmask_addr;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	K_OOPS(k_usermode_from_copy(&netmask_addr, (void *)netmask,
				    sizeof(netmask_addr)));

	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
}

#include <zephyr/syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
4122
/* Syscall wrapper: validate the interface index and copy both the
 * address and the netmask out of user space before applying them.
 */
bool z_vrfy_net_if_ipv4_set_netmask_by_addr_by_index(int index,
						     const struct in_addr *addr,
						     const struct in_addr *netmask)
{
	struct in_addr ipv4_addr, netmask_addr;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	K_OOPS(k_usermode_from_copy(&ipv4_addr, (void *)addr,
				    sizeof(ipv4_addr)));
	K_OOPS(k_usermode_from_copy(&netmask_addr, (void *)netmask,
				    sizeof(netmask_addr)));

	return z_impl_net_if_ipv4_set_netmask_by_addr_by_index(index,
							       &ipv4_addr,
							       &netmask_addr);
}

#include <zephyr/syscalls/net_if_ipv4_set_netmask_by_addr_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
4147
net_if_ipv4_get_gw(struct net_if * iface)4148 struct in_addr net_if_ipv4_get_gw(struct net_if *iface)
4149 {
4150 struct in_addr gw = { 0 };
4151
4152 net_if_lock(iface);
4153
4154 if (net_if_config_ipv4_get(iface, NULL) < 0) {
4155 goto out;
4156 }
4157
4158 if (!iface->config.ip.ipv4) {
4159 goto out;
4160 }
4161
4162 gw = iface->config.ip.ipv4->gw;
4163 out:
4164 net_if_unlock(iface);
4165
4166 return gw;
4167 }
4168
net_if_ipv4_set_gw(struct net_if * iface,const struct in_addr * gw)4169 void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
4170 {
4171 net_if_lock(iface);
4172
4173 if (net_if_config_ipv4_get(iface, NULL) < 0) {
4174 goto out;
4175 }
4176
4177 if (!iface->config.ip.ipv4) {
4178 goto out;
4179 }
4180
4181 net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
4182 out:
4183 net_if_unlock(iface);
4184 }
4185
z_impl_net_if_ipv4_set_gw_by_index(int index,const struct in_addr * gw)4186 bool z_impl_net_if_ipv4_set_gw_by_index(int index,
4187 const struct in_addr *gw)
4188 {
4189 struct net_if *iface;
4190
4191 iface = net_if_get_by_index(index);
4192 if (!iface) {
4193 return false;
4194 }
4195
4196 net_if_ipv4_set_gw(iface, gw);
4197
4198 return true;
4199 }
4200
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: validate the interface index and copy the gateway
 * address out of user space before applying it.
 */
bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
					const struct in_addr *gw)
{
	struct in_addr gw_addr;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	K_OOPS(k_usermode_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));

	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
}

#include <zephyr/syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
4220
ipv4_addr_find(struct net_if * iface,struct in_addr * addr)4221 static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
4222 struct in_addr *addr)
4223 {
4224 struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4225
4226 ARRAY_FOR_EACH(ipv4->unicast, i) {
4227 if (!ipv4->unicast[i].ipv4.is_used) {
4228 continue;
4229 }
4230
4231 if (net_ipv4_addr_cmp(addr,
4232 &ipv4->unicast[i].ipv4.address.in_addr)) {
4233 return &ipv4->unicast[i].ipv4;
4234 }
4235 }
4236
4237 return NULL;
4238 }
4239
4240 #if defined(CONFIG_NET_IPV4_ACD)
/* Called when address conflict detection finished without a conflict:
 * promote the address to preferred and notify listeners.
 */
void net_if_ipv4_acd_succeeded(struct net_if *iface, struct net_if_addr *ifaddr)
{
	net_if_lock(iface);

	NET_DBG("ACD succeeded for %s at interface %d",
		net_sprint_ipv4_addr(&ifaddr->address.in_addr),
		ifaddr->ifindex);

	ifaddr->addr_state = NET_ADDR_PREFERRED;

	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ACD_SUCCEED, iface,
					&ifaddr->address.in_addr,
					sizeof(struct in_addr));

	net_if_unlock(iface);
}
4257
/* Called when address conflict detection found a conflict: notify
 * listeners and remove the conflicting address from the interface.
 */
void net_if_ipv4_acd_failed(struct net_if *iface, struct net_if_addr *ifaddr)
{
	net_if_lock(iface);

	NET_DBG("ACD failed for %s at interface %d",
		net_sprint_ipv4_addr(&ifaddr->address.in_addr),
		ifaddr->ifindex);

	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ACD_FAILED, iface,
					&ifaddr->address.in_addr,
					sizeof(struct in_addr));

	net_if_ipv4_addr_rm(iface, &ifaddr->address.in_addr);

	net_if_unlock(iface);
}
4274
net_if_ipv4_start_acd(struct net_if * iface,struct net_if_addr * ifaddr)4275 void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
4276 {
4277 ifaddr->addr_state = NET_ADDR_TENTATIVE;
4278
4279 if (net_if_is_up(iface)) {
4280 NET_DBG("Interface %p ll addr %s tentative IPv4 addr %s",
4281 iface,
4282 net_sprint_ll_addr(net_if_get_link_addr(iface)->addr,
4283 net_if_get_link_addr(iface)->len),
4284 net_sprint_ipv4_addr(&ifaddr->address.in_addr));
4285
4286 if (net_ipv4_acd_start(iface, ifaddr) != 0) {
4287 NET_DBG("Failed to start ACD for %s on iface %p.",
4288 net_sprint_ipv4_addr(&ifaddr->address.in_addr),
4289 iface);
4290
4291 /* Just act as if no conflict was detected. */
4292 net_if_ipv4_acd_succeeded(iface, ifaddr);
4293 }
4294 } else {
4295 NET_DBG("Interface %p is down, starting ACD for %s later.",
4296 iface, net_sprint_ipv4_addr(&ifaddr->address.in_addr));
4297 }
4298 }
4299
/* Run address conflict detection for all eligible unicast addresses of
 * the interface (typically after it came up). Addresses are first
 * collected under the interface lock, then probed with the lock released.
 */
void net_if_start_acd(struct net_if *iface)
{
	struct net_if_addr *ifaddr, *next;
	struct net_if_ipv4 *ipv4;
	sys_slist_t acd_needed;
	int ret;

	net_if_lock(iface);

	NET_DBG("Starting ACD for iface %p", iface);

	ret = net_if_config_ipv4_get(iface, &ipv4);
	if (ret < 0) {
		if (ret != -ENOTSUP) {
			NET_WARN("Cannot do ACD IPv4 config is not valid.");
		}

		goto out;
	}

	if (!ipv4) {
		goto out;
	}

	ipv4->conflict_cnt = 0;

	sys_slist_init(&acd_needed);

	/* Collect the addresses that were added earlier when the interface
	 * was down; loopback and non-IPv4 entries are skipped.
	 */
	ARRAY_FOR_EACH(ipv4->unicast, i) {
		if (!ipv4->unicast[i].ipv4.is_used ||
		    ipv4->unicast[i].ipv4.address.family != AF_INET ||
		    net_ipv4_is_addr_loopback(
			    &ipv4->unicast[i].ipv4.address.in_addr)) {
			continue;
		}

		sys_slist_prepend(&acd_needed, &ipv4->unicast[i].ipv4.acd_need_node);
	}

	net_if_unlock(iface);

	/* Start ACD for all the addresses without holding the iface lock
	 * to avoid any possible mutex deadlock issues.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&acd_needed,
					  ifaddr, next, acd_need_node) {
		net_if_ipv4_start_acd(iface, ifaddr);
	}

	return;

out:
	net_if_unlock(iface);
}
4360 #else
4361 #define net_if_ipv4_start_acd(...)
4362 #define net_if_start_acd(...)
4363 #endif /* CONFIG_NET_IPV4_ACD */
4364
/* Add (or refresh) a unicast IPv4 address on the interface.
 *
 * If the address already exists it is re-referenced and returned as-is.
 * Otherwise a slot is claimed (for DHCP an overridable slot is preferred
 * over a free one), the Kconfig default netmask is applied and, unless
 * ACD applies to this interface, the address becomes preferred at once.
 * Returns NULL when no IPv4 config or free slot is available.
 */
struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
					 struct in_addr *addr,
					 enum net_addr_type addr_type,
					 uint32_t vlifetime)
{
	uint32_t default_netmask = UINT32_MAX << (32 - CONFIG_NET_IPV4_DEFAULT_NETMASK);
	struct net_if_addr *ifaddr = NULL;
	struct net_if_addr_ipv4 *cur;
	struct net_if_ipv4 *ipv4;
	int idx;

	net_if_lock(iface);

	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
		goto out;
	}

	ifaddr = ipv4_addr_find(iface, addr);
	if (ifaddr) {
		/* TODO: should set addr_type/vlifetime */
		/* Address already exists, just return it but update ref count
		 * if it was not updated. This could happen if the address was
		 * added and then removed but for example an active connection
		 * was still using it. In this case we must update the ref count
		 * so that the address is not removed if the connection is closed.
		 */
		if (!ifaddr->is_added) {
			atomic_inc(&ifaddr->atomic_ref);
			ifaddr->is_added = true;
		}

		goto out;
	}

	/* Claim a slot: DHCP may take over an overridable address,
	 * otherwise use the first unused one. idx/cur are only valid
	 * when ifaddr was set here.
	 */
	ARRAY_FOR_EACH(ipv4->unicast, i) {
		cur = &ipv4->unicast[i];

		if (addr_type == NET_ADDR_DHCP
		    && cur->ipv4.addr_type == NET_ADDR_OVERRIDABLE) {
			ifaddr = &cur->ipv4;
			idx = i;
			break;
		}

		if (!ipv4->unicast[i].ipv4.is_used) {
			ifaddr = &cur->ipv4;
			idx = i;
			break;
		}
	}

	if (ifaddr) {
		ifaddr->is_used = true;
		ifaddr->is_added = true;
		ifaddr->address.family = AF_INET;
		ifaddr->address.in_addr.s4_addr32[0] =
						addr->s4_addr32[0];
		ifaddr->addr_type = addr_type;
		ifaddr->atomic_ref = ATOMIC_INIT(1);

		/* Caller has to take care of timers and their expiry */
		if (vlifetime) {
			ifaddr->is_infinite = false;
		} else {
			ifaddr->is_infinite = true;
		}

		/**
		 *  TODO: Handle properly PREFERRED/DEPRECATED state when
		 *  address in use, expired and renewal state.
		 */

		NET_DBG("[%d] interface %d (%p) address %s type %s added",
			idx, net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(addr),
			net_addr_type2str(addr_type));

		if (IS_ENABLED(CONFIG_NET_IPV4_ACD) &&
		    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
		    !net_ipv4_is_addr_loopback(addr)) {
			/* ACD is started after the lock is released. */
			;
		} else {
			ifaddr->addr_state = NET_ADDR_PREFERRED;
		}

		cur->netmask.s_addr = htonl(default_netmask);

		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
						&ifaddr->address.in_addr,
						sizeof(struct in_addr));

		/* Drop the lock before probing so ACD can take it itself. */
		net_if_unlock(iface);

		net_if_ipv4_start_acd(iface, ifaddr);

		return ifaddr;
	}

out:
	net_if_unlock(iface);

	return ifaddr;
}
4469
/* Remove a unicast IPv4 address from the interface.
 *
 * Drops one reference on the address; while other references remain the
 * entry is only marked as no longer added and false is returned.
 * NOTE(review): when the address is not found at all (unref < 0), true
 * is still returned - presumably "already gone" semantics; verify callers.
 */
bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
{
	struct net_if_addr *ifaddr;
	struct net_if_ipv4 *ipv4;
	bool result = true;
	int ret;

	if (iface == NULL || addr == NULL) {
		return false;
	}

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		result = false;
		goto out;
	}

	ret = net_if_addr_unref(iface, AF_INET, addr, &ifaddr);
	if (ret > 0) {
		NET_DBG("Address %s still in use (ref %d)",
			net_sprint_ipv4_addr(addr), ret);
		result = false;
		ifaddr->is_added = false;
		goto out;
	} else if (ret < 0) {
		NET_DBG("Address %s not found (%d)",
			net_sprint_ipv4_addr(addr), ret);
	}

out:
	net_if_unlock(iface);

	return result;
}
4506
z_impl_net_if_ipv4_addr_add_by_index(int index,struct in_addr * addr,enum net_addr_type addr_type,uint32_t vlifetime)4507 bool z_impl_net_if_ipv4_addr_add_by_index(int index,
4508 struct in_addr *addr,
4509 enum net_addr_type addr_type,
4510 uint32_t vlifetime)
4511 {
4512 struct net_if *iface;
4513 struct net_if_addr *if_addr;
4514
4515 iface = net_if_get_by_index(index);
4516 if (!iface) {
4517 return false;
4518 }
4519
4520 if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
4521 return if_addr ? true : false;
4522 }
4523
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: validate the interface index and copy the address
 * out of user space before adding it.
 */
bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
					  struct in_addr *addr,
					  enum net_addr_type addr_type,
					  uint32_t vlifetime)
{
	struct in_addr addr_v4;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));

	return z_impl_net_if_ipv4_addr_add_by_index(index,
						    &addr_v4,
						    addr_type,
						    vlifetime);
}

#include <zephyr/syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
4548
z_impl_net_if_ipv4_addr_rm_by_index(int index,const struct in_addr * addr)4549 bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
4550 const struct in_addr *addr)
4551 {
4552 struct net_if *iface;
4553
4554 iface = net_if_get_by_index(index);
4555 if (!iface) {
4556 return false;
4557 }
4558
4559 return net_if_ipv4_addr_rm(iface, addr);
4560 }
4561
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: validate the interface index and copy the address
 * out of user space before removing it.
 */
bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
					 const struct in_addr *addr)
{
	struct in_addr addr_v4;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));

	/* Return the bool result directly; the former (uint32_t) cast was
	 * a leftover and inconsistent with the sibling syscall wrappers.
	 */
	return z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
}

#include <zephyr/syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
4581
/* Invoke cb for every in-use unicast IPv4 address of the interface.
 * The interface lock is held across the callbacks.
 */
void net_if_ipv4_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
			      void *user_data)
{
	struct net_if_ipv4 *ipv4;

	if (iface == NULL) {
		return;
	}

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (ipv4 != NULL) {
		ARRAY_FOR_EACH(ipv4->unicast, i) {
			struct net_if_addr *entry = &ipv4->unicast[i].ipv4;

			if (entry->is_used) {
				cb(iface, entry, user_data);
			}
		}
	}

	net_if_unlock(iface);
}
4611
/* Find a multicast slot whose usage state matches is_used and, when addr
 * is non-NULL, whose address equals addr. NULL when nothing matches.
 */
static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
						 bool is_used,
						 const struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;

	if (ipv4 == NULL) {
		return NULL;
	}

	ARRAY_FOR_EACH(ipv4->mcast, i) {
		if (ipv4->mcast[i].is_used != is_used) {
			continue;
		}

		/* NULL addr matches any slot in the requested state. */
		if (addr != NULL &&
		    !net_ipv4_addr_cmp(&ipv4->mcast[i].address.in_addr,
				       addr)) {
			continue;
		}

		return &ipv4->mcast[i];
	}

	return NULL;
}
/* Register an IPv4 multicast address on the interface.
 *
 * Returns the new entry, or NULL when addr is not a multicast address,
 * the interface has no IPv4 config, or no free slot remains.
 */
struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
						const struct in_addr *addr)
{
	struct net_if_mcast_addr *maddr = NULL;

	net_if_lock(iface);

	if (net_if_config_ipv4_get(iface, NULL) < 0) {
		goto out;
	}

	if (!net_ipv4_is_addr_mcast(addr)) {
		NET_DBG("Address %s is not a multicast address.",
			net_sprint_ipv4_addr(addr));
		goto out;
	}

	/* NULL address argument selects the first unused slot. */
	maddr = ipv4_maddr_find(iface, false, NULL);
	if (maddr) {
		maddr->is_used = true;
		maddr->address.family = AF_INET;
		maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];

		NET_DBG("interface %d (%p) address %s added",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(addr));

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV4_MADDR_ADD, iface,
			&maddr->address.in_addr,
			sizeof(struct in_addr));
	}

out:
	net_if_unlock(iface);

	return maddr;
}
4678
net_if_ipv4_maddr_rm(struct net_if * iface,const struct in_addr * addr)4679 bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
4680 {
4681 struct net_if_mcast_addr *maddr;
4682 bool ret = false;
4683
4684 net_if_lock(iface);
4685
4686 maddr = ipv4_maddr_find(iface, true, addr);
4687 if (maddr) {
4688 maddr->is_used = false;
4689
4690 NET_DBG("interface %d (%p) address %s removed",
4691 net_if_get_by_iface(iface), iface,
4692 net_sprint_ipv4_addr(addr));
4693
4694 net_mgmt_event_notify_with_info(
4695 NET_EVENT_IPV4_MADDR_DEL, iface,
4696 &maddr->address.in_addr,
4697 sizeof(struct in_addr));
4698
4699 ret = true;
4700 }
4701
4702 net_if_unlock(iface);
4703
4704 return ret;
4705 }
4706
/* Invoke cb for every in-use IPv4 multicast address of the interface.
 * The interface lock is held across the callbacks.
 */
void net_if_ipv4_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
			       void *user_data)
{
	struct net_if_ipv4 *ipv4;

	if (iface == NULL || cb == NULL) {
		return;
	}

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		goto out;
	}

	/* Use ARRAY_FOR_EACH like the other mcast helpers (e.g.
	 * ipv4_maddr_find); the manual NET_IF_MAX_IPV4_MADDR bound
	 * duplicated the array size.
	 */
	ARRAY_FOR_EACH(ipv4->mcast, i) {
		if (!ipv4->mcast[i].is_used) {
			continue;
		}

		cb(iface, &ipv4->mcast[i], user_data);
	}

out:
	net_if_unlock(iface);
}
4734
net_if_ipv4_maddr_lookup(const struct in_addr * maddr,struct net_if ** ret)4735 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
4736 struct net_if **ret)
4737 {
4738 struct net_if_mcast_addr *addr = NULL;
4739
4740 STRUCT_SECTION_FOREACH(net_if, iface) {
4741 if (ret && *ret && iface != *ret) {
4742 continue;
4743 }
4744
4745 net_if_lock(iface);
4746
4747 addr = ipv4_maddr_find(iface, true, maddr);
4748 if (addr) {
4749 if (ret) {
4750 *ret = iface;
4751 }
4752
4753 net_if_unlock(iface);
4754 goto out;
4755 }
4756
4757 net_if_unlock(iface);
4758 }
4759
4760 out:
4761 return addr;
4762 }
4763
net_if_ipv4_maddr_leave(struct net_if * iface,struct net_if_mcast_addr * addr)4764 void net_if_ipv4_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
4765 {
4766 if (iface == NULL || addr == NULL) {
4767 return;
4768 }
4769
4770 net_if_lock(iface);
4771 addr->is_joined = false;
4772 net_if_unlock(iface);
4773 }
4774
net_if_ipv4_maddr_join(struct net_if * iface,struct net_if_mcast_addr * addr)4775 void net_if_ipv4_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
4776 {
4777 if (iface == NULL || addr == NULL) {
4778 return;
4779 }
4780
4781 net_if_lock(iface);
4782 addr->is_joined = true;
4783 net_if_unlock(iface);
4784 }
4785
4786 #if defined(CONFIG_NET_NATIVE_IPV4)
net_if_ipv4_get_ttl(struct net_if * iface)4787 uint8_t net_if_ipv4_get_ttl(struct net_if *iface)
4788 {
4789 int ret = 0;
4790
4791 net_if_lock(iface);
4792
4793 if (net_if_config_ipv4_get(iface, NULL) < 0) {
4794 goto out;
4795 }
4796
4797 if (!iface->config.ip.ipv4) {
4798 goto out;
4799 }
4800
4801 ret = iface->config.ip.ipv4->ttl;
4802 out:
4803 net_if_unlock(iface);
4804
4805 return ret;
4806 }
4807
/* Set the unicast TTL for @p iface; a no-op when IPv4 is unavailable. */
void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl)
{
	net_if_lock(iface);

	if (net_if_config_ipv4_get(iface, NULL) >= 0 &&
	    iface->config.ip.ipv4 != NULL) {
		iface->config.ip.ipv4->ttl = ttl;
	}

	net_if_unlock(iface);
}
4824
net_if_ipv4_get_mcast_ttl(struct net_if * iface)4825 uint8_t net_if_ipv4_get_mcast_ttl(struct net_if *iface)
4826 {
4827 int ret = 0;
4828
4829 net_if_lock(iface);
4830
4831 if (net_if_config_ipv4_get(iface, NULL) < 0) {
4832 goto out;
4833 }
4834
4835 if (!iface->config.ip.ipv4) {
4836 goto out;
4837 }
4838
4839 ret = iface->config.ip.ipv4->mcast_ttl;
4840 out:
4841 net_if_unlock(iface);
4842
4843 return ret;
4844 }
4845
/* Set the multicast TTL for @p iface; a no-op when IPv4 is unavailable. */
void net_if_ipv4_set_mcast_ttl(struct net_if *iface, uint8_t ttl)
{
	net_if_lock(iface);

	if (net_if_config_ipv4_get(iface, NULL) >= 0 &&
	    iface->config.ip.ipv4 != NULL) {
		iface->config.ip.ipv4->mcast_ttl = ttl;
	}

	net_if_unlock(iface);
}
4862
/* Find the router entry matching the IPv4 @p addr on @p iface. */
struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
						struct in_addr *addr)
{
	return iface_router_lookup(iface, AF_INET, addr);
}
4868
/* Find the default IPv4 router entry for @p iface (family AF_INET). */
struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
						      struct in_addr *addr)
{
	return iface_router_find_default(iface, AF_INET, addr);
}
4874
/* Add an IPv4 router entry for @p iface with the given lifetime;
 * @p is_default marks it as the default router.
 */
struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
					     struct in_addr *addr,
					     bool is_default,
					     uint16_t lifetime)
{
	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
}
4882
/* Remove a router entry; returns true when the entry was in use. */
bool net_if_ipv4_router_rm(struct net_if_router *router)
{
	return iface_router_rm(router);
}
4887
4888
iface_ipv4_init(int if_count)4889 static void iface_ipv4_init(int if_count)
4890 {
4891 int i;
4892
4893 if (if_count > ARRAY_SIZE(ipv4_addresses)) {
4894 NET_WARN("You have %zu IPv4 net_if addresses but %d "
4895 "network interfaces", ARRAY_SIZE(ipv4_addresses),
4896 if_count);
4897 NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
4898 "value.");
4899 }
4900
4901 for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
4902 ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
4903 ipv4_addresses[i].ipv4.mcast_ttl = CONFIG_NET_INITIAL_MCAST_TTL;
4904 }
4905 }
4906
leave_ipv4_mcast_all(struct net_if * iface)4907 static void leave_ipv4_mcast_all(struct net_if *iface)
4908 {
4909 struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4910
4911 if (!ipv4) {
4912 return;
4913 }
4914
4915 ARRAY_FOR_EACH(ipv4->mcast, i) {
4916 if (!ipv4->mcast[i].is_used ||
4917 !ipv4->mcast[i].is_joined) {
4918 continue;
4919 }
4920
4921 net_ipv4_igmp_leave(iface, &ipv4->mcast[i].address.in_addr);
4922 }
4923 }
4924
iface_ipv4_start(struct net_if * iface)4925 static void iface_ipv4_start(struct net_if *iface)
4926 {
4927 if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
4928 return;
4929 }
4930
4931 if (IS_ENABLED(CONFIG_NET_IPV4_ACD)) {
4932 net_if_start_acd(iface);
4933 }
4934 }
4935
/* To be called when the interface comes up so that all the non-joined
 * multicast groups are joined again.
 *
 * The candidates are collected on a temporary list while holding the
 * interface lock, and the actual IGMP joins are performed after the
 * lock is dropped, since net_ipv4_igmp_join() may block or take other
 * locks of its own.
 */
static void rejoin_ipv4_mcast_groups(struct net_if *iface)
{
	struct net_if_mcast_addr *ifaddr, *next;
	struct net_if_ipv4 *ipv4;
	sys_slist_t rejoin_needed;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		goto out;
	}

	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
		goto out;
	}

	sys_slist_init(&rejoin_needed);

	/* Rejoin any mcast address present on the interface, but marked as not joined. */
	ARRAY_FOR_EACH(ipv4->mcast, i) {
		if (!ipv4->mcast[i].is_used ||
		    net_if_ipv4_maddr_is_joined(&ipv4->mcast[i])) {
			continue;
		}

		/* Each entry carries its own rejoin_node, so no allocation
		 * is needed to build the temporary list.
		 */
		sys_slist_prepend(&rejoin_needed, &ipv4->mcast[i].rejoin_node);
	}

	net_if_unlock(iface);

	/* Lock released: perform the joins outside the critical section. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&rejoin_needed, ifaddr, next, rejoin_node) {
		int ret;

		ret = net_ipv4_igmp_join(iface, &ifaddr->address.in_addr, NULL);
		if (ret < 0) {
			NET_ERR("Cannot join mcast address %s for %d (%d)",
				net_sprint_ipv4_addr(&ifaddr->address.in_addr),
				net_if_get_by_iface(iface), ret);
		} else {
			NET_DBG("Rejoined mcast address %s for %d",
				net_sprint_ipv4_addr(&ifaddr->address.in_addr),
				net_if_get_by_iface(iface));
		}
	}

	return;

out:
	net_if_unlock(iface);
}
4989
/* To be called when the interface goes operationally down so that the
 * multicast groups are marked not-joined and get rejoined when the
 * interface comes back up (see rejoin_ipv4_mcast_groups()).
 */
static void clear_joined_ipv4_mcast_groups(struct net_if *iface)
{
	struct net_if_ipv4 *ipv4;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		goto out;
	}

	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
		goto out;
	}

	ARRAY_FOR_EACH(ipv4->mcast, i) {
		if (!ipv4->mcast[i].is_used) {
			continue;
		}

		/* NOTE(review): net_if_ipv4_maddr_leave() takes the
		 * interface lock again while we already hold it here;
		 * this relies on net_if_lock() being recursive for the
		 * owning thread — confirm against the lock implementation.
		 */
		net_if_ipv4_maddr_leave(iface, &ipv4->mcast[i]);
	}

out:
	net_if_unlock(iface);
}
5018
5019 #endif /* CONFIG_NET_NATIVE_IPV4 */
5020 #else /* CONFIG_NET_IPV4 */
/* Stub used when IPv4 support is disabled: never finds a match. */
struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
						   struct net_if **iface)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(iface);

	return NULL;
}
5029
/* Stub used when IPv4 support is disabled: never finds a match. */
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
					    struct net_if **ret)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(ret);

	return NULL;
}
5038
/* Stub used when IPv4 support is disabled: no global address exists. */
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
					    enum net_addr_state addr_state)
{
	ARG_UNUSED(addr_state);
	ARG_UNUSED(iface);

	return NULL;
}
5047 #endif /* CONFIG_NET_IPV4 */
5048
5049 #if !defined(CONFIG_NET_NATIVE_IPV4)
5050 #define leave_ipv4_mcast_all(...)
5051 #define clear_joined_ipv4_mcast_groups(...)
5052 #define iface_ipv4_init(...)
5053 #define iface_ipv4_start(...)
5054 #endif /* !CONFIG_NET_NATIVE_IPV4 */
5055
net_if_select_src_iface(const struct sockaddr * dst)5056 struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
5057 {
5058 struct net_if *iface = NULL;
5059
5060 if (!dst) {
5061 goto out;
5062 }
5063
5064 if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
5065 iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
5066 goto out;
5067 }
5068
5069 if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
5070 iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
5071 goto out;
5072 }
5073
5074 out:
5075 if (iface == NULL) {
5076 iface = net_if_get_default();
5077 }
5078
5079 return iface;
5080 }
5081
/* Look up the unicast address entry matching @p addr on @p iface.
 *
 * For AF_INET6 the function also counts how many in-use unicast
 * addresses map to the same solicited-node multicast address as
 * @p addr and stores the count in @p mcast_addr_count (when non-NULL);
 * callers use that count to decide whether the solicited-node group
 * can be removed along with the address.
 *
 * Returns the matching entry, or NULL when not found or when the
 * family's configuration is absent.
 */
static struct net_if_addr *get_ifaddr(struct net_if *iface,
				      sa_family_t family,
				      const void *addr,
				      unsigned int *mcast_addr_count)
{
	struct net_if_addr *ifaddr = NULL;

	net_if_lock(iface);

	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
		struct net_if_ipv6 *ipv6 =
			COND_CODE_1(CONFIG_NET_IPV6, (iface->config.ip.ipv6), (NULL));
		struct in6_addr maddr;
		unsigned int maddr_count = 0;
		int found = -1;

		if (ipv6 == NULL) {
			goto out;
		}

		net_ipv6_addr_create_solicited_node((struct in6_addr *)addr,
						    &maddr);

		ARRAY_FOR_EACH(ipv6->unicast, i) {
			struct in6_addr unicast_maddr;

			if (!ipv6->unicast[i].is_used) {
				continue;
			}

			/* Count how many times this solicited-node multicast address is identical
			 * for all the used unicast addresses
			 */
			net_ipv6_addr_create_solicited_node(
				&ipv6->unicast[i].address.in6_addr,
				&unicast_maddr);

			if (net_ipv6_addr_cmp(&maddr, &unicast_maddr)) {
				maddr_count++;
			}

			if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr, addr)) {
				continue;
			}

			/* Remember the match but keep scanning so the
			 * solicited-node count covers the whole table.
			 */
			found = i;
		}

		if (found >= 0) {
			ifaddr = &ipv6->unicast[found];

			if (mcast_addr_count != NULL) {
				*mcast_addr_count = maddr_count;
			}
		}

		goto out;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
		struct net_if_ipv4 *ipv4 =
			COND_CODE_1(CONFIG_NET_IPV4, (iface->config.ip.ipv4), (NULL));

		if (ipv4 == NULL) {
			goto out;
		}

		ARRAY_FOR_EACH(ipv4->unicast, i) {
			if (!ipv4->unicast[i].ipv4.is_used) {
				continue;
			}

			if (!net_ipv4_addr_cmp(&ipv4->unicast[i].ipv4.address.in_addr,
					       addr)) {
				continue;
			}

			ifaddr = &ipv4->unicast[i].ipv4;

			goto out;
		}
	}

out:
	net_if_unlock(iface);

	return ifaddr;
}
5170
/* Tear down interface state tied to a removed IPv6 unicast address:
 * cancel its lifetime and DAD timers, drop the matching solicited-node
 * multicast address when no other unicast address shares it, and emit
 * the NET_EVENT_IPV6_ADDR_DEL management event.
 */
static void remove_ipv6_ifaddr(struct net_if *iface,
			       struct net_if_addr *ifaddr,
			       unsigned int maddr_count)
{
	struct net_if_ipv6 *ipv6;

	net_if_lock(iface);

	ipv6 = COND_CODE_1(CONFIG_NET_IPV6, (iface->config.ip.ipv6), (NULL));
	if (!ipv6) {
		goto out;
	}

	if (!ifaddr->is_infinite) {
		k_mutex_lock(&lock, K_FOREVER);

#if defined(CONFIG_NET_NATIVE_IPV6)
		sys_slist_find_and_remove(&active_address_lifetime_timers,
					  &ifaddr->lifetime.node);

		/* Stop the shared lifetime work item once no address
		 * needs it any more.
		 */
		if (sys_slist_is_empty(&active_address_lifetime_timers)) {
			k_work_cancel_delayable(&address_lifetime_timer);
		}
#endif
		k_mutex_unlock(&lock);
	}

#if defined(CONFIG_NET_IPV6_DAD)
	if (!net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
		k_mutex_lock(&lock, K_FOREVER);
		if (sys_slist_find_and_remove(&active_dad_timers,
					      &ifaddr->dad_node)) {
			/* Address with an active DAD timer would still have
			 * a stale entry in the neighbor cache.
			 */
			net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
		}
		k_mutex_unlock(&lock);
	}
#endif

	if (maddr_count == 1) {
		/* Remove the solicited-node multicast address only if no other
		 * unicast address is also using it
		 */
		struct in6_addr maddr;

		net_ipv6_addr_create_solicited_node(&ifaddr->address.in6_addr,
						    &maddr);
		net_if_ipv6_maddr_rm(iface, &maddr);
	}

	/* Using the IPv6 address pointer here can give false
	 * info if someone adds a new IP address into this position
	 * in the address array. This is quite unlikely though.
	 */
	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ADDR_DEL,
					iface,
					&ifaddr->address.in6_addr,
					sizeof(struct in6_addr));
out:
	net_if_unlock(iface);
}
5234
/* Tear down interface state tied to a removed IPv4 unicast address:
 * cancel any pending address conflict detection and emit the
 * NET_EVENT_IPV4_ADDR_DEL management event.
 */
static void remove_ipv4_ifaddr(struct net_if *iface,
			       struct net_if_addr *ifaddr)
{
	struct net_if_ipv4 *ipv4;

	net_if_lock(iface);

	ipv4 = COND_CODE_1(CONFIG_NET_IPV4, (iface->config.ip.ipv4), (NULL));
	if (!ipv4) {
		goto out;
	}

#if defined(CONFIG_NET_IPV4_ACD)
	net_ipv4_acd_cancel(iface, ifaddr);
#endif

	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_DEL,
					iface,
					&ifaddr->address.in_addr,
					sizeof(struct in_addr));
out:
	net_if_unlock(iface);
}
5258
5259 #if defined(CONFIG_NET_IF_LOG_LEVEL)
5260 #define NET_LOG_LEVEL CONFIG_NET_IF_LOG_LEVEL
5261 #else
5262 #define NET_LOG_LEVEL 0
5263 #endif
5264
/* Atomically take a reference on the interface address matching
 * @p addr. Returns the entry, or NULL when the address is not found or
 * its reference count has already dropped to zero (i.e. it is being
 * freed). The _debug variant additionally logs the caller location.
 */
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_if_addr *net_if_addr_ref_debug(struct net_if *iface,
					  sa_family_t family,
					  const void *addr,
					  const char *caller,
					  int line)
#else
struct net_if_addr *net_if_addr_ref(struct net_if *iface,
				    sa_family_t family,
				    const void *addr)
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
{
	struct net_if_addr *ifaddr;
	atomic_val_t ref;

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
	char addr_str[IS_ENABLED(CONFIG_NET_IPV6) ?
		      INET6_ADDRSTRLEN : INET_ADDRSTRLEN];

	__ASSERT(iface, "iface is NULL (%s():%d)", caller, line);
#endif

	ifaddr = get_ifaddr(iface, family, addr, NULL);

	/* Lock-free CAS loop: retry the increment until it lands on an
	 * unchanged count; a zero count means the entry is going away.
	 */
	do {
		ref = ifaddr ? atomic_get(&ifaddr->atomic_ref) : 0;
		if (!ref) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
			NET_ERR("iface %d addr %s (%s():%d)",
				net_if_get_by_iface(iface),
				net_addr_ntop(family,
					      addr,
					      addr_str, sizeof(addr_str)),
				caller, line);
#endif
			return NULL;
		}
	} while (!atomic_cas(&ifaddr->atomic_ref, ref, ref + 1));

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
	NET_DBG("[%d] ifaddr %s state %d ref %ld (%s():%d)",
		net_if_get_by_iface(iface),
		net_addr_ntop(ifaddr->address.family,
			      (void *)&ifaddr->address.in_addr,
			      addr_str, sizeof(addr_str)),
		ifaddr->addr_state,
		ref + 1,
		caller, line);
#endif

	return ifaddr;
}
5317
/* Atomically drop a reference on the interface address matching
 * @p addr. When the count reaches zero the entry is marked unused and
 * the per-family cleanup (timers, multicast, ADDR_DEL event) runs.
 *
 * Returns the remaining reference count (>= 0), or -EINVAL when the
 * address is unknown or already freed. When @p ret_ifaddr is non-NULL
 * and references remain, the entry is stored there.
 */
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
int net_if_addr_unref_debug(struct net_if *iface,
			    sa_family_t family,
			    const void *addr,
			    struct net_if_addr **ret_ifaddr,
			    const char *caller, int line)
#else
int net_if_addr_unref(struct net_if *iface,
		      sa_family_t family,
		      const void *addr,
		      struct net_if_addr **ret_ifaddr)
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
{
	struct net_if_addr *ifaddr;
	unsigned int maddr_count = 0;
	atomic_val_t ref;

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
	char addr_str[IS_ENABLED(CONFIG_NET_IPV6) ?
		      INET6_ADDRSTRLEN : INET_ADDRSTRLEN];

	__ASSERT(iface, "iface is NULL (%s():%d)", caller, line);
#endif

	ifaddr = get_ifaddr(iface, family, addr, &maddr_count);

	if (!ifaddr) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
		NET_ERR("iface %d addr %s (%s():%d)",
			net_if_get_by_iface(iface),
			net_addr_ntop(family,
				      addr,
				      addr_str, sizeof(addr_str)),
			caller, line);
#endif
		return -EINVAL;
	}

	/* Lock-free CAS loop mirroring net_if_addr_ref(); a zero count
	 * here means someone else already released the last reference.
	 */
	do {
		ref = atomic_get(&ifaddr->atomic_ref);
		if (!ref) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
			NET_ERR("*** ERROR *** iface %d ifaddr %p "
				"is freed already (%s():%d)",
				net_if_get_by_iface(iface),
				ifaddr,
				caller, line);
#endif
			return -EINVAL;
		}

	} while (!atomic_cas(&ifaddr->atomic_ref, ref, ref - 1));

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
	NET_DBG("[%d] ifaddr %s state %d ref %ld (%s():%d)",
		net_if_get_by_iface(iface),
		net_addr_ntop(ifaddr->address.family,
			      (void *)&ifaddr->address.in_addr,
			      addr_str, sizeof(addr_str)),
		ifaddr->addr_state,
		ref - 1, caller, line);
#endif

	if (ref > 1) {
		if (ret_ifaddr) {
			*ret_ifaddr = ifaddr;
		}

		return ref - 1;
	}

	/* Last reference dropped: release the slot and clean up. */
	ifaddr->is_used = false;

	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 && addr != NULL) {
		remove_ipv6_ifaddr(iface, ifaddr, maddr_count);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET && addr != NULL) {
		remove_ipv4_ifaddr(iface, ifaddr);
	}

	return 0;
}
5401
net_if_recv_data(struct net_if * iface,struct net_pkt * pkt)5402 enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
5403 {
5404 if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
5405 net_if_is_promisc(iface)) {
5406 struct net_pkt *new_pkt;
5407
5408 new_pkt = net_pkt_clone(pkt, K_NO_WAIT);
5409
5410 if (net_promisc_mode_input(new_pkt) == NET_DROP) {
5411 net_pkt_unref(new_pkt);
5412 }
5413 }
5414
5415 return net_if_l2(iface)->recv(iface, pkt);
5416 }
5417
/* Register (or re-register) a link state callback.
 *
 * The entry is removed first so that registering the same entry twice
 * cannot corrupt the list; it is then placed at the head.
 */
void net_if_register_link_cb(struct net_if_link_cb *link,
			     net_if_link_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&link_callbacks, &link->node);
	sys_slist_prepend(&link_callbacks, &link->node);

	link->cb = cb;

	k_mutex_unlock(&lock);
}
5430
/* Unregister a previously registered link state callback entry. */
void net_if_unregister_link_cb(struct net_if_link_cb *link)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&link_callbacks, &link->node);

	k_mutex_unlock(&lock);
}
5439
/* Invoke every registered link callback with the given link-layer
 * address and TX status, under the global callback-list lock.
 */
void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
			 int status)
{
	struct net_if_link_cb *link, *tmp;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
		link->cb(iface, lladdr, status);
	}

	k_mutex_unlock(&lock);
}
5453
need_calc_checksum(struct net_if * iface,enum ethernet_hw_caps caps,enum net_if_checksum_type chksum_type)5454 static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps,
5455 enum net_if_checksum_type chksum_type)
5456 {
5457 #if defined(CONFIG_NET_L2_ETHERNET)
5458 struct ethernet_config config;
5459 enum ethernet_config_type config_type;
5460
5461 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
5462 /* For VLANs, figure out the main Ethernet interface and
5463 * get the offloading capabilities from it.
5464 */
5465 if (IS_ENABLED(CONFIG_NET_VLAN) && net_eth_is_vlan_interface(iface)) {
5466 iface = net_eth_get_vlan_main(iface);
5467 if (iface == NULL) {
5468 return true;
5469 }
5470
5471 NET_ASSERT(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET));
5472 } else {
5473 return true;
5474 }
5475 }
5476
5477 if (!(net_eth_get_hw_capabilities(iface) & caps)) {
5478 return true; /* No checksum offload*/
5479 }
5480
5481 if (caps == ETHERNET_HW_RX_CHKSUM_OFFLOAD) {
5482 config_type = ETHERNET_CONFIG_TYPE_RX_CHECKSUM_SUPPORT;
5483 } else {
5484 config_type = ETHERNET_CONFIG_TYPE_TX_CHECKSUM_SUPPORT;
5485 }
5486
5487 if (net_eth_get_hw_config(iface, config_type, &config) != 0) {
5488 return false; /* No extra info, assume all offloaded. */
5489 }
5490
5491 /* bitmaps are encoded such that this works */
5492 return !((config.chksum_support & chksum_type) == chksum_type);
5493 #else
5494 ARG_UNUSED(iface);
5495 ARG_UNUSED(caps);
5496
5497 return true;
5498 #endif
5499 }
5500
/* True when the TX checksum of @p chksum_type must be computed in
 * software for packets sent over @p iface.
 */
bool net_if_need_calc_tx_checksum(struct net_if *iface, enum net_if_checksum_type chksum_type)
{
	return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD, chksum_type);
}
5505
/* True when the RX checksum of @p chksum_type must be verified in
 * software for packets received on @p iface.
 */
bool net_if_need_calc_rx_checksum(struct net_if *iface, enum net_if_checksum_type chksum_type)
{
	return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD, chksum_type);
}
5510
net_if_get_by_iface(struct net_if * iface)5511 int net_if_get_by_iface(struct net_if *iface)
5512 {
5513 if (!(iface >= _net_if_list_start && iface < _net_if_list_end)) {
5514 return -1;
5515 }
5516
5517 return (iface - _net_if_list_start) + 1;
5518 }
5519
/* Invoke @p cb for every network interface in the net_if section. */
void net_if_foreach(net_if_cb_t cb, void *user_data)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		cb(iface, user_data);
	}
}
5526
net_if_is_offloaded(struct net_if * iface)5527 bool net_if_is_offloaded(struct net_if *iface)
5528 {
5529 return (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
5530 net_if_is_ip_offloaded(iface)) ||
5531 (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
5532 net_if_is_socket_offloaded(iface));
5533 }
5534
/* Rejoin all previously joined multicast groups (IPv6 and IPv4) after
 * the interface transitions back to operational UP.
 */
static void rejoin_multicast_groups(struct net_if *iface)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	rejoin_ipv6_mcast_groups(iface);
	if (l2_flags_get(iface) & NET_L2_MULTICAST) {
		join_mcast_allnodes(iface);
	}
#endif
#if defined(CONFIG_NET_NATIVE_IPV4)
	rejoin_ipv4_mcast_groups(iface);
#endif
#if !defined(CONFIG_NET_NATIVE_IPV6) && !defined(CONFIG_NET_NATIVE_IPV4)
	ARG_UNUSED(iface);
#endif
}
5550
/* Handle the operational transition to UP: mark the interface running,
 * notify listeners, enable stacked virtual interfaces and restart
 * per-family address/multicast bookkeeping.
 */
static void notify_iface_up(struct net_if *iface)
{
	/* In many places it's assumed that link address was set with
	 * net_if_set_link_addr(). Better check that now.
	 */
	if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW) &&
	    IS_ENABLED(CONFIG_NET_SOCKETS_CAN) &&
	    (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW))) {
		/* CAN does not require link address. */
	} else {
		if (!net_if_is_offloaded(iface)) {
			NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);
		}
	}

	net_if_flag_set(iface, NET_IF_RUNNING);
	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);
	net_virtual_enable(iface);

	/* If the interface is only having point-to-point traffic then we do
	 * not need to run DAD etc for it.
	 */
	if (!net_if_is_offloaded(iface) &&
	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
		/* Make sure that we update the IPv6 addresses and join the
		 * multicast groups.
		 */
		rejoin_multicast_groups(iface);
		iface_ipv6_start(iface);
		iface_ipv4_start(iface);
		net_ipv4_autoconf_start(iface);
	}
}
5584
/* Handle the operational transition away from UP: clear the running
 * flag, notify listeners, disable stacked virtual interfaces and stop
 * per-family address/multicast processing.
 */
static void notify_iface_down(struct net_if *iface)
{
	net_if_flag_clear(iface, NET_IF_RUNNING);
	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
	net_virtual_disable(iface);

	if (!net_if_is_offloaded(iface) &&
	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
		iface_ipv6_stop(iface);
		clear_joined_ipv6_mcast_groups(iface);
		clear_joined_ipv4_mcast_groups(iface);
		net_ipv4_autoconf_reset(iface);
	}
}
5599
net_if_oper_state2str(enum net_if_oper_state state)5600 const char *net_if_oper_state2str(enum net_if_oper_state state)
5601 {
5602 switch (state) {
5603 case NET_IF_OPER_UNKNOWN:
5604 return "UNKNOWN";
5605 case NET_IF_OPER_NOTPRESENT:
5606 return "NOTPRESENT";
5607 case NET_IF_OPER_DOWN:
5608 return "DOWN";
5609 case NET_IF_OPER_LOWERLAYERDOWN:
5610 return "LOWERLAYERDOWN";
5611 case NET_IF_OPER_TESTING:
5612 return "TESTING";
5613 case NET_IF_OPER_DORMANT:
5614 return "DORMANT";
5615 case NET_IF_OPER_UP:
5616 return "UP";
5617 default:
5618 break;
5619 }
5620
5621 return "<invalid>";
5622 }
5623
/* Re-derive the operational state of @p iface from its admin state,
 * device readiness, carrier and dormant flags, store it, and notify
 * transitions across the UP boundary.
 */
static void update_operational_state(struct net_if *iface)
{
	enum net_if_oper_state prev_state = iface->if_dev->oper_state;
	enum net_if_oper_state new_state = NET_IF_OPER_UNKNOWN;

	if (!net_if_is_admin_up(iface)) {
		new_state = NET_IF_OPER_DOWN;
		goto exit;
	}

	if (!device_is_ready(net_if_get_device(iface))) {
		new_state = NET_IF_OPER_LOWERLAYERDOWN;
		goto exit;
	}

	if (!net_if_is_carrier_ok(iface)) {
#if defined(CONFIG_NET_L2_VIRTUAL)
		/* A virtual interface sits on top of another interface,
		 * so a missing carrier means the lower layer is down.
		 */
		if (net_if_l2(iface) == &NET_L2_GET_NAME(VIRTUAL)) {
			new_state = NET_IF_OPER_LOWERLAYERDOWN;
		} else
#endif /* CONFIG_NET_L2_VIRTUAL */
		{
			new_state = NET_IF_OPER_DOWN;
		}

		goto exit;
	}

	if (net_if_is_dormant(iface)) {
		new_state = NET_IF_OPER_DORMANT;
		goto exit;
	}

	new_state = NET_IF_OPER_UP;

exit:
	if (net_if_oper_state_set(iface, new_state) != new_state) {
		NET_ERR("Failed to update oper state to %d", new_state);
		return;
	}

	NET_DBG("iface %d (%p), oper state %s admin %s carrier %s dormant %s",
		net_if_get_by_iface(iface), iface,
		net_if_oper_state2str(net_if_oper_state(iface)),
		net_if_is_admin_up(iface) ? "UP" : "DOWN",
		net_if_is_carrier_ok(iface) ? "ON" : "OFF",
		net_if_is_dormant(iface) ? "ON" : "OFF");

	/* Only transitions into or out of OPER_UP are announced. */
	if (net_if_oper_state(iface) == NET_IF_OPER_UP) {
		if (prev_state != NET_IF_OPER_UP) {
			notify_iface_up(iface);
		}
	} else {
		if (prev_state == NET_IF_OPER_UP) {
			notify_iface_down(iface);
		}
	}
}
5682
/* Initialize IGMP on the interface when IPv4 multicast group
 * management is configured; a no-op otherwise.
 */
static void init_igmp(struct net_if *iface)
{
#if defined(CONFIG_NET_IPV4_IGMP)
	/* Ensure IPv4 is enabled for this interface. */
	if (net_if_config_ipv4_get(iface, NULL)) {
		return;
	}

	net_ipv4_igmp_init(iface);
#else
	/* Dropped the superfluous trailing `return;` — it is redundant
	 * at the end of a void function body.
	 */
	ARG_UNUSED(iface);
#endif
}
5697
/* Take the interface administratively up.
 *
 * When the L2 implements enable(), the device must be ready and the L2
 * is asked to enable itself before NET_IF_UP is set; otherwise the flag
 * is set directly. On success IGMP is initialized, the ADMIN_UP event
 * is emitted and the operational state re-evaluated.
 *
 * Returns 0 on success, -EALREADY if already up, -ENXIO when the device
 * is not ready, or a negative L2 specific error.
 */
int net_if_up(struct net_if *iface)
{
	int status = 0;

	NET_DBG("iface %d (%p)", net_if_get_by_iface(iface), iface);

	net_if_lock(iface);

	if (net_if_flag_is_set(iface, NET_IF_UP)) {
		status = -EALREADY;
		goto out;
	}

	/* If the L2 does not support enable just set the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	} else {
		/* If the L2 does not implement enable(), then the network
		 * device driver cannot implement start(), in which case
		 * we can do simple check here and not try to bring interface
		 * up as the device is not ready.
		 *
		 * If the network device driver does implement start(), then
		 * it could bring the interface up when the enable() is called
		 * few lines below.
		 */
		const struct device *dev;

		dev = net_if_get_device(iface);
		NET_ASSERT(dev);

		/* If the device is not ready it is pointless trying to take it up. */
		if (!device_is_ready(dev)) {
			NET_DBG("Device %s (%p) is not ready", dev->name, dev);
			status = -ENXIO;
			goto out;
		}
	}

	/* Notify L2 to enable the interface. Note that the interface is still down
	 * at this point from network interface point of view i.e., the NET_IF_UP
	 * flag has not been set yet.
	 */
	status = net_if_l2(iface)->enable(iface, true);
	if (status < 0) {
		NET_DBG("Cannot take interface %d up (%d)",
			net_if_get_by_iface(iface), status);
		goto out;
	}

	init_igmp(iface);

done:
	net_if_flag_set(iface, NET_IF_UP);
	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_UP, iface);
	update_operational_state(iface);

out:
	net_if_unlock(iface);

	return status;
}
5760
net_if_down(struct net_if * iface)5761 int net_if_down(struct net_if *iface)
5762 {
5763 int status = 0;
5764
5765 NET_DBG("iface %p", iface);
5766
5767 net_if_lock(iface);
5768
5769 if (!net_if_flag_is_set(iface, NET_IF_UP)) {
5770 status = -EALREADY;
5771 goto out;
5772 }
5773
5774 leave_mcast_all(iface);
5775 leave_ipv4_mcast_all(iface);
5776
5777 /* If the L2 does not support enable just clear the flag */
5778 if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
5779 goto done;
5780 }
5781
5782 /* Notify L2 to disable the interface */
5783 status = net_if_l2(iface)->enable(iface, false);
5784 if (status < 0) {
5785 goto out;
5786 }
5787
5788 done:
5789 net_if_flag_clear(iface, NET_IF_UP);
5790 net_mgmt_event_notify(NET_EVENT_IF_ADMIN_DOWN, iface);
5791 update_operational_state(iface);
5792
5793 out:
5794 net_if_unlock(iface);
5795
5796 return status;
5797 }
5798
net_if_carrier_on(struct net_if * iface)5799 void net_if_carrier_on(struct net_if *iface)
5800 {
5801 if (iface == NULL) {
5802 return;
5803 }
5804
5805 net_if_lock(iface);
5806
5807 if (!net_if_flag_test_and_set(iface, NET_IF_LOWER_UP)) {
5808 update_operational_state(iface);
5809 }
5810
5811 net_if_unlock(iface);
5812 }
5813
net_if_carrier_off(struct net_if * iface)5814 void net_if_carrier_off(struct net_if *iface)
5815 {
5816 if (iface == NULL) {
5817 return;
5818 }
5819
5820 net_if_lock(iface);
5821
5822 if (net_if_flag_test_and_clear(iface, NET_IF_LOWER_UP)) {
5823 update_operational_state(iface);
5824 }
5825
5826 net_if_unlock(iface);
5827 }
5828
net_if_dormant_on(struct net_if * iface)5829 void net_if_dormant_on(struct net_if *iface)
5830 {
5831 if (iface == NULL) {
5832 return;
5833 }
5834
5835 net_if_lock(iface);
5836
5837 if (!net_if_flag_test_and_set(iface, NET_IF_DORMANT)) {
5838 update_operational_state(iface);
5839 }
5840
5841 net_if_unlock(iface);
5842 }
5843
net_if_dormant_off(struct net_if * iface)5844 void net_if_dormant_off(struct net_if *iface)
5845 {
5846 if (iface == NULL) {
5847 return;
5848 }
5849
5850 net_if_lock(iface);
5851
5852 if (net_if_flag_test_and_clear(iface, NET_IF_DORMANT)) {
5853 update_operational_state(iface);
5854 }
5855
5856 net_if_unlock(iface);
5857 }
5858
5859 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
/* Ask the L2/driver to switch promiscuous mode on or off for @p iface.
 *
 * Returns 0 on success, -EINVAL for a NULL interface, -ENOTSUP when
 * the L2 does not advertise NET_L2_PROMISC_MODE (or no Ethernet L2 is
 * compiled in), or a driver specific negative error.
 */
static int promisc_mode_set(struct net_if *iface, bool enable)
{
	enum net_l2_flags l2_flags = 0;

	if (iface == NULL) {
		return -EINVAL;
	}

	l2_flags = l2_flags_get(iface);
	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_NET_L2_ETHERNET)
	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
		int ret = net_eth_promisc_mode(iface, enable);

		if (ret < 0) {
			return ret;
		}
	}
#else
	ARG_UNUSED(enable);

	return -ENOTSUP;
#endif

	return 0;
}
5889
net_if_set_promisc(struct net_if * iface)5890 int net_if_set_promisc(struct net_if *iface)
5891 {
5892 int ret;
5893
5894 net_if_lock(iface);
5895
5896 ret = promisc_mode_set(iface, true);
5897 if (ret < 0 && ret != -EALREADY) {
5898 goto out;
5899 }
5900
5901 ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
5902 if (ret) {
5903 ret = -EALREADY;
5904 goto out;
5905 }
5906
5907 out:
5908 net_if_unlock(iface);
5909
5910 return ret;
5911 }
5912
net_if_unset_promisc(struct net_if * iface)5913 void net_if_unset_promisc(struct net_if *iface)
5914 {
5915 int ret;
5916
5917 net_if_lock(iface);
5918
5919 ret = promisc_mode_set(iface, false);
5920 if (ret < 0) {
5921 goto out;
5922 }
5923
5924 net_if_flag_clear(iface, NET_IF_PROMISC);
5925
5926 out:
5927 net_if_unlock(iface);
5928 }
5929
net_if_is_promisc(struct net_if * iface)5930 bool net_if_is_promisc(struct net_if *iface)
5931 {
5932 if (iface == NULL) {
5933 return false;
5934 }
5935
5936 return net_if_flag_is_set(iface, NET_IF_PROMISC);
5937 }
5938 #endif /* CONFIG_NET_PROMISCUOUS_MODE */
5939
5940 #ifdef CONFIG_NET_POWER_MANAGEMENT
5941
net_if_suspend(struct net_if * iface)5942 int net_if_suspend(struct net_if *iface)
5943 {
5944 int ret = 0;
5945
5946 net_if_lock(iface);
5947
5948 if (net_if_are_pending_tx_packets(iface)) {
5949 ret = -EBUSY;
5950 goto out;
5951 }
5952
5953 if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
5954 ret = -EALREADY;
5955 goto out;
5956 }
5957
5958 net_stats_add_suspend_start_time(iface, k_cycle_get_32());
5959
5960 out:
5961 net_if_unlock(iface);
5962
5963 return ret;
5964 }
5965
net_if_resume(struct net_if * iface)5966 int net_if_resume(struct net_if *iface)
5967 {
5968 int ret = 0;
5969
5970 net_if_lock(iface);
5971
5972 if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
5973 ret = -EALREADY;
5974 goto out;
5975 }
5976
5977 net_if_flag_clear(iface, NET_IF_SUSPENDED);
5978
5979 net_stats_add_suspend_end_time(iface, k_cycle_get_32());
5980
5981 out:
5982 net_if_unlock(iface);
5983
5984 return ret;
5985 }
5986
net_if_is_suspended(struct net_if * iface)5987 bool net_if_is_suspended(struct net_if *iface)
5988 {
5989 return net_if_flag_is_set(iface, NET_IF_SUSPENDED);
5990 }
5991
5992 #endif /* CONFIG_NET_POWER_MANAGEMENT */
5993
5994 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
net_tx_ts_thread(void * p1,void * p2,void * p3)5995 static void net_tx_ts_thread(void *p1, void *p2, void *p3)
5996 {
5997 ARG_UNUSED(p1);
5998 ARG_UNUSED(p2);
5999 ARG_UNUSED(p3);
6000
6001 struct net_pkt *pkt;
6002
6003 NET_DBG("Starting TX timestamp callback thread");
6004
6005 while (1) {
6006 pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
6007 if (pkt) {
6008 net_if_call_timestamp_cb(pkt);
6009 }
6010 net_pkt_unref(pkt);
6011 }
6012 }
6013
/* Register a timestamp callback.
 *
 * @param handle Caller-provided callback handle storage.
 * @param pkt    Packet to match, or NULL to match any packet.
 * @param iface  Interface to match, or NULL to match any interface.
 * @param cb     Function invoked from net_if_call_timestamp_cb().
 */
void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
				  struct net_pkt *pkt,
				  struct net_if *iface,
				  net_if_timestamp_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	/* Remove first so re-registering an already-listed handle does
	 * not corrupt the list, then add it to the front.
	 */
	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
	sys_slist_prepend(&timestamp_callbacks, &handle->node);

	handle->iface = iface;
	handle->cb = cb;
	handle->pkt = pkt;

	k_mutex_unlock(&lock);
}
6030
net_if_unregister_timestamp_cb(struct net_if_timestamp_cb * handle)6031 void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
6032 {
6033 k_mutex_lock(&lock, K_FOREVER);
6034
6035 sys_slist_find_and_remove(×tamp_callbacks, &handle->node);
6036
6037 k_mutex_unlock(&lock);
6038 }
6039
net_if_call_timestamp_cb(struct net_pkt * pkt)6040 void net_if_call_timestamp_cb(struct net_pkt *pkt)
6041 {
6042 sys_snode_t *sn, *sns;
6043
6044 k_mutex_lock(&lock, K_FOREVER);
6045
6046 SYS_SLIST_FOR_EACH_NODE_SAFE(×tamp_callbacks, sn, sns) {
6047 struct net_if_timestamp_cb *handle =
6048 CONTAINER_OF(sn, struct net_if_timestamp_cb, node);
6049
6050 if (((handle->iface == NULL) ||
6051 (handle->iface == net_pkt_iface(pkt))) &&
6052 (handle->pkt == NULL || handle->pkt == pkt)) {
6053 handle->cb(pkt);
6054 }
6055 }
6056
6057 k_mutex_unlock(&lock);
6058 }
6059
net_if_add_tx_timestamp(struct net_pkt * pkt)6060 void net_if_add_tx_timestamp(struct net_pkt *pkt)
6061 {
6062 k_fifo_put(&tx_ts_queue, pkt);
6063 net_pkt_ref(pkt);
6064 }
6065 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
6066
net_if_is_wifi(struct net_if * iface)6067 bool net_if_is_wifi(struct net_if *iface)
6068 {
6069 if (net_if_is_offloaded(iface)) {
6070 return net_off_is_wifi_offloaded(iface);
6071 }
6072
6073 if (IS_ENABLED(CONFIG_NET_L2_ETHERNET)) {
6074 return net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) &&
6075 net_eth_type_is_wifi(iface);
6076 }
6077
6078 return false;
6079 }
6080
net_if_get_first_wifi(void)6081 struct net_if *net_if_get_first_wifi(void)
6082 {
6083 STRUCT_SECTION_FOREACH(net_if, iface) {
6084 if (net_if_is_wifi(iface)) {
6085 return iface;
6086 }
6087 }
6088 return NULL;
6089 }
6090
net_if_get_wifi_sta(void)6091 struct net_if *net_if_get_wifi_sta(void)
6092 {
6093 STRUCT_SECTION_FOREACH(net_if, iface) {
6094 if (net_if_is_wifi(iface)
6095 #ifdef CONFIG_WIFI_NM
6096 && wifi_nm_iface_is_sta(iface)
6097 #endif
6098 ) {
6099 return iface;
6100 }
6101 }
6102
6103 /* If no STA interface is found, return the first WiFi interface */
6104 return net_if_get_first_wifi();
6105 }
6106
/* Return the first Wi-Fi SAP (soft access point) interface. Without
 * Wi-Fi network management support, any Wi-Fi interface qualifies.
 */
struct net_if *net_if_get_wifi_sap(void)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_is_wifi(iface)
#ifdef CONFIG_WIFI_NM
		    && wifi_nm_iface_is_sap(iface)
#endif
			) {
			return iface;
		}
	}

	/* If no SAP interface is found, return the first WiFi interface */
	return net_if_get_first_wifi();
}
6122
net_if_get_name(struct net_if * iface,char * buf,int len)6123 int net_if_get_name(struct net_if *iface, char *buf, int len)
6124 {
6125 #if defined(CONFIG_NET_INTERFACE_NAME)
6126 int name_len;
6127
6128 if (iface == NULL || buf == NULL || len <= 0) {
6129 return -EINVAL;
6130 }
6131
6132 name_len = strlen(net_if_get_config(iface)->name);
6133 if (name_len >= len) {
6134 return -ERANGE;
6135 }
6136
6137 /* Copy string and null terminator */
6138 memcpy(buf, net_if_get_config(iface)->name, name_len + 1);
6139
6140 return name_len;
6141 #else
6142 return -ENOTSUP;
6143 #endif
6144 }
6145
net_if_set_name(struct net_if * iface,const char * buf)6146 int net_if_set_name(struct net_if *iface, const char *buf)
6147 {
6148 #if defined(CONFIG_NET_INTERFACE_NAME)
6149 int name_len;
6150
6151 if (iface == NULL || buf == NULL) {
6152 return -EINVAL;
6153 }
6154
6155 name_len = strlen(buf);
6156 if (name_len >= sizeof(iface->config.name)) {
6157 return -ENAMETOOLONG;
6158 }
6159
6160 STRUCT_SECTION_FOREACH(net_if, iface_check) {
6161 if (iface_check == iface) {
6162 continue;
6163 }
6164
6165 if (memcmp(net_if_get_config(iface_check)->name,
6166 buf,
6167 name_len + 1) == 0) {
6168 return -EALREADY;
6169 }
6170 }
6171
6172 /* Copy string and null terminator */
6173 memcpy(net_if_get_config(iface)->name, buf, name_len + 1);
6174
6175 return 0;
6176 #else
6177 return -ENOTSUP;
6178 #endif
6179 }
6180
net_if_get_by_name(const char * name)6181 int net_if_get_by_name(const char *name)
6182 {
6183 #if defined(CONFIG_NET_INTERFACE_NAME)
6184 if (name == NULL) {
6185 return -EINVAL;
6186 }
6187
6188 STRUCT_SECTION_FOREACH(net_if, iface) {
6189 if (strncmp(net_if_get_config(iface)->name, name, strlen(name)) == 0) {
6190 return net_if_get_by_iface(iface);
6191 }
6192 }
6193
6194 return -ENOENT;
6195 #else
6196 return -ENOTSUP;
6197 #endif
6198 }
6199
6200 #if defined(CONFIG_NET_INTERFACE_NAME)
set_default_name(struct net_if * iface)6201 static void set_default_name(struct net_if *iface)
6202 {
6203 char name[CONFIG_NET_INTERFACE_NAME_LEN + 1];
6204 int ret;
6205
6206 if (net_if_is_wifi(iface)) {
6207 static int count;
6208
6209 snprintk(name, sizeof(name), "wlan%d", count++);
6210
6211 } else if (IS_ENABLED(CONFIG_NET_L2_ETHERNET) &&
6212 (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET))) {
6213 static int count;
6214
6215 snprintk(name, sizeof(name), "eth%d", count++);
6216 } else if (IS_ENABLED(CONFIG_NET_L2_IEEE802154) &&
6217 (net_if_l2(iface) == &NET_L2_GET_NAME(IEEE802154))) {
6218 static int count;
6219
6220 snprintk(name, sizeof(name), "ieee%d", count++);
6221 } else if (IS_ENABLED(CONFIG_NET_L2_DUMMY) &&
6222 (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY))) {
6223 static int count;
6224
6225 snprintk(name, sizeof(name), "dummy%d", count++);
6226 } else if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW) &&
6227 (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW))) {
6228 static int count;
6229
6230 snprintk(name, sizeof(name), "can%d", count++);
6231 } else if (IS_ENABLED(CONFIG_NET_L2_PPP) &&
6232 (net_if_l2(iface) == &NET_L2_GET_NAME(PPP))) {
6233 static int count;
6234
6235 snprintk(name, sizeof(name) - 1, "ppp%d", count++);
6236 } else if (IS_ENABLED(CONFIG_NET_L2_OPENTHREAD) &&
6237 (net_if_l2(iface) == &NET_L2_GET_NAME(OPENTHREAD))) {
6238 static int count;
6239
6240 snprintk(name, sizeof(name), "thread%d", count++);
6241 } else {
6242 static int count;
6243
6244 snprintk(name, sizeof(name), "net%d", count++);
6245 }
6246
6247 ret = net_if_set_name(iface, name);
6248 if (ret < 0) {
6249 NET_WARN("Cannot set default name for interface %d (%p) (%d)",
6250 net_if_get_by_iface(iface), iface, ret);
6251 }
6252 }
6253 #endif /* CONFIG_NET_INTERFACE_NAME */
6254
/* One-time initialization of every registered network interface.
 *
 * Initializes TX traffic-class queues, runs per-interface init, assigns
 * default names when the driver did not set one, and starts the
 * IPv6/IPv4/router helpers. Runs before net_if_post_init().
 */
void net_if_init(void)
{
	int if_count = 0;

	NET_DBG("");

	k_mutex_lock(&lock, K_FOREVER);

	/* TX queues must exist before any interface can send */
	net_tc_tx_init();

	STRUCT_SECTION_FOREACH(net_if, iface) {
#if defined(CONFIG_NET_INTERFACE_NAME)
		/* Clear the name so we can detect below whether the
		 * driver's init hook assigned one.
		 */
		memset(net_if_get_config(iface)->name, 0,
		       sizeof(iface->config.name));
#endif

		init_iface(iface);

#if defined(CONFIG_NET_INTERFACE_NAME)
		/* If the driver did not set the name, then set
		 * a default name for the network interface.
		 */
		if (net_if_get_config(iface)->name[0] == '\0') {
			set_default_name(iface);
		}
#endif

		net_stats_prometheus_init(iface);

		if_count++;
	}

	if (if_count == 0) {
		NET_ERR("There is no network interface to work with!");
		goto out;
	}

#if defined(CONFIG_ASSERT)
	/* Do extra check that verifies that interface count is properly
	 * done.
	 */
	int count_if;

	NET_IFACE_COUNT(&count_if);
	NET_ASSERT(count_if == if_count);
#endif

	iface_ipv6_init(if_count);
	iface_ipv4_init(if_count);
	iface_router_init();

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
	/* Dedicated cooperative thread that delivers TX timestamp
	 * callbacks queued by net_if_add_tx_timestamp().
	 */
	k_thread_create(&tx_thread_ts, tx_ts_stack,
			K_KERNEL_STACK_SIZEOF(tx_ts_stack),
			net_tx_ts_thread,
			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

out:
	k_mutex_unlock(&lock);
}
6317
net_if_post_init(void)6318 void net_if_post_init(void)
6319 {
6320 NET_DBG("");
6321
6322 /* After TX is running, attempt to bring the interface up */
6323 STRUCT_SECTION_FOREACH(net_if, iface) {
6324 if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
6325 net_if_up(iface);
6326 }
6327 }
6328 }
6329