1 /*
2  * Copyright (c) 2016 Intel Corporation.
3  * Copyright (c) 2023 Nordic Semiconductor ASA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/logging/log.h>
9 LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
10 
11 #include <zephyr/init.h>
12 #include <zephyr/kernel.h>
13 #include <zephyr/linker/sections.h>
14 #include <zephyr/random/random.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <zephyr/net/igmp.h>
19 #include <zephyr/net/ipv4_autoconf.h>
20 #include <zephyr/net/mld.h>
21 #include <zephyr/net/net_core.h>
22 #include <zephyr/net/net_event.h>
23 #include <zephyr/net/net_pkt.h>
24 #include <zephyr/net/net_if.h>
25 #include <zephyr/net/net_mgmt.h>
26 #include <zephyr/net/ethernet.h>
27 #ifdef CONFIG_WIFI_NM
28 #include <zephyr/net/wifi_nm.h>
29 #endif
30 #include <zephyr/net/offloaded_netdev.h>
31 #include <zephyr/net/virtual.h>
32 #include <zephyr/net/socket.h>
33 #include <zephyr/sys/iterable_sections.h>
34 
35 #include "net_private.h"
36 #include "ipv4.h"
37 #include "ipv6.h"
38 
39 #include "net_stats.h"
40 
41 #define REACHABLE_TIME (MSEC_PER_SEC * 30) /* in ms */
42 /*
43  * split the min/max random reachable factors into numerator/denominator
44  * so that integer-based math works better
45  */
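/* These correspond to the MIN_RANDOM_FACTOR (0.5) and MAX_RANDOM_FACTOR (1.5)
 * constants of RFC 4861: e.g. a 30000 ms base reachable time is randomized
 * into the 15000 - 45000 ms range.
 */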
46 #define MIN_RANDOM_NUMER (1)
47 #define MIN_RANDOM_DENOM (2)
48 #define MAX_RANDOM_NUMER (3)
49 #define MAX_RANDOM_DENOM (2)
50 
51 static K_MUTEX_DEFINE(lock);
52 
53 /* net_if dedicated section limiters */
54 extern struct net_if _net_if_list_start[];
55 extern struct net_if _net_if_list_end[];
56 
57 static struct net_if *default_iface;
58 
59 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
60 static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
61 static struct k_work_delayable router_timer;
62 static sys_slist_t active_router_timers;
63 #endif
64 
65 #if defined(CONFIG_NET_NATIVE_IPV6)
66 /* Timer that triggers network address renewal */
67 static struct k_work_delayable address_lifetime_timer;
68 
69 /* Track currently active address lifetime timers */
70 static sys_slist_t active_address_lifetime_timers;
71 
/* Timer that triggers IPv6 prefix lifetime expiration */
73 static struct k_work_delayable prefix_lifetime_timer;
74 
75 /* Track currently active IPv6 prefix lifetime timers */
76 static sys_slist_t active_prefix_lifetime_timers;
77 
78 #if defined(CONFIG_NET_IPV6_DAD)
79 /** Duplicate address detection (DAD) timer */
80 static struct k_work_delayable dad_timer;
81 static sys_slist_t active_dad_timers;
82 #endif
83 
84 #if defined(CONFIG_NET_IPV6_ND)
85 static struct k_work_delayable rs_timer;
86 static sys_slist_t active_rs_timers;
87 #endif
88 #endif /* CONFIG_NET_NATIVE_IPV6 */
89 
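/* Static pools of per-interface IPv6/IPv4 configuration. A free slot is
 * handed out to an interface on demand by net_if_config_ipv6_get() (and its
 * IPv4 counterpart) and released again by the matching _put() call.
 */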
90 #if defined(CONFIG_NET_IPV6)
91 static struct {
92 	struct net_if_ipv6 ipv6;
93 	struct net_if *iface;
94 } ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
#endif /* CONFIG_NET_IPV6 */
96 
97 #if defined(CONFIG_NET_IPV4)
98 static struct {
99 	struct net_if_ipv4 ipv4;
100 	struct net_if *iface;
101 } ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
#endif /* CONFIG_NET_IPV4 */
103 
104 /* We keep track of the link callbacks in this list.
105  */
106 static sys_slist_t link_callbacks;
107 
108 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
109 /* Multicast join/leave tracking.
110  */
111 static sys_slist_t mcast_monitor_callbacks;
112 #endif
113 
114 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
115 #if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
116 #define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
117 #endif
118 
119 K_KERNEL_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
120 K_FIFO_DEFINE(tx_ts_queue);
121 
122 static struct k_thread tx_thread_ts;
123 
124 /* We keep track of the timestamp callbacks in this list.
125  */
126 static sys_slist_t timestamp_callbacks;
127 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
128 
129 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
130 #define debug_check_packet(pkt)						\
131 	do {								\
132 		NET_DBG("Processing (pkt %p, prio %d) network packet "	\
133 			"iface %d (%p)",				\
134 			pkt, net_pkt_priority(pkt),			\
135 			net_if_get_by_iface(net_pkt_iface(pkt)),	\
136 			net_pkt_iface(pkt));				\
137 									\
138 		NET_ASSERT(pkt->frags);					\
139 	} while (false)
140 #else
141 #define debug_check_packet(...)
142 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
143 
struct net_if *z_impl_net_if_get_by_index(int index)
145 {
146 	if (index <= 0) {
147 		return NULL;
148 	}
149 
150 	if (&_net_if_list_start[index - 1] >= _net_if_list_end) {
151 		NET_DBG("Index %d is too large", index);
152 		return NULL;
153 	}
154 
155 	return &_net_if_list_start[index - 1];
156 }
157 
158 #ifdef CONFIG_USERSPACE
struct net_if *z_vrfy_net_if_get_by_index(int index)
160 {
161 	struct net_if *iface;
162 
163 	iface = net_if_get_by_index(index);
164 	if (!iface) {
165 		return NULL;
166 	}
167 
168 	if (!k_object_is_valid(iface, K_OBJ_NET_IF)) {
169 		return NULL;
170 	}
171 
172 	return iface;
173 }
174 
175 #include <zephyr/syscalls/net_if_get_by_index_mrsh.c>
176 #endif
177 
178 #if defined(CONFIG_NET_NATIVE)
static inline void net_context_send_cb(struct net_context *context,
180 				       int status)
181 {
182 	if (!context) {
183 		return;
184 	}
185 
186 	if (context->send_cb) {
187 		context->send_cb(context, status, context->user_data);
188 	}
189 
190 	if (IS_ENABLED(CONFIG_NET_UDP) &&
191 	    net_context_get_proto(context) == IPPROTO_UDP) {
192 		net_stats_update_udp_sent(net_context_get_iface(context));
193 	} else if (IS_ENABLED(CONFIG_NET_TCP) &&
194 		   net_context_get_proto(context) == IPPROTO_TCP) {
195 		net_stats_update_tcp_seg_sent(net_context_get_iface(context));
196 	}
197 }
198 
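/* Convert the absolute tick timestamps collected for the packet into
 * per-step deltas (each relative to the previous step, starting from
 * start_time), so that the detailed TX time statistics show how long the
 * packet spent in each stage.
 */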
static void update_txtime_stats_detail(struct net_pkt *pkt,
200 				       uint32_t start_time, uint32_t stop_time)
201 {
202 	uint32_t val, prev = start_time;
203 	int i;
204 
205 	for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
206 		if (!net_pkt_stats_tick(pkt)[i]) {
207 			break;
208 		}
209 
210 		val = net_pkt_stats_tick(pkt)[i] - prev;
211 		prev = net_pkt_stats_tick(pkt)[i];
212 		net_pkt_stats_tick(pkt)[i] = val;
213 	}
214 }
215 
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
217 {
218 	struct net_linkaddr ll_dst = {
219 		.addr = NULL
220 	};
221 	struct net_linkaddr_storage ll_dst_storage;
222 	struct net_context *context;
223 	uint32_t create_time;
224 	int status;
225 
226 	/* We collect send statistics for each socket priority if enabled */
227 	uint8_t pkt_priority;
228 
229 	if (!pkt) {
230 		return false;
231 	}
232 
233 	create_time = net_pkt_create_time(pkt);
234 
235 	debug_check_packet(pkt);
236 
	/* If there are any link callbacks, copy the destination address out
	 * of the packet now, as each callback receives that address and the
	 * packet may be freed before the callbacks are called.
	 */
241 	if (!sys_slist_is_empty(&link_callbacks)) {
242 		if (net_linkaddr_set(&ll_dst_storage,
243 				     net_pkt_lladdr_dst(pkt)->addr,
244 				     net_pkt_lladdr_dst(pkt)->len) == 0) {
245 			ll_dst.addr = ll_dst_storage.addr;
246 			ll_dst.len = ll_dst_storage.len;
247 			ll_dst.type = net_pkt_lladdr_dst(pkt)->type;
248 		}
249 	}
250 
251 	context = net_pkt_context(pkt);
252 
253 	if (net_if_flag_is_set(iface, NET_IF_LOWER_UP)) {
254 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
255 		    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
256 			pkt_priority = net_pkt_priority(pkt);
257 
258 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
259 				/* Make sure the statistics information is not
260 				 * lost by keeping the net_pkt over L2 send.
261 				 */
262 				net_pkt_ref(pkt);
263 			}
264 		}
265 
266 		net_if_tx_lock(iface);
267 		status = net_if_l2(iface)->send(iface, pkt);
268 		net_if_tx_unlock(iface);
269 
270 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
271 		    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
272 			uint32_t end_tick = k_cycle_get_32();
273 
274 			net_pkt_set_tx_stats_tick(pkt, end_tick);
275 
276 			net_stats_update_tc_tx_time(iface,
277 						    pkt_priority,
278 						    create_time,
279 						    end_tick);
280 
281 			SYS_PORT_TRACING_FUNC(net, tx_time, pkt, end_tick);
282 
283 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
284 				update_txtime_stats_detail(
285 					pkt,
286 					create_time,
287 					end_tick);
288 
289 				net_stats_update_tc_tx_time_detail(
290 					iface, pkt_priority,
291 					net_pkt_stats_tick(pkt));
292 
293 				/* For TCP connections, we might keep the pkt
294 				 * longer so that we can resend it if needed.
295 				 * Because of that we need to clear the
296 				 * statistics here.
297 				 */
298 				net_pkt_stats_tick_reset(pkt);
299 
300 				net_pkt_unref(pkt);
301 			}
302 		}
303 
304 	} else {
305 		/* Drop packet if interface is not up */
306 		NET_WARN("iface %p is down", iface);
307 		status = -ENETDOWN;
308 	}
309 
310 	if (status < 0) {
311 		net_pkt_unref(pkt);
312 	} else {
313 		net_stats_update_bytes_sent(iface, status);
314 	}
315 
316 	if (context) {
317 		NET_DBG("Calling context send cb %p status %d",
318 			context, status);
319 
320 		net_context_send_cb(context, status);
321 	}
322 
323 	if (ll_dst.addr) {
324 		net_if_call_link_cb(iface, &ll_dst, status);
325 	}
326 
327 	return true;
328 }
329 
void net_process_tx_packet(struct net_pkt *pkt)
331 {
332 	struct net_if *iface;
333 
334 	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
335 
336 	iface = net_pkt_iface(pkt);
337 
338 	net_if_tx(iface, pkt);
339 
340 #if defined(CONFIG_NET_POWER_MANAGEMENT)
341 	iface->tx_pending--;
342 #endif
343 }
344 
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
346 {
347 	if (!net_pkt_filter_send_ok(pkt)) {
348 		/* silently drop the packet */
349 		net_pkt_unref(pkt);
350 		return;
351 	}
352 
353 	uint8_t prio = net_pkt_priority(pkt);
354 	uint8_t tc = net_tx_priority2tc(prio);
355 
356 	net_stats_update_tc_sent_pkt(iface, tc);
357 	net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
358 	net_stats_update_tc_sent_priority(iface, tc, prio);
359 
360 	/* For highest priority packet, skip the TX queue and push directly to
361 	 * the driver. Also if there are no TX queue/thread, push the packet
362 	 * directly to the driver.
363 	 */
364 	if ((IS_ENABLED(CONFIG_NET_TC_SKIP_FOR_HIGH_PRIO) &&
365 	     prio >= NET_PRIORITY_CA) || NET_TC_TX_COUNT == 0) {
366 		net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
367 
368 		net_if_tx(net_pkt_iface(pkt), pkt);
369 		return;
370 	}
371 
372 #if NET_TC_TX_COUNT > 1
373 	NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
374 #endif
375 
376 #if defined(CONFIG_NET_POWER_MANAGEMENT)
377 	iface->tx_pending++;
378 #endif
379 
380 	if (!net_tc_submit_to_tx_queue(tc, pkt)) {
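		/* Note: the decrement below is written so that when
		 * CONFIG_NET_POWER_MANAGEMENT is disabled only an empty
		 * statement (the lone ';') remains inside this branch.
		 */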
381 #if defined(CONFIG_NET_POWER_MANAGEMENT)
382 		iface->tx_pending--
383 #endif
384 			;
385 	}
386 }
387 #endif /* CONFIG_NET_NATIVE */
388 
void net_if_stats_reset(struct net_if *iface)
390 {
391 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
392 	STRUCT_SECTION_FOREACH(net_if, tmp) {
393 		if (iface == tmp) {
394 			net_if_lock(iface);
395 			memset(&iface->stats, 0, sizeof(iface->stats));
396 			net_if_unlock(iface);
397 			return;
398 		}
399 	}
400 #else
401 	ARG_UNUSED(iface);
402 #endif
403 }
404 
void net_if_stats_reset_all(void)
406 {
407 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
408 	STRUCT_SECTION_FOREACH(net_if, iface) {
409 		net_if_lock(iface);
410 		memset(&iface->stats, 0, sizeof(iface->stats));
411 		net_if_unlock(iface);
412 	}
413 #endif
414 }
415 
static inline void init_iface(struct net_if *iface)
417 {
418 	const struct net_if_api *api = net_if_get_device(iface)->api;
419 
420 	if (!api || !api->init) {
421 		NET_ERR("Iface %p driver API init NULL", iface);
422 		return;
423 	}
424 
425 	/* By default IPv4 and IPv6 are enabled for a given network interface.
426 	 * These can be turned off later if needed.
427 	 */
428 #if defined(CONFIG_NET_NATIVE_IPV4)
429 	net_if_flag_set(iface, NET_IF_IPV4);
430 #endif
431 #if defined(CONFIG_NET_NATIVE_IPV6)
432 	net_if_flag_set(iface, NET_IF_IPV6);
433 #endif
434 
435 	net_virtual_init(iface);
436 
437 	NET_DBG("On iface %p", iface);
438 
439 #ifdef CONFIG_USERSPACE
440 	k_object_init(iface);
441 #endif
442 
443 	k_mutex_init(&iface->lock);
444 	k_mutex_init(&iface->tx_lock);
445 
446 	api->init(iface);
447 
448 	net_ipv6_pe_init(iface);
449 }
450 
451 #if defined(CONFIG_NET_NATIVE)
enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
453 {
454 	const struct net_l2 *l2;
455 	struct net_context *context = net_pkt_context(pkt);
456 	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
457 	enum net_verdict verdict = NET_OK;
458 	int status = -EIO;
459 
460 	if (!net_if_flag_is_set(iface, NET_IF_LOWER_UP) ||
461 	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
462 		/* Drop packet if interface is not up */
463 		NET_WARN("iface %p is down", iface);
464 		verdict = NET_DROP;
465 		status = -ENETDOWN;
466 		goto done;
467 	}
468 
	/* The check for CONFIG_NET_*_OFFLOAD here is an optimization;
	 * offloaded interfaces are currently the only way for net_if_l2()
	 * to be NULL or to lack send().
	 */
472 	if (IS_ENABLED(CONFIG_NET_OFFLOAD) || IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD)) {
473 		l2 = net_if_l2(iface);
474 		if (l2 == NULL) {
475 			/* Offloaded ifaces may choose not to use an L2 at all. */
476 			NET_WARN("no l2 for iface %p, discard pkt", iface);
477 			verdict = NET_DROP;
478 			goto done;
479 		} else if (l2->send == NULL) {
480 			/* Or, their chosen L2 (for example, OFFLOADED_NETDEV_L2)
481 			 * might simply not implement send.
482 			 */
483 			NET_WARN("l2 for iface %p cannot send, discard pkt", iface);
484 			verdict = NET_DROP;
485 			goto done;
486 		}
487 	}
488 
489 	/* If the ll address is not set at all, then we must set
490 	 * it here.
491 	 * Workaround Linux bug, see:
492 	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
493 	 */
494 	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
495 	    !net_pkt_lladdr_src(pkt)->addr) {
496 		net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
497 		net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
498 	}
499 
500 #if defined(CONFIG_NET_LOOPBACK)
501 	/* If the packet is destined back to us, then there is no need to do
502 	 * additional checks, so let the packet through.
503 	 */
504 	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
505 		goto done;
506 	}
507 #endif
508 
509 	/* Bypass the IP stack with SOCK_RAW/IPPROTO_RAW sockets */
510 	if (IS_ENABLED(CONFIG_NET_SOCKETS_PACKET) &&
511 	    context && net_context_get_type(context) == SOCK_RAW &&
512 	    net_context_get_proto(context) == IPPROTO_RAW) {
513 		goto done;
514 	}
515 
516 	/* If the ll dst address is not set check if it is present in the nbr
517 	 * cache.
518 	 */
519 	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
520 		verdict = net_ipv6_prepare_for_send(pkt);
521 	}
522 
523 	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
524 		verdict = net_ipv4_prepare_for_send(pkt);
525 	}
526 
527 done:
	/* The verdict at this point is one of:
	 *
	 *   NET_OK, in which case the packet was checked successfully. In
	 *   this case the net_context callback is called after successful
	 *   delivery in net_if_tx_thread().
	 *
	 *   NET_DROP, in which case we call the net_context callback, which
	 *   reports the status to the user application.
	 *
	 *   NET_CONTINUE, in which case the sending of the packet is delayed.
	 *   This can happen for example if we need to do IPv6 ND to figure
	 *   out the link layer address.
	 */
539 	if (verdict == NET_DROP) {
540 		if (context) {
541 			NET_DBG("Calling ctx send cb %p verdict %d",
542 				context, verdict);
543 			net_context_send_cb(context, status);
544 		}
545 
546 		if (dst->addr) {
547 			net_if_call_link_cb(iface, dst, status);
548 		}
549 	} else if (verdict == NET_OK) {
550 		/* Packet is ready to be sent by L2, let's queue */
551 		net_if_queue_tx(iface, pkt);
552 	}
553 
554 	return verdict;
555 }
556 #endif /* CONFIG_NET_NATIVE */
557 
int net_if_set_link_addr_locked(struct net_if *iface,
559 				uint8_t *addr, uint8_t len,
560 				enum net_link_type type)
561 {
562 	int ret;
563 
564 	net_if_lock(iface);
565 
566 	ret = net_if_set_link_addr_unlocked(iface, addr, len, type);
567 
568 	net_if_unlock(iface);
569 
570 	return ret;
571 }
572 
struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
574 {
575 	STRUCT_SECTION_FOREACH(net_if, iface) {
576 		net_if_lock(iface);
577 		if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
578 			    ll_addr->len)) {
579 			net_if_unlock(iface);
580 			return iface;
581 		}
582 		net_if_unlock(iface);
583 	}
584 
585 	return NULL;
586 }
587 
struct net_if *net_if_lookup_by_dev(const struct device *dev)
589 {
590 	STRUCT_SECTION_FOREACH(net_if, iface) {
591 		if (net_if_get_device(iface) == dev) {
592 			return iface;
593 		}
594 	}
595 
596 	return NULL;
597 }
598 
void net_if_set_default(struct net_if *iface)
600 {
601 	default_iface = iface;
602 }
603 
struct net_if *net_if_get_default(void)
605 {
606 	struct net_if *iface = NULL;
607 
608 	if (&_net_if_list_start[0] == &_net_if_list_end[0]) {
609 		NET_WARN("No default interface found!");
610 		return NULL;
611 	}
612 
613 	if (default_iface != NULL) {
614 		return default_iface;
615 	}
616 
617 #if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
618 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
619 #endif
620 #if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
621 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
622 #endif
623 #if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
624 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
625 #endif
626 #if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
627 	iface = net_if_get_first_by_type(NULL);
628 #endif
629 #if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
630 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
631 #endif
632 #if defined(CONFIG_NET_DEFAULT_IF_PPP)
633 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
634 #endif
635 #if defined(CONFIG_NET_DEFAULT_IF_UP)
636 	iface = net_if_get_first_up();
637 #endif
638 #if defined(CONFIG_NET_DEFAULT_IF_WIFI)
639 	iface = net_if_get_first_wifi();
640 #endif
641 	return iface ? iface : _net_if_list_start;
642 }
643 
struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
645 {
646 	STRUCT_SECTION_FOREACH(net_if, iface) {
647 		if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
648 		    !l2 && net_if_offload(iface)) {
649 			return iface;
650 		}
651 
652 		if (net_if_l2(iface) == l2) {
653 			return iface;
654 		}
655 	}
656 
657 	return NULL;
658 }
659 
struct net_if *net_if_get_first_up(void)
661 {
662 	STRUCT_SECTION_FOREACH(net_if, iface) {
663 		if (net_if_flag_is_set(iface, NET_IF_UP)) {
664 			return iface;
665 		}
666 	}
667 
668 	return NULL;
669 }
670 
static enum net_l2_flags l2_flags_get(struct net_if *iface)
672 {
673 	enum net_l2_flags flags = 0;
674 
675 	if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
676 		flags = net_if_l2(iface)->get_flags(iface);
677 	}
678 
679 	return flags;
680 }
681 
682 #if defined(CONFIG_NET_IP)
683 /* Return how many bits are shared between two IP addresses */
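/* For example, 192.0.2.10 and 192.0.2.200 share 24 bits: the first three
 * octets match and the fourth octets already differ in their most
 * significant bit.
 */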
static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len)
685 {
686 	uint8_t j, k, xor;
687 	uint8_t len = 0U;
688 
689 	for (j = 0U; j < addr_len; j++) {
690 		if (src[j] == dst[j]) {
691 			len += 8U;
692 		} else {
693 			xor = src[j] ^ dst[j];
694 			for (k = 0U; k < 8; k++) {
695 				if (!(xor & 0x80)) {
696 					len++;
697 					xor <<= 1;
698 				} else {
699 					break;
700 				}
701 			}
702 			break;
703 		}
704 	}
705 
706 	return len;
707 }
708 #endif /* CONFIG_NET_IP */
709 
710 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
static struct net_if_router *iface_router_lookup(struct net_if *iface,
712 						 uint8_t family, void *addr)
713 {
714 	struct net_if_router *router = NULL;
715 	int i;
716 
717 	k_mutex_lock(&lock, K_FOREVER);
718 
719 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
720 		if (!routers[i].is_used ||
721 		    routers[i].address.family != family ||
722 		    routers[i].iface != iface) {
723 			continue;
724 		}
725 
726 		if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
727 		     net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
728 				       (struct in6_addr *)addr)) ||
729 		    (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
730 		     net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
731 				       (struct in_addr *)addr))) {
732 			router = &routers[i];
733 			goto out;
734 		}
735 	}
736 
737 out:
738 	k_mutex_unlock(&lock);
739 
740 	return router;
741 }
742 
static void iface_router_notify_deletion(struct net_if_router *router,
744 					 const char *delete_reason)
745 {
746 	if (IS_ENABLED(CONFIG_NET_IPV6) &&
747 	    router->address.family == AF_INET6) {
748 		NET_DBG("IPv6 router %s %s",
749 			net_sprint_ipv6_addr(net_if_router_ipv6(router)),
750 			delete_reason);
751 
752 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
753 						router->iface,
754 						&router->address.in6_addr,
755 						sizeof(struct in6_addr));
756 	} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
757 		   router->address.family == AF_INET) {
758 		NET_DBG("IPv4 router %s %s",
759 			net_sprint_ipv4_addr(net_if_router_ipv4(router)),
760 			delete_reason);
761 
762 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
763 						router->iface,
764 						&router->address.in_addr,
						sizeof(struct in_addr));
766 	}
767 }
768 
static inline int32_t iface_router_ends(const struct net_if_router *router,
770 					uint32_t now)
771 {
772 	uint32_t ends = router->life_start;
773 
774 	ends += MSEC_PER_SEC * router->lifetime;
775 
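	/* The unsigned arithmetic below wraps correctly across a
	 * k_uptime_get_32() rollover as long as the remaining lifetime fits
	 * in an int32_t.
	 */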
776 	/* Signed number of ms until router lifetime ends */
777 	return (int32_t)(ends - now);
778 }
779 
static void iface_router_update_timer(uint32_t now)
781 {
782 	struct net_if_router *router, *next;
783 	uint32_t new_delay = UINT32_MAX;
784 
785 	k_mutex_lock(&lock, K_FOREVER);
786 
787 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
788 					 router, next, node) {
789 		int32_t ends = iface_router_ends(router, now);
790 
791 		if (ends <= 0) {
792 			new_delay = 0;
793 			break;
794 		}
795 
796 		new_delay = MIN((uint32_t)ends, new_delay);
797 	}
798 
799 	if (new_delay == UINT32_MAX) {
800 		k_work_cancel_delayable(&router_timer);
801 	} else {
802 		k_work_reschedule(&router_timer, K_MSEC(new_delay));
803 	}
804 
805 	k_mutex_unlock(&lock);
806 }
807 
static void iface_router_expired(struct k_work *work)
809 {
810 	uint32_t current_time = k_uptime_get_32();
811 	struct net_if_router *router, *next;
812 	sys_snode_t *prev_node = NULL;
813 
814 	ARG_UNUSED(work);
815 
816 	k_mutex_lock(&lock, K_FOREVER);
817 
818 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
819 					  router, next, node) {
820 		int32_t ends = iface_router_ends(router, current_time);
821 
822 		if (ends > 0) {
823 			/* We have to loop on all active routers as their
824 			 * lifetime differ from each other.
825 			 */
826 			prev_node = &router->node;
827 			continue;
828 		}
829 
830 		iface_router_notify_deletion(router, "has expired");
831 		sys_slist_remove(&active_router_timers,
832 				 prev_node, &router->node);
833 		router->is_used = false;
834 	}
835 
836 	iface_router_update_timer(current_time);
837 
838 	k_mutex_unlock(&lock);
839 }
840 
static struct net_if_router *iface_router_add(struct net_if *iface,
842 					      uint8_t family, void *addr,
843 					      bool is_default,
844 					      uint16_t lifetime)
845 {
846 	struct net_if_router *router = NULL;
847 	int i;
848 
849 	k_mutex_lock(&lock, K_FOREVER);
850 
851 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
852 		if (routers[i].is_used) {
853 			continue;
854 		}
855 
856 		routers[i].is_used = true;
857 		routers[i].iface = iface;
858 		routers[i].address.family = family;
859 
860 		if (lifetime) {
861 			routers[i].is_default = true;
862 			routers[i].is_infinite = false;
863 			routers[i].lifetime = lifetime;
864 			routers[i].life_start = k_uptime_get_32();
865 
866 			sys_slist_append(&active_router_timers,
867 					 &routers[i].node);
868 
869 			iface_router_update_timer(routers[i].life_start);
870 		} else {
871 			routers[i].is_default = false;
872 			routers[i].is_infinite = true;
873 			routers[i].lifetime = 0;
874 		}
875 
876 		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
877 			memcpy(net_if_router_ipv6(&routers[i]), addr,
878 			       sizeof(struct in6_addr));
879 			net_mgmt_event_notify_with_info(
880 					NET_EVENT_IPV6_ROUTER_ADD, iface,
881 					&routers[i].address.in6_addr,
882 					sizeof(struct in6_addr));
883 
884 			NET_DBG("interface %p router %s lifetime %u default %d "
885 				"added", iface,
886 				net_sprint_ipv6_addr((struct in6_addr *)addr),
887 				lifetime, routers[i].is_default);
888 		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
889 			memcpy(net_if_router_ipv4(&routers[i]), addr,
890 			       sizeof(struct in_addr));
891 			routers[i].is_default = is_default;
892 
893 			net_mgmt_event_notify_with_info(
894 					NET_EVENT_IPV4_ROUTER_ADD, iface,
895 					&routers[i].address.in_addr,
896 					sizeof(struct in_addr));
897 
898 			NET_DBG("interface %p router %s lifetime %u default %d "
899 				"added", iface,
900 				net_sprint_ipv4_addr((struct in_addr *)addr),
901 				lifetime, is_default);
902 		}
903 
904 		router = &routers[i];
905 		goto out;
906 	}
907 
908 out:
909 	k_mutex_unlock(&lock);
910 
911 	return router;
912 }
913 
static bool iface_router_rm(struct net_if_router *router)
915 {
916 	bool ret = false;
917 
918 	k_mutex_lock(&lock, K_FOREVER);
919 
920 	if (!router->is_used) {
921 		goto out;
922 	}
923 
924 	iface_router_notify_deletion(router, "has been removed");
925 
	/* We only recompute the timer if the router was time limited */
927 	if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
928 		iface_router_update_timer(k_uptime_get_32());
929 	}
930 
931 	router->is_used = false;
932 	ret = true;
933 
934 out:
935 	k_mutex_unlock(&lock);
936 
937 	return ret;
938 }
939 
void net_if_router_rm(struct net_if_router *router)
941 {
942 	k_mutex_lock(&lock, K_FOREVER);
943 
944 	router->is_used = false;
945 
946 	/* FIXME - remove timer */
947 
948 	k_mutex_unlock(&lock);
949 }
950 
static struct net_if_router *iface_router_find_default(struct net_if *iface,
952 						       uint8_t family, void *addr)
953 {
954 	struct net_if_router *router = NULL;
955 	int i;
956 
957 	/* Todo: addr will need to be handled */
958 	ARG_UNUSED(addr);
959 
960 	k_mutex_lock(&lock, K_FOREVER);
961 
962 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
963 		if (!routers[i].is_used ||
964 		    !routers[i].is_default ||
965 		    routers[i].address.family != family) {
966 			continue;
967 		}
968 
969 		if (iface && iface != routers[i].iface) {
970 			continue;
971 		}
972 
973 		router = &routers[i];
974 		goto out;
975 	}
976 
977 out:
978 	k_mutex_unlock(&lock);
979 
980 	return router;
981 }
982 
static void iface_router_init(void)
984 {
985 	k_work_init_delayable(&router_timer, iface_router_expired);
986 	sys_slist_init(&active_router_timers);
987 }
988 #else
989 #define iface_router_init(...)
990 #endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */
991 
992 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
994 			       struct net_if *iface,
995 			       net_if_mcast_callback_t cb)
996 {
997 	k_mutex_lock(&lock, K_FOREVER);
998 
999 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
1000 	sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);
1001 
1002 	mon->iface = iface;
1003 	mon->cb = cb;
1004 
1005 	k_mutex_unlock(&lock);
1006 }
1007 
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
1009 {
1010 	k_mutex_lock(&lock, K_FOREVER);
1011 
1012 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
1013 
1014 	k_mutex_unlock(&lock);
1015 }
1016 
void net_if_mcast_monitor(struct net_if *iface,
1018 			  const struct net_addr *addr,
1019 			  bool is_joined)
1020 {
1021 	struct net_if_mcast_monitor *mon, *tmp;
1022 
1023 	k_mutex_lock(&lock, K_FOREVER);
1024 
1025 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
1026 					  mon, tmp, node) {
1027 		if (iface == mon->iface || mon->iface == NULL) {
1028 			mon->cb(iface, addr, is_joined);
1029 		}
1030 	}
1031 
1032 	k_mutex_unlock(&lock);
1033 }
1034 #else
1035 #define net_if_mcast_mon_register(...)
1036 #define net_if_mcast_mon_unregister(...)
1037 #define net_if_mcast_monitor(...)
1038 #endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */
1039 
1040 #if defined(CONFIG_NET_IPV6)
int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
1042 {
1043 	int ret = 0;
1044 	int i;
1045 
1046 	net_if_lock(iface);
1047 
1048 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1049 		ret = -ENOTSUP;
1050 		goto out;
1051 	}
1052 
1053 	if (iface->config.ip.ipv6) {
1054 		if (ipv6) {
1055 			*ipv6 = iface->config.ip.ipv6;
1056 		}
1057 
1058 		goto out;
1059 	}
1060 
1061 	k_mutex_lock(&lock, K_FOREVER);
1062 
1063 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1064 		if (ipv6_addresses[i].iface) {
1065 			continue;
1066 		}
1067 
1068 		iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
1069 		ipv6_addresses[i].iface = iface;
1070 
1071 		if (ipv6) {
1072 			*ipv6 = &ipv6_addresses[i].ipv6;
1073 		}
1074 
1075 		k_mutex_unlock(&lock);
1076 		goto out;
1077 	}
1078 
1079 	k_mutex_unlock(&lock);
1080 
1081 	ret = -ESRCH;
1082 out:
1083 	net_if_unlock(iface);
1084 
1085 	return ret;
1086 }
1087 
int net_if_config_ipv6_put(struct net_if *iface)
1089 {
1090 	int ret = 0;
1091 	int i;
1092 
1093 	net_if_lock(iface);
1094 
1095 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1096 		ret = -ENOTSUP;
1097 		goto out;
1098 	}
1099 
1100 	if (!iface->config.ip.ipv6) {
1101 		ret = -EALREADY;
1102 		goto out;
1103 	}
1104 
1105 	k_mutex_lock(&lock, K_FOREVER);
1106 
1107 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1108 		if (ipv6_addresses[i].iface != iface) {
1109 			continue;
1110 		}
1111 
1112 		iface->config.ip.ipv6 = NULL;
1113 		ipv6_addresses[i].iface = NULL;
1114 
1115 		k_mutex_unlock(&lock);
1116 		goto out;
1117 	}
1118 
1119 	k_mutex_unlock(&lock);
1120 
1121 	ret = -ESRCH;
1122 out:
1123 	net_if_unlock(iface);
1124 
1125 	return ret;
1126 }
1127 
1128 #if defined(CONFIG_NET_NATIVE_IPV6)
1129 #if defined(CONFIG_NET_IPV6_MLD)
static void join_mcast_allnodes(struct net_if *iface)
1131 {
1132 	struct in6_addr addr;
1133 	int ret;
1134 
1135 	if (iface->config.ip.ipv6 == NULL) {
1136 		return;
1137 	}
1138 
1139 	net_ipv6_addr_create_ll_allnodes_mcast(&addr);
1140 
1141 	ret = net_ipv6_mld_join(iface, &addr);
1142 	if (ret < 0 && ret != -EALREADY && ret != -ENETDOWN) {
1143 		NET_ERR("Cannot join all nodes address %s for %d (%d)",
1144 			net_sprint_ipv6_addr(&addr),
1145 			net_if_get_by_iface(iface), ret);
1146 	}
1147 }
1148 
static void join_mcast_solicit_node(struct net_if *iface,
1150 				    struct in6_addr *my_addr)
1151 {
1152 	struct in6_addr addr;
1153 	int ret;
1154 
1155 	if (iface->config.ip.ipv6 == NULL) {
1156 		return;
1157 	}
1158 
	/* Join the needed multicast groups, RFC 4291 ch 2.8 */
1160 	net_ipv6_addr_create_solicited_node(my_addr, &addr);
1161 
1162 	ret = net_ipv6_mld_join(iface, &addr);
1163 	if (ret < 0) {
1164 		if (ret != -EALREADY && ret != -ENETDOWN) {
1165 			NET_ERR("Cannot join solicit node address %s for %d (%d)",
1166 				net_sprint_ipv6_addr(&addr),
1167 				net_if_get_by_iface(iface), ret);
1168 		}
1169 	} else {
1170 		NET_DBG("Join solicit node address %s (ifindex %d)",
1171 			net_sprint_ipv6_addr(&addr),
1172 			net_if_get_by_iface(iface));
1173 	}
1174 }
1175 
static void leave_mcast_all(struct net_if *iface)
1177 {
1178 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1179 	int i;
1180 
1181 	if (!ipv6) {
1182 		return;
1183 	}
1184 
1185 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1186 		if (!ipv6->mcast[i].is_used ||
1187 		    !ipv6->mcast[i].is_joined) {
1188 			continue;
1189 		}
1190 
1191 		net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
1192 	}
1193 }
1194 
static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
1196 {
1197 	enum net_l2_flags flags = 0;
1198 
1199 	if (iface->config.ip.ipv6 == NULL) {
1200 		return;
1201 	}
1202 
1203 	flags = l2_flags_get(iface);
1204 	if (flags & NET_L2_MULTICAST) {
1205 		join_mcast_allnodes(iface);
1206 
1207 		if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
1208 			join_mcast_solicit_node(iface, addr);
1209 		}
1210 	}
1211 }
1212 #else
1213 #define join_mcast_allnodes(...)
1214 #define join_mcast_solicit_node(...)
1215 #define leave_mcast_all(...)
1216 #define join_mcast_nodes(...)
1217 #endif /* CONFIG_NET_IPV6_MLD */
1218 
1219 #if defined(CONFIG_NET_IPV6_DAD)
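/* How long to wait for a conflicting answer before dad_timeout() declares
 * duplicate address detection successful and marks the address preferred.
 */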
1220 #define DAD_TIMEOUT 100U /* ms */
1221 
static void dad_timeout(struct k_work *work)
1223 {
1224 	uint32_t current_time = k_uptime_get_32();
1225 	struct net_if_addr *ifaddr, *next;
1226 	int32_t delay = -1;
1227 	sys_slist_t expired_list;
1228 
1229 	ARG_UNUSED(work);
1230 
1231 	sys_slist_init(&expired_list);
1232 
1233 	k_mutex_lock(&lock, K_FOREVER);
1234 
1235 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
1236 					  ifaddr, next, dad_node) {
1237 		/* DAD entries are ordered by construction.  Stop when
1238 		 * we find one that hasn't expired.
1239 		 */
1240 		delay = (int32_t)(ifaddr->dad_start +
1241 				  DAD_TIMEOUT - current_time);
1242 		if (delay > 0) {
1243 			break;
1244 		}
1245 
1246 		/* Removing the ifaddr from active_dad_timers list */
1247 		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);
1248 		sys_slist_append(&expired_list, &ifaddr->dad_node);
1249 
1250 		ifaddr = NULL;
1251 	}
1252 
1253 	if ((ifaddr != NULL) && (delay > 0)) {
1254 		k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
1255 	}
1256 
1257 	k_mutex_unlock(&lock);
1258 
1259 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ifaddr, dad_node) {
1260 		struct net_if *iface;
1261 
1262 		NET_DBG("DAD succeeded for %s at interface %d",
1263 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
1264 			ifaddr->ifindex);
1265 
1266 		ifaddr->addr_state = NET_ADDR_PREFERRED;
1267 		iface = net_if_get_by_index(ifaddr->ifindex);
1268 
1269 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_SUCCEED,
1270 						iface,
1271 						&ifaddr->address.in6_addr,
1272 						sizeof(struct in6_addr));
1273 
		/* The address got added to the neighbor cache which is not
		 * needed in this case as the address is our own.
		 */
1277 		net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
1278 	}
1279 }
1280 
void net_if_ipv6_start_dad(struct net_if *iface,
1282 			   struct net_if_addr *ifaddr)
1283 {
1284 	ifaddr->addr_state = NET_ADDR_TENTATIVE;
1285 
1286 	if (net_if_is_up(iface)) {
1287 		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
1288 			iface,
1289 			net_sprint_ll_addr(
1290 					   net_if_get_link_addr(iface)->addr,
1291 					   net_if_get_link_addr(iface)->len),
1292 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1293 
1294 		ifaddr->dad_count = 1U;
1295 
1296 		if (!net_ipv6_start_dad(iface, ifaddr)) {
1297 			ifaddr->dad_start = k_uptime_get_32();
1298 			ifaddr->ifindex = net_if_get_by_iface(iface);
1299 
1300 			k_mutex_lock(&lock, K_FOREVER);
1301 			sys_slist_find_and_remove(&active_dad_timers,
1302 						  &ifaddr->dad_node);
1303 			sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
1304 			k_mutex_unlock(&lock);
1305 
1306 			/* FUTURE: use schedule, not reschedule. */
1307 			if (!k_work_delayable_remaining_get(&dad_timer)) {
1308 				k_work_reschedule(&dad_timer,
1309 						  K_MSEC(DAD_TIMEOUT));
1310 			}
1311 		}
1312 	} else {
1313 		NET_DBG("Interface %p is down, starting DAD for %s later.",
1314 			iface,
1315 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1316 	}
1317 }
1318 
void net_if_start_dad(struct net_if *iface)
1320 {
1321 	struct net_if_addr *ifaddr;
1322 	struct net_if_ipv6 *ipv6;
1323 	struct in6_addr addr = { };
1324 	int ret;
1325 
1326 	net_if_lock(iface);
1327 
1328 	NET_DBG("Starting DAD for iface %p", iface);
1329 
1330 	ret = net_if_config_ipv6_get(iface, &ipv6);
1331 	if (ret < 0) {
1332 		if (ret != -ENOTSUP) {
			NET_WARN("Cannot do DAD, IPv6 config is not valid.");
1334 		}
1335 
1336 		goto out;
1337 	}
1338 
1339 	if (!ipv6) {
1340 		goto out;
1341 	}
1342 
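	/* When CONFIG_NET_IPV6_IID_STABLE is enabled, feed the interface's
	 * network counter and the previous DAD count into the IID generation
	 * so that a stable, opaque IID can be produced instead of one derived
	 * purely from the link-layer address.
	 */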
1343 	ret = net_ipv6_addr_generate_iid(iface, NULL,
1344 					 COND_CODE_1(CONFIG_NET_IPV6_IID_STABLE,
1345 						     ((uint8_t *)&ipv6->network_counter),
1346 						     (NULL)),
1347 					 COND_CODE_1(CONFIG_NET_IPV6_IID_STABLE,
1348 						     (sizeof(ipv6->network_counter)),
1349 						     (0U)),
1350 					 COND_CODE_1(CONFIG_NET_IPV6_IID_STABLE,
1351 						     (ipv6->iid ? ipv6->iid->dad_count : 0U),
1352 						     (0U)),
1353 					 &addr,
1354 					 net_if_get_link_addr(iface));
1355 	if (ret < 0) {
1356 		NET_WARN("IPv6 IID generation issue (%d)", ret);
1357 		goto out;
1358 	}
1359 
1360 	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
1361 	if (!ifaddr) {
1362 		NET_ERR("Cannot add %s address to interface %p, DAD fails",
1363 			net_sprint_ipv6_addr(&addr), iface);
1364 		goto out;
1365 	}
1366 
1367 	IF_ENABLED(CONFIG_NET_IPV6_IID_STABLE, (ipv6->iid = ifaddr));
1368 
1369 	/* Start DAD for all the addresses that were added earlier when
1370 	 * the interface was down.
1371 	 */
1372 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1373 		if (!ipv6->unicast[i].is_used ||
1374 		    ipv6->unicast[i].address.family != AF_INET6 ||
1375 		    &ipv6->unicast[i] == ifaddr ||
1376 		    net_ipv6_is_addr_loopback(
1377 			    &ipv6->unicast[i].address.in6_addr)) {
1378 			continue;
1379 		}
1380 
1381 		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1382 	}
1383 
1384 out:
1385 	net_if_unlock(iface);
1386 }
1387 
void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
1389 {
1390 	struct net_if_addr *ifaddr;
1391 	uint32_t timeout, preferred_lifetime;
1392 
1393 	net_if_lock(iface);
1394 
1395 	ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
1396 	if (!ifaddr) {
1397 		NET_ERR("Cannot find %s address in interface %p",
1398 			net_sprint_ipv6_addr(addr), iface);
1399 		goto out;
1400 	}
1401 
1402 	if (IS_ENABLED(CONFIG_NET_IPV6_IID_STABLE) || IS_ENABLED(CONFIG_NET_IPV6_PE)) {
1403 		ifaddr->dad_count++;
1404 	}
1405 
1406 	if (IS_ENABLED(CONFIG_NET_IPV6_PE)) {
1407 		timeout = COND_CODE_1(CONFIG_NET_IPV6_PE,
1408 				      (ifaddr->addr_timeout), (0));
1409 		preferred_lifetime = COND_CODE_1(CONFIG_NET_IPV6_PE,
1410 						 (ifaddr->addr_preferred_lifetime), (0U));
1411 
1412 		if (!net_ipv6_pe_check_dad(ifaddr->dad_count)) {
1413 			NET_ERR("Cannot generate PE address for interface %p",
1414 				iface);
1415 			iface->pe_enabled = false;
1416 			net_mgmt_event_notify(NET_EVENT_IPV6_PE_DISABLED, iface);
1417 		}
1418 	}
1419 
1420 	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
1421 					&ifaddr->address.in6_addr,
1422 					sizeof(struct in6_addr));
1423 
	/* The old address needs to be removed from the interface before we
	 * can start new DAD for the new PE address, as the number of address
	 * slots is limited.
	 */
1428 	net_if_ipv6_addr_rm(iface, addr);
1429 
1430 	if (IS_ENABLED(CONFIG_NET_IPV6_PE) && iface->pe_enabled) {
1431 		net_ipv6_pe_start(iface, addr, timeout, preferred_lifetime);
1432 	}
1433 
1434 out:
1435 	net_if_unlock(iface);
1436 }
1437 
static inline void iface_ipv6_dad_init(void)
1439 {
1440 	k_work_init_delayable(&dad_timer, dad_timeout);
1441 	sys_slist_init(&active_dad_timers);
1442 }
1443 
1444 #else
static inline void net_if_ipv6_start_dad(struct net_if *iface,
1446 					 struct net_if_addr *ifaddr)
1447 {
1448 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1449 }
1450 
1451 #define iface_ipv6_dad_init(...)
1452 #endif /* CONFIG_NET_IPV6_DAD */
1453 
1454 #if defined(CONFIG_NET_IPV6_ND)
1455 #define RS_TIMEOUT (CONFIG_NET_IPV6_RS_TIMEOUT * MSEC_PER_SEC)
1456 #define RS_COUNT 3
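/* Send at most RS_COUNT router solicitations, RS_TIMEOUT apart, while no
 * router advertisement has been received (cf. MAX_RTR_SOLICITATIONS in
 * RFC 4861).
 */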
1457 
static void rs_timeout(struct k_work *work)
1459 {
1460 	uint32_t current_time = k_uptime_get_32();
1461 	struct net_if_ipv6 *ipv6, *next;
1462 	int32_t delay = -1;
1463 	sys_slist_t expired_list;
1464 
1465 	ARG_UNUSED(work);
1466 
1467 	sys_slist_init(&expired_list);
1468 
1469 	k_mutex_lock(&lock, K_FOREVER);
1470 
1471 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
1472 					  ipv6, next, rs_node) {
1473 		/* RS entries are ordered by construction.  Stop when
1474 		 * we find one that hasn't expired.
1475 		 */
1476 		delay = (int32_t)(ipv6->rs_start + RS_TIMEOUT - current_time);
1477 		if (delay > 0) {
1478 			break;
1479 		}
1480 
1481 		/* Removing the ipv6 from active_rs_timers list */
1482 		sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);
1483 		sys_slist_append(&expired_list, &ipv6->rs_node);
1484 
1485 		ipv6 = NULL;
1486 	}
1487 
1488 	if ((ipv6 != NULL) && (delay > 0)) {
1489 		k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
1490 						    RS_TIMEOUT - current_time));
1491 	}
1492 
1493 	k_mutex_unlock(&lock);
1494 
1495 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ipv6, rs_node) {
1496 		struct net_if *iface = NULL;
1497 
1498 		/* Did not receive RA yet. */
1499 		ipv6->rs_count++;
1500 
1501 		STRUCT_SECTION_FOREACH(net_if, tmp) {
1502 			if (tmp->config.ip.ipv6 == ipv6) {
1503 				iface = tmp;
1504 				break;
1505 			}
1506 		}
1507 
1508 		if (iface) {
			NET_DBG("No response to RS on iface %p, count %d",
				iface, ipv6->rs_count);
1511 			if (ipv6->rs_count < RS_COUNT) {
1512 				net_if_start_rs(iface);
1513 			}
1514 		} else {
1515 			NET_DBG("Interface IPv6 config %p not found", ipv6);
1516 		}
1517 	}
1518 }
1519 
void net_if_start_rs(struct net_if *iface)
1521 {
1522 	struct net_if_ipv6 *ipv6;
1523 
1524 	net_if_lock(iface);
1525 
1526 	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1527 		goto out;
1528 	}
1529 
1530 	ipv6 = iface->config.ip.ipv6;
1531 	if (!ipv6) {
1532 		goto out;
1533 	}
1534 
1535 	NET_DBG("Starting ND/RS for iface %p", iface);
1536 
1537 	if (!net_ipv6_start_rs(iface)) {
1538 		ipv6->rs_start = k_uptime_get_32();
1539 
1540 		k_mutex_lock(&lock, K_FOREVER);
1541 		sys_slist_append(&active_rs_timers, &ipv6->rs_node);
1542 		k_mutex_unlock(&lock);
1543 
1544 		/* FUTURE: use schedule, not reschedule. */
1545 		if (!k_work_delayable_remaining_get(&rs_timer)) {
1546 			k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
1547 		}
1548 	}
1549 
1550 out:
1551 	net_if_unlock(iface);
1552 }
1553 
void net_if_stop_rs(struct net_if *iface)
1555 {
1556 	struct net_if_ipv6 *ipv6;
1557 
1558 	net_if_lock(iface);
1559 
1560 	ipv6 = iface->config.ip.ipv6;
1561 	if (!ipv6) {
1562 		goto out;
1563 	}
1564 
1565 	NET_DBG("Stopping ND/RS for iface %p", iface);
1566 
1567 	k_mutex_lock(&lock, K_FOREVER);
1568 	sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
1569 	k_mutex_unlock(&lock);
1570 
1571 out:
1572 	net_if_unlock(iface);
1573 }
1574 
static inline void iface_ipv6_nd_init(void)
1576 {
1577 	k_work_init_delayable(&rs_timer, rs_timeout);
1578 	sys_slist_init(&active_rs_timers);
1579 }
1580 
1581 #else
1582 #define net_if_start_rs(...)
1583 #define net_if_stop_rs(...)
1584 #define iface_ipv6_nd_init(...)
1585 #endif /* CONFIG_NET_IPV6_ND */
1586 
1587 #if defined(CONFIG_NET_IPV6_ND) && defined(CONFIG_NET_NATIVE_IPV6)
1588 
void net_if_nbr_reachability_hint(struct net_if *iface, const struct in6_addr *ipv6_addr)
1590 {
1591 	net_if_lock(iface);
1592 
1593 	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1594 		goto out;
1595 	}
1596 
1597 	if (!iface->config.ip.ipv6) {
1598 		goto out;
1599 	}
1600 
1601 	net_ipv6_nbr_reachability_hint(iface, ipv6_addr);
1602 
1603 out:
1604 	net_if_unlock(iface);
1605 }
1606 
1607 #endif
1608 
/* To be called when the interface comes up so that all the not-yet-joined
 * multicast groups are joined.
 */
static void rejoin_ipv6_mcast_groups(struct net_if *iface)
1613 {
1614 	struct net_if_ipv6 *ipv6;
1615 
1616 	net_if_lock(iface);
1617 
1618 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
1619 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1620 		goto out;
1621 	}
1622 
1623 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1624 		goto out;
1625 	}
1626 
1627 	/* Rejoin solicited node multicasts. */
1628 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1629 		if (!ipv6->unicast[i].is_used) {
1630 			continue;
1631 		}
1632 
1633 		join_mcast_nodes(iface, &ipv6->unicast[i].address.in6_addr);
1634 	}
1635 
1636 	/* Rejoin any mcast address present on the interface, but marked as not joined. */
1637 	ARRAY_FOR_EACH(ipv6->mcast, i) {
1638 		int ret;
1639 
1640 		if (!ipv6->mcast[i].is_used ||
1641 		    net_if_ipv6_maddr_is_joined(&ipv6->mcast[i])) {
1642 			continue;
1643 		}
1644 
1645 		ret = net_ipv6_mld_join(iface, &ipv6->mcast[i].address.in6_addr);
1646 		if (ret < 0) {
1647 			NET_ERR("Cannot join mcast address %s for %d (%d)",
1648 				net_sprint_ipv6_addr(&ipv6->mcast[i].address.in6_addr),
1649 				net_if_get_by_iface(iface), ret);
1650 		} else {
1651 			NET_DBG("Rejoined mcast address %s for %d",
1652 				net_sprint_ipv6_addr(&ipv6->mcast[i].address.in6_addr),
1653 				net_if_get_by_iface(iface));
1654 		}
1655 	}
1656 
1657 out:
1658 	net_if_unlock(iface);
1659 }
1660 
/* To be called when the interface goes operationally down so that multicast
 * groups can be rejoined when it comes back up.
 */
static void clear_joined_ipv6_mcast_groups(struct net_if *iface)
1665 {
1666 	struct net_if_ipv6 *ipv6;
1667 
1668 	net_if_lock(iface);
1669 
1670 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1671 		goto out;
1672 	}
1673 
1674 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1675 		goto out;
1676 	}
1677 
1678 	ARRAY_FOR_EACH(ipv6->mcast, i) {
1679 		if (!ipv6->mcast[i].is_used) {
1680 			continue;
1681 		}
1682 
1683 		net_if_ipv6_maddr_leave(iface, &ipv6->mcast[i]);
1684 	}
1685 
1686 out:
1687 	net_if_unlock(iface);
1688 }
1689 
static void address_expired(struct net_if_addr *ifaddr)
1691 {
1692 	NET_DBG("IPv6 address %s is expired",
1693 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1694 
1695 	sys_slist_find_and_remove(&active_address_lifetime_timers,
1696 				  &ifaddr->lifetime.node);
1697 
1698 	net_timeout_set(&ifaddr->lifetime, 0, 0);
1699 
1700 	STRUCT_SECTION_FOREACH(net_if, iface) {
1701 		ARRAY_FOR_EACH(iface->config.ip.ipv6->unicast, i) {
1702 			if (&iface->config.ip.ipv6->unicast[i] == ifaddr) {
1703 				net_if_ipv6_addr_rm(iface,
1704 					&iface->config.ip.ipv6->unicast[i].address.in6_addr);
1705 				return;
1706 			}
1707 		}
1708 	}
1709 }
1710 
static void address_lifetime_timeout(struct k_work *work)
1712 {
1713 	uint32_t next_update = UINT32_MAX;
1714 	uint32_t current_time = k_uptime_get_32();
1715 	struct net_if_addr *current, *next;
1716 
1717 	ARG_UNUSED(work);
1718 
1719 	k_mutex_lock(&lock, K_FOREVER);
1720 
1721 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
1722 					  current, next, lifetime.node) {
1723 		struct net_timeout *timeout = &current->lifetime;
1724 		uint32_t this_update = net_timeout_evaluate(timeout,
1725 							     current_time);
1726 
1727 		if (this_update == 0U) {
1728 			address_expired(current);
1729 			continue;
1730 		}
1731 
1732 		if (this_update < next_update) {
1733 			next_update = this_update;
1734 		}
1735 
1736 		if (current == next) {
1737 			break;
1738 		}
1739 	}
1740 
1741 	if (next_update != UINT32_MAX) {
1742 		NET_DBG("Waiting for %d ms", (int32_t)next_update);
1743 
1744 		k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
1745 	}
1746 
1747 	k_mutex_unlock(&lock);
1748 }
1749 
1750 #if defined(CONFIG_NET_TEST)
void net_address_lifetime_timeout(void)
1752 {
1753 	address_lifetime_timeout(NULL);
1754 }
1755 #endif
1756 
static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
1758 {
1759 	/* Make sure that we do not insert the address twice to
1760 	 * the lifetime timer list.
1761 	 */
1762 	sys_slist_find_and_remove(&active_address_lifetime_timers,
1763 				  &ifaddr->lifetime.node);
1764 
1765 	sys_slist_append(&active_address_lifetime_timers,
1766 			 &ifaddr->lifetime.node);
1767 
1768 	net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
1769 	k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
1770 }
1771 #else /* CONFIG_NET_NATIVE_IPV6 */
1772 #define address_start_timer(...)
static inline void net_if_ipv6_start_dad(struct net_if *iface,
1774 					 struct net_if_addr *ifaddr)
1775 {
1776 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1777 }
1778 #define join_mcast_nodes(...)
1779 #endif /* CONFIG_NET_NATIVE_IPV6 */
1780 
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
1782 					    struct net_if **ret)
1783 {
1784 	struct net_if_addr *ifaddr = NULL;
1785 
1786 	STRUCT_SECTION_FOREACH(net_if, iface) {
1787 		struct net_if_ipv6 *ipv6;
1788 
1789 		net_if_lock(iface);
1790 
1791 		ipv6 = iface->config.ip.ipv6;
1792 		if (!ipv6) {
1793 			net_if_unlock(iface);
1794 			continue;
1795 		}
1796 
1797 		ARRAY_FOR_EACH(ipv6->unicast, i) {
1798 			if (!ipv6->unicast[i].is_used ||
1799 			    ipv6->unicast[i].address.family != AF_INET6) {
1800 				continue;
1801 			}
1802 
1803 			if (net_ipv6_is_prefix(
1804 				    addr->s6_addr,
1805 				    ipv6->unicast[i].address.in6_addr.s6_addr,
1806 				    128)) {
1807 
1808 				if (ret) {
1809 					*ret = iface;
1810 				}
1811 
1812 				ifaddr = &ipv6->unicast[i];
1813 				net_if_unlock(iface);
1814 				goto out;
1815 			}
1816 		}
1817 
1818 		net_if_unlock(iface);
1819 	}
1820 
1821 out:
1822 	return ifaddr;
1823 }
1824 
struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
1826 						     struct in6_addr *addr)
1827 {
1828 	struct net_if_addr *ifaddr = NULL;
1829 	struct net_if_ipv6 *ipv6;
1830 
1831 	net_if_lock(iface);
1832 
1833 	ipv6 = iface->config.ip.ipv6;
1834 	if (!ipv6) {
1835 		goto out;
1836 	}
1837 
1838 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1839 		if (!ipv6->unicast[i].is_used ||
1840 		    ipv6->unicast[i].address.family != AF_INET6) {
1841 			continue;
1842 		}
1843 
1844 		if (net_ipv6_is_prefix(
1845 			    addr->s6_addr,
1846 			    ipv6->unicast[i].address.in6_addr.s6_addr,
1847 			    128)) {
1848 			ifaddr = &ipv6->unicast[i];
1849 			goto out;
1850 		}
1851 	}
1852 
1853 out:
1854 	net_if_unlock(iface);
1855 
1856 	return ifaddr;
1857 }
1858 
int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
1860 {
1861 	struct net_if *iface = NULL;
1862 	struct net_if_addr *if_addr;
1863 
1864 	if_addr = net_if_ipv6_addr_lookup(addr, &iface);
1865 	if (!if_addr) {
1866 		return 0;
1867 	}
1868 
1869 	return net_if_get_by_iface(iface);
1870 }
1871 
1872 #ifdef CONFIG_USERSPACE
static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
1874 					  const struct in6_addr *addr)
1875 {
1876 	struct in6_addr addr_v6;
1877 
1878 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1879 
1880 	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
1881 }
1882 #include <zephyr/syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
1883 #endif
1884 
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
1886 				      uint32_t vlifetime)
1887 {
1888 	k_mutex_lock(&lock, K_FOREVER);
1889 
1890 	NET_DBG("Updating expire time of %s by %u secs",
1891 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
1892 		vlifetime);
1893 
1894 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1895 
1896 	address_start_timer(ifaddr, vlifetime);
1897 
1898 	k_mutex_unlock(&lock);
1899 }
1900 
static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
1902 					  struct in6_addr *addr)
1903 {
1904 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1905 
1906 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1907 		if (!ipv6->unicast[i].is_used) {
1908 			continue;
1909 		}
1910 
1911 		if (net_ipv6_addr_cmp(
1912 			    addr, &ipv6->unicast[i].address.in6_addr)) {
1913 
1914 			return &ipv6->unicast[i];
1915 		}
1916 	}
1917 
1918 	return NULL;
1919 }
1920 
static inline void net_if_addr_init(struct net_if_addr *ifaddr,
1922 				    struct in6_addr *addr,
1923 				    enum net_addr_type addr_type,
1924 				    uint32_t vlifetime)
1925 {
1926 	ifaddr->is_used = true;
1927 	ifaddr->is_temporary = false;
1928 	ifaddr->address.family = AF_INET6;
1929 	ifaddr->addr_type = addr_type;
1930 	ifaddr->atomic_ref = ATOMIC_INIT(1);
1931 
1932 	net_ipaddr_copy(&ifaddr->address.in6_addr, addr);
1933 
1934 	/* FIXME - set the mcast addr for this node */
1935 
1936 	if (vlifetime) {
1937 		ifaddr->is_infinite = false;
1938 
1939 		NET_DBG("Expiring %s in %u secs",
1940 			net_sprint_ipv6_addr(addr),
1941 			vlifetime);
1942 
1943 		net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
1944 	} else {
1945 		ifaddr->is_infinite = true;
1946 	}
1947 }
1948 
1949 struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
1950 					 struct in6_addr *addr,
1951 					 enum net_addr_type addr_type,
1952 					 uint32_t vlifetime)
1953 {
1954 	struct net_if_addr *ifaddr = NULL;
1955 	struct net_if_ipv6 *ipv6;
1956 
1957 	net_if_lock(iface);
1958 
1959 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1960 		goto out;
1961 	}
1962 
1963 	ifaddr = ipv6_addr_find(iface, addr);
1964 	if (ifaddr) {
1965 		goto out;
1966 	}
1967 
1968 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1969 		if (ipv6->unicast[i].is_used) {
1970 			continue;
1971 		}
1972 
1973 		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
1974 				 vlifetime);
1975 
1976 		NET_DBG("[%zu] interface %d (%p) address %s type %s added", i,
1977 			net_if_get_by_iface(iface), iface,
1978 			net_sprint_ipv6_addr(addr),
1979 			net_addr_type2str(addr_type));
1980 
1981 		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
1982 		    !net_ipv6_is_addr_loopback(addr) &&
1983 		    !net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1984 			/* RFC 4862 5.4.2
1985 			 * Before sending a Neighbor Solicitation, an interface
1986 			 * MUST join the all-nodes multicast address and the
1987 			 * solicited-node multicast address of the tentative
1988 			 * address.
1989 			 */
1990 			/* The allnodes multicast group is only joined once as
1991 			 * net_ipv6_mld_join() checks if we have already
1992 			 * joined.
1993 			 */
1994 			join_mcast_nodes(iface,
1995 					 &ipv6->unicast[i].address.in6_addr);
1996 
1997 			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1998 		} else {
1999 			/* DAD is skipped here (P2P link, loopback or ND
2000 			 * disabled), so the address is usable immediately.
2001 			 */
2002 			ipv6->unicast[i].addr_state = NET_ADDR_PREFERRED;
2003 		}
2004 
2005 		net_mgmt_event_notify_with_info(
2006 			NET_EVENT_IPV6_ADDR_ADD, iface,
2007 			&ipv6->unicast[i].address.in6_addr,
2008 			sizeof(struct in6_addr));
2009 
2010 		ifaddr = &ipv6->unicast[i];
2011 		goto out;
2012 	}
2013 
2014 out:
2015 	net_if_unlock(iface);
2016 
2017 	return ifaddr;
2018 }
2019 
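/* Example (illustrative sketch; the interface and the address value below
 * are arbitrary examples): a manually configured, non-expiring address can
 * be added like this:
 *
 *	struct in6_addr my_addr;
 *
 *	net_addr_pton(AF_INET6, "2001:db8::1", &my_addr);
 *	(void)net_if_ipv6_addr_add(net_if_get_default(), &my_addr,
 *				   NET_ADDR_MANUAL, 0);
 *
 * A vlifetime of 0 marks the address as infinite; a non-zero value arms the
 * address lifetime timer as shown in net_if_addr_init() above.
 */
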
2020 bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
2021 {
2022 	struct net_if_ipv6 *ipv6;
2023 	bool result = true;
2024 	int ret;
2025 
2026 	if (iface == NULL || addr == NULL) {
2027 		return false;
2028 	}
2029 
2030 	net_if_lock(iface);
2031 
2032 	ipv6 = iface->config.ip.ipv6;
2033 	if (!ipv6) {
2034 		result = false;
2035 		goto out;
2036 	}
2037 
2038 	ret = net_if_addr_unref(iface, AF_INET6, addr);
2039 	if (ret > 0) {
2040 		NET_DBG("Address %s still in use (ref %d)",
2041 			net_sprint_ipv6_addr(addr), ret);
2042 		result = false;
2043 		goto out;
2044 	} else if (ret < 0) {
2045 		NET_DBG("Address %s not found (%d)",
2046 			net_sprint_ipv6_addr(addr), ret);
2047 	}
2048 
2049 out:
2050 	net_if_unlock(iface);
2051 
2052 	return result;
2053 }
2054 
2055 bool z_impl_net_if_ipv6_addr_add_by_index(int index,
2056 					  struct in6_addr *addr,
2057 					  enum net_addr_type addr_type,
2058 					  uint32_t vlifetime)
2059 {
2060 	struct net_if *iface;
2061 
2062 	iface = net_if_get_by_index(index);
2063 	if (!iface) {
2064 		return false;
2065 	}
2066 
2067 	return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
2068 		true : false;
2069 }
2070 
2071 #ifdef CONFIG_USERSPACE
2072 bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
2073 					  struct in6_addr *addr,
2074 					  enum net_addr_type addr_type,
2075 					  uint32_t vlifetime)
2076 {
2077 	struct in6_addr addr_v6;
2078 	struct net_if *iface;
2079 
2080 	iface = z_vrfy_net_if_get_by_index(index);
2081 	if (!iface) {
2082 		return false;
2083 	}
2084 
2085 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
2086 
2087 	return z_impl_net_if_ipv6_addr_add_by_index(index,
2088 						    &addr_v6,
2089 						    addr_type,
2090 						    vlifetime);
2091 }
2092 
2093 #include <zephyr/syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
2094 #endif /* CONFIG_USERSPACE */
2095 
2096 bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
2097 					 const struct in6_addr *addr)
2098 {
2099 	struct net_if *iface;
2100 
2101 	iface = net_if_get_by_index(index);
2102 	if (!iface) {
2103 		return false;
2104 	}
2105 
2106 	return net_if_ipv6_addr_rm(iface, addr);
2107 }
2108 
2109 #ifdef CONFIG_USERSPACE
2110 bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
2111 					 const struct in6_addr *addr)
2112 {
2113 	struct in6_addr addr_v6;
2114 	struct net_if *iface;
2115 
2116 	iface = z_vrfy_net_if_get_by_index(index);
2117 	if (!iface) {
2118 		return false;
2119 	}
2120 
2121 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
2122 
2123 	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
2124 }
2125 
2126 #include <zephyr/syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
2127 #endif /* CONFIG_USERSPACE */
2128 
2129 void net_if_ipv6_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
2130 			      void *user_data)
2131 {
2132 	struct net_if_ipv6 *ipv6;
2133 
2134 	if (iface == NULL) {
2135 		return;
2136 	}
2137 
2138 	net_if_lock(iface);
2139 
2140 	ipv6 = iface->config.ip.ipv6;
2141 	if (ipv6 == NULL) {
2142 		goto out;
2143 	}
2144 
2145 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2146 		struct net_if_addr *if_addr = &ipv6->unicast[i];
2147 
2148 		if (!if_addr->is_used) {
2149 			continue;
2150 		}
2151 
2152 		cb(iface, if_addr, user_data);
2153 	}
2154 
2155 out:
2156 	net_if_unlock(iface);
2157 }
2158 
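/* Example callback (illustrative sketch) for the iterator above, logging
 * every unicast address of an interface:
 *
 *	static void print_addr(struct net_if *iface, struct net_if_addr *addr,
 *			       void *user_data)
 *	{
 *		ARG_UNUSED(user_data);
 *
 *		NET_DBG("iface %d has %s", net_if_get_by_iface(iface),
 *			net_sprint_ipv6_addr(&addr->address.in6_addr));
 *	}
 *
 *	net_if_ipv6_addr_foreach(iface, print_addr, NULL);
 *
 * Note that the callback is invoked while the interface lock is held.
 */
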
2159 struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
2160 						const struct in6_addr *addr)
2161 {
2162 	struct net_if_mcast_addr *ifmaddr = NULL;
2163 	struct net_if_ipv6 *ipv6;
2164 
2165 	net_if_lock(iface);
2166 
2167 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2168 		goto out;
2169 	}
2170 
2171 	if (!net_ipv6_is_addr_mcast(addr)) {
2172 		NET_DBG("Address %s is not a multicast address.",
2173 			net_sprint_ipv6_addr(addr));
2174 		goto out;
2175 	}
2176 
2177 	if (net_if_ipv6_maddr_lookup(addr, &iface)) {
2178 		NET_WARN("Multicast address %s is already registered.",
2179 			net_sprint_ipv6_addr(addr));
2180 		goto out;
2181 	}
2182 
2183 	ARRAY_FOR_EACH(ipv6->mcast, i) {
2184 		if (ipv6->mcast[i].is_used) {
2185 			continue;
2186 		}
2187 
2188 		ipv6->mcast[i].is_used = true;
2189 		ipv6->mcast[i].address.family = AF_INET6;
2190 		memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
2191 
2192 		NET_DBG("[%zu] interface %d (%p) address %s added", i,
2193 			net_if_get_by_iface(iface), iface,
2194 			net_sprint_ipv6_addr(addr));
2195 
2196 		net_mgmt_event_notify_with_info(
2197 			NET_EVENT_IPV6_MADDR_ADD, iface,
2198 			&ipv6->mcast[i].address.in6_addr,
2199 			sizeof(struct in6_addr));
2200 
2201 		ifmaddr = &ipv6->mcast[i];
2202 		goto out;
2203 	}
2204 
2205 out:
2206 	net_if_unlock(iface);
2207 
2208 	return ifmaddr;
2209 }
2210 
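/* Note: the registration above only records the group address on the
 * interface and emits the management event; marking the group as joined is
 * a separate step. An illustrative sketch, where "group" is assumed to be a
 * valid multicast address (ff00::/8):
 *
 *	struct net_if_mcast_addr *maddr;
 *
 *	maddr = net_if_ipv6_maddr_add(iface, &group);
 *	if (maddr != NULL) {
 *		net_if_ipv6_maddr_join(iface, maddr);
 *	}
 */
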
2211 bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
2212 {
2213 	bool ret = false;
2214 	struct net_if_ipv6 *ipv6;
2215 
2216 	net_if_lock(iface);
2217 
2218 	ipv6 = iface->config.ip.ipv6;
2219 	if (!ipv6) {
2220 		goto out;
2221 	}
2222 
2223 	ARRAY_FOR_EACH(ipv6->mcast, i) {
2224 		if (!ipv6->mcast[i].is_used) {
2225 			continue;
2226 		}
2227 
2228 		if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
2229 				       addr)) {
2230 			continue;
2231 		}
2232 
2233 		ipv6->mcast[i].is_used = false;
2234 
2235 		NET_DBG("[%zu] interface %d (%p) address %s removed",
2236 			i, net_if_get_by_iface(iface), iface,
2237 			net_sprint_ipv6_addr(addr));
2238 
2239 		net_mgmt_event_notify_with_info(
2240 			NET_EVENT_IPV6_MADDR_DEL, iface,
2241 			&ipv6->mcast[i].address.in6_addr,
2242 			sizeof(struct in6_addr));
2243 
2244 		ret = true;
2245 		goto out;
2246 	}
2247 
2248 out:
2249 	net_if_unlock(iface);
2250 
2251 	return ret;
2252 }
2253 
2254 void net_if_ipv6_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
2255 			       void *user_data)
2256 {
2257 	struct net_if_ipv6 *ipv6;
2258 
2259 	if (iface == NULL || cb == NULL) {
2260 		return;
2261 	}
2262 
2263 	net_if_lock(iface);
2264 
2265 	ipv6 = iface->config.ip.ipv6;
2266 	if (!ipv6) {
2267 		goto out;
2268 	}
2269 
2270 	for (int i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2271 		if (!ipv6->mcast[i].is_used) {
2272 			continue;
2273 		}
2274 
2275 		cb(iface, &ipv6->mcast[i], user_data);
2276 	}
2277 
2278 out:
2279 	net_if_unlock(iface);
2280 }
2281 
2282 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
2283 						   struct net_if **ret)
2284 {
2285 	struct net_if_mcast_addr *ifmaddr = NULL;
2286 
2287 	STRUCT_SECTION_FOREACH(net_if, iface) {
2288 		struct net_if_ipv6 *ipv6;
2289 
2290 		if (ret && *ret && iface != *ret) {
2291 			continue;
2292 		}
2293 
2294 		net_if_lock(iface);
2295 
2296 		ipv6 = iface->config.ip.ipv6;
2297 		if (!ipv6) {
2298 			net_if_unlock(iface);
2299 			continue;
2300 		}
2301 
2302 		ARRAY_FOR_EACH(ipv6->mcast, i) {
2303 			if (!ipv6->mcast[i].is_used ||
2304 			    ipv6->mcast[i].address.family != AF_INET6) {
2305 				continue;
2306 			}
2307 
2308 			if (net_ipv6_is_prefix(
2309 				    maddr->s6_addr,
2310 				    ipv6->mcast[i].address.in6_addr.s6_addr,
2311 				    128)) {
2312 				if (ret) {
2313 					*ret = iface;
2314 				}
2315 
2316 				ifmaddr = &ipv6->mcast[i];
2317 				net_if_unlock(iface);
2318 				goto out;
2319 			}
2320 		}
2321 
2322 		net_if_unlock(iface);
2323 	}
2324 
2325 out:
2326 	return ifmaddr;
2327 }
2328 
2329 void net_if_ipv6_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
2330 {
2331 	if (iface == NULL || addr == NULL) {
2332 		return;
2333 	}
2334 
2335 	net_if_lock(iface);
2336 	addr->is_joined = false;
2337 	net_if_unlock(iface);
2338 }
2339 
2340 void net_if_ipv6_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
2341 {
2342 	if (iface == NULL || addr == NULL) {
2343 		return;
2344 	}
2345 
2346 	net_if_lock(iface);
2347 	addr->is_joined = true;
2348 	net_if_unlock(iface);
2349 }
2350 
2351 struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
2352 				    enum net_addr_state addr_state)
2353 {
2354 	struct in6_addr *addr = NULL;
2355 	struct net_if_ipv6 *ipv6;
2356 
2357 	net_if_lock(iface);
2358 
2359 	ipv6 = iface->config.ip.ipv6;
2360 	if (!ipv6) {
2361 		goto out;
2362 	}
2363 
2364 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2365 		if (!ipv6->unicast[i].is_used ||
2366 		    (addr_state != NET_ADDR_ANY_STATE &&
2367 		     ipv6->unicast[i].addr_state != addr_state) ||
2368 		    ipv6->unicast[i].address.family != AF_INET6) {
2369 			continue;
2370 		}
2371 
2372 		if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2373 			addr = &ipv6->unicast[i].address.in6_addr;
2374 			goto out;
2375 		}
2376 	}
2377 
2378 out:
2379 	net_if_unlock(iface);
2380 
2381 	return addr;
2382 }
2383 
2384 struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
2385 					 struct net_if **iface)
2386 {
2387 	struct in6_addr *addr = NULL;
2388 
2389 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2390 		net_if_lock(tmp);
2391 
2392 		addr = net_if_ipv6_get_ll(tmp, state);
2393 		if (addr) {
2394 			if (iface) {
2395 				*iface = tmp;
2396 			}
2397 
2398 			net_if_unlock(tmp);
2399 			goto out;
2400 		}
2401 
2402 		net_if_unlock(tmp);
2403 	}
2404 
2405 out:
2406 	return addr;
2407 }
2408 
2409 static inline struct in6_addr *check_global_addr(struct net_if *iface,
2410 						 enum net_addr_state state)
2411 {
2412 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2413 
2414 	if (!ipv6) {
2415 		return NULL;
2416 	}
2417 
2418 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2419 		if (!ipv6->unicast[i].is_used ||
2420 		    (ipv6->unicast[i].addr_state != state) ||
2421 		    ipv6->unicast[i].address.family != AF_INET6) {
2422 			continue;
2423 		}
2424 
2425 		if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2426 			return &ipv6->unicast[i].address.in6_addr;
2427 		}
2428 	}
2429 
2430 	return NULL;
2431 }
2432 
2433 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
2434 					     struct net_if **iface)
2435 {
2436 	struct in6_addr *addr = NULL;
2437 
2438 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2439 		if (iface && *iface && tmp != *iface) {
2440 			continue;
2441 		}
2442 
2443 		net_if_lock(tmp);
2444 		addr = check_global_addr(tmp, state);
2445 		if (addr) {
2446 			if (iface) {
2447 				*iface = tmp;
2448 			}
2449 
2450 			net_if_unlock(tmp);
2451 			goto out;
2452 		}
2453 
2454 		net_if_unlock(tmp);
2455 	}
2456 
2457 out:
2458 
2459 	return addr;
2460 }
2461 
2462 #if defined(CONFIG_NET_NATIVE_IPV6)
2463 static void remove_prefix_addresses(struct net_if *iface,
2464 				    struct net_if_ipv6 *ipv6,
2465 				    struct in6_addr *addr,
2466 				    uint8_t len)
2467 {
2468 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2469 		if (!ipv6->unicast[i].is_used ||
2470 		    ipv6->unicast[i].address.family != AF_INET6 ||
2471 		    ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) {
2472 			continue;
2473 		}
2474 
2475 		if (net_ipv6_is_prefix(
2476 				addr->s6_addr,
2477 				ipv6->unicast[i].address.in6_addr.s6_addr,
2478 				len)) {
2479 			net_if_ipv6_addr_rm(iface,
2480 					    &ipv6->unicast[i].address.in6_addr);
2481 		}
2482 	}
2483 }
2484 
2485 static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
2486 {
2487 	struct net_if_ipv6 *ipv6;
2488 
2489 	net_if_lock(ifprefix->iface);
2490 
2491 	NET_DBG("Prefix %s/%d expired",
2492 		net_sprint_ipv6_addr(&ifprefix->prefix),
2493 		ifprefix->len);
2494 
2495 	ifprefix->is_used = false;
2496 
2497 	if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
		net_if_unlock(ifprefix->iface);
2498 		return;
2499 	}
2500 
2501 	/* Also remove all autoconf addresses that have the same prefix.
2502 	 */
2503 	remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
2504 				ifprefix->len);
2505 
2506 	if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2507 		struct net_event_ipv6_prefix info;
2508 
2509 		net_ipaddr_copy(&info.addr, &ifprefix->prefix);
2510 		info.len = ifprefix->len;
2511 		info.lifetime = 0;
2512 
2513 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2514 						ifprefix->iface,
2515 						(const void *) &info,
2516 						sizeof(struct net_event_ipv6_prefix));
2517 	} else {
2518 		net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface);
2519 	}
2520 
2521 	net_if_unlock(ifprefix->iface);
2522 }
2523 
2524 static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
2525 {
2526 	k_mutex_lock(&lock, K_FOREVER);
2527 
2528 	NET_DBG("IPv6 prefix %s/%d removed",
2529 		net_sprint_ipv6_addr(&ifprefix->prefix),
2530 		ifprefix->len);
2531 
2532 	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2533 				  &ifprefix->lifetime.node);
2534 
2535 	net_timeout_set(&ifprefix->lifetime, 0, 0);
2536 
2537 	k_mutex_unlock(&lock);
2538 }
2539 
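/* Prefixes whose lifetime has run out are first collected on a local list
 * while the global lock is held and are processed only after the lock has
 * been released, presumably so that the per-prefix expiry handling (which
 * takes the interface lock and emits net_mgmt events) does not run under
 * the global lock.
 */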
2540 static void prefix_lifetime_timeout(struct k_work *work)
2541 {
2542 	uint32_t next_update = UINT32_MAX;
2543 	uint32_t current_time = k_uptime_get_32();
2544 	struct net_if_ipv6_prefix *current, *next;
2545 	sys_slist_t expired_list;
2546 
2547 	ARG_UNUSED(work);
2548 
2549 	sys_slist_init(&expired_list);
2550 
2551 	k_mutex_lock(&lock, K_FOREVER);
2552 
2553 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
2554 					  current, next, lifetime.node) {
2555 		struct net_timeout *timeout = &current->lifetime;
2556 		uint32_t this_update = net_timeout_evaluate(timeout,
2557 							    current_time);
2558 
2559 		if (this_update == 0U) {
2560 			sys_slist_find_and_remove(
2561 				&active_prefix_lifetime_timers,
2562 				&current->lifetime.node);
2563 			sys_slist_append(&expired_list,
2564 					 &current->lifetime.node);
2565 			continue;
2566 		}
2567 
2568 		if (this_update < next_update) {
2569 			next_update = this_update;
2570 		}
2571 
2572 		if (current == next) {
2573 			break;
2574 		}
2575 	}
2576 
2577 	if (next_update != UINT32_MAX) {
2578 		k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
2579 	}
2580 
2581 	k_mutex_unlock(&lock);
2582 
2583 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, current, lifetime.node) {
2584 		prefix_lifetime_expired(current);
2585 	}
2586 }
2587 
2588 static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
2589 			       uint32_t lifetime)
2590 {
2591 	k_mutex_lock(&lock, K_FOREVER);
2592 
2593 	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2594 					&ifprefix->lifetime.node);
2595 	sys_slist_append(&active_prefix_lifetime_timers,
2596 			 &ifprefix->lifetime.node);
2597 
2598 	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
2599 	k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);
2600 
2601 	k_mutex_unlock(&lock);
2602 }
2603 
2604 static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
2605 						   struct in6_addr *prefix,
2606 						   uint8_t prefix_len)
2607 {
2608 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2609 
2610 	if (!ipv6) {
2611 		return NULL;
2612 	}
2613 
2614 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2615 		if (!ipv6->prefix[i].is_used) {
2616 			continue;
2617 		}
2618 
2619 		if (net_ipv6_addr_cmp(prefix, &ipv6->prefix[i].prefix) &&
2620 		    prefix_len == ipv6->prefix[i].len) {
2621 			return &ipv6->prefix[i];
2622 		}
2623 	}
2624 
2625 	return NULL;
2626 }
2627 
2628 static void net_if_ipv6_prefix_init(struct net_if *iface,
2629 				    struct net_if_ipv6_prefix *ifprefix,
2630 				    struct in6_addr *addr, uint8_t len,
2631 				    uint32_t lifetime)
2632 {
2633 	ifprefix->is_used = true;
2634 	ifprefix->len = len;
2635 	ifprefix->iface = iface;
2636 	net_ipaddr_copy(&ifprefix->prefix, addr);
2637 
2638 	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
2639 		ifprefix->is_infinite = true;
2640 	} else {
2641 		ifprefix->is_infinite = false;
2642 	}
2643 }
2644 
2645 struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
2646 						  struct in6_addr *prefix,
2647 						  uint8_t len,
2648 						  uint32_t lifetime)
2649 {
2650 	struct net_if_ipv6_prefix *ifprefix = NULL;
2651 	struct net_if_ipv6 *ipv6;
2652 
2653 	net_if_lock(iface);
2654 
2655 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2656 		goto out;
2657 	}
2658 
2659 	ifprefix = ipv6_prefix_find(iface, prefix, len);
2660 	if (ifprefix) {
2661 		goto out;
2662 	}
2663 
2664 	if (!ipv6) {
2665 		goto out;
2666 	}
2667 
2668 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2669 		if (ipv6->prefix[i].is_used) {
2670 			continue;
2671 		}
2672 
2673 		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
2674 					len, lifetime);
2675 
2676 		NET_DBG("[%zu] interface %p prefix %s/%d added", i, iface,
2677 			net_sprint_ipv6_addr(prefix), len);
2678 
2679 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2680 			struct net_event_ipv6_prefix info;
2681 
2682 			net_ipaddr_copy(&info.addr, prefix);
2683 			info.len = len;
2684 			info.lifetime = lifetime;
2685 
2686 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_ADD,
2687 							iface, (const void *) &info,
2688 							sizeof(struct net_event_ipv6_prefix));
2689 		} else {
2690 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_ADD, iface);
2691 		}
2692 
2693 		ifprefix = &ipv6->prefix[i];
2694 		goto out;
2695 	}
2696 
2697 out:
2698 	net_if_unlock(iface);
2699 
2700 	return ifprefix;
2701 }
2702 
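/* Example (illustrative sketch; the prefix and lifetime values are
 * arbitrary): adding a /64 on-link prefix with a one hour lifetime:
 *
 *	struct in6_addr prefix;
 *
 *	net_addr_pton(AF_INET6, "2001:db8::", &prefix);
 *	(void)net_if_ipv6_prefix_add(iface, &prefix, 64, 3600);
 *
 * Note that the lifetime given here only selects between finite and
 * infinite; the expiry timer itself is armed separately with
 * net_if_ipv6_prefix_set_timer().
 */
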
2703 bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
2704 			   uint8_t len)
2705 {
2706 	bool ret = false;
2707 	struct net_if_ipv6 *ipv6;
2708 
2709 	net_if_lock(iface);
2710 
2711 	ipv6 = iface->config.ip.ipv6;
2712 	if (!ipv6) {
2713 		goto out;
2714 	}
2715 
2716 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2717 		if (!ipv6->prefix[i].is_used) {
2718 			continue;
2719 		}
2720 
2721 		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
2722 		    ipv6->prefix[i].len != len) {
2723 			continue;
2724 		}
2725 
2726 		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);
2727 
2728 		ipv6->prefix[i].is_used = false;
2729 
2730 		/* Also remove all autoconf addresses that have the same
2731 		 * prefix.
2732 		 */
2733 		remove_prefix_addresses(iface, ipv6, addr, len);
2734 
2735 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2736 			struct net_event_ipv6_prefix info;
2737 
2738 			net_ipaddr_copy(&info.addr, addr);
2739 			info.len = len;
2740 			info.lifetime = 0;
2741 
2742 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2743 							iface, (const void *) &info,
2744 							sizeof(struct net_event_ipv6_prefix));
2745 		} else {
2746 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, iface);
2747 		}
2748 
2749 		ret = true;
2750 		goto out;
2751 	}
2752 
2753 out:
2754 	net_if_unlock(iface);
2755 
2756 	return ret;
2757 }
2758 
2759 struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
2760 						  const struct in6_addr *addr)
2761 {
2762 	struct net_if_ipv6_prefix *prefix = NULL;
2763 	struct net_if_ipv6 *ipv6;
2764 
2765 	if (!iface) {
2766 		iface = net_if_get_default();
2767 	}
2768 
2769 	if (!iface) {
2770 		return NULL;
2771 	}
2772 
2773 	net_if_lock(iface);
2774 
2775 	ipv6 = iface->config.ip.ipv6;
2776 	if (!ipv6) {
2777 		goto out;
2778 	}
2779 
2780 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2781 		if (!ipv6->prefix[i].is_used) {
2782 			continue;
2783 		}
2784 
2785 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2786 				       addr->s6_addr,
2787 				       ipv6->prefix[i].len)) {
2788 			if (!prefix || prefix->len > ipv6->prefix[i].len) {
2789 				prefix = &ipv6->prefix[i];
2790 			}
2791 		}
2792 	}
2793 
2794 out:
2795 	net_if_unlock(iface);
2796 
2797 	return prefix;
2798 }
2799 
2800 struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
2801 						     struct in6_addr *addr,
2802 						     uint8_t len)
2803 {
2804 	struct net_if_ipv6_prefix *prefix = NULL;
2805 	struct net_if_ipv6 *ipv6;
2806 
2807 	net_if_lock(iface);
2808 
2809 	ipv6 = iface->config.ip.ipv6;
2810 	if (!ipv6) {
2811 		goto out;
2812 	}
2813 
2814 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2815 		if (!ipv6->prefix[i].is_used) {
2816 			continue;
2817 		}
2818 
2819 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2820 				       addr->s6_addr, len)) {
2821 			prefix = &ipv6->prefix[i];
2822 			goto out;
2823 		}
2824 	}
2825 
2826 out:
2827 	net_if_unlock(iface);
2828 
2829 	return prefix;
2830 }
2831 
2832 bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
2833 {
2834 	bool ret = false;
2835 
2836 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2837 		struct net_if_ipv6 *ipv6;
2838 
2839 		if (iface && *iface && *iface != tmp) {
2840 			continue;
2841 		}
2842 
2843 		net_if_lock(tmp);
2844 
2845 		ipv6 = tmp->config.ip.ipv6;
2846 		if (!ipv6) {
2847 			net_if_unlock(tmp);
2848 			continue;
2849 		}
2850 
2851 		ARRAY_FOR_EACH(ipv6->prefix, i) {
2852 			if (ipv6->prefix[i].is_used &&
2853 			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2854 					       addr->s6_addr,
2855 					       ipv6->prefix[i].len)) {
2856 				if (iface) {
2857 					*iface = tmp;
2858 				}
2859 
2860 				ret = true;
2861 				net_if_unlock(tmp);
2862 				goto out;
2863 			}
2864 		}
2865 
2866 		net_if_unlock(tmp);
2867 	}
2868 
2869 out:
2870 	return ret;
2871 }
2872 
2873 void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
2874 				  uint32_t lifetime)
2875 {
2876 	/* No need to set a timer for infinite timeout */
2877 	if (lifetime == 0xffffffff) {
2878 		return;
2879 	}
2880 
2881 	NET_DBG("Prefix lifetime %u sec", lifetime);
2882 
2883 	prefix_start_timer(prefix, lifetime);
2884 }
2885 
2886 void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
2887 {
2888 	if (!prefix->is_used) {
2889 		return;
2890 	}
2891 
2892 	prefix_timer_remove(prefix);
2893 }
2894 
2895 struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
2896 						struct in6_addr *addr)
2897 {
2898 	return iface_router_lookup(iface, AF_INET6, addr);
2899 }
2900 
2901 struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
2902 						      struct in6_addr *addr)
2903 {
2904 	return iface_router_find_default(iface, AF_INET6, addr);
2905 }
2906 
2907 void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
2908 					uint16_t lifetime)
2909 {
2910 	NET_DBG("Updating expire time of %s by %u secs",
2911 		net_sprint_ipv6_addr(&router->address.in6_addr),
2912 		lifetime);
2913 
2914 	router->life_start = k_uptime_get_32();
2915 	router->lifetime = lifetime;
2916 
2917 	iface_router_update_timer(router->life_start);
2918 }
2919 
2920 struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
2921 					     struct in6_addr *addr,
2922 					     uint16_t lifetime)
2923 {
2924 	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
2925 }
2926 
2927 bool net_if_ipv6_router_rm(struct net_if_router *router)
2928 {
2929 	return iface_router_rm(router);
2930 }
2931 
2932 uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface)
2933 {
2934 	int ret = 0;
2935 
2936 	net_if_lock(iface);
2937 
2938 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2939 		goto out;
2940 	}
2941 
2942 	if (!iface->config.ip.ipv6) {
2943 		goto out;
2944 	}
2945 
2946 	ret = iface->config.ip.ipv6->mcast_hop_limit;
2947 out:
2948 	net_if_unlock(iface);
2949 
2950 	return ret;
2951 }
2952 
2953 void net_if_ipv6_set_mcast_hop_limit(struct net_if *iface, uint8_t hop_limit)
2954 {
2955 	net_if_lock(iface);
2956 
2957 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2958 		goto out;
2959 	}
2960 
2961 	if (!iface->config.ip.ipv6) {
2962 		goto out;
2963 	}
2964 
2965 	iface->config.ip.ipv6->mcast_hop_limit = hop_limit;
2966 out:
2967 	net_if_unlock(iface);
2968 }
2969 
2970 uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface)
2971 {
2972 	int ret = 0;
2973 
2974 	net_if_lock(iface);
2975 
2976 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2977 		goto out;
2978 	}
2979 
2980 	if (!iface->config.ip.ipv6) {
2981 		goto out;
2982 	}
2983 
2984 	ret = iface->config.ip.ipv6->hop_limit;
2985 out:
2986 	net_if_unlock(iface);
2987 
2988 	return ret;
2989 }
2990 
2991 void net_if_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit)
2992 {
2993 	net_if_lock(iface);
2994 
2995 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2996 		goto out;
2997 	}
2998 
2999 	if (!iface->config.ip.ipv6) {
3000 		goto out;
3001 	}
3002 
3003 	iface->config.ip.ipv6->hop_limit = hop_limit;
3004 out:
3005 	net_if_unlock(iface);
3006 }
3007 
3008 #endif /* CONFIG_NET_NATIVE_IPV6 */
3009 
3010 static uint8_t get_diff_ipv6(const struct in6_addr *src,
3011 			  const struct in6_addr *dst)
3012 {
3013 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 16);
3014 }
3015 
3016 static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
3017 {
3018 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3019 	    addr->address.family == AF_INET6 &&
3020 	    !net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
3021 		return true;
3022 	}
3023 
3024 	return false;
3025 }
3026 
3027 static bool use_public_address(bool prefer_public, bool is_temporary,
3028 			       int flags)
3029 {
3030 	if (IS_ENABLED(CONFIG_NET_IPV6_PE)) {
3031 		if (!prefer_public && is_temporary) {
3032 
3033 			/* Allow socket to override the kconfig option */
3034 			if (flags & IPV6_PREFER_SRC_PUBLIC) {
3035 				return true;
3036 			}
3037 
3038 			return false;
3039 		}
3040 	}
3041 
3042 	if (flags & IPV6_PREFER_SRC_TMP) {
3043 		return false;
3044 	}
3045 
3046 	return true;
3047 }
3048 
3049 static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
3050 						   const struct in6_addr *dst,
3051 						   uint8_t prefix_len,
3052 						   uint8_t *best_so_far,
3053 						   int flags)
3054 {
3055 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
3056 	struct net_if_addr *public_addr = NULL;
3057 	struct in6_addr *src = NULL;
3058 	uint8_t public_addr_len = 0;
3059 	struct in6_addr *temp_addr = NULL;
3060 	uint8_t len, temp_addr_len = 0;
3061 	bool ret;
3062 
3063 	net_if_lock(iface);
3064 
3065 	ipv6 = iface->config.ip.ipv6;
3066 	if (!ipv6) {
3067 		goto out;
3068 	}
3069 
3070 	ARRAY_FOR_EACH(ipv6->unicast, i) {
3071 		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
3072 			continue;
3073 		}
3074 
3075 		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
3076 		if (len >= prefix_len) {
3077 			len = prefix_len;
3078 		}
3079 
3080 		if (len >= *best_so_far) {
3081 			/* Mesh local address can only be selected for the same
3082 			 * subnet.
3083 			 */
3084 			if (ipv6->unicast[i].is_mesh_local && len < 64 &&
3085 			    !net_ipv6_is_addr_mcast_mesh(dst)) {
3086 				continue;
3087 			}
3088 
3089 			ret = use_public_address(iface->pe_prefer_public,
3090 						 ipv6->unicast[i].is_temporary,
3091 						 flags);
3092 			if (!ret) {
3093 				temp_addr = &ipv6->unicast[i].address.in6_addr;
3094 				temp_addr_len = len;
3095 
3096 				*best_so_far = len;
3097 				src = &ipv6->unicast[i].address.in6_addr;
3098 				continue;
3099 			}
3100 
3101 			if (!ipv6->unicast[i].is_temporary) {
3102 				public_addr = &ipv6->unicast[i];
3103 				public_addr_len = len;
3104 			}
3105 
3106 			*best_so_far = len;
3107 			src = &ipv6->unicast[i].address.in6_addr;
3108 		}
3109 	}
3110 
3111 	if (IS_ENABLED(CONFIG_NET_IPV6_PE) && !iface->pe_prefer_public && temp_addr) {
3112 		if (temp_addr_len >= *best_so_far) {
3113 			*best_so_far = temp_addr_len;
3114 			src = temp_addr;
3115 		}
3116 	} else {
3117 		/* By default prefer always public address if found */
3118 		if (flags & IPV6_PREFER_SRC_PUBLIC) {
3119 use_public:
3120 			if (public_addr &&
3121 			    !net_ipv6_addr_cmp(&public_addr->address.in6_addr, src)) {
3122 				src = &public_addr->address.in6_addr;
3123 				*best_so_far = public_addr_len;
3124 			}
3125 		} else if (flags & IPV6_PREFER_SRC_TMP) {
3126 			if (temp_addr && !net_ipv6_addr_cmp(temp_addr, src)) {
3127 				src = temp_addr;
3128 				*best_so_far = temp_addr_len;
3129 			}
3130 		} else if (flags & IPV6_PREFER_SRC_PUBTMP_DEFAULT) {
3131 			goto use_public;
3132 		}
3133 	}
3134 
3135 out:
3136 	net_if_unlock(iface);
3137 
3138 	return src;
3139 }
3140 
3141 const struct in6_addr *net_if_ipv6_select_src_addr_hint(struct net_if *dst_iface,
3142 							const struct in6_addr *dst,
3143 							int flags)
3144 {
3145 	const struct in6_addr *src = NULL;
3146 	uint8_t best_match = 0U;
3147 
3148 	if (dst == NULL) {
3149 		return NULL;
3150 	}
3151 
3152 	if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast_link(dst)) {
3153 		struct net_if_ipv6_prefix *prefix;
3154 		uint8_t prefix_len = 128;
3155 
3156 		prefix = net_if_ipv6_prefix_get(dst_iface, dst);
3157 		if (prefix) {
3158 			prefix_len = prefix->len;
3159 		}
3160 
3161 		/* If caller has supplied interface, then use that */
3162 		if (dst_iface) {
3163 			src = net_if_ipv6_get_best_match(dst_iface, dst,
3164 							 prefix_len,
3165 							 &best_match,
3166 							 flags);
3167 		} else {
3168 			STRUCT_SECTION_FOREACH(net_if, iface) {
3169 				struct in6_addr *addr;
3170 
3171 				addr = net_if_ipv6_get_best_match(iface, dst,
3172 								  prefix_len,
3173 								  &best_match,
3174 								  flags);
3175 				if (addr) {
3176 					src = addr;
3177 				}
3178 			}
3179 		}
3180 
3181 	} else {
3182 		if (dst_iface) {
3183 			src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
3184 		} else {
3185 			struct in6_addr *addr;
3186 
3187 			addr = net_if_ipv6_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
3188 			if (addr) {
3189 				src = addr;
3190 				goto out;
3191 			}
3192 
3193 			STRUCT_SECTION_FOREACH(net_if, iface) {
3194 				addr = net_if_ipv6_get_ll(iface,
3195 							  NET_ADDR_PREFERRED);
3196 				if (addr) {
3197 					src = addr;
3198 					break;
3199 				}
3200 			}
3201 		}
3202 	}
3203 
3204 	if (!src) {
3205 		src = net_ipv6_unspecified_address();
3206 	}
3207 
3208 out:
3209 	return src;
3210 }
3211 
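/* Example (illustrative sketch): a caller that explicitly wants a temporary
 * (privacy extension) source address for a destination can pass the
 * corresponding hint flag:
 *
 *	const struct in6_addr *src;
 *
 *	src = net_if_ipv6_select_src_addr_hint(NULL, &dst,
 *					       IPV6_PREFER_SRC_TMP);
 *
 * When nothing suitable is found the unspecified address (::) is returned
 * instead of NULL, as can be seen above.
 */
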
3212 const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
3213 						   const struct in6_addr *dst)
3214 {
3215 	return net_if_ipv6_select_src_addr_hint(dst_iface,
3216 						dst,
3217 						IPV6_PREFER_SRC_PUBTMP_DEFAULT);
3218 }
3219 
3220 struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
3221 {
3222 	struct net_if *iface = NULL;
3223 	const struct in6_addr *src;
3224 
3225 	src = net_if_ipv6_select_src_addr(NULL, dst);
3226 	if (src != net_ipv6_unspecified_address()) {
3227 		net_if_ipv6_addr_lookup(src, &iface);
3228 	}
3229 
3230 	if (iface == NULL) {
3231 		iface = net_if_get_default();
3232 	}
3233 
3234 	return iface;
3235 }
3236 
3237 #if defined(CONFIG_NET_NATIVE_IPV6)
3238 
3239 uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
3240 {
3241 	uint32_t min_reachable, max_reachable;
3242 
3243 	min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
3244 			/ MIN_RANDOM_DENOM;
3245 	max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
3246 			/ MAX_RANDOM_DENOM;
3247 
3248 	NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
3249 		max_reachable);
3250 
3251 	return min_reachable +
3252 	       sys_rand32_get() % (max_reachable - min_reachable);
3253 }
3254 
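/* Worked example: RFC 4861 specifies a base REACHABLE_TIME of 30 seconds
 * and random factors of 0.5 and 1.5. With base_reachable_time = 30000 ms
 * the bounds above become min_reachable = 15000 and max_reachable = 45000,
 * so the randomized reachable time is drawn uniformly from
 * [15000, 45000) ms.
 */
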
3255 static void iface_ipv6_start(struct net_if *iface)
3256 {
3257 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
3258 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
3259 		return;
3260 	}
3261 
3262 	if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
3263 		net_if_start_dad(iface);
3264 	} else {
3265 		struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
3266 
3267 		if (ipv6 != NULL) {
3268 			join_mcast_nodes(iface,
3269 					 &ipv6->mcast[0].address.in6_addr);
3270 		}
3271 	}
3272 
3273 	net_if_start_rs(iface);
3274 }
3275 
3276 static void iface_ipv6_stop(struct net_if *iface)
3277 {
3278 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
3279 
3280 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
3281 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
3282 		return;
3283 	}
3284 
3285 	if (ipv6 == NULL) {
3286 		return;
3287 	}
3288 
3289 	IF_ENABLED(CONFIG_NET_IPV6_IID_STABLE, (ipv6->network_counter++));
3290 	IF_ENABLED(CONFIG_NET_IPV6_IID_STABLE, (ipv6->iid = NULL));
3291 
3292 	/* Remove all autoconf addresses */
3293 	ARRAY_FOR_EACH(ipv6->unicast, i) {
3294 		if (ipv6->unicast[i].is_used &&
3295 		    ipv6->unicast[i].address.family == AF_INET6 &&
3296 		    ipv6->unicast[i].addr_type == NET_ADDR_AUTOCONF) {
3297 			(void)net_if_ipv6_addr_rm(iface,
3298 						  &ipv6->unicast[i].address.in6_addr);
3299 		}
3300 	}
3301 }
3302 
3303 static void iface_ipv6_init(int if_count)
3304 {
3305 	iface_ipv6_dad_init();
3306 	iface_ipv6_nd_init();
3307 
3308 	k_work_init_delayable(&address_lifetime_timer,
3309 			      address_lifetime_timeout);
3310 	k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);
3311 
3312 	if (if_count > ARRAY_SIZE(ipv6_addresses)) {
3313 		NET_WARN("You have %zu IPv6 net_if addresses but %d "
3314 			 "network interfaces", ARRAY_SIZE(ipv6_addresses),
3315 			 if_count);
3316 		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
3317 			 "value.");
3318 	}
3319 
3320 	ARRAY_FOR_EACH(ipv6_addresses, i) {
3321 		ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
3322 		ipv6_addresses[i].ipv6.mcast_hop_limit = CONFIG_NET_INITIAL_MCAST_HOP_LIMIT;
3323 		ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;
3324 
3325 		net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
3326 	}
3327 }
3328 #endif /* CONFIG_NET_NATIVE_IPV6 */
3329 #else /* CONFIG_NET_IPV6 */
3330 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
3331 						   struct net_if **iface)
3332 {
3333 	ARG_UNUSED(addr);
3334 	ARG_UNUSED(iface);
3335 
3336 	return NULL;
3337 }
3338 
3339 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
3340 					    struct net_if **ret)
3341 {
3342 	ARG_UNUSED(addr);
3343 	ARG_UNUSED(ret);
3344 
3345 	return NULL;
3346 }
3347 
3348 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
3349 					     struct net_if **iface)
3350 {
3351 	ARG_UNUSED(state);
3352 	ARG_UNUSED(iface);
3353 
3354 	return NULL;
3355 }
3356 #endif /* CONFIG_NET_IPV6 */
3357 
3358 #if !defined(CONFIG_NET_NATIVE_IPV6)
3359 #define join_mcast_allnodes(...)
3360 #define leave_mcast_all(...)
3361 #define clear_joined_ipv6_mcast_groups(...)
3362 #define iface_ipv6_start(...)
3363 #define iface_ipv6_stop(...)
3364 #define iface_ipv6_init(...)
3365 #endif /* !CONFIG_NET_NATIVE_IPV6 */
3366 
3367 #if defined(CONFIG_NET_IPV4)
3368 int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
3369 {
3370 	int ret = 0;
3371 
3372 	net_if_lock(iface);
3373 
3374 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3375 		ret = -ENOTSUP;
3376 		goto out;
3377 	}
3378 
3379 	if (iface->config.ip.ipv4) {
3380 		if (ipv4) {
3381 			*ipv4 = iface->config.ip.ipv4;
3382 		}
3383 
3384 		goto out;
3385 	}
3386 
3387 	k_mutex_lock(&lock, K_FOREVER);
3388 
3389 	ARRAY_FOR_EACH(ipv4_addresses, i) {
3390 		if (ipv4_addresses[i].iface) {
3391 			continue;
3392 		}
3393 
3394 		iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
3395 		ipv4_addresses[i].iface = iface;
3396 
3397 		if (ipv4) {
3398 			*ipv4 = &ipv4_addresses[i].ipv4;
3399 		}
3400 
3401 		k_mutex_unlock(&lock);
3402 		goto out;
3403 	}
3404 
3405 	k_mutex_unlock(&lock);
3406 
3407 	ret = -ESRCH;
3408 out:
3409 	net_if_unlock(iface);
3410 
3411 	return ret;
3412 }
3413 
3414 int net_if_config_ipv4_put(struct net_if *iface)
3415 {
3416 	int ret = 0;
3417 
3418 	net_if_lock(iface);
3419 
3420 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3421 		ret = -ENOTSUP;
3422 		goto out;
3423 	}
3424 
3425 	if (!iface->config.ip.ipv4) {
3426 		ret = -EALREADY;
3427 		goto out;
3428 	}
3429 
3430 	k_mutex_lock(&lock, K_FOREVER);
3431 
3432 	ARRAY_FOR_EACH(ipv4_addresses, i) {
3433 		if (ipv4_addresses[i].iface != iface) {
3434 			continue;
3435 		}
3436 
3437 		iface->config.ip.ipv4 = NULL;
3438 		ipv4_addresses[i].iface = NULL;
3439 
3440 		k_mutex_unlock(&lock);
3441 		goto out;
3442 	}
3443 
3444 	k_mutex_unlock(&lock);
3445 
3446 	ret = -ESRCH;
3447 out:
3448 	net_if_unlock(iface);
3449 
3450 	return ret;
3451 }
3452 
3453 bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
3454 			       const struct in_addr *addr)
3455 {
3456 	bool ret = false;
3457 	struct net_if_ipv4 *ipv4;
3458 	uint32_t subnet;
3459 
3460 	net_if_lock(iface);
3461 
3462 	ipv4 = iface->config.ip.ipv4;
3463 	if (!ipv4) {
3464 		goto out;
3465 	}
3466 
3467 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3468 		if (!ipv4->unicast[i].ipv4.is_used ||
3469 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3470 			continue;
3471 		}
3472 
3473 		subnet = UNALIGNED_GET(&addr->s_addr) &
3474 			 ipv4->unicast[i].netmask.s_addr;
3475 
3476 		if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3477 		     ipv4->unicast[i].netmask.s_addr) == subnet) {
3478 			ret = true;
3479 			goto out;
3480 		}
3481 	}
3482 
3483 out:
3484 	net_if_unlock(iface);
3485 
3486 	return ret;
3487 }
3488 
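/* Worked example (illustrative, using documentation addresses): if the
 * interface has 192.0.2.1 with netmask 255.255.255.0, then for
 * addr = 192.0.2.10 both sides of the comparison above mask down to
 * 192.0.2.0 and the function returns true, whereas 198.51.100.10 masks to
 * 198.51.100.0 and does not match.
 */
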
3489 static bool ipv4_is_broadcast_address(struct net_if *iface,
3490 				      const struct in_addr *addr)
3491 {
3492 	struct net_if_ipv4 *ipv4;
3493 	bool ret = false;
3494 	struct in_addr bcast;
3495 
3496 	net_if_lock(iface);
3497 
3498 	ipv4 = iface->config.ip.ipv4;
3499 	if (!ipv4) {
3500 		ret = false;
3501 		goto out;
3502 	}
3503 
3504 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3505 		if (!ipv4->unicast[i].ipv4.is_used ||
3506 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3507 			continue;
3508 		}
3509 
3510 		bcast.s_addr = ipv4->unicast[i].ipv4.address.in_addr.s_addr |
3511 			       ~ipv4->unicast[i].netmask.s_addr;
3512 
3513 		if (bcast.s_addr == UNALIGNED_GET(&addr->s_addr)) {
3514 			ret = true;
3515 			goto out;
3516 		}
3517 	}
3518 
3519 out:
3520 	net_if_unlock(iface);
3521 	return ret;
3522 }
3523 
3524 bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
3525 			       const struct in_addr *addr)
3526 {
3527 	bool ret = false;
3528 
3529 	if (iface) {
3530 		ret = ipv4_is_broadcast_address(iface, addr);
3531 		goto out;
3532 	}
3533 
3534 	STRUCT_SECTION_FOREACH(net_if, one_iface) {
3535 		ret = ipv4_is_broadcast_address(one_iface, addr);
3536 		if (ret) {
3537 			goto out;
3538 		}
3539 	}
3540 
3541 out:
3542 	return ret;
3543 }
3544 
3545 struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
3546 {
3547 	struct net_if *selected = NULL;
3548 	const struct in_addr *src;
3549 
3550 	src = net_if_ipv4_select_src_addr(NULL, dst);
3551 	if (src != net_ipv4_unspecified_address()) {
3552 		net_if_ipv4_addr_lookup(src, &selected);
3553 	}
3554 
3555 	if (selected == NULL) {
3556 		selected = net_if_get_default();
3557 	}
3558 
3559 	return selected;
3560 }
3561 
3562 static uint8_t get_diff_ipv4(const struct in_addr *src,
3563 			  const struct in_addr *dst)
3564 {
3565 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 4);
3566 }
3567 
3568 static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
3569 {
3570 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3571 	    addr->address.family == AF_INET) {
3572 		return true;
3573 	}
3574 
3575 	return false;
3576 }
3577 
3578 static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
3579 						  const struct in_addr *dst,
3580 						  uint8_t *best_so_far, bool ll)
3581 {
3582 	struct net_if_ipv4 *ipv4;
3583 	struct in_addr *src = NULL;
3584 	uint8_t len;
3585 
3586 	net_if_lock(iface);
3587 
3588 	ipv4 = iface->config.ip.ipv4;
3589 	if (!ipv4) {
3590 		goto out;
3591 	}
3592 
3593 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3594 		struct in_addr subnet;
3595 
3596 		if (!is_proper_ipv4_address(&ipv4->unicast[i].ipv4)) {
3597 			continue;
3598 		}
3599 
3600 		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].ipv4.address.in_addr) != ll) {
3601 			continue;
3602 		}
3603 
3604 		subnet.s_addr = ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3605 				ipv4->unicast[i].netmask.s_addr;
3606 		len = get_diff_ipv4(dst, &subnet);
3607 		if (len >= *best_so_far) {
3608 			*best_so_far = len;
3609 			src = &ipv4->unicast[i].ipv4.address.in_addr;
3610 		}
3611 	}
3612 
3613 out:
3614 	net_if_unlock(iface);
3615 
3616 	return src;
3617 }
3618 
3619 static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
3620 					enum net_addr_state addr_state, bool ll)
3621 {
3622 	struct in_addr *addr = NULL;
3623 	struct net_if_ipv4 *ipv4;
3624 
3625 	if (!iface) {
3626 		return NULL;
3627 	}
3628 
3629 	net_if_lock(iface);
3630 
3631 	ipv4 = iface->config.ip.ipv4;
3632 	if (!ipv4) {
3633 		goto out;
3634 	}
3635 
3636 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3637 		if (!ipv4->unicast[i].ipv4.is_used ||
3638 		    (addr_state != NET_ADDR_ANY_STATE &&
3639 		     ipv4->unicast[i].ipv4.addr_state != addr_state) ||
3640 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3641 			continue;
3642 		}
3643 
3644 		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].ipv4.address.in_addr)) {
3645 			if (!ll) {
3646 				continue;
3647 			}
3648 		} else {
3649 			if (ll) {
3650 				continue;
3651 			}
3652 		}
3653 
3654 		addr = &ipv4->unicast[i].ipv4.address.in_addr;
3655 		goto out;
3656 	}
3657 
3658 out:
3659 	net_if_unlock(iface);
3660 
3661 	return addr;
3662 }
3663 
3664 struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
3665 				   enum net_addr_state addr_state)
3666 {
3667 	return if_ipv4_get_addr(iface, addr_state, true);
3668 }
3669 
3670 struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
3671 					    enum net_addr_state addr_state)
3672 {
3673 	return if_ipv4_get_addr(iface, addr_state, false);
3674 }
3675 
3676 const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
3677 						  const struct in_addr *dst)
3678 {
3679 	const struct in_addr *src = NULL;
3680 	uint8_t best_match = 0U;
3681 
3682 	if (dst == NULL) {
3683 		return NULL;
3684 	}
3685 
3686 	if (!net_ipv4_is_ll_addr(dst)) {
3687 
3688 		/* If caller has supplied interface, then use that */
3689 		if (dst_iface) {
3690 			src = net_if_ipv4_get_best_match(dst_iface, dst,
3691 							 &best_match, false);
3692 		} else {
3693 			STRUCT_SECTION_FOREACH(net_if, iface) {
3694 				struct in_addr *addr;
3695 
3696 				addr = net_if_ipv4_get_best_match(iface, dst,
3697 								  &best_match,
3698 								  false);
3699 				if (addr) {
3700 					src = addr;
3701 				}
3702 			}
3703 		}
3704 
3705 	} else {
3706 		if (dst_iface) {
3707 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3708 		} else {
3709 			struct in_addr *addr;
3710 
3711 			STRUCT_SECTION_FOREACH(net_if, iface) {
3712 				addr = net_if_ipv4_get_best_match(iface, dst,
3713 								  &best_match,
3714 								  true);
3715 				if (addr) {
3716 					src = addr;
3717 				}
3718 			}
3719 
3720 			/* Check the default interface again. It will only
3721 			 * be used if it has a valid LL address, and there was
3722 			 * no better match on any other interface.
3723 			 */
3724 			addr = net_if_ipv4_get_best_match(net_if_get_default(),
3725 							  dst, &best_match,
3726 							  true);
3727 			if (addr) {
3728 				src = addr;
3729 			}
3730 		}
3731 	}
3732 
3733 	if (!src) {
3734 		src = net_if_ipv4_get_global_addr(dst_iface,
3735 						  NET_ADDR_PREFERRED);
3736 
3737 		if (IS_ENABLED(CONFIG_NET_IPV4_AUTO) && !src) {
3738 			/* Try to use LL address if there's really no other
3739 			 * address available.
3740 			 */
3741 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3742 		}
3743 
3744 		if (!src) {
3745 			src = net_ipv4_unspecified_address();
3746 		}
3747 	}
3748 
3749 	return src;
3750 }
3751 
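/* Example (illustrative sketch):
 *
 *	const struct in_addr *src;
 *
 *	src = net_if_ipv4_select_src_addr(NULL, &dst);
 *
 * With a NULL interface every interface is considered and the address with
 * the longest subnet match against dst wins; the unspecified address
 * (0.0.0.0) is the final fallback, as can be seen above.
 */
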
3752 /* Internal function to get the first IPv4 address of the interface */
3753 struct net_if_addr *net_if_ipv4_addr_get_first_by_index(int ifindex)
3754 {
3755 	struct net_if *iface = net_if_get_by_index(ifindex);
3756 	struct net_if_addr *ifaddr = NULL;
3757 	struct net_if_ipv4 *ipv4;
3758 
3759 	if (!iface) {
3760 		return NULL;
3761 	}
3762 
3763 	net_if_lock(iface);
3764 
3765 	ipv4 = iface->config.ip.ipv4;
3766 	if (!ipv4) {
3767 		goto out;
3768 	}
3769 
3770 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3771 		if (!ipv4->unicast[i].ipv4.is_used ||
3772 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3773 			continue;
3774 		}
3775 
3776 		ifaddr = &ipv4->unicast[i].ipv4;
3777 		break;
3778 	}
3779 
3780 out:
3781 	net_if_unlock(iface);
3782 
3783 	return ifaddr;
3784 }
3785 
3786 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
3787 					    struct net_if **ret)
3788 {
3789 	struct net_if_addr *ifaddr = NULL;
3790 
3791 	STRUCT_SECTION_FOREACH(net_if, iface) {
3792 		struct net_if_ipv4 *ipv4;
3793 
3794 		net_if_lock(iface);
3795 
3796 		ipv4 = iface->config.ip.ipv4;
3797 		if (!ipv4) {
3798 			net_if_unlock(iface);
3799 			continue;
3800 		}
3801 
3802 		ARRAY_FOR_EACH(ipv4->unicast, i) {
3803 			if (!ipv4->unicast[i].ipv4.is_used ||
3804 			    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3805 				continue;
3806 			}
3807 
3808 			if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
3809 			    ipv4->unicast[i].ipv4.address.in_addr.s_addr) {
3810 
3811 				if (ret) {
3812 					*ret = iface;
3813 				}
3814 
3815 				ifaddr = &ipv4->unicast[i].ipv4;
3816 				net_if_unlock(iface);
3817 				goto out;
3818 			}
3819 		}
3820 
3821 		net_if_unlock(iface);
3822 	}
3823 
3824 out:
3825 	return ifaddr;
3826 }
3827 
3828 int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
3829 {
3830 	struct net_if_addr *if_addr;
3831 	struct net_if *iface = NULL;
3832 
3833 	if_addr = net_if_ipv4_addr_lookup(addr, &iface);
3834 	if (!if_addr) {
3835 		return 0;
3836 	}
3837 
3838 	return net_if_get_by_iface(iface);
3839 }
3840 
3841 #ifdef CONFIG_USERSPACE
3842 static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
3843 					  const struct in_addr *addr)
3844 {
3845 	struct in_addr addr_v4;
3846 
3847 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3848 
3849 	return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
3850 }
3851 #include <zephyr/syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
3852 #endif
3853 
3854 struct in_addr net_if_ipv4_get_netmask_by_addr(struct net_if *iface,
3855 					       const struct in_addr *addr)
3856 {
3857 	struct in_addr netmask = { 0 };
3858 	struct net_if_ipv4 *ipv4;
3859 	uint32_t subnet;
3860 
3861 	net_if_lock(iface);
3862 
3863 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3864 		goto out;
3865 	}
3866 
3867 	ipv4 = iface->config.ip.ipv4;
3868 	if (ipv4 == NULL) {
3869 		goto out;
3870 	}
3871 
3872 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3873 		if (!ipv4->unicast[i].ipv4.is_used ||
3874 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3875 			continue;
3876 		}
3877 
3878 		subnet = UNALIGNED_GET(&addr->s_addr) &
3879 			 ipv4->unicast[i].netmask.s_addr;
3880 
3881 		if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3882 		     ipv4->unicast[i].netmask.s_addr) == subnet) {
3883 			netmask = ipv4->unicast[i].netmask;
3884 			goto out;
3885 		}
3886 	}
3887 
3888 out:
3889 	net_if_unlock(iface);
3890 
3891 	return netmask;
3892 }
3893 
3894 bool net_if_ipv4_set_netmask_by_addr(struct net_if *iface,
3895 				     const struct in_addr *addr,
3896 				     const struct in_addr *netmask)
3897 {
3898 	struct net_if_ipv4 *ipv4;
3899 	uint32_t subnet;
3900 	bool ret = false;
3901 
3902 	net_if_lock(iface);
3903 
3904 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3905 		goto out;
3906 	}
3907 
3908 	ipv4 = iface->config.ip.ipv4;
3909 	if (ipv4 == NULL) {
3910 		goto out;
3911 	}
3912 
3913 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3914 		if (!ipv4->unicast[i].ipv4.is_used ||
3915 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3916 			continue;
3917 		}
3918 
3919 		subnet = UNALIGNED_GET(&addr->s_addr) &
3920 			 ipv4->unicast[i].netmask.s_addr;
3921 
3922 		if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3923 		     ipv4->unicast[i].netmask.s_addr) == subnet) {
3924 			ipv4->unicast[i].netmask = *netmask;
3925 			ret = true;
3926 			goto out;
3927 		}
3928 	}
3929 
3930 out:
3931 	net_if_unlock(iface);
3932 
3933 	return ret;
3934 }
3935 
3936 /* Using this function is problematic: if multiple addresses are
3937  * configured, it is ambiguous which one to return. As a heuristic,
3938  * the first address found is returned. Please use
3939  * net_if_ipv4_get_netmask_by_addr() instead.
3940  */
3941 struct in_addr net_if_ipv4_get_netmask(struct net_if *iface)
3942 {
3943 	struct in_addr netmask = { 0 };
3944 	struct net_if_ipv4 *ipv4;
3945 
3946 	net_if_lock(iface);
3947 
3948 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3949 		goto out;
3950 	}
3951 
3952 	ipv4 = iface->config.ip.ipv4;
3953 	if (ipv4 == NULL) {
3954 		goto out;
3955 	}
3956 
3957 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3958 		if (!ipv4->unicast[i].ipv4.is_used ||
3959 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3960 			continue;
3961 		}
3962 
3963 		netmask = iface->config.ip.ipv4->unicast[i].netmask;
3964 		break;
3965 	}
3966 
3967 out:
3968 	net_if_unlock(iface);
3969 
3970 	return netmask;
3971 }
3972 
3973 /* Using this function is problematic: if multiple addresses are
3974  * configured, it is ambiguous which netmask to set. As a heuristic,
3975  * set the netmask of the first address found. Please use
3976  * net_if_ipv4_set_netmask_by_addr() instead.
3977  */
3978 static void net_if_ipv4_set_netmask_deprecated(struct net_if *iface,
3979 					       const struct in_addr *netmask)
3980 {
3981 	struct net_if_ipv4 *ipv4;
3982 
3983 	net_if_lock(iface);
3984 
3985 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3986 		goto out;
3987 	}
3988 
3989 	ipv4 = iface->config.ip.ipv4;
3990 	if (ipv4 == NULL) {
3991 		goto out;
3992 	}
3993 
3994 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3995 		if (!ipv4->unicast[i].ipv4.is_used ||
3996 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3997 			continue;
3998 		}
3999 
4000 		net_ipaddr_copy(&ipv4->unicast[i].netmask, netmask);
4001 		break;
4002 	}
4003 
4004 out:
4005 	net_if_unlock(iface);
4006 }
4007 
4008 void net_if_ipv4_set_netmask(struct net_if *iface,
4009 			     const struct in_addr *netmask)
4010 {
4011 	net_if_ipv4_set_netmask_deprecated(iface, netmask);
4012 }
4013 
4014 bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
4015 					     const struct in_addr *netmask)
4016 {
4017 	struct net_if *iface;
4018 
4019 	iface = net_if_get_by_index(index);
4020 	if (!iface) {
4021 		return false;
4022 	}
4023 
4024 	net_if_ipv4_set_netmask_deprecated(iface, netmask);
4025 
4026 	return true;
4027 }
4028 
4029 bool z_impl_net_if_ipv4_set_netmask_by_addr_by_index(int index,
4030 						     const struct in_addr *addr,
4031 						     const struct in_addr *netmask)
4032 {
4033 	struct net_if *iface;
4034 
4035 	iface = net_if_get_by_index(index);
4036 	if (!iface) {
4037 		return false;
4038 	}
4039 
4040 	net_if_ipv4_set_netmask_by_addr(iface, addr, netmask);
4041 
4042 	return true;
4043 }
4044 
4045 #ifdef CONFIG_USERSPACE
4046 bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
4047 					     const struct in_addr *netmask)
4048 {
4049 	struct in_addr netmask_addr;
4050 	struct net_if *iface;
4051 
4052 	iface = z_vrfy_net_if_get_by_index(index);
4053 	if (!iface) {
4054 		return false;
4055 	}
4056 
4057 	K_OOPS(k_usermode_from_copy(&netmask_addr, (void *)netmask,
4058 				sizeof(netmask_addr)));
4059 
4060 	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
4061 }
4062 
4063 #include <zephyr/syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
4064 
4065 bool z_vrfy_net_if_ipv4_set_netmask_by_addr_by_index(int index,
4066 						     const struct in_addr *addr,
4067 						     const struct in_addr *netmask)
4068 {
4069 	struct in_addr ipv4_addr, netmask_addr;
4070 	struct net_if *iface;
4071 
4072 	iface = z_vrfy_net_if_get_by_index(index);
4073 	if (!iface) {
4074 		return false;
4075 	}
4076 
4077 	K_OOPS(k_usermode_from_copy(&ipv4_addr, (void *)addr,
4078 				    sizeof(ipv4_addr)));
4079 	K_OOPS(k_usermode_from_copy(&netmask_addr, (void *)netmask,
4080 				    sizeof(netmask_addr)));
4081 
4082 	return z_impl_net_if_ipv4_set_netmask_by_addr_by_index(index,
4083 							       &ipv4_addr,
4084 							       &netmask_addr);
4085 }
4086 
4087 #include <zephyr/syscalls/net_if_ipv4_set_netmask_by_addr_by_index_mrsh.c>
4088 #endif /* CONFIG_USERSPACE */
4089 
4090 struct in_addr net_if_ipv4_get_gw(struct net_if *iface)
4091 {
4092 	struct in_addr gw = { 0 };
4093 
4094 	net_if_lock(iface);
4095 
4096 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4097 		goto out;
4098 	}
4099 
4100 	if (!iface->config.ip.ipv4) {
4101 		goto out;
4102 	}
4103 
4104 	gw = iface->config.ip.ipv4->gw;
4105 out:
4106 	net_if_unlock(iface);
4107 
4108 	return gw;
4109 }
4110 
4111 void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
4112 {
4113 	net_if_lock(iface);
4114 
4115 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4116 		goto out;
4117 	}
4118 
4119 	if (!iface->config.ip.ipv4) {
4120 		goto out;
4121 	}
4122 
4123 	net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
4124 out:
4125 	net_if_unlock(iface);
4126 }
4127 
4128 bool z_impl_net_if_ipv4_set_gw_by_index(int index,
4129 					const struct in_addr *gw)
4130 {
4131 	struct net_if *iface;
4132 
4133 	iface = net_if_get_by_index(index);
4134 	if (!iface) {
4135 		return false;
4136 	}
4137 
4138 	net_if_ipv4_set_gw(iface, gw);
4139 
4140 	return true;
4141 }
4142 
4143 #ifdef CONFIG_USERSPACE
4144 bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
4145 					const struct in_addr *gw)
4146 {
4147 	struct in_addr gw_addr;
4148 	struct net_if *iface;
4149 
4150 	iface = z_vrfy_net_if_get_by_index(index);
4151 	if (!iface) {
4152 		return false;
4153 	}
4154 
4155 	K_OOPS(k_usermode_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));
4156 
4157 	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
4158 }
4159 
4160 #include <zephyr/syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
4161 #endif /* CONFIG_USERSPACE */
4162 
4163 static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
4164 					  struct in_addr *addr)
4165 {
4166 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4167 
4168 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4169 		if (!ipv4->unicast[i].ipv4.is_used) {
4170 			continue;
4171 		}
4172 
4173 		if (net_ipv4_addr_cmp(addr,
4174 				      &ipv4->unicast[i].ipv4.address.in_addr)) {
4175 			return &ipv4->unicast[i].ipv4;
4176 		}
4177 	}
4178 
4179 	return NULL;
4180 }
4181 
4182 #if defined(CONFIG_NET_IPV4_ACD)
4183 void net_if_ipv4_acd_succeeded(struct net_if *iface, struct net_if_addr *ifaddr)
4184 {
4185 	net_if_lock(iface);
4186 
4187 	NET_DBG("ACD succeeded for %s at interface %d",
4188 		net_sprint_ipv4_addr(&ifaddr->address.in_addr),
4189 		ifaddr->ifindex);
4190 
4191 	ifaddr->addr_state = NET_ADDR_PREFERRED;
4192 
4193 	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ACD_SUCCEED, iface,
4194 					&ifaddr->address.in_addr,
4195 					sizeof(struct in_addr));
4196 
4197 	net_if_unlock(iface);
4198 }
4199 
4200 void net_if_ipv4_acd_failed(struct net_if *iface, struct net_if_addr *ifaddr)
4201 {
4202 	net_if_lock(iface);
4203 
4204 	NET_DBG("ACD failed for %s at interface %d",
4205 		net_sprint_ipv4_addr(&ifaddr->address.in_addr),
4206 		ifaddr->ifindex);
4207 
4208 	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ACD_FAILED, iface,
4209 					&ifaddr->address.in_addr,
4210 					sizeof(struct in_addr));
4211 
4212 	net_if_ipv4_addr_rm(iface, &ifaddr->address.in_addr);
4213 
4214 	net_if_unlock(iface);
4215 }
4216 
4217 void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
4218 {
4219 	ifaddr->addr_state = NET_ADDR_TENTATIVE;
4220 
4221 	if (net_if_is_up(iface)) {
4222 		NET_DBG("Interface %p ll addr %s tentative IPv4 addr %s",
4223 			iface,
4224 			net_sprint_ll_addr(net_if_get_link_addr(iface)->addr,
4225 					   net_if_get_link_addr(iface)->len),
4226 			net_sprint_ipv4_addr(&ifaddr->address.in_addr));
4227 
4228 		if (net_ipv4_acd_start(iface, ifaddr) != 0) {
4229 			NET_DBG("Failed to start ACD for %s on iface %p.",
4230 				net_sprint_ipv4_addr(&ifaddr->address.in_addr),
4231 				iface);
4232 
4233 			/* Just act as if no conflict was detected. */
4234 			net_if_ipv4_acd_succeeded(iface, ifaddr);
4235 		}
4236 	} else {
4237 		NET_DBG("Interface %p is down, starting ACD for %s later.",
4238 			iface, net_sprint_ipv4_addr(&ifaddr->address.in_addr));
4239 	}
4240 }
4241 
4242 void net_if_start_acd(struct net_if *iface)
4243 {
4244 	struct net_if_ipv4 *ipv4;
4245 	int ret;
4246 
4247 	net_if_lock(iface);
4248 
4249 	NET_DBG("Starting ACD for iface %p", iface);
4250 
4251 	ret = net_if_config_ipv4_get(iface, &ipv4);
4252 	if (ret < 0) {
4253 		if (ret != -ENOTSUP) {
4254 			NET_WARN("Cannot do ACD, IPv4 config is not valid.");
4255 		}
4256 
4257 		goto out;
4258 	}
4259 
4260 	if (!ipv4) {
4261 		goto out;
4262 	}
4263 
4264 	ipv4->conflict_cnt = 0;
4265 
4266 	/* Start ACD for all the addresses that were added earlier when
4267 	 * the interface was down.
4268 	 */
4269 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4270 		if (!ipv4->unicast[i].ipv4.is_used ||
4271 		    ipv4->unicast[i].ipv4.address.family != AF_INET ||
4272 		    net_ipv4_is_addr_loopback(
4273 			    &ipv4->unicast[i].ipv4.address.in_addr)) {
4274 			continue;
4275 		}
4276 
4277 		net_if_ipv4_start_acd(iface, &ipv4->unicast[i].ipv4);
4278 	}
4279 
4280 out:
4281 	net_if_unlock(iface);
4282 }
4283 #else
4284 void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
4285 {
4286 	ARG_UNUSED(iface);
4287 
4288 	ifaddr->addr_state = NET_ADDR_PREFERRED;
4289 }
4290 
4291 #define net_if_start_acd(...)
4292 #endif /* CONFIG_NET_IPV4_ACD */
4293 
4294 struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
4295 					 struct in_addr *addr,
4296 					 enum net_addr_type addr_type,
4297 					 uint32_t vlifetime)
4298 {
4299 	uint32_t default_netmask = UINT32_MAX << (32 - CONFIG_NET_IPV4_DEFAULT_NETMASK);
4300 	struct net_if_addr *ifaddr = NULL;
4301 	struct net_if_addr_ipv4 *cur;
4302 	struct net_if_ipv4 *ipv4;
4303 	int idx;
4304 
4305 	net_if_lock(iface);
4306 
4307 	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
4308 		goto out;
4309 	}
4310 
4311 	ifaddr = ipv4_addr_find(iface, addr);
4312 	if (ifaddr) {
4313 		/* TODO: should set addr_type/vlifetime */
4314 		goto out;
4315 	}
4316 
4317 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4318 		cur = &ipv4->unicast[i];
4319 
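		/* A DHCP-assigned address may take over a slot that currently
		 * holds an overridable (e.g. auto-configured) address.
		 */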
4320 		if (addr_type == NET_ADDR_DHCP
4321 		    && cur->ipv4.addr_type == NET_ADDR_OVERRIDABLE) {
4322 			ifaddr = &cur->ipv4;
4323 			idx = i;
4324 			break;
4325 		}
4326 
4327 		if (!ipv4->unicast[i].ipv4.is_used) {
4328 			ifaddr = &cur->ipv4;
4329 			idx = i;
4330 			break;
4331 		}
4332 	}
4333 
4334 	if (ifaddr) {
4335 		ifaddr->is_used = true;
4336 		ifaddr->address.family = AF_INET;
4337 		ifaddr->address.in_addr.s4_addr32[0] =
4338 						addr->s4_addr32[0];
4339 		ifaddr->addr_type = addr_type;
4340 		ifaddr->atomic_ref = ATOMIC_INIT(1);
4341 
4342 		/* Caller has to take care of timers and their expiry */
4343 		if (vlifetime) {
4344 			ifaddr->is_infinite = false;
4345 		} else {
4346 			ifaddr->is_infinite = true;
4347 		}
4348 
4349 		/**
4350 		 *  TODO: Handle properly PREFERRED/DEPRECATED state when
4351 		 *  address in use, expired and renewal state.
4352 		 */
4353 
4354 		NET_DBG("[%d] interface %d (%p) address %s type %s added",
4355 			idx, net_if_get_by_iface(iface), iface,
4356 			net_sprint_ipv4_addr(addr),
4357 			net_addr_type2str(addr_type));
4358 
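		/* Conflicts cannot happen on point-to-point links or for
		 * loopback addresses, so mark those preferred right away.
		 */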
4359 		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
4360 		    !net_ipv4_is_addr_loopback(addr)) {
4361 			net_if_ipv4_start_acd(iface, ifaddr);
4362 		} else {
4363 			ifaddr->addr_state = NET_ADDR_PREFERRED;
4364 		}
4365 
4366 		cur->netmask.s_addr = htonl(default_netmask);
4367 
4368 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
4369 						&ifaddr->address.in_addr,
4370 						sizeof(struct in_addr));
4371 		goto out;
4372 	}
4373 
4374 out:
4375 	net_if_unlock(iface);
4376 
4377 	return ifaddr;
4378 }
4379 
4380 bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
4381 {
4382 	struct net_if_ipv4 *ipv4;
4383 	bool result = true;
4384 	int ret;
4385 
4386 	if (iface == NULL || addr == NULL) {
4387 		return false;
4388 	}
4389 
4390 	net_if_lock(iface);
4391 
4392 	ipv4 = iface->config.ip.ipv4;
4393 	if (!ipv4) {
4394 		result = false;
4395 		goto out;
4396 	}
4397 
4398 	ret = net_if_addr_unref(iface, AF_INET, addr);
4399 	if (ret > 0) {
4400 		NET_DBG("Address %s still in use (ref %d)",
4401 			net_sprint_ipv4_addr(addr), ret);
4402 		result = false;
4403 		goto out;
4404 	} else if (ret < 0) {
4405 		NET_DBG("Address %s not found (%d)",
4406 			net_sprint_ipv4_addr(addr), ret);
4407 	}
4408 
4409 out:
4410 	net_if_unlock(iface);
4411 
4412 	return result;
4413 }
4414 
4415 bool z_impl_net_if_ipv4_addr_add_by_index(int index,
4416 					  struct in_addr *addr,
4417 					  enum net_addr_type addr_type,
4418 					  uint32_t vlifetime)
4419 {
4420 	struct net_if *iface;
4421 	struct net_if_addr *if_addr;
4422 
4423 	iface = net_if_get_by_index(index);
4424 	if (!iface) {
4425 		return false;
4426 	}
4427 
4428 	if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
4429 	return if_addr ? true : false;
4430 }
4431 
4432 #ifdef CONFIG_USERSPACE
4433 bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
4434 					  struct in_addr *addr,
4435 					  enum net_addr_type addr_type,
4436 					  uint32_t vlifetime)
4437 {
4438 	struct in_addr addr_v4;
4439 	struct net_if *iface;
4440 
4441 	iface = z_vrfy_net_if_get_by_index(index);
4442 	if (!iface) {
4443 		return false;
4444 	}
4445 
4446 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
4447 
4448 	return z_impl_net_if_ipv4_addr_add_by_index(index,
4449 						    &addr_v4,
4450 						    addr_type,
4451 						    vlifetime);
4452 }
4453 
4454 #include <zephyr/syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
4455 #endif /* CONFIG_USERSPACE */
4456 
4457 bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
4458 					 const struct in_addr *addr)
4459 {
4460 	struct net_if *iface;
4461 
4462 	iface = net_if_get_by_index(index);
4463 	if (!iface) {
4464 		return false;
4465 	}
4466 
4467 	return net_if_ipv4_addr_rm(iface, addr);
4468 }
4469 
4470 #ifdef CONFIG_USERSPACE
4471 bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
4472 					 const struct in_addr *addr)
4473 {
4474 	struct in_addr addr_v4;
4475 	struct net_if *iface;
4476 
4477 	iface = z_vrfy_net_if_get_by_index(index);
4478 	if (!iface) {
4479 		return false;
4480 	}
4481 
4482 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
4483 
4484 	return (uint32_t)z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
4485 }
4486 
4487 #include <zephyr/syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
4488 #endif /* CONFIG_USERSPACE */
4489 
4490 void net_if_ipv4_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
4491 			      void *user_data)
4492 {
4493 	struct net_if_ipv4 *ipv4;
4494 
4495 	if (iface == NULL) {
4496 		return;
4497 	}
4498 
4499 	net_if_lock(iface);
4500 
4501 	ipv4 = iface->config.ip.ipv4;
4502 	if (ipv4 == NULL) {
4503 		goto out;
4504 	}
4505 
4506 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4507 		struct net_if_addr *if_addr = &ipv4->unicast[i].ipv4;
4508 
4509 		if (!if_addr->is_used) {
4510 			continue;
4511 		}
4512 
4513 		cb(iface, if_addr, user_data);
4514 	}
4515 
4516 out:
4517 	net_if_unlock(iface);
4518 }
4519 
4520 static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
4521 						 bool is_used,
4522 						 const struct in_addr *addr)
4523 {
4524 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4525 
4526 	if (!ipv4) {
4527 		return NULL;
4528 	}
4529 
4530 	ARRAY_FOR_EACH(ipv4->mcast, i) {
4531 		if ((is_used && !ipv4->mcast[i].is_used) ||
4532 		    (!is_used && ipv4->mcast[i].is_used)) {
4533 			continue;
4534 		}
4535 
4536 		if (addr) {
4537 			if (!net_ipv4_addr_cmp(&ipv4->mcast[i].address.in_addr,
4538 					       addr)) {
4539 				continue;
4540 			}
4541 		}
4542 
4543 		return &ipv4->mcast[i];
4544 	}
4545 
4546 	return NULL;
4547 }
4548 struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
4549 						const struct in_addr *addr)
4550 {
4551 	struct net_if_mcast_addr *maddr = NULL;
4552 
4553 	net_if_lock(iface);
4554 
4555 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4556 		goto out;
4557 	}
4558 
4559 	if (!net_ipv4_is_addr_mcast(addr)) {
4560 		NET_DBG("Address %s is not a multicast address.",
4561 			net_sprint_ipv4_addr(addr));
4562 		goto out;
4563 	}
4564 
4565 	maddr = ipv4_maddr_find(iface, false, NULL);
4566 	if (maddr) {
4567 		maddr->is_used = true;
4568 		maddr->address.family = AF_INET;
4569 		maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];
4570 
4571 		NET_DBG("interface %d (%p) address %s added",
4572 			net_if_get_by_iface(iface), iface,
4573 			net_sprint_ipv4_addr(addr));
4574 
4575 		net_mgmt_event_notify_with_info(
4576 			NET_EVENT_IPV4_MADDR_ADD, iface,
4577 			&maddr->address.in_addr,
4578 			sizeof(struct in_addr));
4579 	}
4580 
4581 out:
4582 	net_if_unlock(iface);
4583 
4584 	return maddr;
4585 }
4586 
4587 bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
4588 {
4589 	struct net_if_mcast_addr *maddr;
4590 	bool ret = false;
4591 
4592 	net_if_lock(iface);
4593 
4594 	maddr = ipv4_maddr_find(iface, true, addr);
4595 	if (maddr) {
4596 		maddr->is_used = false;
4597 
4598 		NET_DBG("interface %d (%p) address %s removed",
4599 			net_if_get_by_iface(iface), iface,
4600 			net_sprint_ipv4_addr(addr));
4601 
4602 		net_mgmt_event_notify_with_info(
4603 			NET_EVENT_IPV4_MADDR_DEL, iface,
4604 			&maddr->address.in_addr,
4605 			sizeof(struct in_addr));
4606 
4607 		ret = true;
4608 	}
4609 
4610 	net_if_unlock(iface);
4611 
4612 	return ret;
4613 }
4614 
4615 void net_if_ipv4_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
4616 			       void *user_data)
4617 {
4618 	struct net_if_ipv4 *ipv4;
4619 
4620 	if (iface == NULL || cb == NULL) {
4621 		return;
4622 	}
4623 
4624 	net_if_lock(iface);
4625 
4626 	ipv4 = iface->config.ip.ipv4;
4627 	if (!ipv4) {
4628 		goto out;
4629 	}
4630 
4631 	for (int i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
4632 		if (!ipv4->mcast[i].is_used) {
4633 			continue;
4634 		}
4635 
4636 		cb(iface, &ipv4->mcast[i], user_data);
4637 	}
4638 
4639 out:
4640 	net_if_unlock(iface);
4641 }
4642 
4643 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
4644 						   struct net_if **ret)
4645 {
4646 	struct net_if_mcast_addr *addr = NULL;
4647 
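	/* If the caller passed an interface hint in *ret, restrict the
	 * lookup to that interface only.
	 */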
4648 	STRUCT_SECTION_FOREACH(net_if, iface) {
4649 		if (ret && *ret && iface != *ret) {
4650 			continue;
4651 		}
4652 
4653 		net_if_lock(iface);
4654 
4655 		addr = ipv4_maddr_find(iface, true, maddr);
4656 		if (addr) {
4657 			if (ret) {
4658 				*ret = iface;
4659 			}
4660 
4661 			net_if_unlock(iface);
4662 			goto out;
4663 		}
4664 
4665 		net_if_unlock(iface);
4666 	}
4667 
4668 out:
4669 	return addr;
4670 }
4671 
4672 void net_if_ipv4_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
4673 {
4674 	if (iface == NULL || addr == NULL) {
4675 		return;
4676 	}
4677 
4678 	net_if_lock(iface);
4679 	addr->is_joined = false;
4680 	net_if_unlock(iface);
4681 }
4682 
4683 void net_if_ipv4_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
4684 {
4685 	if (iface == NULL || addr == NULL) {
4686 		return;
4687 	}
4688 
4689 	net_if_lock(iface);
4690 	addr->is_joined = true;
4691 	net_if_unlock(iface);
4692 }
4693 
4694 #if defined(CONFIG_NET_NATIVE_IPV4)
4695 uint8_t net_if_ipv4_get_ttl(struct net_if *iface)
4696 {
4697 	int ret = 0;
4698 
4699 	net_if_lock(iface);
4700 
4701 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4702 		goto out;
4703 	}
4704 
4705 	if (!iface->config.ip.ipv4) {
4706 		goto out;
4707 	}
4708 
4709 	ret = iface->config.ip.ipv4->ttl;
4710 out:
4711 	net_if_unlock(iface);
4712 
4713 	return ret;
4714 }
4715 
4716 void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl)
4717 {
4718 	net_if_lock(iface);
4719 
4720 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4721 		goto out;
4722 	}
4723 
4724 	if (!iface->config.ip.ipv4) {
4725 		goto out;
4726 	}
4727 
4728 	iface->config.ip.ipv4->ttl = ttl;
4729 out:
4730 	net_if_unlock(iface);
4731 }
4732 
4733 uint8_t net_if_ipv4_get_mcast_ttl(struct net_if *iface)
4734 {
4735 	int ret = 0;
4736 
4737 	net_if_lock(iface);
4738 
4739 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4740 		goto out;
4741 	}
4742 
4743 	if (!iface->config.ip.ipv4) {
4744 		goto out;
4745 	}
4746 
4747 	ret = iface->config.ip.ipv4->mcast_ttl;
4748 out:
4749 	net_if_unlock(iface);
4750 
4751 	return ret;
4752 }
4753 
4754 void net_if_ipv4_set_mcast_ttl(struct net_if *iface, uint8_t ttl)
4755 {
4756 	net_if_lock(iface);
4757 
4758 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4759 		goto out;
4760 	}
4761 
4762 	if (!iface->config.ip.ipv4) {
4763 		goto out;
4764 	}
4765 
4766 	iface->config.ip.ipv4->mcast_ttl = ttl;
4767 out:
4768 	net_if_unlock(iface);
4769 }
4770 
4771 struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
4772 						struct in_addr *addr)
4773 {
4774 	return iface_router_lookup(iface, AF_INET, addr);
4775 }
4776 
4777 struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
4778 						      struct in_addr *addr)
4779 {
4780 	return iface_router_find_default(iface, AF_INET, addr);
4781 }
4782 
4783 struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
4784 					     struct in_addr *addr,
4785 					     bool is_default,
4786 					     uint16_t lifetime)
4787 {
4788 	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
4789 }
4790 
4791 bool net_if_ipv4_router_rm(struct net_if_router *router)
4792 {
4793 	return iface_router_rm(router);
4794 }
4795 
4796 
4797 static void iface_ipv4_init(int if_count)
4798 {
4799 	int i;
4800 
4801 	if (if_count > ARRAY_SIZE(ipv4_addresses)) {
4802 		NET_WARN("You have %zu IPv4 net_if addresses but %d "
4803 			 "network interfaces", ARRAY_SIZE(ipv4_addresses),
4804 			 if_count);
4805 		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
4806 			 "value.");
4807 	}
4808 
4809 	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
4810 		ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
4811 		ipv4_addresses[i].ipv4.mcast_ttl = CONFIG_NET_INITIAL_MCAST_TTL;
4812 	}
4813 }
4814 
4815 static void leave_ipv4_mcast_all(struct net_if *iface)
4816 {
4817 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4818 
4819 	if (!ipv4) {
4820 		return;
4821 	}
4822 
4823 	ARRAY_FOR_EACH(ipv4->mcast, i) {
4824 		if (!ipv4->mcast[i].is_used ||
4825 		    !ipv4->mcast[i].is_joined) {
4826 			continue;
4827 		}
4828 
4829 		net_ipv4_igmp_leave(iface, &ipv4->mcast[i].address.in_addr);
4830 	}
4831 }
4832 
4833 static void iface_ipv4_start(struct net_if *iface)
4834 {
4835 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
4836 		return;
4837 	}
4838 
4839 	if (IS_ENABLED(CONFIG_NET_IPV4_ACD)) {
4840 		net_if_start_acd(iface);
4841 	}
4842 }
4843 
4844 /* To be called when the interface comes up so that all the non-joined
4845  * multicast groups are joined.
4846  */
4847 static void rejoin_ipv4_mcast_groups(struct net_if *iface)
4848 {
4849 	struct net_if_ipv4 *ipv4;
4850 
4851 	net_if_lock(iface);
4852 
4853 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
4854 		goto out;
4855 	}
4856 
4857 	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
4858 		goto out;
4859 	}
4860 
4861 	/* Rejoin any mcast address present on the interface, but marked as not joined. */
4862 	ARRAY_FOR_EACH(ipv4->mcast, i) {
4863 		int ret;
4864 
4865 		if (!ipv4->mcast[i].is_used ||
4866 		    net_if_ipv4_maddr_is_joined(&ipv4->mcast[i])) {
4867 			continue;
4868 		}
4869 
4870 		ret = net_ipv4_igmp_join(iface, &ipv4->mcast[i].address.in_addr, NULL);
4871 		if (ret < 0) {
4872 			NET_ERR("Cannot join mcast address %s for %d (%d)",
4873 				net_sprint_ipv4_addr(&ipv4->mcast[i].address.in_addr),
4874 				net_if_get_by_iface(iface), ret);
4875 		} else {
4876 			NET_DBG("Rejoined mcast address %s for %d",
4877 				net_sprint_ipv4_addr(&ipv4->mcast[i].address.in_addr),
4878 				net_if_get_by_iface(iface));
4879 		}
4880 	}
4881 
4882 out:
4883 	net_if_unlock(iface);
4884 }
4885 
4886 /* To be called when the interface goes operationally down so that the
4887  * multicast groups can be rejoined when it comes back up.
4888  */
4889 static void clear_joined_ipv4_mcast_groups(struct net_if *iface)
4890 {
4891 	struct net_if_ipv4 *ipv4;
4892 
4893 	net_if_lock(iface);
4894 
4895 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
4896 		goto out;
4897 	}
4898 
4899 	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
4900 		goto out;
4901 	}
4902 
4903 	ARRAY_FOR_EACH(ipv4->mcast, i) {
4904 		if (!ipv4->mcast[i].is_used) {
4905 			continue;
4906 		}
4907 
4908 		net_if_ipv4_maddr_leave(iface, &ipv4->mcast[i]);
4909 	}
4910 
4911 out:
4912 	net_if_unlock(iface);
4913 }
4914 
4915 #endif /* CONFIG_NET_NATIVE_IPV4 */
4916 #else  /* CONFIG_NET_IPV4 */
4917 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
4918 						   struct net_if **iface)
4919 {
4920 	ARG_UNUSED(addr);
4921 	ARG_UNUSED(iface);
4922 
4923 	return NULL;
4924 }
4925 
4926 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
4927 					    struct net_if **ret)
4928 {
4929 	ARG_UNUSED(addr);
4930 	ARG_UNUSED(ret);
4931 
4932 	return NULL;
4933 }
4934 
4935 struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
4936 					    enum net_addr_state addr_state)
4937 {
4938 	ARG_UNUSED(addr_state);
4939 	ARG_UNUSED(iface);
4940 
4941 	return NULL;
4942 }
4943 #endif /* CONFIG_NET_IPV4 */
4944 
4945 #if !defined(CONFIG_NET_NATIVE_IPV4)
4946 #define leave_ipv4_mcast_all(...)
4947 #define clear_joined_ipv4_mcast_groups(...)
4948 #define iface_ipv4_init(...)
4949 #define iface_ipv4_start(...)
4950 #endif /* !CONFIG_NET_NATIVE_IPV4 */
4951 
4952 struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
4953 {
4954 	struct net_if *iface = NULL;
4955 
4956 	if (!dst) {
4957 		goto out;
4958 	}
4959 
4960 	if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
4961 		iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
4962 		goto out;
4963 	}
4964 
4965 	if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
4966 		iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
4967 		goto out;
4968 	}
4969 
4970 out:
4971 	if (iface == NULL) {
4972 		iface = net_if_get_default();
4973 	}
4974 
4975 	return iface;
4976 }
4977 
4978 static struct net_if_addr *get_ifaddr(struct net_if *iface,
4979 				      sa_family_t family,
4980 				      const void *addr,
4981 				      unsigned int *mcast_addr_count)
4982 {
4983 	struct net_if_addr *ifaddr = NULL;
4984 
4985 	net_if_lock(iface);
4986 
4987 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
4988 		struct net_if_ipv6 *ipv6 =
4989 			COND_CODE_1(CONFIG_NET_IPV6, (iface->config.ip.ipv6), (NULL));
4990 		struct in6_addr maddr;
4991 		unsigned int maddr_count = 0;
4992 		int found = -1;
4993 
4994 		if (ipv6 == NULL) {
4995 			goto out;
4996 		}
4997 
4998 		net_ipv6_addr_create_solicited_node((struct in6_addr *)addr,
4999 						    &maddr);
5000 
5001 		ARRAY_FOR_EACH(ipv6->unicast, i) {
5002 			struct in6_addr unicast_maddr;
5003 
5004 			if (!ipv6->unicast[i].is_used) {
5005 				continue;
5006 			}
5007 
5008 			/* Count how many of the used unicast addresses map to
5009 			 * the same solicited-node multicast address.
5010 			 */
5011 			net_ipv6_addr_create_solicited_node(
5012 				&ipv6->unicast[i].address.in6_addr,
5013 				&unicast_maddr);
5014 
5015 			if (net_ipv6_addr_cmp(&maddr, &unicast_maddr)) {
5016 				maddr_count++;
5017 			}
5018 
5019 			if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr, addr)) {
5020 				continue;
5021 			}
5022 
5023 			found = i;
5024 		}
5025 
5026 		if (found >= 0) {
5027 			ifaddr = &ipv6->unicast[found];
5028 
5029 			if (mcast_addr_count != NULL) {
5030 				*mcast_addr_count = maddr_count;
5031 			}
5032 		}
5033 
5034 		goto out;
5035 	}
5036 
5037 	if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
5038 		struct net_if_ipv4 *ipv4 =
5039 			COND_CODE_1(CONFIG_NET_IPV4, (iface->config.ip.ipv4), (NULL));
5040 
5041 		if (ipv4 == NULL) {
5042 			goto out;
5043 		}
5044 
5045 		ARRAY_FOR_EACH(ipv4->unicast, i) {
5046 			if (!ipv4->unicast[i].ipv4.is_used) {
5047 				continue;
5048 			}
5049 
5050 			if (!net_ipv4_addr_cmp(&ipv4->unicast[i].ipv4.address.in_addr,
5051 					       addr)) {
5052 				continue;
5053 			}
5054 
5055 			ifaddr = &ipv4->unicast[i].ipv4;
5056 
5057 			goto out;
5058 		}
5059 	}
5060 
5061 out:
5062 	net_if_unlock(iface);
5063 
5064 	return ifaddr;
5065 }
5066 
5067 static void remove_ipv6_ifaddr(struct net_if *iface,
5068 			       struct net_if_addr *ifaddr,
5069 			       unsigned int maddr_count)
5070 {
5071 	struct net_if_ipv6 *ipv6;
5072 
5073 	net_if_lock(iface);
5074 
5075 	ipv6 = COND_CODE_1(CONFIG_NET_IPV6, (iface->config.ip.ipv6), (NULL));
5076 	if (!ipv6) {
5077 		goto out;
5078 	}
5079 
5080 	if (!ifaddr->is_infinite) {
5081 		k_mutex_lock(&lock, K_FOREVER);
5082 
5083 #if defined(CONFIG_NET_NATIVE_IPV6)
5084 		sys_slist_find_and_remove(&active_address_lifetime_timers,
5085 					  &ifaddr->lifetime.node);
5086 
5087 		if (sys_slist_is_empty(&active_address_lifetime_timers)) {
5088 			k_work_cancel_delayable(&address_lifetime_timer);
5089 		}
5090 #endif
5091 		k_mutex_unlock(&lock);
5092 	}
5093 
5094 #if defined(CONFIG_NET_IPV6_DAD)
5095 	if (!net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
5096 		k_mutex_lock(&lock, K_FOREVER);
5097 		sys_slist_find_and_remove(&active_dad_timers,
5098 					  &ifaddr->dad_node);
5099 		k_mutex_unlock(&lock);
5100 	}
5101 #endif
5102 
5103 	if (maddr_count == 1) {
5104 		/* Remove the solicited-node multicast address only if no other
5105 		 * unicast address is also using it
5106 		 */
5107 		struct in6_addr maddr;
5108 
5109 		net_ipv6_addr_create_solicited_node(&ifaddr->address.in6_addr,
5110 						    &maddr);
5111 		net_if_ipv6_maddr_rm(iface, &maddr);
5112 	}
5113 
5114 	/* Using the IPv6 address pointer here can give false
5115 	 * info if someone adds a new IP address into this position
5116 	 * in the address array. This is quite unlikely though.
5117 	 */
5118 	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ADDR_DEL,
5119 					iface,
5120 					&ifaddr->address.in6_addr,
5121 					sizeof(struct in6_addr));
5122 out:
5123 	net_if_unlock(iface);
5124 }
5125 
5126 static void remove_ipv4_ifaddr(struct net_if *iface,
5127 			       struct net_if_addr *ifaddr)
5128 {
5129 	struct net_if_ipv4 *ipv4;
5130 
5131 	net_if_lock(iface);
5132 
5133 	ipv4 = COND_CODE_1(CONFIG_NET_IPV4, (iface->config.ip.ipv4), (NULL));
5134 	if (!ipv4) {
5135 		goto out;
5136 	}
5137 
5138 #if defined(CONFIG_NET_IPV4_ACD)
5139 	net_ipv4_acd_cancel(iface, ifaddr);
5140 #endif
5141 
5142 	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_DEL,
5143 					iface,
5144 					&ifaddr->address.in_addr,
5145 					sizeof(struct in_addr));
5146 out:
5147 	net_if_unlock(iface);
5148 }
5149 
5150 #if defined(CONFIG_NET_IF_LOG_LEVEL)
5151 #define NET_LOG_LEVEL CONFIG_NET_IF_LOG_LEVEL
5152 #else
5153 #define NET_LOG_LEVEL 0
5154 #endif
5155 
5156 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5157 struct net_if_addr *net_if_addr_ref_debug(struct net_if *iface,
5158 					  sa_family_t family,
5159 					  const void *addr,
5160 					  const char *caller,
5161 					  int line)
5162 #else
5163 struct net_if_addr *net_if_addr_ref(struct net_if *iface,
5164 				    sa_family_t family,
5165 				    const void *addr)
5166 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
5167 {
5168 	struct net_if_addr *ifaddr;
5169 	atomic_val_t ref;
5170 
5171 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5172 	char addr_str[IS_ENABLED(CONFIG_NET_IPV6) ?
5173 		      INET6_ADDRSTRLEN : INET_ADDRSTRLEN];
5174 
5175 	__ASSERT(iface, "iface is NULL (%s():%d)", caller, line);
5176 #endif
5177 
5178 	ifaddr = get_ifaddr(iface, family, addr, NULL);
5179 
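	/* Atomically take a new reference, bailing out if the refcount has
	 * already dropped to zero (the address is being removed).
	 */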
5180 	do {
5181 		ref = ifaddr ? atomic_get(&ifaddr->atomic_ref) : 0;
5182 		if (!ref) {
5183 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5184 			NET_ERR("iface %d addr %s (%s():%d)",
5185 				net_if_get_by_iface(iface),
5186 				net_addr_ntop(family,
5187 					      addr,
5188 					      addr_str, sizeof(addr_str)),
5189 				caller, line);
5190 #endif
5191 			return NULL;
5192 		}
5193 	} while (!atomic_cas(&ifaddr->atomic_ref, ref, ref + 1));
5194 
5195 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5196 	NET_DBG("[%d] ifaddr %s state %d ref %ld (%s():%d)",
5197 		net_if_get_by_iface(iface),
5198 		net_addr_ntop(ifaddr->address.family,
5199 			      (void *)&ifaddr->address.in_addr,
5200 			      addr_str, sizeof(addr_str)),
5201 		ifaddr->addr_state,
5202 		ref + 1,
5203 		caller, line);
5204 #endif
5205 
5206 	return ifaddr;
5207 }
5208 
5209 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5210 int net_if_addr_unref_debug(struct net_if *iface,
5211 			    sa_family_t family,
5212 			    const void *addr,
5213 			    const char *caller, int line)
5214 #else
5215 int net_if_addr_unref(struct net_if *iface,
5216 		      sa_family_t family,
5217 		      const void *addr)
5218 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
5219 {
5220 	struct net_if_addr *ifaddr;
5221 	unsigned int maddr_count = 0;
5222 	atomic_val_t ref;
5223 
5224 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5225 	char addr_str[IS_ENABLED(CONFIG_NET_IPV6) ?
5226 		      INET6_ADDRSTRLEN : INET_ADDRSTRLEN];
5227 
5228 	__ASSERT(iface, "iface is NULL (%s():%d)", caller, line);
5229 #endif
5230 
5231 	ifaddr = get_ifaddr(iface, family, addr, &maddr_count);
5232 
5233 	if (!ifaddr) {
5234 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5235 		NET_ERR("iface %d addr %s (%s():%d)",
5236 			net_if_get_by_iface(iface),
5237 			net_addr_ntop(family,
5238 				      addr,
5239 				      addr_str, sizeof(addr_str)),
5240 			caller, line);
5241 #endif
5242 		return -EINVAL;
5243 	}
5244 
5245 	do {
5246 		ref = atomic_get(&ifaddr->atomic_ref);
5247 		if (!ref) {
5248 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5249 			NET_ERR("*** ERROR *** iface %d ifaddr %p "
5250 				"is freed already (%s():%d)",
5251 				net_if_get_by_iface(iface),
5252 				ifaddr,
5253 				caller, line);
5254 #endif
5255 			return -EINVAL;
5256 		}
5257 
5258 	} while (!atomic_cas(&ifaddr->atomic_ref, ref, ref - 1));
5259 
5260 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5261 	NET_DBG("[%d] ifaddr %s state %d ref %ld (%s():%d)",
5262 		net_if_get_by_iface(iface),
5263 		net_addr_ntop(ifaddr->address.family,
5264 			      (void *)&ifaddr->address.in_addr,
5265 			      addr_str, sizeof(addr_str)),
5266 		ifaddr->addr_state,
5267 		ref - 1, caller, line);
5268 #endif
5269 
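	/* Other users still hold references to this address, so keep it and
	 * just report the remaining reference count.
	 */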
5270 	if (ref > 1) {
5271 		return ref - 1;
5272 	}
5273 
5274 	ifaddr->is_used = false;
5275 
5276 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 && addr != NULL) {
5277 		remove_ipv6_ifaddr(iface, ifaddr, maddr_count);
5278 	}
5279 
5280 	if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET && addr != NULL) {
5281 		remove_ipv4_ifaddr(iface, ifaddr);
5282 	}
5283 
5284 	return 0;
5285 }
5286 
5287 enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
5288 {
5289 	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
5290 	    net_if_is_promisc(iface)) {
5291 		struct net_pkt *new_pkt;
5292 
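		/* In promiscuous mode, hand a clone of every received packet
		 * to the promiscuous API before normal L2 processing.
		 */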
5293 		new_pkt = net_pkt_clone(pkt, K_NO_WAIT);
5294 
5295 		if (net_promisc_mode_input(new_pkt) == NET_DROP) {
5296 			net_pkt_unref(new_pkt);
5297 		}
5298 	}
5299 
5300 	return net_if_l2(iface)->recv(iface, pkt);
5301 }
5302 
5303 void net_if_register_link_cb(struct net_if_link_cb *link,
5304 			     net_if_link_callback_t cb)
5305 {
5306 	k_mutex_lock(&lock, K_FOREVER);
5307 
5308 	sys_slist_find_and_remove(&link_callbacks, &link->node);
5309 	sys_slist_prepend(&link_callbacks, &link->node);
5310 
5311 	link->cb = cb;
5312 
5313 	k_mutex_unlock(&lock);
5314 }
5315 
5316 void net_if_unregister_link_cb(struct net_if_link_cb *link)
5317 {
5318 	k_mutex_lock(&lock, K_FOREVER);
5319 
5320 	sys_slist_find_and_remove(&link_callbacks, &link->node);
5321 
5322 	k_mutex_unlock(&lock);
5323 }
5324 
5325 void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
5326 			 int status)
5327 {
5328 	struct net_if_link_cb *link, *tmp;
5329 
5330 	k_mutex_lock(&lock, K_FOREVER);
5331 
5332 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
5333 		link->cb(iface, lladdr, status);
5334 	}
5335 
5336 	k_mutex_unlock(&lock);
5337 }
5338 
5339 static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps,
5340 			      enum net_if_checksum_type chksum_type)
5341 {
5342 #if defined(CONFIG_NET_L2_ETHERNET)
5343 	struct ethernet_config config;
5344 	enum ethernet_config_type config_type;
5345 
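	/* Checksum offload capabilities are only known for Ethernet (and
	 * VLAN-over-Ethernet) interfaces; any other L2 falls back to
	 * software checksums.
	 */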
5346 	if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
5347 		/* For VLANs, figure out the main Ethernet interface and
5348 		 * get the offloading capabilities from it.
5349 		 */
5350 		if (IS_ENABLED(CONFIG_NET_VLAN) && net_eth_is_vlan_interface(iface)) {
5351 			iface = net_eth_get_vlan_main(iface);
5352 			if (iface == NULL) {
5353 				return true;
5354 			}
5355 
5356 			NET_ASSERT(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET));
5357 		} else {
5358 			return true;
5359 		}
5360 	}
5361 
5362 	if (!(net_eth_get_hw_capabilities(iface) & caps)) {
5363 		return true; /* No checksum offload */
5364 	}
5365 
5366 	if (caps == ETHERNET_HW_RX_CHKSUM_OFFLOAD) {
5367 		config_type = ETHERNET_CONFIG_TYPE_RX_CHECKSUM_SUPPORT;
5368 	} else {
5369 		config_type = ETHERNET_CONFIG_TYPE_TX_CHECKSUM_SUPPORT;
5370 	}
5371 
5372 	if (net_eth_get_hw_config(iface, config_type, &config) != 0) {
5373 		return false; /* No extra info, assume all offloaded. */
5374 	}
5375 
5376 	/* The bitmaps are encoded such that this comparison tells whether the requested checksum type is fully offloaded. */
5377 	return !((config.chksum_support & chksum_type) == chksum_type);
5378 #else
5379 	ARG_UNUSED(iface);
5380 	ARG_UNUSED(caps);
5381 
5382 	return true;
5383 #endif
5384 }
5385 
5386 bool net_if_need_calc_tx_checksum(struct net_if *iface, enum net_if_checksum_type chksum_type)
5387 {
5388 	return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD, chksum_type);
5389 }
5390 
5391 bool net_if_need_calc_rx_checksum(struct net_if *iface, enum net_if_checksum_type chksum_type)
5392 {
5393 	return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD, chksum_type);
5394 }
5395 
5396 int net_if_get_by_iface(struct net_if *iface)
5397 {
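	/* Interface indices are 1-based positions within the net_if section;
	 * a pointer outside the section yields -1.
	 */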
5398 	if (!(iface >= _net_if_list_start && iface < _net_if_list_end)) {
5399 		return -1;
5400 	}
5401 
5402 	return (iface - _net_if_list_start) + 1;
5403 }
5404 
5405 void net_if_foreach(net_if_cb_t cb, void *user_data)
5406 {
5407 	STRUCT_SECTION_FOREACH(net_if, iface) {
5408 		cb(iface, user_data);
5409 	}
5410 }
5411 
5412 bool net_if_is_offloaded(struct net_if *iface)
5413 {
5414 	return (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
5415 		net_if_is_ip_offloaded(iface)) ||
5416 	       (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
5417 		net_if_is_socket_offloaded(iface));
5418 }
5419 
5420 static void rejoin_multicast_groups(struct net_if *iface)
5421 {
5422 #if defined(CONFIG_NET_NATIVE_IPV6)
5423 	rejoin_ipv6_mcast_groups(iface);
5424 	if (l2_flags_get(iface) & NET_L2_MULTICAST) {
5425 		join_mcast_allnodes(iface);
5426 	}
5427 #endif
5428 #if defined(CONFIG_NET_NATIVE_IPV4)
5429 	rejoin_ipv4_mcast_groups(iface);
5430 #endif
5431 #if !defined(CONFIG_NET_NATIVE_IPV6) && !defined(CONFIG_NET_NATIVE_IPV4)
5432 	ARG_UNUSED(iface);
5433 #endif
5434 }
5435 
5436 static void notify_iface_up(struct net_if *iface)
5437 {
5438 	/* In many places it's assumed that link address was set with
5439 	 * net_if_set_link_addr(). Better check that now.
5440 	 */
5441 	if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW) &&
5442 	    IS_ENABLED(CONFIG_NET_SOCKETS_CAN) &&
5443 	    (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW)))	{
5444 		/* CAN does not require link address. */
5445 	} else {
5446 		if (!net_if_is_offloaded(iface)) {
5447 			NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);
5448 		}
5449 	}
5450 
5451 	net_if_flag_set(iface, NET_IF_RUNNING);
5452 	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);
5453 	net_virtual_enable(iface);
5454 
5455 	/* If the interface only carries point-to-point traffic then we do
5456 	 * not need to run DAD etc. for it.
5457 	 */
5458 	if (!net_if_is_offloaded(iface) &&
5459 	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
5460 		/* Make sure that we update the IPv6 addresses and join the
5461 		 * multicast groups.
5462 		 */
5463 		rejoin_multicast_groups(iface);
5464 		iface_ipv6_start(iface);
5465 		iface_ipv4_start(iface);
5466 		net_ipv4_autoconf_start(iface);
5467 	}
5468 }
5469 
5470 static void notify_iface_down(struct net_if *iface)
5471 {
5472 	net_if_flag_clear(iface, NET_IF_RUNNING);
5473 	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
5474 	net_virtual_disable(iface);
5475 
5476 	if (!net_if_is_offloaded(iface) &&
5477 	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
5478 		iface_ipv6_stop(iface);
5479 		clear_joined_ipv6_mcast_groups(iface);
5480 		clear_joined_ipv4_mcast_groups(iface);
5481 		net_ipv4_autoconf_reset(iface);
5482 	}
5483 }
5484 
5485 static inline const char *net_if_oper_state2str(enum net_if_oper_state state)
5486 {
5487 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
5488 	switch (state) {
5489 	case NET_IF_OPER_UNKNOWN:
5490 		return "UNKNOWN";
5491 	case NET_IF_OPER_NOTPRESENT:
5492 		return "NOTPRESENT";
5493 	case NET_IF_OPER_DOWN:
5494 		return "DOWN";
5495 	case NET_IF_OPER_LOWERLAYERDOWN:
5496 		return "LOWERLAYERDOWN";
5497 	case NET_IF_OPER_TESTING:
5498 		return "TESTING";
5499 	case NET_IF_OPER_DORMANT:
5500 		return "DORMANT";
5501 	case NET_IF_OPER_UP:
5502 		return "UP";
5503 	default:
5504 		break;
5505 	}
5506 
5507 	return "<invalid>";
5508 #else
5509 	ARG_UNUSED(state);
5510 
5511 	return "";
5512 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
5513 }
5514 
5515 static void update_operational_state(struct net_if *iface)
5516 {
5517 	enum net_if_oper_state prev_state = iface->if_dev->oper_state;
5518 	enum net_if_oper_state new_state = NET_IF_OPER_UNKNOWN;
5519 
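	/* Derive the operational state from admin state, device readiness,
	 * carrier and dormancy, in decreasing order of precedence.
	 */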
5520 	if (!net_if_is_admin_up(iface)) {
5521 		new_state = NET_IF_OPER_DOWN;
5522 		goto exit;
5523 	}
5524 
5525 	if (!device_is_ready(net_if_get_device(iface))) {
5526 		new_state = NET_IF_OPER_LOWERLAYERDOWN;
5527 		goto exit;
5528 	}
5529 
5530 	if (!net_if_is_carrier_ok(iface)) {
5531 #if defined(CONFIG_NET_L2_VIRTUAL)
5532 		if (net_if_l2(iface) == &NET_L2_GET_NAME(VIRTUAL)) {
5533 			new_state = NET_IF_OPER_LOWERLAYERDOWN;
5534 		} else
5535 #endif /* CONFIG_NET_L2_VIRTUAL */
5536 		{
5537 			new_state = NET_IF_OPER_DOWN;
5538 		}
5539 
5540 		goto exit;
5541 	}
5542 
5543 	if (net_if_is_dormant(iface)) {
5544 		new_state = NET_IF_OPER_DORMANT;
5545 		goto exit;
5546 	}
5547 
5548 	new_state = NET_IF_OPER_UP;
5549 
5550 exit:
5551 	if (net_if_oper_state_set(iface, new_state) != new_state) {
5552 		NET_ERR("Failed to update oper state to %d", new_state);
5553 		return;
5554 	}
5555 
5556 	NET_DBG("iface %d (%p), oper state %s admin %s carrier %s dormant %s",
5557 		net_if_get_by_iface(iface), iface,
5558 		net_if_oper_state2str(net_if_oper_state(iface)),
5559 		net_if_is_admin_up(iface) ? "UP" : "DOWN",
5560 		net_if_is_carrier_ok(iface) ? "ON" : "OFF",
5561 		net_if_is_dormant(iface) ? "ON" : "OFF");
5562 
5563 	if (net_if_oper_state(iface) == NET_IF_OPER_UP) {
5564 		if (prev_state != NET_IF_OPER_UP) {
5565 			notify_iface_up(iface);
5566 		}
5567 	} else {
5568 		if (prev_state == NET_IF_OPER_UP) {
5569 			notify_iface_down(iface);
5570 		}
5571 	}
5572 }
5573 
5574 static void init_igmp(struct net_if *iface)
5575 {
5576 #if defined(CONFIG_NET_IPV4_IGMP)
5577 	/* Ensure IPv4 is enabled for this interface. */
5578 	if (net_if_config_ipv4_get(iface, NULL)) {
5579 		return;
5580 	}
5581 
5582 	net_ipv4_igmp_init(iface);
5583 #else
5584 	ARG_UNUSED(iface);
5585 	return;
5586 #endif
5587 }
5588 
5589 int net_if_up(struct net_if *iface)
5590 {
5591 	int status = 0;
5592 
5593 	NET_DBG("iface %d (%p)", net_if_get_by_iface(iface), iface);
5594 
5595 	net_if_lock(iface);
5596 
5597 	if (net_if_flag_is_set(iface, NET_IF_UP)) {
5598 		status = -EALREADY;
5599 		goto out;
5600 	}
5601 
5602 	/* If the L2 does not support enable just set the flag */
5603 	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
5604 		goto done;
5605 	} else {
5606 		/* If the L2 does not implement enable(), then the network
5607 		 * device driver cannot implement start(), in which case we
5608 		 * can do a simple check here and not try to bring the
5609 		 * interface up as the device is not ready.
5610 		 *
5611 		 * If the network device driver does implement start(), then
5612 		 * it could bring the interface up when enable() is called a
5613 		 * few lines below.
5614 		 */
5615 		const struct device *dev;
5616 
5617 		dev = net_if_get_device(iface);
5618 		NET_ASSERT(dev);
5619 
5620 		/* If the device is not ready it is pointless trying to take it up. */
5621 		if (!device_is_ready(dev)) {
5622 			NET_DBG("Device %s (%p) is not ready", dev->name, dev);
5623 			status = -ENXIO;
5624 			goto out;
5625 		}
5626 	}
5627 
5628 	/* Notify L2 to enable the interface. Note that the interface is still down
5629 	 * at this point from network interface point of view i.e., the NET_IF_UP
5630 	 * flag has not been set yet.
5631 	 */
5632 	status = net_if_l2(iface)->enable(iface, true);
5633 	if (status < 0) {
5634 		NET_DBG("Cannot take interface %d up (%d)",
5635 			net_if_get_by_iface(iface), status);
5636 		goto out;
5637 	}
5638 
5639 	init_igmp(iface);
5640 
5641 done:
5642 	net_if_flag_set(iface, NET_IF_UP);
5643 	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_UP, iface);
5644 	update_operational_state(iface);
5645 
5646 out:
5647 	net_if_unlock(iface);
5648 
5649 	return status;
5650 }
5651 
5652 int net_if_down(struct net_if *iface)
5653 {
5654 	int status = 0;
5655 
5656 	NET_DBG("iface %p", iface);
5657 
5658 	net_if_lock(iface);
5659 
5660 	if (!net_if_flag_is_set(iface, NET_IF_UP)) {
5661 		status = -EALREADY;
5662 		goto out;
5663 	}
5664 
5665 	leave_mcast_all(iface);
5666 	leave_ipv4_mcast_all(iface);
5667 
5668 	/* If the L2 does not support enable just clear the flag */
5669 	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
5670 		goto done;
5671 	}
5672 
5673 	/* Notify L2 to disable the interface */
5674 	status = net_if_l2(iface)->enable(iface, false);
5675 	if (status < 0) {
5676 		goto out;
5677 	}
5678 
5679 done:
5680 	net_if_flag_clear(iface, NET_IF_UP);
5681 	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_DOWN, iface);
5682 	update_operational_state(iface);
5683 
5684 out:
5685 	net_if_unlock(iface);
5686 
5687 	return status;
5688 }
5689 
5690 void net_if_carrier_on(struct net_if *iface)
5691 {
5692 	if (iface == NULL) {
5693 		return;
5694 	}
5695 
5696 	net_if_lock(iface);
5697 
5698 	if (!net_if_flag_test_and_set(iface, NET_IF_LOWER_UP)) {
5699 		update_operational_state(iface);
5700 	}
5701 
5702 	net_if_unlock(iface);
5703 }
5704 
5705 void net_if_carrier_off(struct net_if *iface)
5706 {
5707 	if (iface == NULL) {
5708 		return;
5709 	}
5710 
5711 	net_if_lock(iface);
5712 
5713 	if (net_if_flag_test_and_clear(iface, NET_IF_LOWER_UP)) {
5714 		update_operational_state(iface);
5715 	}
5716 
5717 	net_if_unlock(iface);
5718 }
5719 
5720 void net_if_dormant_on(struct net_if *iface)
5721 {
5722 	if (iface == NULL) {
5723 		return;
5724 	}
5725 
5726 	net_if_lock(iface);
5727 
5728 	if (!net_if_flag_test_and_set(iface, NET_IF_DORMANT)) {
5729 		update_operational_state(iface);
5730 	}
5731 
5732 	net_if_unlock(iface);
5733 }
5734 
5735 void net_if_dormant_off(struct net_if *iface)
5736 {
5737 	if (iface == NULL) {
5738 		return;
5739 	}
5740 
5741 	net_if_lock(iface);
5742 
5743 	if (net_if_flag_test_and_clear(iface, NET_IF_DORMANT)) {
5744 		update_operational_state(iface);
5745 	}
5746 
5747 	net_if_unlock(iface);
5748 }
5749 
5750 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
5751 static int promisc_mode_set(struct net_if *iface, bool enable)
5752 {
5753 	enum net_l2_flags l2_flags = 0;
5754 
5755 	if (iface == NULL) {
5756 		return -EINVAL;
5757 	}
5758 
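	/* The L2 must advertise promiscuous mode support before the driver
	 * is asked to enable or disable it.
	 */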
5759 	l2_flags = l2_flags_get(iface);
5760 	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
5761 		return -ENOTSUP;
5762 	}
5763 
5764 #if defined(CONFIG_NET_L2_ETHERNET)
5765 	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
5766 		int ret = net_eth_promisc_mode(iface, enable);
5767 
5768 		if (ret < 0) {
5769 			return ret;
5770 		}
5771 	}
5772 #else
5773 	ARG_UNUSED(enable);
5774 
5775 	return -ENOTSUP;
5776 #endif
5777 
5778 	return 0;
5779 }
5780 
5781 int net_if_set_promisc(struct net_if *iface)
5782 {
5783 	int ret;
5784 
5785 	net_if_lock(iface);
5786 
5787 	ret = promisc_mode_set(iface, true);
5788 	if (ret < 0 && ret != -EALREADY) {
5789 		goto out;
5790 	}
5791 
5792 	ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
5793 	if (ret) {
5794 		ret = -EALREADY;
5795 		goto out;
5796 	}
5797 
5798 out:
5799 	net_if_unlock(iface);
5800 
5801 	return ret;
5802 }
5803 
5804 void net_if_unset_promisc(struct net_if *iface)
5805 {
5806 	int ret;
5807 
5808 	net_if_lock(iface);
5809 
5810 	ret = promisc_mode_set(iface, false);
5811 	if (ret < 0) {
5812 		goto out;
5813 	}
5814 
5815 	net_if_flag_clear(iface, NET_IF_PROMISC);
5816 
5817 out:
5818 	net_if_unlock(iface);
5819 }
5820 
5821 bool net_if_is_promisc(struct net_if *iface)
5822 {
5823 	if (iface == NULL) {
5824 		return false;
5825 	}
5826 
5827 	return net_if_flag_is_set(iface, NET_IF_PROMISC);
5828 }
5829 #endif /* CONFIG_NET_PROMISCUOUS_MODE */
5830 
5831 #ifdef CONFIG_NET_POWER_MANAGEMENT
5832 
5833 int net_if_suspend(struct net_if *iface)
5834 {
5835 	int ret = 0;
5836 
5837 	net_if_lock(iface);
5838 
5839 	if (net_if_are_pending_tx_packets(iface)) {
5840 		ret = -EBUSY;
5841 		goto out;
5842 	}
5843 
5844 	if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
5845 		ret = -EALREADY;
5846 		goto out;
5847 	}
5848 
5849 	net_stats_add_suspend_start_time(iface, k_cycle_get_32());
5850 
5851 out:
5852 	net_if_unlock(iface);
5853 
5854 	return ret;
5855 }
5856 
5857 int net_if_resume(struct net_if *iface)
5858 {
5859 	int ret = 0;
5860 
5861 	net_if_lock(iface);
5862 
5863 	if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
5864 		ret = -EALREADY;
5865 		goto out;
5866 	}
5867 
5868 	net_if_flag_clear(iface, NET_IF_SUSPENDED);
5869 
5870 	net_stats_add_suspend_end_time(iface, k_cycle_get_32());
5871 
5872 out:
5873 	net_if_unlock(iface);
5874 
5875 	return ret;
5876 }
5877 
5878 bool net_if_is_suspended(struct net_if *iface)
5879 {
5880 	return net_if_flag_is_set(iface, NET_IF_SUSPENDED);
5881 }
5882 
5883 #endif /* CONFIG_NET_POWER_MANAGEMENT */
5884 
5885 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
5886 static void net_tx_ts_thread(void *p1, void *p2, void *p3)
5887 {
5888 	ARG_UNUSED(p1);
5889 	ARG_UNUSED(p2);
5890 	ARG_UNUSED(p3);
5891 
5892 	struct net_pkt *pkt;
5893 
5894 	NET_DBG("Starting TX timestamp callback thread");
5895 
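	/* Drain the TX timestamp queue and run the registered callbacks for
	 * each packet, dropping our reference afterwards.
	 */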
5896 	while (1) {
5897 		pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
5898 		if (pkt) {
5899 			net_if_call_timestamp_cb(pkt);
5900 		}
5901 		net_pkt_unref(pkt);
5902 	}
5903 }
5904 
5905 void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
5906 				  struct net_pkt *pkt,
5907 				  struct net_if *iface,
5908 				  net_if_timestamp_callback_t cb)
5909 {
5910 	k_mutex_lock(&lock, K_FOREVER);
5911 
5912 	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
5913 	sys_slist_prepend(&timestamp_callbacks, &handle->node);
5914 
5915 	handle->iface = iface;
5916 	handle->cb = cb;
5917 	handle->pkt = pkt;
5918 
5919 	k_mutex_unlock(&lock);
5920 }
5921 
5922 void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
5923 {
5924 	k_mutex_lock(&lock, K_FOREVER);
5925 
5926 	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
5927 
5928 	k_mutex_unlock(&lock);
5929 }
5930 
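/* Invoke all timestamp callbacks that match the packet: a handle with a
 * NULL iface matches any interface and a NULL pkt matches any packet.
 */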
void net_if_call_timestamp_cb(struct net_pkt *pkt)
{
	sys_snode_t *sn, *sns;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_NODE_SAFE(&timestamp_callbacks, sn, sns) {
		struct net_if_timestamp_cb *handle =
			CONTAINER_OF(sn, struct net_if_timestamp_cb, node);

		if (((handle->iface == NULL) ||
		     (handle->iface == net_pkt_iface(pkt))) &&
		    (handle->pkt == NULL || handle->pkt == pkt)) {
			handle->cb(pkt);
		}
	}

	k_mutex_unlock(&lock);
}

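/* Queue a packet for the TX timestamp thread. The extra reference taken
 * here is released by net_tx_ts_thread() after the callbacks have run.
 */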
void net_if_add_tx_timestamp(struct net_pkt *pkt)
{
	/* Take the extra reference before queueing so the timestamp
	 * thread cannot release the last reference while the packet
	 * is still in flight.
	 */
	net_pkt_ref(pkt);
	k_fifo_put(&tx_ts_queue, pkt);
}
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

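/* An interface counts as Wi-Fi if it is either an offloaded Wi-Fi device
 * or an Ethernet interface whose type is marked as Wi-Fi.
 */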
bool net_if_is_wifi(struct net_if *iface)
{
	if (net_if_is_offloaded(iface)) {
		return net_off_is_wifi_offloaded(iface);
	}

	if (IS_ENABLED(CONFIG_NET_L2_ETHERNET)) {
		return net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) &&
			net_eth_type_is_wifi(iface);
	}

	return false;
}

struct net_if *net_if_get_first_wifi(void)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_is_wifi(iface)) {
			return iface;
		}
	}

	return NULL;
}

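/* Return the first Wi-Fi STA interface. Without CONFIG_WIFI_NM the STA
 * check is skipped, so the first Wi-Fi interface wins; if no STA
 * interface exists, fall back to the first Wi-Fi interface.
 */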
struct net_if *net_if_get_wifi_sta(void)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_is_wifi(iface)
#ifdef CONFIG_WIFI_NM
		    && wifi_nm_iface_is_sta(iface)
#endif
		    ) {
			return iface;
		}
	}

	/* If no STA interface is found, return the first WiFi interface */
	return net_if_get_first_wifi();
}

struct net_if *net_if_get_wifi_sap(void)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_is_wifi(iface)
#ifdef CONFIG_WIFI_NM
		    && wifi_nm_iface_is_sap(iface)
#endif
		    ) {
			return iface;
		}
	}

	/* If no SAP interface is found, return the first WiFi interface */
	return net_if_get_first_wifi();
}

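/* Copy the interface name (including the terminating NUL) into buf.
 * Returns the name length on success, -EINVAL on bad arguments, -ERANGE
 * if buf is too small, or -ENOTSUP when interface naming is disabled.
 */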
int net_if_get_name(struct net_if *iface, char *buf, int len)
{
#if defined(CONFIG_NET_INTERFACE_NAME)
	int name_len;

	if (iface == NULL || buf == NULL || len <= 0) {
		return -EINVAL;
	}

	name_len = strlen(net_if_get_config(iface)->name);
	if (name_len >= len) {
		return -ERANGE;
	}

	/* Copy string and null terminator */
	memcpy(buf, net_if_get_config(iface)->name, name_len + 1);

	return name_len;
#else
	return -ENOTSUP;
#endif
}

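/* Set the interface name. The name must fit into the configured name
 * buffer (-ENAMETOOLONG otherwise) and must not already be in use by
 * another interface (-EALREADY).
 */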
int net_if_set_name(struct net_if *iface, const char *buf)
{
#if defined(CONFIG_NET_INTERFACE_NAME)
	int name_len;

	if (iface == NULL || buf == NULL) {
		return -EINVAL;
	}

	name_len = strlen(buf);
	if (name_len >= sizeof(iface->config.name)) {
		return -ENAMETOOLONG;
	}

	STRUCT_SECTION_FOREACH(net_if, iface_check) {
		if (iface_check == iface) {
			continue;
		}

		if (memcmp(net_if_get_config(iface_check)->name,
			   buf,
			   name_len + 1) == 0) {
			return -EALREADY;
		}
	}

	/* Copy string and null terminator */
	memcpy(net_if_get_config(iface)->name, buf, name_len + 1);

	return 0;
#else
	return -ENOTSUP;
#endif
}

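/* Look up an interface by name and return its index, or -ENOENT if no
 * interface matches. Note that the comparison only covers strlen(name)
 * characters, so a shorter query such as "eth" also matches "eth0".
 *
 * Illustrative usage (the interface name is only an example):
 *
 *   int idx = net_if_get_by_name("eth0");
 *
 *   if (idx > 0) {
 *           struct net_if *iface = net_if_get_by_index(idx);
 *   }
 */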
int net_if_get_by_name(const char *name)
{
#if defined(CONFIG_NET_INTERFACE_NAME)
	if (name == NULL) {
		return -EINVAL;
	}

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (strncmp(net_if_get_config(iface)->name, name, strlen(name)) == 0) {
			return net_if_get_by_iface(iface);
		}
	}

	return -ENOENT;
#else
	return -ENOTSUP;
#endif
}

#if defined(CONFIG_NET_INTERFACE_NAME)
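/* Build a default interface name such as "wlan0", "eth0" or "net0" based
 * on the L2 type, using a per-type counter, and assign it with
 * net_if_set_name().
 */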
static void set_default_name(struct net_if *iface)
{
	char name[CONFIG_NET_INTERFACE_NAME_LEN + 1];
	int ret;

	if (net_if_is_wifi(iface)) {
		static int count;

		snprintk(name, sizeof(name), "wlan%d", count++);
	} else if (IS_ENABLED(CONFIG_NET_L2_ETHERNET) &&
		   (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET))) {
		static int count;

		snprintk(name, sizeof(name), "eth%d", count++);
	} else if (IS_ENABLED(CONFIG_NET_L2_IEEE802154) &&
		   (net_if_l2(iface) == &NET_L2_GET_NAME(IEEE802154))) {
		static int count;

		snprintk(name, sizeof(name), "ieee%d", count++);
	} else if (IS_ENABLED(CONFIG_NET_L2_DUMMY) &&
		   (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY))) {
		static int count;

		snprintk(name, sizeof(name), "dummy%d", count++);
	} else if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW) &&
		   (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW))) {
		static int count;

		snprintk(name, sizeof(name), "can%d", count++);
	} else if (IS_ENABLED(CONFIG_NET_L2_PPP) &&
		   (net_if_l2(iface) == &NET_L2_GET_NAME(PPP))) {
		static int count;

		snprintk(name, sizeof(name), "ppp%d", count++);
	} else if (IS_ENABLED(CONFIG_NET_L2_OPENTHREAD) &&
		   (net_if_l2(iface) == &NET_L2_GET_NAME(OPENTHREAD))) {
		static int count;

		snprintk(name, sizeof(name), "thread%d", count++);
	} else {
		static int count;

		snprintk(name, sizeof(name), "net%d", count++);
	}

	ret = net_if_set_name(iface, name);
	if (ret < 0) {
		NET_WARN("Cannot set default name for interface %d (%p) (%d)",
			 net_if_get_by_iface(iface), iface, ret);
	}
}
#endif /* CONFIG_NET_INTERFACE_NAME */

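/* One-time initialization of all network interfaces: clears the interface
 * names, runs per-interface init, assigns default names, sets up
 * IPv4/IPv6 and router handling, and starts the TX timestamp thread when
 * enabled.
 */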
void net_if_init(void)
{
	int if_count = 0;

	NET_DBG("");

	k_mutex_lock(&lock, K_FOREVER);

	net_tc_tx_init();

	STRUCT_SECTION_FOREACH(net_if, iface) {
#if defined(CONFIG_NET_INTERFACE_NAME)
		memset(net_if_get_config(iface)->name, 0,
		       sizeof(iface->config.name));
#endif

		init_iface(iface);

#if defined(CONFIG_NET_INTERFACE_NAME)
		/* If the driver did not set the name, then set
		 * a default name for the network interface.
		 */
		if (net_if_get_config(iface)->name[0] == '\0') {
			set_default_name(iface);
		}
#endif

		net_stats_prometheus_init(iface);

		if_count++;
	}

	if (if_count == 0) {
		NET_ERR("There is no network interface to work with!");
		goto out;
	}

#if defined(CONFIG_ASSERT)
	/* Do an extra check that verifies that the interface count is
	 * correct.
	 */
	int count_if;

	NET_IFACE_COUNT(&count_if);
	NET_ASSERT(count_if == if_count);
#endif

	iface_ipv6_init(if_count);
	iface_ipv4_init(if_count);
	iface_router_init();

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
	k_thread_create(&tx_thread_ts, tx_ts_stack,
			K_KERNEL_STACK_SIZEOF(tx_ts_stack),
			net_tx_ts_thread,
			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

out:
	k_mutex_unlock(&lock);
}

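/* Called once the TX path is operational: bring up every interface that
 * is not marked with NET_IF_NO_AUTO_START.
 */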
void net_if_post_init(void)
{
	NET_DBG("");

	/* After TX is running, attempt to bring the interfaces up */
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
			net_if_up(iface);
		}
	}
}