1 /*
2  * Copyright (c) 2016 Intel Corporation.
3  * Copyright (c) 2023 Nordic Semiconductor ASA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/logging/log.h>
9 LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
10 
11 #include <zephyr/init.h>
12 #include <zephyr/kernel.h>
13 #include <zephyr/linker/sections.h>
14 #include <zephyr/random/random.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <zephyr/net/igmp.h>
19 #include <zephyr/net/ipv4_autoconf.h>
20 #include <zephyr/net/net_core.h>
21 #include <zephyr/net/net_event.h>
22 #include <zephyr/net/net_pkt.h>
23 #include <zephyr/net/net_if.h>
24 #include <zephyr/net/net_mgmt.h>
25 #include <zephyr/net/ethernet.h>
26 #ifdef CONFIG_WIFI_NM
27 #include <zephyr/net/wifi_nm.h>
28 #endif
29 #include <zephyr/net/offloaded_netdev.h>
30 #include <zephyr/net/virtual.h>
31 #include <zephyr/net/socket.h>
32 #include <zephyr/sys/iterable_sections.h>
33 
34 #include "net_private.h"
35 #include "ipv4.h"
36 #include "ipv6.h"
37 
38 #include "net_stats.h"
39 
40 #define REACHABLE_TIME (MSEC_PER_SEC * 30) /* in ms */
41 /*
42  * split the min/max random reachable factors into numerator/denominator
43  * so that integer-based math works better
44  */
45 #define MIN_RANDOM_NUMER (1)
46 #define MIN_RANDOM_DENOM (2)
47 #define MAX_RANDOM_NUMER (3)
48 #define MAX_RANDOM_DENOM (2)
49 
50 static K_MUTEX_DEFINE(lock);
51 
52 /* net_if dedicated section limiters */
53 extern struct net_if _net_if_list_start[];
54 extern struct net_if _net_if_list_end[];
55 
56 static struct net_if *default_iface;
57 
58 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
59 static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
60 static struct k_work_delayable router_timer;
61 static sys_slist_t active_router_timers;
62 #endif
63 
64 #if defined(CONFIG_NET_NATIVE_IPV6)
65 /* Timer that triggers network address renewal */
66 static struct k_work_delayable address_lifetime_timer;
67 
68 /* Track currently active address lifetime timers */
69 static sys_slist_t active_address_lifetime_timers;
70 
71 /* Timer that triggers IPv6 prefix lifetime */
72 static struct k_work_delayable prefix_lifetime_timer;
73 
74 /* Track currently active IPv6 prefix lifetime timers */
75 static sys_slist_t active_prefix_lifetime_timers;
76 
77 #if defined(CONFIG_NET_IPV6_DAD)
78 /** Duplicate address detection (DAD) timer */
79 static struct k_work_delayable dad_timer;
80 static sys_slist_t active_dad_timers;
81 #endif
82 
83 #if defined(CONFIG_NET_IPV6_ND)
84 static struct k_work_delayable rs_timer;
85 static sys_slist_t active_rs_timers;
86 #endif
87 
88 static struct {
89 	struct net_if_ipv6 ipv6;
90 	struct net_if *iface;
91 } ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
92 #endif /* CONFIG_NET_NATIVE_IPV6 */
93 
94 #if defined(CONFIG_NET_NATIVE_IPV4)
95 static struct {
96 	struct net_if_ipv4 ipv4;
97 	struct net_if *iface;
98 } ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
99 #endif /* CONFIG_NET_NATIVE_IPV4 */
100 
101 /* We keep track of the link callbacks in this list.
102  */
103 static sys_slist_t link_callbacks;
104 
105 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
106 /* Multicast join/leave tracking.
107  */
108 static sys_slist_t mcast_monitor_callbacks;
109 #endif
110 
111 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
112 #if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
113 #define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
114 #endif
115 
116 K_KERNEL_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
117 K_FIFO_DEFINE(tx_ts_queue);
118 
119 static struct k_thread tx_thread_ts;
120 
121 /* We keep track of the timestamp callbacks in this list.
122  */
123 static sys_slist_t timestamp_callbacks;
124 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
125 
126 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
127 #define debug_check_packet(pkt)						\
128 	do {								\
129 		NET_DBG("Processing (pkt %p, prio %d) network packet "	\
130 			"iface %d (%p)",				\
131 			pkt, net_pkt_priority(pkt),			\
132 			net_if_get_by_iface(net_pkt_iface(pkt)),	\
133 			net_pkt_iface(pkt));				\
134 									\
135 		NET_ASSERT(pkt->frags);					\
136 	} while (false)
137 #else
138 #define debug_check_packet(...)
139 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
140 
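/* Resolve a 1-based interface index into a pointer inside the dedicated
 * net_if linker section. Returns NULL for zero, negative or out-of-range
 * indexes.
 */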
struct net_if *z_impl_net_if_get_by_index(int index)
142 {
143 	if (index <= 0) {
144 		return NULL;
145 	}
146 
147 	if (&_net_if_list_start[index - 1] >= _net_if_list_end) {
148 		NET_DBG("Index %d is too large", index);
149 		return NULL;
150 	}
151 
152 	return &_net_if_list_start[index - 1];
153 }
154 
155 #ifdef CONFIG_USERSPACE
struct net_if *z_vrfy_net_if_get_by_index(int index)
157 {
158 	struct net_if *iface;
159 
160 	iface = net_if_get_by_index(index);
161 	if (!iface) {
162 		return NULL;
163 	}
164 
165 	if (!k_object_is_valid(iface, K_OBJ_NET_IF)) {
166 		return NULL;
167 	}
168 
169 	return iface;
170 }
171 
172 #include <zephyr/syscalls/net_if_get_by_index_mrsh.c>
173 #endif
174 
static inline void net_context_send_cb(struct net_context *context,
176 				       int status)
177 {
178 	if (!context) {
179 		return;
180 	}
181 
182 	if (context->send_cb) {
183 		context->send_cb(context, status, context->user_data);
184 	}
185 
186 	if (IS_ENABLED(CONFIG_NET_UDP) &&
187 	    net_context_get_proto(context) == IPPROTO_UDP) {
188 		net_stats_update_udp_sent(net_context_get_iface(context));
189 	} else if (IS_ENABLED(CONFIG_NET_TCP) &&
190 		   net_context_get_proto(context) == IPPROTO_TCP) {
191 		net_stats_update_tcp_seg_sent(net_context_get_iface(context));
192 	}
193 }
194 
static void update_txtime_stats_detail(struct net_pkt *pkt,
196 				       uint32_t start_time, uint32_t stop_time)
197 {
198 	uint32_t val, prev = start_time;
199 	int i;
200 
201 	for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
202 		if (!net_pkt_stats_tick(pkt)[i]) {
203 			break;
204 		}
205 
206 		val = net_pkt_stats_tick(pkt)[i] - prev;
207 		prev = net_pkt_stats_tick(pkt)[i];
208 		net_pkt_stats_tick(pkt)[i] = val;
209 	}
210 }
211 
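/* Send a single packet through the interface L2. Collects TX time
 * statistics when enabled, drops the packet with -ENETDOWN if the lower
 * layer is not up, and finally runs the net_context send callback and any
 * registered link callbacks with the resulting status.
 */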
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
213 {
214 	struct net_linkaddr ll_dst = {
215 		.addr = NULL
216 	};
217 	struct net_linkaddr_storage ll_dst_storage;
218 	struct net_context *context;
219 	uint32_t create_time;
220 	int status;
221 
222 	/* We collect send statistics for each socket priority if enabled */
223 	uint8_t pkt_priority;
224 
225 	if (!pkt) {
226 		return false;
227 	}
228 
229 	create_time = net_pkt_create_time(pkt);
230 
231 	debug_check_packet(pkt);
232 
	/* If there are any link callbacks, and such a callback receives
	 * a destination address, copy that address out of the packet in
	 * case the packet is freed before the callback is called.
	 */
237 	if (!sys_slist_is_empty(&link_callbacks)) {
238 		if (net_linkaddr_set(&ll_dst_storage,
239 				     net_pkt_lladdr_dst(pkt)->addr,
240 				     net_pkt_lladdr_dst(pkt)->len) == 0) {
241 			ll_dst.addr = ll_dst_storage.addr;
242 			ll_dst.len = ll_dst_storage.len;
243 			ll_dst.type = net_pkt_lladdr_dst(pkt)->type;
244 		}
245 	}
246 
247 	context = net_pkt_context(pkt);
248 
249 	if (net_if_flag_is_set(iface, NET_IF_LOWER_UP)) {
250 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
251 			pkt_priority = net_pkt_priority(pkt);
252 
253 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
254 				/* Make sure the statistics information is not
255 				 * lost by keeping the net_pkt over L2 send.
256 				 */
257 				net_pkt_ref(pkt);
258 			}
259 		}
260 
261 		net_if_tx_lock(iface);
262 		status = net_if_l2(iface)->send(iface, pkt);
263 		net_if_tx_unlock(iface);
264 
265 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
266 			uint32_t end_tick = k_cycle_get_32();
267 
268 			net_pkt_set_tx_stats_tick(pkt, end_tick);
269 
270 			net_stats_update_tc_tx_time(iface,
271 						    pkt_priority,
272 						    create_time,
273 						    end_tick);
274 
275 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
276 				update_txtime_stats_detail(
277 					pkt,
278 					create_time,
279 					end_tick);
280 
281 				net_stats_update_tc_tx_time_detail(
282 					iface, pkt_priority,
283 					net_pkt_stats_tick(pkt));
284 
285 				/* For TCP connections, we might keep the pkt
286 				 * longer so that we can resend it if needed.
287 				 * Because of that we need to clear the
288 				 * statistics here.
289 				 */
290 				net_pkt_stats_tick_reset(pkt);
291 
292 				net_pkt_unref(pkt);
293 			}
294 		}
295 
296 	} else {
297 		/* Drop packet if interface is not up */
298 		NET_WARN("iface %p is down", iface);
299 		status = -ENETDOWN;
300 	}
301 
302 	if (status < 0) {
303 		net_pkt_unref(pkt);
304 	} else {
305 		net_stats_update_bytes_sent(iface, status);
306 	}
307 
308 	if (context) {
309 		NET_DBG("Calling context send cb %p status %d",
310 			context, status);
311 
312 		net_context_send_cb(context, status);
313 	}
314 
315 	if (ll_dst.addr) {
316 		net_if_call_link_cb(iface, &ll_dst, status);
317 	}
318 
319 	return true;
320 }
321 
void net_process_tx_packet(struct net_pkt *pkt)
323 {
324 	struct net_if *iface;
325 
326 	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
327 
328 	iface = net_pkt_iface(pkt);
329 
330 	net_if_tx(iface, pkt);
331 
332 #if defined(CONFIG_NET_POWER_MANAGEMENT)
333 	iface->tx_pending--;
334 #endif
335 }
336 
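/* Queue a packet for transmission. The packet filter may drop it silently,
 * per-traffic-class statistics are updated, and the packet is either pushed
 * straight to the driver (highest priority or no TX queues configured) or
 * submitted to the matching traffic class queue.
 */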
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
338 {
339 	if (!net_pkt_filter_send_ok(pkt)) {
340 		/* silently drop the packet */
341 		net_pkt_unref(pkt);
342 		return;
343 	}
344 
345 	uint8_t prio = net_pkt_priority(pkt);
346 	uint8_t tc = net_tx_priority2tc(prio);
347 
348 	net_stats_update_tc_sent_pkt(iface, tc);
349 	net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
350 	net_stats_update_tc_sent_priority(iface, tc, prio);
351 
	/* For the highest priority packets, skip the TX queue and push the
	 * packet directly to the driver. Do the same if there is no TX
	 * queue/thread at all.
	 */
356 	if ((IS_ENABLED(CONFIG_NET_TC_SKIP_FOR_HIGH_PRIO) &&
357 	     prio >= NET_PRIORITY_CA) || NET_TC_TX_COUNT == 0) {
358 		net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
359 
360 		net_if_tx(net_pkt_iface(pkt), pkt);
361 		return;
362 	}
363 
364 #if NET_TC_TX_COUNT > 1
365 	NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
366 #endif
367 
368 #if defined(CONFIG_NET_POWER_MANAGEMENT)
369 	iface->tx_pending++;
370 #endif
371 
372 	if (!net_tc_submit_to_tx_queue(tc, pkt)) {
373 #if defined(CONFIG_NET_POWER_MANAGEMENT)
		iface->tx_pending--;
#endif
377 	}
378 }
379 
void net_if_stats_reset(struct net_if *iface)
381 {
382 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
383 	STRUCT_SECTION_FOREACH(net_if, tmp) {
384 		if (iface == tmp) {
385 			net_if_lock(iface);
386 			memset(&iface->stats, 0, sizeof(iface->stats));
387 			net_if_unlock(iface);
388 			return;
389 		}
390 	}
391 #else
392 	ARG_UNUSED(iface);
393 #endif
394 }
395 
void net_if_stats_reset_all(void)
397 {
398 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
399 	STRUCT_SECTION_FOREACH(net_if, iface) {
400 		net_if_lock(iface);
401 		memset(&iface->stats, 0, sizeof(iface->stats));
402 		net_if_unlock(iface);
403 	}
404 #endif
405 }
406 
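/* One-time initialization of a network interface: checks that the driver
 * provides an init hook, enables IPv4/IPv6 by default, initializes the
 * interface locks and then calls the driver init function.
 */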
static inline void init_iface(struct net_if *iface)
408 {
409 	const struct net_if_api *api = net_if_get_device(iface)->api;
410 
411 	if (!api || !api->init) {
412 		NET_ERR("Iface %p driver API init NULL", iface);
413 		return;
414 	}
415 
416 	/* By default IPv4 and IPv6 are enabled for a given network interface.
417 	 * These can be turned off later if needed.
418 	 */
419 #if defined(CONFIG_NET_NATIVE_IPV4)
420 	net_if_flag_set(iface, NET_IF_IPV4);
421 #endif
422 #if defined(CONFIG_NET_NATIVE_IPV6)
423 	net_if_flag_set(iface, NET_IF_IPV6);
424 #endif
425 
426 	net_virtual_init(iface);
427 
428 	NET_DBG("On iface %p", iface);
429 
430 #ifdef CONFIG_USERSPACE
431 	k_object_init(iface);
432 #endif
433 
434 	k_mutex_init(&iface->lock);
435 	k_mutex_init(&iface->tx_lock);
436 
437 	api->init(iface);
438 
439 	net_ipv6_pe_init(iface);
440 }
441 
enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
443 {
444 	struct net_context *context = net_pkt_context(pkt);
445 	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
446 	enum net_verdict verdict = NET_OK;
447 	int status = -EIO;
448 
449 	if (!net_if_flag_is_set(iface, NET_IF_LOWER_UP) ||
450 	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
451 		/* Drop packet if interface is not up */
452 		NET_WARN("iface %p is down", iface);
453 		verdict = NET_DROP;
454 		status = -ENETDOWN;
455 		goto done;
456 	}
457 
458 	if (IS_ENABLED(CONFIG_NET_OFFLOAD) && !net_if_l2(iface)) {
459 		NET_WARN("no l2 for iface %p, discard pkt", iface);
460 		verdict = NET_DROP;
461 		goto done;
462 	}
463 
464 	/* If the ll address is not set at all, then we must set
465 	 * it here.
466 	 * Workaround Linux bug, see:
467 	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
468 	 */
469 	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
470 	    !net_pkt_lladdr_src(pkt)->addr) {
471 		net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
472 		net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
473 	}
474 
475 #if defined(CONFIG_NET_LOOPBACK)
476 	/* If the packet is destined back to us, then there is no need to do
477 	 * additional checks, so let the packet through.
478 	 */
479 	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
480 		goto done;
481 	}
482 #endif
483 
484 	/* Bypass the IP stack with SOCK_RAW/IPPROTO_RAW sockets */
485 	if (IS_ENABLED(CONFIG_NET_SOCKETS_PACKET) &&
486 	    context && net_context_get_type(context) == SOCK_RAW &&
487 	    net_context_get_proto(context) == IPPROTO_RAW) {
488 		goto done;
489 	}
490 
491 	/* If the ll dst address is not set check if it is present in the nbr
492 	 * cache.
493 	 */
494 	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
495 		verdict = net_ipv6_prepare_for_send(pkt);
496 	}
497 
498 #if defined(CONFIG_NET_IPV4_FRAGMENT)
499 	if (net_pkt_family(pkt) == AF_INET) {
500 		verdict = net_ipv4_prepare_for_send(pkt);
501 	}
502 #endif
503 
504 done:
	/*   NET_OK in which case the packet has been checked successfully.
	 *   In this case the net_context callback is called after successful
	 *   delivery in net_if_tx_thread().
508 	 *
509 	 *   NET_DROP in which case we call net_context callback that will
510 	 *   give the status to user application.
511 	 *
512 	 *   NET_CONTINUE in which case the sending of the packet is delayed.
513 	 *   This can happen for example if we need to do IPv6 ND to figure
514 	 *   out link layer address.
515 	 */
516 	if (verdict == NET_DROP) {
517 		if (context) {
518 			NET_DBG("Calling ctx send cb %p verdict %d",
519 				context, verdict);
520 			net_context_send_cb(context, status);
521 		}
522 
523 		if (dst->addr) {
524 			net_if_call_link_cb(iface, dst, status);
525 		}
526 	} else if (verdict == NET_OK) {
527 		/* Packet is ready to be sent by L2, let's queue */
528 		net_if_queue_tx(iface, pkt);
529 	}
530 
531 	return verdict;
532 }
533 
int net_if_set_link_addr_locked(struct net_if *iface,
535 				uint8_t *addr, uint8_t len,
536 				enum net_link_type type)
537 {
538 	int ret;
539 
540 	net_if_lock(iface);
541 
542 	ret = net_if_set_link_addr_unlocked(iface, addr, len, type);
543 
544 	net_if_unlock(iface);
545 
546 	return ret;
547 }
548 
struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
550 {
551 	STRUCT_SECTION_FOREACH(net_if, iface) {
552 		net_if_lock(iface);
553 		if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
554 			    ll_addr->len)) {
555 			net_if_unlock(iface);
556 			return iface;
557 		}
558 		net_if_unlock(iface);
559 	}
560 
561 	return NULL;
562 }
563 
struct net_if *net_if_lookup_by_dev(const struct device *dev)
565 {
566 	STRUCT_SECTION_FOREACH(net_if, iface) {
567 		if (net_if_get_device(iface) == dev) {
568 			return iface;
569 		}
570 	}
571 
572 	return NULL;
573 }
574 
void net_if_set_default(struct net_if *iface)
576 {
577 	default_iface = iface;
578 }
579 
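/* Return the default interface: the one set with net_if_set_default() if
 * any, otherwise the first interface matching the configured
 * CONFIG_NET_DEFAULT_IF_* choice, falling back to the first interface in
 * the net_if section.
 */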
struct net_if *net_if_get_default(void)
581 {
582 	struct net_if *iface = NULL;
583 
584 	if (&_net_if_list_start[0] == &_net_if_list_end[0]) {
585 		NET_WARN("No default interface found!");
586 		return NULL;
587 	}
588 
589 	if (default_iface != NULL) {
590 		return default_iface;
591 	}
592 
593 #if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
594 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
595 #endif
596 #if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
597 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
598 #endif
599 #if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
600 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
601 #endif
602 #if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
603 	iface = net_if_get_first_by_type(NULL);
604 #endif
605 #if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
606 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
607 #endif
608 #if defined(CONFIG_NET_DEFAULT_IF_PPP)
609 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
610 #endif
611 #if defined(CONFIG_NET_DEFAULT_IF_UP)
612 	iface = net_if_get_first_up();
613 #endif
614 #if defined(CONFIG_NET_DEFAULT_IF_WIFI)
615 	iface = net_if_get_first_wifi();
616 #endif
617 	return iface ? iface : _net_if_list_start;
618 }
619 
struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
621 {
622 	STRUCT_SECTION_FOREACH(net_if, iface) {
623 		if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
624 		    !l2 && net_if_offload(iface)) {
625 			return iface;
626 		}
627 
628 		if (net_if_l2(iface) == l2) {
629 			return iface;
630 		}
631 	}
632 
633 	return NULL;
634 }
635 
struct net_if *net_if_get_first_up(void)
637 {
638 	STRUCT_SECTION_FOREACH(net_if, iface) {
639 		if (net_if_flag_is_set(iface, NET_IF_UP)) {
640 			return iface;
641 		}
642 	}
643 
644 	return NULL;
645 }
646 
static enum net_l2_flags l2_flags_get(struct net_if *iface)
648 {
649 	enum net_l2_flags flags = 0;
650 
651 	if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
652 		flags = net_if_l2(iface)->get_flags(iface);
653 	}
654 
655 	return flags;
656 }
657 
658 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
659 /* Return how many bits are shared between two IP addresses */
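/* Example: 192.0.2.1 and 192.0.2.129 share the first three octets (24 bits);
 * the fourth octets differ already in the most significant bit
 * (0x01 ^ 0x81 = 0x80), so the loop stops there and 24 is returned.
 */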
static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len)
661 {
662 	uint8_t j, k, xor;
663 	uint8_t len = 0U;
664 
665 	for (j = 0U; j < addr_len; j++) {
666 		if (src[j] == dst[j]) {
667 			len += 8U;
668 		} else {
669 			xor = src[j] ^ dst[j];
670 			for (k = 0U; k < 8; k++) {
671 				if (!(xor & 0x80)) {
672 					len++;
673 					xor <<= 1;
674 				} else {
675 					break;
676 				}
677 			}
678 			break;
679 		}
680 	}
681 
682 	return len;
683 }
684 
static struct net_if_router *iface_router_lookup(struct net_if *iface,
686 						 uint8_t family, void *addr)
687 {
688 	struct net_if_router *router = NULL;
689 	int i;
690 
691 	k_mutex_lock(&lock, K_FOREVER);
692 
693 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
694 		if (!routers[i].is_used ||
695 		    routers[i].address.family != family ||
696 		    routers[i].iface != iface) {
697 			continue;
698 		}
699 
700 		if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
701 		     net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
702 				       (struct in6_addr *)addr)) ||
703 		    (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
704 		     net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
705 				       (struct in_addr *)addr))) {
706 			router = &routers[i];
707 			goto out;
708 		}
709 	}
710 
711 out:
712 	k_mutex_unlock(&lock);
713 
714 	return router;
715 }
716 
static void iface_router_notify_deletion(struct net_if_router *router,
718 					 const char *delete_reason)
719 {
720 	if (IS_ENABLED(CONFIG_NET_IPV6) &&
721 	    router->address.family == AF_INET6) {
722 		NET_DBG("IPv6 router %s %s",
723 			net_sprint_ipv6_addr(net_if_router_ipv6(router)),
724 			delete_reason);
725 
726 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
727 						router->iface,
728 						&router->address.in6_addr,
729 						sizeof(struct in6_addr));
730 	} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
731 		   router->address.family == AF_INET) {
732 		NET_DBG("IPv4 router %s %s",
733 			net_sprint_ipv4_addr(net_if_router_ipv4(router)),
734 			delete_reason);
735 
736 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
737 						router->iface,
738 						&router->address.in_addr,
						sizeof(struct in_addr));
740 	}
741 }
742 
static inline int32_t iface_router_ends(const struct net_if_router *router,
744 					uint32_t now)
745 {
746 	uint32_t ends = router->life_start;
747 
748 	ends += MSEC_PER_SEC * router->lifetime;
749 
750 	/* Signed number of ms until router lifetime ends */
751 	return (int32_t)(ends - now);
752 }
753 
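/* Reschedule the shared router_timer to fire when the earliest remaining
 * router lifetime ends, or cancel it if no time-limited router is active.
 */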
static void iface_router_update_timer(uint32_t now)
755 {
756 	struct net_if_router *router, *next;
757 	uint32_t new_delay = UINT32_MAX;
758 
759 	k_mutex_lock(&lock, K_FOREVER);
760 
761 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
762 					 router, next, node) {
763 		int32_t ends = iface_router_ends(router, now);
764 
765 		if (ends <= 0) {
766 			new_delay = 0;
767 			break;
768 		}
769 
770 		new_delay = MIN((uint32_t)ends, new_delay);
771 	}
772 
773 	if (new_delay == UINT32_MAX) {
774 		k_work_cancel_delayable(&router_timer);
775 	} else {
776 		k_work_reschedule(&router_timer, K_MSEC(new_delay));
777 	}
778 
779 	k_mutex_unlock(&lock);
780 }
781 
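/* Work handler for router_timer: removes routers whose lifetime has ended,
 * notifies listeners about the deletion and reschedules the timer for the
 * next pending expiry.
 */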
static void iface_router_expired(struct k_work *work)
783 {
784 	uint32_t current_time = k_uptime_get_32();
785 	struct net_if_router *router, *next;
786 	sys_snode_t *prev_node = NULL;
787 
788 	ARG_UNUSED(work);
789 
790 	k_mutex_lock(&lock, K_FOREVER);
791 
792 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
793 					  router, next, node) {
794 		int32_t ends = iface_router_ends(router, current_time);
795 
796 		if (ends > 0) {
			/* We have to loop over all active routers as their
			 * lifetimes differ from each other.
			 */
800 			prev_node = &router->node;
801 			continue;
802 		}
803 
804 		iface_router_notify_deletion(router, "has expired");
805 		sys_slist_remove(&active_router_timers,
806 				 prev_node, &router->node);
807 		router->is_used = false;
808 	}
809 
810 	iface_router_update_timer(current_time);
811 
812 	k_mutex_unlock(&lock);
813 }
814 
static struct net_if_router *iface_router_add(struct net_if *iface,
816 					      uint8_t family, void *addr,
817 					      bool is_default,
818 					      uint16_t lifetime)
819 {
820 	struct net_if_router *router = NULL;
821 	int i;
822 
823 	k_mutex_lock(&lock, K_FOREVER);
824 
825 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
826 		if (routers[i].is_used) {
827 			continue;
828 		}
829 
830 		routers[i].is_used = true;
831 		routers[i].iface = iface;
832 		routers[i].address.family = family;
833 
834 		if (lifetime) {
835 			routers[i].is_default = true;
836 			routers[i].is_infinite = false;
837 			routers[i].lifetime = lifetime;
838 			routers[i].life_start = k_uptime_get_32();
839 
840 			sys_slist_append(&active_router_timers,
841 					 &routers[i].node);
842 
843 			iface_router_update_timer(routers[i].life_start);
844 		} else {
845 			routers[i].is_default = false;
846 			routers[i].is_infinite = true;
847 			routers[i].lifetime = 0;
848 		}
849 
850 		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
851 			memcpy(net_if_router_ipv6(&routers[i]), addr,
852 			       sizeof(struct in6_addr));
853 			net_mgmt_event_notify_with_info(
854 					NET_EVENT_IPV6_ROUTER_ADD, iface,
855 					&routers[i].address.in6_addr,
856 					sizeof(struct in6_addr));
857 
858 			NET_DBG("interface %p router %s lifetime %u default %d "
859 				"added", iface,
860 				net_sprint_ipv6_addr((struct in6_addr *)addr),
861 				lifetime, routers[i].is_default);
862 		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
863 			memcpy(net_if_router_ipv4(&routers[i]), addr,
864 			       sizeof(struct in_addr));
865 			routers[i].is_default = is_default;
866 
867 			net_mgmt_event_notify_with_info(
868 					NET_EVENT_IPV4_ROUTER_ADD, iface,
869 					&routers[i].address.in_addr,
870 					sizeof(struct in_addr));
871 
872 			NET_DBG("interface %p router %s lifetime %u default %d "
873 				"added", iface,
874 				net_sprint_ipv4_addr((struct in_addr *)addr),
875 				lifetime, is_default);
876 		}
877 
878 		router = &routers[i];
879 		goto out;
880 	}
881 
882 out:
883 	k_mutex_unlock(&lock);
884 
885 	return router;
886 }
887 
static bool iface_router_rm(struct net_if_router *router)
889 {
890 	bool ret = false;
891 
892 	k_mutex_lock(&lock, K_FOREVER);
893 
894 	if (!router->is_used) {
895 		goto out;
896 	}
897 
898 	iface_router_notify_deletion(router, "has been removed");
899 
	/* We recompute the timer only if the router was time limited */
901 	if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
902 		iface_router_update_timer(k_uptime_get_32());
903 	}
904 
905 	router->is_used = false;
906 	ret = true;
907 
908 out:
909 	k_mutex_unlock(&lock);
910 
911 	return ret;
912 }
913 
void net_if_router_rm(struct net_if_router *router)
915 {
916 	k_mutex_lock(&lock, K_FOREVER);
917 
918 	router->is_used = false;
919 
920 	/* FIXME - remove timer */
921 
922 	k_mutex_unlock(&lock);
923 }
924 
static struct net_if_router *iface_router_find_default(struct net_if *iface,
926 						       uint8_t family, void *addr)
927 {
928 	struct net_if_router *router = NULL;
929 	int i;
930 
931 	/* Todo: addr will need to be handled */
932 	ARG_UNUSED(addr);
933 
934 	k_mutex_lock(&lock, K_FOREVER);
935 
936 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
937 		if (!routers[i].is_used ||
938 		    !routers[i].is_default ||
939 		    routers[i].address.family != family) {
940 			continue;
941 		}
942 
943 		if (iface && iface != routers[i].iface) {
944 			continue;
945 		}
946 
947 		router = &routers[i];
948 		goto out;
949 	}
950 
951 out:
952 	k_mutex_unlock(&lock);
953 
954 	return router;
955 }
956 
static void iface_router_init(void)
958 {
959 	k_work_init_delayable(&router_timer, iface_router_expired);
960 	sys_slist_init(&active_router_timers);
961 }
962 #else
963 #define iface_router_init(...)
964 #endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */
965 
966 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
968 			       struct net_if *iface,
969 			       net_if_mcast_callback_t cb)
970 {
971 	k_mutex_lock(&lock, K_FOREVER);
972 
973 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
974 	sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);
975 
976 	mon->iface = iface;
977 	mon->cb = cb;
978 
979 	k_mutex_unlock(&lock);
980 }
981 
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
983 {
984 	k_mutex_lock(&lock, K_FOREVER);
985 
986 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
987 
988 	k_mutex_unlock(&lock);
989 }
990 
void net_if_mcast_monitor(struct net_if *iface,
992 			  const struct net_addr *addr,
993 			  bool is_joined)
994 {
995 	struct net_if_mcast_monitor *mon, *tmp;
996 
997 	k_mutex_lock(&lock, K_FOREVER);
998 
999 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
1000 					  mon, tmp, node) {
1001 		if (iface == mon->iface || mon->iface == NULL) {
1002 			mon->cb(iface, addr, is_joined);
1003 		}
1004 	}
1005 
1006 	k_mutex_unlock(&lock);
1007 }
1008 #else
1009 #define net_if_mcast_mon_register(...)
1010 #define net_if_mcast_mon_unregister(...)
1011 #define net_if_mcast_monitor(...)
1012 #endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */
1013 
1014 #if defined(CONFIG_NET_NATIVE_IPV6)
int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
1016 {
1017 	int ret = 0;
1018 	int i;
1019 
1020 	net_if_lock(iface);
1021 
1022 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1023 		ret = -ENOTSUP;
1024 		goto out;
1025 	}
1026 
1027 	if (iface->config.ip.ipv6) {
1028 		if (ipv6) {
1029 			*ipv6 = iface->config.ip.ipv6;
1030 		}
1031 
1032 		goto out;
1033 	}
1034 
1035 	k_mutex_lock(&lock, K_FOREVER);
1036 
1037 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1038 		if (ipv6_addresses[i].iface) {
1039 			continue;
1040 		}
1041 
1042 		iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
1043 		ipv6_addresses[i].iface = iface;
1044 
1045 		if (ipv6) {
1046 			*ipv6 = &ipv6_addresses[i].ipv6;
1047 		}
1048 
1049 		k_mutex_unlock(&lock);
1050 		goto out;
1051 	}
1052 
1053 	k_mutex_unlock(&lock);
1054 
1055 	ret = -ESRCH;
1056 out:
1057 	net_if_unlock(iface);
1058 
1059 	return ret;
1060 }
1061 
int net_if_config_ipv6_put(struct net_if *iface)
1063 {
1064 	int ret = 0;
1065 	int i;
1066 
1067 	net_if_lock(iface);
1068 
1069 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1070 		ret = -ENOTSUP;
1071 		goto out;
1072 	}
1073 
1074 	if (!iface->config.ip.ipv6) {
1075 		ret = -EALREADY;
1076 		goto out;
1077 	}
1078 
1079 	k_mutex_lock(&lock, K_FOREVER);
1080 
1081 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1082 		if (ipv6_addresses[i].iface != iface) {
1083 			continue;
1084 		}
1085 
1086 		iface->config.ip.ipv6 = NULL;
1087 		ipv6_addresses[i].iface = NULL;
1088 
1089 		k_mutex_unlock(&lock);
1090 		goto out;
1091 	}
1092 
1093 	k_mutex_unlock(&lock);
1094 
1095 	ret = -ESRCH;
1096 out:
1097 	net_if_unlock(iface);
1098 
1099 	return ret;
1100 }
1101 
1102 #if defined(CONFIG_NET_IPV6_MLD)
static void join_mcast_allnodes(struct net_if *iface)
1104 {
1105 	struct in6_addr addr;
1106 	int ret;
1107 
1108 	if (iface->config.ip.ipv6 == NULL) {
1109 		return;
1110 	}
1111 
1112 	net_ipv6_addr_create_ll_allnodes_mcast(&addr);
1113 
1114 	ret = net_ipv6_mld_join(iface, &addr);
1115 	if (ret < 0 && ret != -EALREADY && ret != -ENETDOWN) {
1116 		NET_ERR("Cannot join all nodes address %s for %d (%d)",
1117 			net_sprint_ipv6_addr(&addr),
1118 			net_if_get_by_iface(iface), ret);
1119 	}
1120 }
1121 
static void join_mcast_solicit_node(struct net_if *iface,
1123 				    struct in6_addr *my_addr)
1124 {
1125 	struct in6_addr addr;
1126 	int ret;
1127 
1128 	if (iface->config.ip.ipv6 == NULL) {
1129 		return;
1130 	}
1131 
1132 	/* Join to needed multicast groups, RFC 4291 ch 2.8 */
1133 	net_ipv6_addr_create_solicited_node(my_addr, &addr);
1134 
1135 	ret = net_ipv6_mld_join(iface, &addr);
1136 	if (ret < 0) {
1137 		if (ret != -EALREADY && ret != -ENETDOWN) {
1138 			NET_ERR("Cannot join solicit node address %s for %d (%d)",
1139 				net_sprint_ipv6_addr(&addr),
1140 				net_if_get_by_iface(iface), ret);
1141 		}
1142 	} else {
1143 		NET_DBG("Join solicit node address %s (ifindex %d)",
1144 			net_sprint_ipv6_addr(&addr),
1145 			net_if_get_by_iface(iface));
1146 	}
1147 }
1148 
static void leave_mcast_all(struct net_if *iface)
1150 {
1151 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1152 	int i;
1153 
1154 	if (!ipv6) {
1155 		return;
1156 	}
1157 
1158 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1159 		if (!ipv6->mcast[i].is_used ||
1160 		    !ipv6->mcast[i].is_joined) {
1161 			continue;
1162 		}
1163 
1164 		net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
1165 	}
1166 }
1167 
static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
1169 {
1170 	enum net_l2_flags flags = 0;
1171 
1172 	if (iface->config.ip.ipv6 == NULL) {
1173 		return;
1174 	}
1175 
1176 	flags = l2_flags_get(iface);
1177 	if (flags & NET_L2_MULTICAST) {
1178 		join_mcast_allnodes(iface);
1179 
1180 		if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
1181 			join_mcast_solicit_node(iface, addr);
1182 		}
1183 	}
1184 }
1185 #else
1186 #define join_mcast_allnodes(...)
1187 #define join_mcast_solicit_node(...)
1188 #define leave_mcast_all(...)
1189 #define join_mcast_nodes(...)
1190 #endif /* CONFIG_NET_IPV6_MLD */
1191 
1192 #if defined(CONFIG_NET_IPV6_DAD)
1193 #define DAD_TIMEOUT 100U /* ms */
1194 
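/* Work handler for the DAD timer: the active_dad_timers list is ordered by
 * start time, so expired entries are collected from the front, the timer is
 * rescheduled for the first still-pending entry, and the expired addresses
 * are then marked preferred and a DAD_SUCCEED event is sent for each.
 */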
static void dad_timeout(struct k_work *work)
1196 {
1197 	uint32_t current_time = k_uptime_get_32();
1198 	struct net_if_addr *ifaddr, *next;
1199 	int32_t delay = -1;
1200 	sys_slist_t expired_list;
1201 
1202 	ARG_UNUSED(work);
1203 
1204 	sys_slist_init(&expired_list);
1205 
1206 	k_mutex_lock(&lock, K_FOREVER);
1207 
1208 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
1209 					  ifaddr, next, dad_node) {
1210 		/* DAD entries are ordered by construction.  Stop when
1211 		 * we find one that hasn't expired.
1212 		 */
1213 		delay = (int32_t)(ifaddr->dad_start +
1214 				  DAD_TIMEOUT - current_time);
1215 		if (delay > 0) {
1216 			break;
1217 		}
1218 
1219 		/* Removing the ifaddr from active_dad_timers list */
1220 		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);
1221 		sys_slist_append(&expired_list, &ifaddr->dad_node);
1222 
1223 		ifaddr = NULL;
1224 	}
1225 
1226 	if ((ifaddr != NULL) && (delay > 0)) {
1227 		k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
1228 	}
1229 
1230 	k_mutex_unlock(&lock);
1231 
1232 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ifaddr, dad_node) {
1233 		struct net_if *iface;
1234 
1235 		NET_DBG("DAD succeeded for %s at interface %d",
1236 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
1237 			ifaddr->ifindex);
1238 
1239 		ifaddr->addr_state = NET_ADDR_PREFERRED;
1240 		iface = net_if_get_by_index(ifaddr->ifindex);
1241 
1242 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_SUCCEED,
1243 						iface,
1244 						&ifaddr->address.in6_addr,
1245 						sizeof(struct in6_addr));
1246 
1247 		/* The address gets added to neighbor cache which is not
1248 		 * needed in this case as the address is our own one.
1249 		 */
1250 		net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
1251 	}
1252 }
1253 
void net_if_ipv6_start_dad(struct net_if *iface,
1255 			   struct net_if_addr *ifaddr)
1256 {
1257 	ifaddr->addr_state = NET_ADDR_TENTATIVE;
1258 
1259 	if (net_if_is_up(iface)) {
1260 		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
1261 			iface,
1262 			net_sprint_ll_addr(
1263 					   net_if_get_link_addr(iface)->addr,
1264 					   net_if_get_link_addr(iface)->len),
1265 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1266 
1267 		ifaddr->dad_count = 1U;
1268 
1269 		if (!net_ipv6_start_dad(iface, ifaddr)) {
1270 			ifaddr->dad_start = k_uptime_get_32();
1271 			ifaddr->ifindex = net_if_get_by_iface(iface);
1272 
1273 			k_mutex_lock(&lock, K_FOREVER);
1274 			sys_slist_find_and_remove(&active_dad_timers,
1275 						  &ifaddr->dad_node);
1276 			sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
1277 			k_mutex_unlock(&lock);
1278 
1279 			/* FUTURE: use schedule, not reschedule. */
1280 			if (!k_work_delayable_remaining_get(&dad_timer)) {
1281 				k_work_reschedule(&dad_timer,
1282 						  K_MSEC(DAD_TIMEOUT));
1283 			}
1284 		}
1285 	} else {
1286 		NET_DBG("Interface %p is down, starting DAD for %s later.",
1287 			iface,
1288 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1289 	}
1290 }
1291 
void net_if_start_dad(struct net_if *iface)
1293 {
1294 	struct net_if_addr *ifaddr;
1295 	struct net_if_ipv6 *ipv6;
1296 	struct in6_addr addr = { };
1297 	int ret;
1298 
1299 	net_if_lock(iface);
1300 
1301 	NET_DBG("Starting DAD for iface %p", iface);
1302 
1303 	ret = net_if_config_ipv6_get(iface, &ipv6);
1304 	if (ret < 0) {
1305 		if (ret != -ENOTSUP) {
			NET_WARN("Cannot do DAD, IPv6 config is not valid.");
1307 		}
1308 
1309 		goto out;
1310 	}
1311 
1312 	if (!ipv6) {
1313 		goto out;
1314 	}
1315 
1316 	net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));
1317 
1318 	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
1319 	if (!ifaddr) {
1320 		NET_ERR("Cannot add %s address to interface %p, DAD fails",
1321 			net_sprint_ipv6_addr(&addr), iface);
1322 	}
1323 
1324 	/* Start DAD for all the addresses that were added earlier when
1325 	 * the interface was down.
1326 	 */
1327 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1328 		if (!ipv6->unicast[i].is_used ||
1329 		    ipv6->unicast[i].address.family != AF_INET6 ||
1330 		    &ipv6->unicast[i] == ifaddr ||
1331 		    net_ipv6_is_addr_loopback(
1332 			    &ipv6->unicast[i].address.in6_addr)) {
1333 			continue;
1334 		}
1335 
1336 		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1337 	}
1338 
1339 out:
1340 	net_if_unlock(iface);
1341 }
1342 
void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
1344 {
1345 	struct net_if_addr *ifaddr;
1346 	uint32_t timeout, preferred_lifetime;
1347 
1348 	net_if_lock(iface);
1349 
1350 	ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
1351 	if (!ifaddr) {
1352 		NET_ERR("Cannot find %s address in interface %p",
1353 			net_sprint_ipv6_addr(addr), iface);
1354 		goto out;
1355 	}
1356 
1357 
1358 	if (IS_ENABLED(CONFIG_NET_IPV6_PE)) {
1359 		ifaddr->dad_count++;
1360 
1361 		timeout = COND_CODE_1(CONFIG_NET_IPV6_PE,
1362 				      (ifaddr->addr_timeout), (0));
1363 		preferred_lifetime = COND_CODE_1(CONFIG_NET_IPV6_PE,
1364 						 (ifaddr->addr_preferred_lifetime), (0U));
1365 
1366 		if (!net_ipv6_pe_check_dad(ifaddr->dad_count)) {
1367 			NET_ERR("Cannot generate PE address for interface %p",
1368 				iface);
1369 			iface->pe_enabled = false;
1370 			net_mgmt_event_notify(NET_EVENT_IPV6_PE_DISABLED, iface);
1371 		}
1372 	}
1373 
1374 	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
1375 					&ifaddr->address.in6_addr,
1376 					sizeof(struct in6_addr));
1377 
1378 	/* The old address needs to be removed from the interface before we can
1379 	 * start new DAD for the new PE address as the amount of address slots
1380 	 * is limited.
1381 	 */
1382 	net_if_ipv6_addr_rm(iface, addr);
1383 
1384 	if (IS_ENABLED(CONFIG_NET_IPV6_PE) && iface->pe_enabled) {
1385 		net_ipv6_pe_start(iface, addr, timeout, preferred_lifetime);
1386 	}
1387 
1388 out:
1389 	net_if_unlock(iface);
1390 }
1391 
static inline void iface_ipv6_dad_init(void)
1393 {
1394 	k_work_init_delayable(&dad_timer, dad_timeout);
1395 	sys_slist_init(&active_dad_timers);
1396 }
1397 
1398 #else
static inline void net_if_ipv6_start_dad(struct net_if *iface,
1400 					 struct net_if_addr *ifaddr)
1401 {
1402 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1403 }
1404 
1405 #define iface_ipv6_dad_init(...)
1406 #endif /* CONFIG_NET_IPV6_DAD */
1407 
1408 #if defined(CONFIG_NET_IPV6_ND)
1409 #define RS_TIMEOUT (1U * MSEC_PER_SEC)
1410 #define RS_COUNT 3
1411 
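/* Work handler for the router solicitation timer: entries that did not
 * receive a router advertisement in time are retried (up to RS_COUNT
 * attempts) and the timer is rescheduled for the next pending entry.
 */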
static void rs_timeout(struct k_work *work)
1413 {
1414 	uint32_t current_time = k_uptime_get_32();
1415 	struct net_if_ipv6 *ipv6, *next;
1416 	int32_t delay = -1;
1417 	sys_slist_t expired_list;
1418 
1419 	ARG_UNUSED(work);
1420 
1421 	sys_slist_init(&expired_list);
1422 
1423 	k_mutex_lock(&lock, K_FOREVER);
1424 
1425 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
1426 					  ipv6, next, rs_node) {
1427 		/* RS entries are ordered by construction.  Stop when
1428 		 * we find one that hasn't expired.
1429 		 */
1430 		delay = (int32_t)(ipv6->rs_start + RS_TIMEOUT - current_time);
1431 		if (delay > 0) {
1432 			break;
1433 		}
1434 
1435 		/* Removing the ipv6 from active_rs_timers list */
1436 		sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);
1437 		sys_slist_append(&expired_list, &ipv6->rs_node);
1438 
1439 		ipv6 = NULL;
1440 	}
1441 
1442 	if ((ipv6 != NULL) && (delay > 0)) {
1443 		k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
1444 						    RS_TIMEOUT - current_time));
1445 	}
1446 
1447 	k_mutex_unlock(&lock);
1448 
1449 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ipv6, rs_node) {
1450 		struct net_if *iface = NULL;
1451 
1452 		/* Did not receive RA yet. */
1453 		ipv6->rs_count++;
1454 
1455 		STRUCT_SECTION_FOREACH(net_if, tmp) {
1456 			if (tmp->config.ip.ipv6 == ipv6) {
1457 				iface = tmp;
1458 				break;
1459 			}
1460 		}
1461 
1462 		if (iface) {
			NET_DBG("RS no response on iface %p count %d",
1464 				iface, ipv6->rs_count);
1465 			if (ipv6->rs_count < RS_COUNT) {
1466 				net_if_start_rs(iface);
1467 			}
1468 		} else {
1469 			NET_DBG("Interface IPv6 config %p not found", ipv6);
1470 		}
1471 	}
1472 }
1473 
void net_if_start_rs(struct net_if *iface)
1475 {
1476 	struct net_if_ipv6 *ipv6;
1477 
1478 	net_if_lock(iface);
1479 
1480 	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1481 		goto out;
1482 	}
1483 
1484 	ipv6 = iface->config.ip.ipv6;
1485 	if (!ipv6) {
1486 		goto out;
1487 	}
1488 
1489 	NET_DBG("Starting ND/RS for iface %p", iface);
1490 
1491 	if (!net_ipv6_start_rs(iface)) {
1492 		ipv6->rs_start = k_uptime_get_32();
1493 
1494 		k_mutex_lock(&lock, K_FOREVER);
1495 		sys_slist_append(&active_rs_timers, &ipv6->rs_node);
1496 		k_mutex_unlock(&lock);
1497 
1498 		/* FUTURE: use schedule, not reschedule. */
1499 		if (!k_work_delayable_remaining_get(&rs_timer)) {
1500 			k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
1501 		}
1502 	}
1503 
1504 out:
1505 	net_if_unlock(iface);
1506 }
1507 
void net_if_stop_rs(struct net_if *iface)
1509 {
1510 	struct net_if_ipv6 *ipv6;
1511 
1512 	net_if_lock(iface);
1513 
1514 	ipv6 = iface->config.ip.ipv6;
1515 	if (!ipv6) {
1516 		goto out;
1517 	}
1518 
1519 	NET_DBG("Stopping ND/RS for iface %p", iface);
1520 
1521 	k_mutex_lock(&lock, K_FOREVER);
1522 	sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
1523 	k_mutex_unlock(&lock);
1524 
1525 out:
1526 	net_if_unlock(iface);
1527 }
1528 
static inline void iface_ipv6_nd_init(void)
1530 {
1531 	k_work_init_delayable(&rs_timer, rs_timeout);
1532 	sys_slist_init(&active_rs_timers);
1533 }
1534 
1535 #else
1536 #define net_if_start_rs(...)
1537 #define net_if_stop_rs(...)
1538 #define iface_ipv6_nd_init(...)
1539 #endif /* CONFIG_NET_IPV6_ND */
1540 
1541 #if defined(CONFIG_NET_IPV6_ND) && defined(CONFIG_NET_NATIVE_IPV6)
1542 
void net_if_nbr_reachability_hint(struct net_if *iface, const struct in6_addr *ipv6_addr)
1544 {
1545 	net_if_lock(iface);
1546 
1547 	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1548 		goto out;
1549 	}
1550 
1551 	if (!iface->config.ip.ipv6) {
1552 		goto out;
1553 	}
1554 
1555 	net_ipv6_nbr_reachability_hint(iface, ipv6_addr);
1556 
1557 out:
1558 	net_if_unlock(iface);
1559 }
1560 
1561 #endif
1562 
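/* Find an IPv6 unicast address on any interface. If found, the owning
 * interface is optionally returned through the ret parameter.
 */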
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
1564 					    struct net_if **ret)
1565 {
1566 	struct net_if_addr *ifaddr = NULL;
1567 
1568 	STRUCT_SECTION_FOREACH(net_if, iface) {
1569 		struct net_if_ipv6 *ipv6;
1570 
1571 		net_if_lock(iface);
1572 
1573 		ipv6 = iface->config.ip.ipv6;
1574 		if (!ipv6) {
1575 			net_if_unlock(iface);
1576 			continue;
1577 		}
1578 
1579 		ARRAY_FOR_EACH(ipv6->unicast, i) {
1580 			if (!ipv6->unicast[i].is_used ||
1581 			    ipv6->unicast[i].address.family != AF_INET6) {
1582 				continue;
1583 			}
1584 
1585 			if (net_ipv6_is_prefix(
1586 				    addr->s6_addr,
1587 				    ipv6->unicast[i].address.in6_addr.s6_addr,
1588 				    128)) {
1589 
1590 				if (ret) {
1591 					*ret = iface;
1592 				}
1593 
1594 				ifaddr = &ipv6->unicast[i];
1595 				net_if_unlock(iface);
1596 				goto out;
1597 			}
1598 		}
1599 
1600 		net_if_unlock(iface);
1601 	}
1602 
1603 out:
1604 	return ifaddr;
1605 }
1606 
struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
1608 						     struct in6_addr *addr)
1609 {
1610 	struct net_if_addr *ifaddr = NULL;
1611 	struct net_if_ipv6 *ipv6;
1612 
1613 	net_if_lock(iface);
1614 
1615 	ipv6 = iface->config.ip.ipv6;
1616 	if (!ipv6) {
1617 		goto out;
1618 	}
1619 
1620 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1621 		if (!ipv6->unicast[i].is_used ||
1622 		    ipv6->unicast[i].address.family != AF_INET6) {
1623 			continue;
1624 		}
1625 
1626 		if (net_ipv6_is_prefix(
1627 			    addr->s6_addr,
1628 			    ipv6->unicast[i].address.in6_addr.s6_addr,
1629 			    128)) {
1630 			ifaddr = &ipv6->unicast[i];
1631 			goto out;
1632 		}
1633 	}
1634 
1635 out:
1636 	net_if_unlock(iface);
1637 
1638 	return ifaddr;
1639 }
1640 
int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
1642 {
1643 	struct net_if *iface = NULL;
1644 	struct net_if_addr *if_addr;
1645 
1646 	if_addr = net_if_ipv6_addr_lookup(addr, &iface);
1647 	if (!if_addr) {
1648 		return 0;
1649 	}
1650 
1651 	return net_if_get_by_iface(iface);
1652 }
1653 
1654 #ifdef CONFIG_USERSPACE
static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
1656 					  const struct in6_addr *addr)
1657 {
1658 	struct in6_addr addr_v6;
1659 
1660 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1661 
1662 	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
1663 }
1664 #include <zephyr/syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
1665 #endif
1666 
1667 /* To be called when interface comes up so that all the non-joined multicast
1668  * groups are joined.
1669  */
static void rejoin_ipv6_mcast_groups(struct net_if *iface)
1671 {
1672 	struct net_if_ipv6 *ipv6;
1673 
1674 	net_if_lock(iface);
1675 
1676 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
1677 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1678 		goto out;
1679 	}
1680 
1681 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1682 		goto out;
1683 	}
1684 
1685 	/* Rejoin solicited node multicasts. */
1686 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1687 		if (!ipv6->unicast[i].is_used) {
1688 			continue;
1689 		}
1690 
1691 		join_mcast_nodes(iface, &ipv6->unicast[i].address.in6_addr);
1692 	}
1693 
1694 	/* Rejoin any mcast address present on the interface, but marked as not joined. */
1695 	ARRAY_FOR_EACH(ipv6->mcast, i) {
1696 		int ret;
1697 
1698 		if (!ipv6->mcast[i].is_used ||
		    net_if_ipv6_maddr_is_joined(&ipv6->mcast[i])) {
1700 			continue;
1701 		}
1702 
1703 		ret = net_ipv6_mld_join(iface, &ipv6->mcast[i].address.in6_addr);
1704 		if (ret < 0) {
1705 			NET_ERR("Cannot join mcast address %s for %d (%d)",
1706 				net_sprint_ipv6_addr(&ipv6->mcast[i].address.in6_addr),
1707 				net_if_get_by_iface(iface), ret);
1708 		}
1709 	}
1710 
1711 out:
1712 	net_if_unlock(iface);
1713 }
1714 
/* To be called when the interface goes operationally down so that multicast
 * groups can be rejoined when it comes back up.
 */
static void clear_joined_ipv6_mcast_groups(struct net_if *iface)
1719 {
1720 	struct net_if_ipv6 *ipv6;
1721 
1722 	net_if_lock(iface);
1723 
1724 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1725 		goto out;
1726 	}
1727 
1728 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1729 		goto out;
1730 	}
1731 
1732 	ARRAY_FOR_EACH(ipv6->mcast, i) {
1733 		if (!ipv6->mcast[i].is_used) {
1734 			continue;
1735 		}
1736 
1737 		net_if_ipv6_maddr_leave(iface, &ipv6->mcast[i]);
1738 	}
1739 
1740 out:
1741 	net_if_unlock(iface);
1742 }
1743 
static void address_expired(struct net_if_addr *ifaddr)
1745 {
1746 	NET_DBG("IPv6 address %s is expired",
1747 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1748 
1749 	sys_slist_find_and_remove(&active_address_lifetime_timers,
1750 				  &ifaddr->lifetime.node);
1751 
1752 	net_timeout_set(&ifaddr->lifetime, 0, 0);
1753 
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (iface->config.ip.ipv6 == NULL) {
			continue;
		}

		ARRAY_FOR_EACH(iface->config.ip.ipv6->unicast, i) {
1756 			if (&iface->config.ip.ipv6->unicast[i] == ifaddr) {
1757 				net_if_ipv6_addr_rm(iface,
1758 					&iface->config.ip.ipv6->unicast[i].address.in6_addr);
1759 				return;
1760 			}
1761 		}
1762 	}
1763 }
1764 
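/* Work handler for address lifetime management: expired addresses are
 * removed from their interface and the timer is rescheduled for the
 * closest remaining expiry.
 */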
static void address_lifetime_timeout(struct k_work *work)
1766 {
1767 	uint32_t next_update = UINT32_MAX;
1768 	uint32_t current_time = k_uptime_get_32();
1769 	struct net_if_addr *current, *next;
1770 
1771 	ARG_UNUSED(work);
1772 
1773 	k_mutex_lock(&lock, K_FOREVER);
1774 
1775 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
1776 					  current, next, lifetime.node) {
1777 		struct net_timeout *timeout = &current->lifetime;
1778 		uint32_t this_update = net_timeout_evaluate(timeout,
1779 							     current_time);
1780 
1781 		if (this_update == 0U) {
1782 			address_expired(current);
1783 			continue;
1784 		}
1785 
1786 		if (this_update < next_update) {
1787 			next_update = this_update;
1788 		}
1789 
1790 		if (current == next) {
1791 			break;
1792 		}
1793 	}
1794 
1795 	if (next_update != UINT32_MAX) {
1796 		NET_DBG("Waiting for %d ms", (int32_t)next_update);
1797 
1798 		k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
1799 	}
1800 
1801 	k_mutex_unlock(&lock);
1802 }
1803 
1804 #if defined(CONFIG_NET_TEST)
void net_address_lifetime_timeout(void)
1806 {
1807 	address_lifetime_timeout(NULL);
1808 }
1809 #endif
1810 
static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
1812 {
1813 	/* Make sure that we do not insert the address twice to
1814 	 * the lifetime timer list.
1815 	 */
1816 	sys_slist_find_and_remove(&active_address_lifetime_timers,
1817 				  &ifaddr->lifetime.node);
1818 
1819 	sys_slist_append(&active_address_lifetime_timers,
1820 			 &ifaddr->lifetime.node);
1821 
1822 	net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
1823 	k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
1824 }
1825 
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
1827 				      uint32_t vlifetime)
1828 {
1829 	k_mutex_lock(&lock, K_FOREVER);
1830 
1831 	NET_DBG("Updating expire time of %s by %u secs",
1832 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
1833 		vlifetime);
1834 
1835 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1836 
1837 	address_start_timer(ifaddr, vlifetime);
1838 
1839 	k_mutex_unlock(&lock);
1840 }
1841 
static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
1843 					  struct in6_addr *addr)
1844 {
1845 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1846 
1847 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1848 		if (!ipv6->unicast[i].is_used) {
1849 			continue;
1850 		}
1851 
1852 		if (net_ipv6_addr_cmp(
1853 			    addr, &ipv6->unicast[i].address.in6_addr)) {
1854 
1855 			return &ipv6->unicast[i];
1856 		}
1857 	}
1858 
1859 	return NULL;
1860 }
1861 
static inline void net_if_addr_init(struct net_if_addr *ifaddr,
1863 				    struct in6_addr *addr,
1864 				    enum net_addr_type addr_type,
1865 				    uint32_t vlifetime)
1866 {
1867 	ifaddr->is_used = true;
1868 	ifaddr->is_temporary = false;
1869 	ifaddr->address.family = AF_INET6;
1870 	ifaddr->addr_type = addr_type;
1871 	ifaddr->atomic_ref = ATOMIC_INIT(1);
1872 
1873 	net_ipaddr_copy(&ifaddr->address.in6_addr, addr);
1874 
1875 	/* FIXME - set the mcast addr for this node */
1876 
1877 	if (vlifetime) {
1878 		ifaddr->is_infinite = false;
1879 
1880 		NET_DBG("Expiring %s in %u secs",
1881 			net_sprint_ipv6_addr(addr),
1882 			vlifetime);
1883 
1884 		net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
1885 	} else {
1886 		ifaddr->is_infinite = true;
1887 	}
1888 }
1889 
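/* Typical usage (a sketch, not taken from this file): add a manually
 * configured address with an infinite lifetime, e.g.
 *
 *   struct in6_addr my_addr;
 *
 *   net_addr_pton(AF_INET6, "2001:db8::1", &my_addr);
 *   (void)net_if_ipv6_addr_add(net_if_get_default(), &my_addr,
 *                              NET_ADDR_MANUAL, 0);
 */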
struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
1891 					 struct in6_addr *addr,
1892 					 enum net_addr_type addr_type,
1893 					 uint32_t vlifetime)
1894 {
1895 	struct net_if_addr *ifaddr = NULL;
1896 	struct net_if_ipv6 *ipv6;
1897 
1898 	net_if_lock(iface);
1899 
1900 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1901 		goto out;
1902 	}
1903 
1904 	ifaddr = ipv6_addr_find(iface, addr);
1905 	if (ifaddr) {
1906 		goto out;
1907 	}
1908 
1909 	ARRAY_FOR_EACH(ipv6->unicast, i) {
1910 		if (ipv6->unicast[i].is_used) {
1911 			continue;
1912 		}
1913 
1914 		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
1915 				 vlifetime);
1916 
1917 		NET_DBG("[%zu] interface %d (%p) address %s type %s added", i,
1918 			net_if_get_by_iface(iface), iface,
1919 			net_sprint_ipv6_addr(addr),
1920 			net_addr_type2str(addr_type));
1921 
1922 		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
1923 		    !net_ipv6_is_addr_loopback(addr) &&
1924 		    !net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1925 			/* RFC 4862 5.4.2
1926 			 * Before sending a Neighbor Solicitation, an interface
1927 			 * MUST join the all-nodes multicast address and the
1928 			 * solicited-node multicast address of the tentative
1929 			 * address.
1930 			 */
1931 			/* The allnodes multicast group is only joined once as
1932 			 * net_ipv6_mld_join() checks if we have already
1933 			 * joined.
1934 			 */
1935 			join_mcast_nodes(iface,
1936 					 &ipv6->unicast[i].address.in6_addr);
1937 
1938 			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1939 		} else {
1940 			/* If DAD is not done for point-to-point links, then
1941 			 * the address is usable immediately.
1942 			 */
1943 			ipv6->unicast[i].addr_state = NET_ADDR_PREFERRED;
1944 		}
1945 
1946 		net_mgmt_event_notify_with_info(
1947 			NET_EVENT_IPV6_ADDR_ADD, iface,
1948 			&ipv6->unicast[i].address.in6_addr,
1949 			sizeof(struct in6_addr));
1950 
1951 		ifaddr = &ipv6->unicast[i];
1952 		goto out;
1953 	}
1954 
1955 out:
1956 	net_if_unlock(iface);
1957 
1958 	return ifaddr;
1959 }
1960 
1961 bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
1962 {
1963 	struct net_if_ipv6 *ipv6;
1964 	int ret;
1965 
1966 	NET_ASSERT(addr);
1967 
1968 	ipv6 = iface->config.ip.ipv6;
1969 	if (!ipv6) {
1970 		return false;
1971 	}
1972 
1973 	ret = net_if_addr_unref(iface, AF_INET6, addr);
1974 	if (ret > 0) {
1975 		NET_DBG("Address %s still in use (ref %d)",
1976 			net_sprint_ipv6_addr(addr), ret);
1977 		return false;
1978 
1979 	} else if (ret < 0) {
1980 		NET_DBG("Address %s not found (%d)",
1981 			net_sprint_ipv6_addr(addr), ret);
1982 	}
1983 
1984 	return true;
1985 }
1986 
1987 bool z_impl_net_if_ipv6_addr_add_by_index(int index,
1988 					  struct in6_addr *addr,
1989 					  enum net_addr_type addr_type,
1990 					  uint32_t vlifetime)
1991 {
1992 	struct net_if *iface;
1993 
1994 	iface = net_if_get_by_index(index);
1995 	if (!iface) {
1996 		return false;
1997 	}
1998 
1999 	return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
2000 		true : false;
2001 }
2002 
2003 #ifdef CONFIG_USERSPACE
2004 bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
2005 					  struct in6_addr *addr,
2006 					  enum net_addr_type addr_type,
2007 					  uint32_t vlifetime)
2008 {
2009 	struct in6_addr addr_v6;
2010 	struct net_if *iface;
2011 
2012 	iface = z_vrfy_net_if_get_by_index(index);
2013 	if (!iface) {
2014 		return false;
2015 	}
2016 
2017 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
2018 
2019 	return z_impl_net_if_ipv6_addr_add_by_index(index,
2020 						    &addr_v6,
2021 						    addr_type,
2022 						    vlifetime);
2023 }
2024 
2025 #include <zephyr/syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
2026 #endif /* CONFIG_USERSPACE */
2027 
2028 bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
2029 					 const struct in6_addr *addr)
2030 {
2031 	struct net_if *iface;
2032 
2033 	iface = net_if_get_by_index(index);
2034 	if (!iface) {
2035 		return false;
2036 	}
2037 
2038 	return net_if_ipv6_addr_rm(iface, addr);
2039 }
2040 
2041 #ifdef CONFIG_USERSPACE
2042 bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
2043 					 const struct in6_addr *addr)
2044 {
2045 	struct in6_addr addr_v6;
2046 	struct net_if *iface;
2047 
2048 	iface = z_vrfy_net_if_get_by_index(index);
2049 	if (!iface) {
2050 		return false;
2051 	}
2052 
2053 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
2054 
2055 	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
2056 }
2057 
2058 #include <zephyr/syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
2059 #endif /* CONFIG_USERSPACE */
2060 
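/* Usage sketch (illustrative only): iterating over the unicast addresses of
 * an interface with a net_if_ip_addr_cb_t callback; print_addr below is a
 * hypothetical helper.
 *
 *	static void print_addr(struct net_if *iface, struct net_if_addr *addr,
 *			       void *user_data)
 *	{
 *		ARG_UNUSED(iface);
 *		ARG_UNUSED(user_data);
 *
 *		NET_DBG("addr %s",
 *			net_sprint_ipv6_addr(&addr->address.in6_addr));
 *	}
 *
 *	net_if_ipv6_addr_foreach(net_if_get_default(), print_addr, NULL);
 */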
2061 void net_if_ipv6_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
2062 			      void *user_data)
2063 {
2064 	struct net_if_ipv6 *ipv6;
2065 
2066 	if (iface == NULL) {
2067 		return;
2068 	}
2069 
2070 	net_if_lock(iface);
2071 
2072 	ipv6 = iface->config.ip.ipv6;
2073 	if (ipv6 == NULL) {
2074 		goto out;
2075 	}
2076 
2077 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2078 		struct net_if_addr *if_addr = &ipv6->unicast[i];
2079 
2080 		if (!if_addr->is_used) {
2081 			continue;
2082 		}
2083 
2084 		cb(iface, if_addr, user_data);
2085 	}
2086 
2087 out:
2088 	net_if_unlock(iface);
2089 }
2090 
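/* Usage sketch (illustrative only): registering the solicited-node multicast
 * address that corresponds to a unicast address, assuming the
 * net_ipv6_addr_create_solicited_node() helper from net_ip.h is available.
 *
 *	struct in6_addr solicited;
 *
 *	net_ipv6_addr_create_solicited_node(&unicast_addr, &solicited);
 *	(void)net_if_ipv6_maddr_add(iface, &solicited);
 */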
2091 struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
2092 						const struct in6_addr *addr)
2093 {
2094 	struct net_if_mcast_addr *ifmaddr = NULL;
2095 	struct net_if_ipv6 *ipv6;
2096 
2097 	net_if_lock(iface);
2098 
2099 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2100 		goto out;
2101 	}
2102 
2103 	if (!net_ipv6_is_addr_mcast(addr)) {
2104 		NET_DBG("Address %s is not a multicast address.",
2105 			net_sprint_ipv6_addr(addr));
2106 		goto out;
2107 	}
2108 
2109 	if (net_if_ipv6_maddr_lookup(addr, &iface)) {
2110 		NET_WARN("Multicast address %s is already registered.",
2111 			net_sprint_ipv6_addr(addr));
2112 		goto out;
2113 	}
2114 
2115 	ARRAY_FOR_EACH(ipv6->mcast, i) {
2116 		if (ipv6->mcast[i].is_used) {
2117 			continue;
2118 		}
2119 
2120 		ipv6->mcast[i].is_used = true;
2121 		ipv6->mcast[i].address.family = AF_INET6;
2122 		memcpy(&ipv6->mcast[i].address.in6_addr, addr, sizeof(struct in6_addr));
2123 
2124 		NET_DBG("[%zu] interface %d (%p) address %s added", i,
2125 			net_if_get_by_iface(iface), iface,
2126 			net_sprint_ipv6_addr(addr));
2127 
2128 		net_mgmt_event_notify_with_info(
2129 			NET_EVENT_IPV6_MADDR_ADD, iface,
2130 			&ipv6->mcast[i].address.in6_addr,
2131 			sizeof(struct in6_addr));
2132 
2133 		ifmaddr = &ipv6->mcast[i];
2134 		goto out;
2135 	}
2136 
2137 out:
2138 	net_if_unlock(iface);
2139 
2140 	return ifmaddr;
2141 }
2142 
2143 bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
2144 {
2145 	bool ret = false;
2146 	struct net_if_ipv6 *ipv6;
2147 
2148 	net_if_lock(iface);
2149 
2150 	ipv6 = iface->config.ip.ipv6;
2151 	if (!ipv6) {
2152 		goto out;
2153 	}
2154 
2155 	ARRAY_FOR_EACH(ipv6->mcast, i) {
2156 		if (!ipv6->mcast[i].is_used) {
2157 			continue;
2158 		}
2159 
2160 		if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
2161 				       addr)) {
2162 			continue;
2163 		}
2164 
2165 		ipv6->mcast[i].is_used = false;
2166 
2167 		NET_DBG("[%zu] interface %d (%p) address %s removed",
2168 			i, net_if_get_by_iface(iface), iface,
2169 			net_sprint_ipv6_addr(addr));
2170 
2171 		net_mgmt_event_notify_with_info(
2172 			NET_EVENT_IPV6_MADDR_DEL, iface,
2173 			&ipv6->mcast[i].address.in6_addr,
2174 			sizeof(struct in6_addr));
2175 
2176 		ret = true;
2177 		goto out;
2178 	}
2179 
2180 out:
2181 	net_if_unlock(iface);
2182 
2183 	return ret;
2184 }
2185 
2186 void net_if_ipv6_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
2187 			       void *user_data)
2188 {
2189 	struct net_if_ipv6 *ipv6;
2190 
2191 	NET_ASSERT(iface);
2192 	NET_ASSERT(cb);
2193 
2194 	net_if_lock(iface);
2195 
2196 	ipv6 = iface->config.ip.ipv6;
2197 	if (!ipv6) {
2198 		goto out;
2199 	}
2200 
2201 	for (int i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2202 		if (!ipv6->mcast[i].is_used) {
2203 			continue;
2204 		}
2205 
2206 		cb(iface, &ipv6->mcast[i], user_data);
2207 	}
2208 
2209 out:
2210 	net_if_unlock(iface);
2211 }
2212 
2213 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
2214 						   struct net_if **ret)
2215 {
2216 	struct net_if_mcast_addr *ifmaddr = NULL;
2217 
2218 	STRUCT_SECTION_FOREACH(net_if, iface) {
2219 		struct net_if_ipv6 *ipv6;
2220 
2221 		if (ret && *ret && iface != *ret) {
2222 			continue;
2223 		}
2224 
2225 		net_if_lock(iface);
2226 
2227 		ipv6 = iface->config.ip.ipv6;
2228 		if (!ipv6) {
2229 			net_if_unlock(iface);
2230 			continue;
2231 		}
2232 
2233 		ARRAY_FOR_EACH(ipv6->mcast, i) {
2234 			if (!ipv6->mcast[i].is_used ||
2235 			    ipv6->mcast[i].address.family != AF_INET6) {
2236 				continue;
2237 			}
2238 
2239 			if (net_ipv6_is_prefix(
2240 				    maddr->s6_addr,
2241 				    ipv6->mcast[i].address.in6_addr.s6_addr,
2242 				    128)) {
2243 				if (ret) {
2244 					*ret = iface;
2245 				}
2246 
2247 				ifmaddr = &ipv6->mcast[i];
2248 				net_if_unlock(iface);
2249 				goto out;
2250 			}
2251 		}
2252 
2253 		net_if_unlock(iface);
2254 	}
2255 
2256 out:
2257 	return ifmaddr;
2258 }
2259 
2260 void net_if_ipv6_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
2261 {
2262 	NET_ASSERT(iface);
2263 	NET_ASSERT(addr);
2264 
2265 	net_if_lock(iface);
2266 	addr->is_joined = false;
2267 	net_if_unlock(iface);
2268 }
2269 
2270 void net_if_ipv6_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
2271 {
2272 	NET_ASSERT(iface);
2273 	NET_ASSERT(addr);
2274 
2275 	net_if_lock(iface);
2276 	addr->is_joined = true;
2277 	net_if_unlock(iface);
2278 }
2279 
2280 static void remove_prefix_addresses(struct net_if *iface,
2281 				    struct net_if_ipv6 *ipv6,
2282 				    struct in6_addr *addr,
2283 				    uint8_t len)
2284 {
2285 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2286 		if (!ipv6->unicast[i].is_used ||
2287 		    ipv6->unicast[i].address.family != AF_INET6 ||
2288 		    ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) {
2289 			continue;
2290 		}
2291 
2292 		if (net_ipv6_is_prefix(
2293 				addr->s6_addr,
2294 				ipv6->unicast[i].address.in6_addr.s6_addr,
2295 				len)) {
2296 			net_if_ipv6_addr_rm(iface,
2297 					    &ipv6->unicast[i].address.in6_addr);
2298 		}
2299 	}
2300 }
2301 
2302 static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
2303 {
2304 	struct net_if_ipv6 *ipv6;
2305 
2306 	net_if_lock(ifprefix->iface);
2307 
2308 	NET_DBG("Prefix %s/%d expired",
2309 		net_sprint_ipv6_addr(&ifprefix->prefix),
2310 		ifprefix->len);
2311 
2312 	ifprefix->is_used = false;
2313 
2314 	if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
2315 		net_if_unlock(ifprefix->iface);
2316 		return;
2317 	}
2318 
2319 	/* Also remove all autoconfigured addresses that share this prefix. */
2320 	remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
2321 				ifprefix->len);
2322 
2323 	if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2324 		struct net_event_ipv6_prefix info;
2325 
2326 		net_ipaddr_copy(&info.addr, &ifprefix->prefix);
2327 		info.len = ifprefix->len;
2328 		info.lifetime = 0;
2329 
2330 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2331 						ifprefix->iface,
2332 						(const void *) &info,
2333 						sizeof(struct net_event_ipv6_prefix));
2334 	} else {
2335 		net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface);
2336 	}
2337 
2338 	net_if_unlock(ifprefix->iface);
2339 }
2340 
2341 static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
2342 {
2343 	k_mutex_lock(&lock, K_FOREVER);
2344 
2345 	NET_DBG("IPv6 prefix %s/%d removed",
2346 		net_sprint_ipv6_addr(&ifprefix->prefix),
2347 		ifprefix->len);
2348 
2349 	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2350 				  &ifprefix->lifetime.node);
2351 
2352 	net_timeout_set(&ifprefix->lifetime, 0, 0);
2353 
2354 	k_mutex_unlock(&lock);
2355 }
2356 
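/* Expired prefixes are first collected into a local list while the global
 * lock is held and are only processed after the lock has been released, so
 * that prefix_lifetime_expired() can take the per-interface lock without
 * holding the global mutex at the same time.
 */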
2357 static void prefix_lifetime_timeout(struct k_work *work)
2358 {
2359 	uint32_t next_update = UINT32_MAX;
2360 	uint32_t current_time = k_uptime_get_32();
2361 	struct net_if_ipv6_prefix *current, *next;
2362 	sys_slist_t expired_list;
2363 
2364 	ARG_UNUSED(work);
2365 
2366 	sys_slist_init(&expired_list);
2367 
2368 	k_mutex_lock(&lock, K_FOREVER);
2369 
2370 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
2371 					  current, next, lifetime.node) {
2372 		struct net_timeout *timeout = &current->lifetime;
2373 		uint32_t this_update = net_timeout_evaluate(timeout,
2374 							    current_time);
2375 
2376 		if (this_update == 0U) {
2377 			sys_slist_find_and_remove(
2378 				&active_prefix_lifetime_timers,
2379 				&current->lifetime.node);
2380 			sys_slist_append(&expired_list,
2381 					 &current->lifetime.node);
2382 			continue;
2383 		}
2384 
2385 		if (this_update < next_update) {
2386 			next_update = this_update;
2387 		}
2388 
2389 		if (current == next) {
2390 			break;
2391 		}
2392 	}
2393 
2394 	if (next_update != UINT32_MAX) {
2395 		k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
2396 	}
2397 
2398 	k_mutex_unlock(&lock);
2399 
2400 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, current, lifetime.node) {
2401 		prefix_lifetime_expired(current);
2402 	}
2403 }
2404 
2405 static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
2406 			       uint32_t lifetime)
2407 {
2408 	k_mutex_lock(&lock, K_FOREVER);
2409 
2410 	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2411 					&ifprefix->lifetime.node);
2412 	sys_slist_append(&active_prefix_lifetime_timers,
2413 			 &ifprefix->lifetime.node);
2414 
2415 	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
2416 	k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);
2417 
2418 	k_mutex_unlock(&lock);
2419 }
2420 
2421 static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
2422 						   struct in6_addr *prefix,
2423 						   uint8_t prefix_len)
2424 {
2425 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2426 
2427 	if (!ipv6) {
2428 		return NULL;
2429 	}
2430 
2431 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2432 		if (!ipv6->prefix[i].is_used) {
2433 			continue;
2434 		}
2435 
2436 		if (net_ipv6_addr_cmp(prefix, &ipv6->prefix[i].prefix) &&
2437 		    prefix_len == ipv6->prefix[i].len) {
2438 			return &ipv6->prefix[i];
2439 		}
2440 	}
2441 
2442 	return NULL;
2443 }
2444 
2445 static void net_if_ipv6_prefix_init(struct net_if *iface,
2446 				    struct net_if_ipv6_prefix *ifprefix,
2447 				    struct in6_addr *addr, uint8_t len,
2448 				    uint32_t lifetime)
2449 {
2450 	ifprefix->is_used = true;
2451 	ifprefix->len = len;
2452 	ifprefix->iface = iface;
2453 	net_ipaddr_copy(&ifprefix->prefix, addr);
2454 
2455 	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
2456 		ifprefix->is_infinite = true;
2457 	} else {
2458 		ifprefix->is_infinite = false;
2459 	}
2460 }
2461 
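/* Usage sketch (illustrative only): adding an on-link /64 prefix and arming
 * its lifetime timer for one hour; the prefix value is just an example.
 *
 *	struct in6_addr prefix;
 *	struct net_if_ipv6_prefix *ifprefix;
 *
 *	if (net_addr_pton(AF_INET6, "2001:db8::", &prefix) == 0) {
 *		ifprefix = net_if_ipv6_prefix_add(iface, &prefix, 64, 3600);
 *		if (ifprefix != NULL) {
 *			net_if_ipv6_prefix_set_timer(ifprefix, 3600);
 *		}
 *	}
 */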
2462 struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
2463 						  struct in6_addr *prefix,
2464 						  uint8_t len,
2465 						  uint32_t lifetime)
2466 {
2467 	struct net_if_ipv6_prefix *ifprefix = NULL;
2468 	struct net_if_ipv6 *ipv6;
2469 
2470 	net_if_lock(iface);
2471 
2472 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2473 		goto out;
2474 	}
2475 
2476 	ifprefix = ipv6_prefix_find(iface, prefix, len);
2477 	if (ifprefix) {
2478 		goto out;
2479 	}
2480 
2481 	if (!ipv6) {
2482 		goto out;
2483 	}
2484 
2485 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2486 		if (ipv6->prefix[i].is_used) {
2487 			continue;
2488 		}
2489 
2490 		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
2491 					len, lifetime);
2492 
2493 		NET_DBG("[%zu] interface %p prefix %s/%d added", i, iface,
2494 			net_sprint_ipv6_addr(prefix), len);
2495 
2496 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2497 			struct net_event_ipv6_prefix info;
2498 
2499 			net_ipaddr_copy(&info.addr, prefix);
2500 			info.len = len;
2501 			info.lifetime = lifetime;
2502 
2503 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_ADD,
2504 							iface, (const void *) &info,
2505 							sizeof(struct net_event_ipv6_prefix));
2506 		} else {
2507 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_ADD, iface);
2508 		}
2509 
2510 		ifprefix = &ipv6->prefix[i];
2511 		goto out;
2512 	}
2513 
2514 out:
2515 	net_if_unlock(iface);
2516 
2517 	return ifprefix;
2518 }
2519 
2520 bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
2521 			   uint8_t len)
2522 {
2523 	bool ret = false;
2524 	struct net_if_ipv6 *ipv6;
2525 
2526 	net_if_lock(iface);
2527 
2528 	ipv6 = iface->config.ip.ipv6;
2529 	if (!ipv6) {
2530 		goto out;
2531 	}
2532 
2533 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2534 		if (!ipv6->prefix[i].is_used) {
2535 			continue;
2536 		}
2537 
2538 		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
2539 		    ipv6->prefix[i].len != len) {
2540 			continue;
2541 		}
2542 
2543 		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);
2544 
2545 		ipv6->prefix[i].is_used = false;
2546 
2547 		/* Also remove all autoconfigured addresses that share this
2548 		 * prefix.
2549 		 */
2550 		remove_prefix_addresses(iface, ipv6, addr, len);
2551 
2552 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2553 			struct net_event_ipv6_prefix info;
2554 
2555 			net_ipaddr_copy(&info.addr, addr);
2556 			info.len = len;
2557 			info.lifetime = 0;
2558 
2559 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2560 							iface, (const void *) &info,
2561 							sizeof(struct net_event_ipv6_prefix));
2562 		} else {
2563 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, iface);
2564 		}
2565 
2566 		ret = true;
2567 		goto out;
2568 	}
2569 
2570 out:
2571 	net_if_unlock(iface);
2572 
2573 	return ret;
2574 }
2575 
2576 struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
2577 						  const struct in6_addr *addr)
2578 {
2579 	struct net_if_ipv6_prefix *prefix = NULL;
2580 	struct net_if_ipv6 *ipv6;
2581 
2582 	if (!iface) {
2583 		iface = net_if_get_default();
2584 	}
2585 
2586 	if (!iface) {
2587 		return NULL;
2588 	}
2589 
2590 	net_if_lock(iface);
2591 
2592 	ipv6 = iface->config.ip.ipv6;
2593 	if (!ipv6) {
2594 		goto out;
2595 	}
2596 
2597 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2598 		if (!ipv6->prefix[i].is_used) {
2599 			continue;
2600 		}
2601 
2602 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2603 				       addr->s6_addr,
2604 				       ipv6->prefix[i].len)) {
2605 			if (!prefix || prefix->len > ipv6->prefix[i].len) {
2606 				prefix = &ipv6->prefix[i];
2607 			}
2608 		}
2609 	}
2610 
2611 out:
2612 	net_if_unlock(iface);
2613 
2614 	return prefix;
2615 }
2616 
2617 struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
2618 						     struct in6_addr *addr,
2619 						     uint8_t len)
2620 {
2621 	struct net_if_ipv6_prefix *prefix = NULL;
2622 	struct net_if_ipv6 *ipv6;
2623 
2624 	net_if_lock(iface);
2625 
2626 	ipv6 = iface->config.ip.ipv6;
2627 	if (!ipv6) {
2628 		goto out;
2629 	}
2630 
2631 	ARRAY_FOR_EACH(ipv6->prefix, i) {
2632 		if (!ipv6->prefix[i].is_used) {
2633 			continue;
2634 		}
2635 
2636 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2637 				       addr->s6_addr, len)) {
2638 			prefix = &ipv6->prefix[i];
2639 			goto out;
2640 		}
2641 	}
2642 
2643 out:
2644 	net_if_unlock(iface);
2645 
2646 	return prefix;
2647 }
2648 
2649 bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
2650 {
2651 	bool ret = false;
2652 
2653 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2654 		struct net_if_ipv6 *ipv6;
2655 
2656 		if (iface && *iface && *iface != tmp) {
2657 			continue;
2658 		}
2659 
2660 		net_if_lock(tmp);
2661 
2662 		ipv6 = tmp->config.ip.ipv6;
2663 		if (!ipv6) {
2664 			net_if_unlock(tmp);
2665 			continue;
2666 		}
2667 
2668 		ARRAY_FOR_EACH(ipv6->prefix, i) {
2669 			if (ipv6->prefix[i].is_used &&
2670 			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2671 					       addr->s6_addr,
2672 					       ipv6->prefix[i].len)) {
2673 				if (iface) {
2674 					*iface = tmp;
2675 				}
2676 
2677 				ret = true;
2678 				net_if_unlock(tmp);
2679 				goto out;
2680 			}
2681 		}
2682 
2683 		net_if_unlock(tmp);
2684 	}
2685 
2686 out:
2687 	return ret;
2688 }
2689 
2690 void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
2691 				  uint32_t lifetime)
2692 {
2693 	/* No need to set a timer for an infinite lifetime */
2694 	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
2695 		return;
2696 	}
2697 
2698 	NET_DBG("Prefix lifetime %u sec", lifetime);
2699 
2700 	prefix_start_timer(prefix, lifetime);
2701 }
2702 
2703 void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
2704 {
2705 	if (!prefix->is_used) {
2706 		return;
2707 	}
2708 
2709 	prefix_timer_remove(prefix);
2710 }
2711 
2712 struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
2713 						struct in6_addr *addr)
2714 {
2715 	return iface_router_lookup(iface, AF_INET6, addr);
2716 }
2717 
2718 struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
2719 						      struct in6_addr *addr)
2720 {
2721 	return iface_router_find_default(iface, AF_INET6, addr);
2722 }
2723 
2724 void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
2725 					uint16_t lifetime)
2726 {
2727 	NET_DBG("Updating expire time of %s by %u secs",
2728 		net_sprint_ipv6_addr(&router->address.in6_addr),
2729 		lifetime);
2730 
2731 	router->life_start = k_uptime_get_32();
2732 	router->lifetime = lifetime;
2733 
2734 	iface_router_update_timer(router->life_start);
2735 }
2736 
2737 struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
2738 					     struct in6_addr *addr,
2739 					     uint16_t lifetime)
2740 {
2741 	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
2742 }
2743 
2744 bool net_if_ipv6_router_rm(struct net_if_router *router)
2745 {
2746 	return iface_router_rm(router);
2747 }
2748 
2749 uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface)
2750 {
2751 	int ret = 0;
2752 
2753 	net_if_lock(iface);
2754 
2755 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2756 		goto out;
2757 	}
2758 
2759 	if (!iface->config.ip.ipv6) {
2760 		goto out;
2761 	}
2762 
2763 	ret = iface->config.ip.ipv6->mcast_hop_limit;
2764 out:
2765 	net_if_unlock(iface);
2766 
2767 	return ret;
2768 }
2769 
2770 void net_if_ipv6_set_mcast_hop_limit(struct net_if *iface, uint8_t hop_limit)
2771 {
2772 	net_if_lock(iface);
2773 
2774 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2775 		goto out;
2776 	}
2777 
2778 	if (!iface->config.ip.ipv6) {
2779 		goto out;
2780 	}
2781 
2782 	iface->config.ip.ipv6->mcast_hop_limit = hop_limit;
2783 out:
2784 	net_if_unlock(iface);
2785 }
2786 
2787 uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface)
2788 {
2789 	int ret = 0;
2790 
2791 	net_if_lock(iface);
2792 
2793 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2794 		goto out;
2795 	}
2796 
2797 	if (!iface->config.ip.ipv6) {
2798 		goto out;
2799 	}
2800 
2801 	ret = iface->config.ip.ipv6->hop_limit;
2802 out:
2803 	net_if_unlock(iface);
2804 
2805 	return ret;
2806 }
2807 
2808 void net_if_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit)
2809 {
2810 	net_if_lock(iface);
2811 
2812 	if (net_if_config_ipv6_get(iface, NULL) < 0) {
2813 		goto out;
2814 	}
2815 
2816 	if (!iface->config.ip.ipv6) {
2817 		goto out;
2818 	}
2819 
2820 	iface->config.ip.ipv6->hop_limit = hop_limit;
2821 out:
2822 	net_if_unlock(iface);
2823 }
2824 
2825 struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
2826 				    enum net_addr_state addr_state)
2827 {
2828 	struct in6_addr *addr = NULL;
2829 	struct net_if_ipv6 *ipv6;
2830 
2831 	net_if_lock(iface);
2832 
2833 	ipv6 = iface->config.ip.ipv6;
2834 	if (!ipv6) {
2835 		goto out;
2836 	}
2837 
2838 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2839 		if (!ipv6->unicast[i].is_used ||
2840 		    (addr_state != NET_ADDR_ANY_STATE &&
2841 		     ipv6->unicast[i].addr_state != addr_state) ||
2842 		    ipv6->unicast[i].address.family != AF_INET6) {
2843 			continue;
2844 		}
2845 
2846 		if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2847 			addr = &ipv6->unicast[i].address.in6_addr;
2848 			goto out;
2849 		}
2850 	}
2851 
2852 out:
2853 	net_if_unlock(iface);
2854 
2855 	return addr;
2856 }
2857 
2858 struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
2859 					 struct net_if **iface)
2860 {
2861 	struct in6_addr *addr = NULL;
2862 
2863 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2864 		net_if_lock(tmp);
2865 
2866 		addr = net_if_ipv6_get_ll(tmp, state);
2867 		if (addr) {
2868 			if (iface) {
2869 				*iface = tmp;
2870 			}
2871 
2872 			net_if_unlock(tmp);
2873 			goto out;
2874 		}
2875 
2876 		net_if_unlock(tmp);
2877 	}
2878 
2879 out:
2880 	return addr;
2881 }
2882 
2883 static inline struct in6_addr *check_global_addr(struct net_if *iface,
2884 						 enum net_addr_state state)
2885 {
2886 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2887 
2888 	if (!ipv6) {
2889 		return NULL;
2890 	}
2891 
2892 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2893 		if (!ipv6->unicast[i].is_used ||
2894 		    (ipv6->unicast[i].addr_state != state) ||
2895 		    ipv6->unicast[i].address.family != AF_INET6) {
2896 			continue;
2897 		}
2898 
2899 		if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2900 			return &ipv6->unicast[i].address.in6_addr;
2901 		}
2902 	}
2903 
2904 	return NULL;
2905 }
2906 
2907 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
2908 					     struct net_if **iface)
2909 {
2910 	struct in6_addr *addr = NULL;
2911 
2912 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2913 		if (iface && *iface && tmp != *iface) {
2914 			continue;
2915 		}
2916 
2917 		net_if_lock(tmp);
2918 		addr = check_global_addr(tmp, state);
2919 		if (addr) {
2920 			if (iface) {
2921 				*iface = tmp;
2922 			}
2923 
2924 			net_if_unlock(tmp);
2925 			goto out;
2926 		}
2927 
2928 		net_if_unlock(tmp);
2929 	}
2930 
2931 out:
2932 
2933 	return addr;
2934 }
2935 
2936 static uint8_t get_diff_ipv6(const struct in6_addr *src,
2937 			  const struct in6_addr *dst)
2938 {
2939 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 16);
2940 }
2941 
2942 static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
2943 {
2944 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
2945 	    addr->address.family == AF_INET6 &&
2946 	    !net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
2947 		return true;
2948 	}
2949 
2950 	return false;
2951 }
2952 
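/* Helper that decides whether the candidate address should be used as a
 * public source address: with IPv6 privacy extensions enabled and the
 * interface preferring temporary addresses, a temporary candidate is kept
 * unless the socket passed IPV6_PREFER_SRC_PUBLIC, while IPV6_PREFER_SRC_TMP
 * rejects the public choice.
 */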
2953 static bool use_public_address(bool prefer_public, bool is_temporary,
2954 			       int flags)
2955 {
2956 	if (IS_ENABLED(CONFIG_NET_IPV6_PE)) {
2957 		if (!prefer_public && is_temporary) {
2958 
2959 			/* Allow socket to override the kconfig option */
2960 			if (flags & IPV6_PREFER_SRC_PUBLIC) {
2961 				return true;
2962 			}
2963 
2964 			return false;
2965 		}
2966 	}
2967 
2968 	if (flags & IPV6_PREFER_SRC_TMP) {
2969 		return false;
2970 	}
2971 
2972 	return true;
2973 }
2974 
2975 static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
2976 						   const struct in6_addr *dst,
2977 						   uint8_t prefix_len,
2978 						   uint8_t *best_so_far,
2979 						   int flags)
2980 {
2981 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2982 	struct net_if_addr *public_addr = NULL;
2983 	struct in6_addr *src = NULL;
2984 	uint8_t public_addr_len = 0;
2985 	struct in6_addr *temp_addr = NULL;
2986 	uint8_t len, temp_addr_len = 0;
2987 	bool ret;
2988 
2989 	net_if_lock(iface);
2990 
2991 	ipv6 = iface->config.ip.ipv6;
2992 	if (!ipv6) {
2993 		goto out;
2994 	}
2995 
2996 	ARRAY_FOR_EACH(ipv6->unicast, i) {
2997 		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
2998 			continue;
2999 		}
3000 
3001 		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
3002 		if (len >= prefix_len) {
3003 			len = prefix_len;
3004 		}
3005 
3006 		if (len >= *best_so_far) {
3007 			/* Mesh local address can only be selected for the same
3008 			 * subnet.
3009 			 */
3010 			if (ipv6->unicast[i].is_mesh_local && len < 64 &&
3011 			    !net_ipv6_is_addr_mcast_mesh(dst)) {
3012 				continue;
3013 			}
3014 
3015 			ret = use_public_address(iface->pe_prefer_public,
3016 						 ipv6->unicast[i].is_temporary,
3017 						 flags);
3018 			if (!ret) {
3019 				temp_addr = &ipv6->unicast[i].address.in6_addr;
3020 				temp_addr_len = len;
3021 
3022 				*best_so_far = len;
3023 				src = &ipv6->unicast[i].address.in6_addr;
3024 				continue;
3025 			}
3026 
3027 			if (!ipv6->unicast[i].is_temporary) {
3028 				public_addr = &ipv6->unicast[i];
3029 				public_addr_len = len;
3030 			}
3031 
3032 			*best_so_far = len;
3033 			src = &ipv6->unicast[i].address.in6_addr;
3034 		}
3035 	}
3036 
3037 	if (IS_ENABLED(CONFIG_NET_IPV6_PE) && !iface->pe_prefer_public && temp_addr) {
3038 		if (temp_addr_len >= *best_so_far) {
3039 			*best_so_far = temp_addr_len;
3040 			src = temp_addr;
3041 		}
3042 	} else {
3043 		/* By default prefer always public address if found */
3044 		if (flags & IPV6_PREFER_SRC_PUBLIC) {
3045 use_public:
3046 			if (public_addr &&
3047 			    !net_ipv6_addr_cmp(&public_addr->address.in6_addr, src)) {
3048 				src = &public_addr->address.in6_addr;
3049 				*best_so_far = public_addr_len;
3050 			}
3051 		} else if (flags & IPV6_PREFER_SRC_TMP) {
3052 			if (temp_addr && !net_ipv6_addr_cmp(temp_addr, src)) {
3053 				src = temp_addr;
3054 				*best_so_far = temp_addr_len;
3055 			}
3056 		} else if (flags & IPV6_PREFER_SRC_PUBTMP_DEFAULT) {
3057 			goto use_public;
3058 		}
3059 	}
3060 
3061 out:
3062 	net_if_unlock(iface);
3063 
3064 	return src;
3065 }
3066 
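/* Usage sketch (illustrative only): selecting a source address for a given
 * destination while asking for a public (non-temporary) address.
 *
 *	const struct in6_addr *src;
 *
 *	src = net_if_ipv6_select_src_addr_hint(NULL, &dst,
 *					       IPV6_PREFER_SRC_PUBLIC);
 *
 * If no suitable address is found the unspecified address (::) is returned,
 * which net_ipv6_is_addr_unspecified() can be used to detect.
 */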
3067 const struct in6_addr *net_if_ipv6_select_src_addr_hint(struct net_if *dst_iface,
3068 							const struct in6_addr *dst,
3069 							int flags)
3070 {
3071 	const struct in6_addr *src = NULL;
3072 	uint8_t best_match = 0U;
3073 
3074 	NET_ASSERT(dst);
3075 
3076 	if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast_link(dst)) {
3077 		struct net_if_ipv6_prefix *prefix;
3078 		uint8_t prefix_len = 128;
3079 
3080 		prefix = net_if_ipv6_prefix_get(dst_iface, dst);
3081 		if (prefix) {
3082 			prefix_len = prefix->len;
3083 		}
3084 
3085 		/* If caller has supplied interface, then use that */
3086 		if (dst_iface) {
3087 			src = net_if_ipv6_get_best_match(dst_iface, dst,
3088 							 prefix_len,
3089 							 &best_match,
3090 							 flags);
3091 		} else {
3092 			STRUCT_SECTION_FOREACH(net_if, iface) {
3093 				struct in6_addr *addr;
3094 
3095 				addr = net_if_ipv6_get_best_match(iface, dst,
3096 								  prefix_len,
3097 								  &best_match,
3098 								  flags);
3099 				if (addr) {
3100 					src = addr;
3101 				}
3102 			}
3103 		}
3104 
3105 	} else {
3106 		if (dst_iface) {
3107 			src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
3108 		} else {
3109 			struct in6_addr *addr;
3110 
3111 			addr = net_if_ipv6_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
3112 			if (addr) {
3113 				src = addr;
3114 				goto out;
3115 			}
3116 
3117 			STRUCT_SECTION_FOREACH(net_if, iface) {
3118 				addr = net_if_ipv6_get_ll(iface,
3119 							  NET_ADDR_PREFERRED);
3120 				if (addr) {
3121 					src = addr;
3122 					break;
3123 				}
3124 			}
3125 		}
3126 	}
3127 
3128 	if (!src) {
3129 		src = net_ipv6_unspecified_address();
3130 	}
3131 
3132 out:
3133 	return src;
3134 }
3135 
3136 const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
3137 						   const struct in6_addr *dst)
3138 {
3139 	return net_if_ipv6_select_src_addr_hint(dst_iface,
3140 						dst,
3141 						IPV6_PREFER_SRC_PUBTMP_DEFAULT);
3142 }
3143 
3144 struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
3145 {
3146 	struct net_if *iface = NULL;
3147 	const struct in6_addr *src;
3148 
3149 	src = net_if_ipv6_select_src_addr(NULL, dst);
3150 	if (src != net_ipv6_unspecified_address()) {
3151 		net_if_ipv6_addr_lookup(src, &iface);
3152 	}
3153 
3154 	if (iface == NULL) {
3155 		iface = net_if_get_default();
3156 	}
3157 
3158 	return iface;
3159 }
3160 
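/* Worked example (illustrative): with a base reachable time of 30000 ms the
 * computation below gives
 *
 *	min_reachable = (1 * 30000) / 2 = 15000 ms
 *	max_reachable = (3 * 30000) / 2 = 45000 ms
 *
 * so the returned value is uniformly distributed in [15000, 45000) ms, i.e.
 * the 0.5..1.5 randomization of ReachableTime described in RFC 4861.
 */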
3161 uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
3162 {
3163 	uint32_t min_reachable, max_reachable;
3164 
3165 	min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
3166 			/ MIN_RANDOM_DENOM;
3167 	max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
3168 			/ MAX_RANDOM_DENOM;
3169 
3170 	NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
3171 		max_reachable);
3172 
3173 	return min_reachable +
3174 	       sys_rand32_get() % (max_reachable - min_reachable);
3175 }
3176 
3177 static void iface_ipv6_start(struct net_if *iface)
3178 {
3179 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
3180 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
3181 		return;
3182 	}
3183 
3184 	if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
3185 		net_if_start_dad(iface);
3186 	} else {
3187 		struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
3188 
3189 		if (ipv6 != NULL) {
3190 			join_mcast_nodes(iface,
3191 					 &ipv6->mcast[0].address.in6_addr);
3192 		}
3193 	}
3194 
3195 	net_if_start_rs(iface);
3196 }
3197 
3198 static void iface_ipv6_stop(struct net_if *iface)
3199 {
3200 	struct in6_addr addr = { };
3201 
3202 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
3203 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
3204 		return;
3205 	}
3206 
3207 	net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));
3208 
3209 	(void)net_if_ipv6_addr_rm(iface, &addr);
3210 }
3211 
3212 static void iface_ipv6_init(int if_count)
3213 {
3214 	iface_ipv6_dad_init();
3215 	iface_ipv6_nd_init();
3216 
3217 	k_work_init_delayable(&address_lifetime_timer,
3218 			      address_lifetime_timeout);
3219 	k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);
3220 
3221 	if (if_count > ARRAY_SIZE(ipv6_addresses)) {
3222 		NET_WARN("You have %zu IPv6 net_if addresses but %d "
3223 			 "network interfaces", ARRAY_SIZE(ipv6_addresses),
3224 			 if_count);
3225 		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
3226 			 "value.");
3227 	}
3228 
3229 	ARRAY_FOR_EACH(ipv6_addresses, i) {
3230 		ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
3231 		ipv6_addresses[i].ipv6.mcast_hop_limit = CONFIG_NET_INITIAL_MCAST_HOP_LIMIT;
3232 		ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;
3233 
3234 		net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
3235 	}
3236 }
3237 
3238 #else /* CONFIG_NET_NATIVE_IPV6 */
3239 #define join_mcast_allnodes(...)
3240 #define join_mcast_solicit_node(...)
3241 #define leave_mcast_all(...)
3242 #define clear_joined_ipv6_mcast_groups(...)
3243 #define join_mcast_nodes(...)
3244 #define iface_ipv6_start(...)
3245 #define iface_ipv6_stop(...)
3246 #define iface_ipv6_init(...)
3247 
3248 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
3249 						   struct net_if **iface)
3250 {
3251 	ARG_UNUSED(addr);
3252 	ARG_UNUSED(iface);
3253 
3254 	return NULL;
3255 }
3256 
3257 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
3258 					    struct net_if **ret)
3259 {
3260 	ARG_UNUSED(addr);
3261 	ARG_UNUSED(ret);
3262 
3263 	return NULL;
3264 }
3265 
3266 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
3267 					     struct net_if **iface)
3268 {
3269 	ARG_UNUSED(state);
3270 	ARG_UNUSED(iface);
3271 
3272 	return NULL;
3273 }
3274 #endif /* CONFIG_NET_NATIVE_IPV6 */
3275 
3276 #if defined(CONFIG_NET_NATIVE_IPV4)
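/* Lazily bind one of the statically allocated ipv4_addresses slots to the
 * interface on first use. Returns -ENOTSUP if the interface has no IPv4
 * support and -ESRCH when all slots are taken (in which case
 * CONFIG_NET_IF_MAX_IPV4_COUNT may need to be increased).
 */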
3277 int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
3278 {
3279 	int ret = 0;
3280 
3281 	net_if_lock(iface);
3282 
3283 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3284 		ret = -ENOTSUP;
3285 		goto out;
3286 	}
3287 
3288 	if (iface->config.ip.ipv4) {
3289 		if (ipv4) {
3290 			*ipv4 = iface->config.ip.ipv4;
3291 		}
3292 
3293 		goto out;
3294 	}
3295 
3296 	k_mutex_lock(&lock, K_FOREVER);
3297 
3298 	ARRAY_FOR_EACH(ipv4_addresses, i) {
3299 		if (ipv4_addresses[i].iface) {
3300 			continue;
3301 		}
3302 
3303 		iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
3304 		ipv4_addresses[i].iface = iface;
3305 
3306 		if (ipv4) {
3307 			*ipv4 = &ipv4_addresses[i].ipv4;
3308 		}
3309 
3310 		k_mutex_unlock(&lock);
3311 		goto out;
3312 	}
3313 
3314 	k_mutex_unlock(&lock);
3315 
3316 	ret = -ESRCH;
3317 out:
3318 	net_if_unlock(iface);
3319 
3320 	return ret;
3321 }
3322 
3323 int net_if_config_ipv4_put(struct net_if *iface)
3324 {
3325 	int ret = 0;
3326 
3327 	net_if_lock(iface);
3328 
3329 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3330 		ret = -ENOTSUP;
3331 		goto out;
3332 	}
3333 
3334 	if (!iface->config.ip.ipv4) {
3335 		ret = -EALREADY;
3336 		goto out;
3337 	}
3338 
3339 	k_mutex_lock(&lock, K_FOREVER);
3340 
3341 	ARRAY_FOR_EACH(ipv4_addresses, i) {
3342 		if (ipv4_addresses[i].iface != iface) {
3343 			continue;
3344 		}
3345 
3346 		iface->config.ip.ipv4 = NULL;
3347 		ipv4_addresses[i].iface = NULL;
3348 
3349 		k_mutex_unlock(&lock);
3350 		goto out;
3351 	}
3352 
3353 	k_mutex_unlock(&lock);
3354 
3355 	ret = -ESRCH;
3356 out:
3357 	net_if_unlock(iface);
3358 
3359 	return ret;
3360 }
3361 
3362 uint8_t net_if_ipv4_get_ttl(struct net_if *iface)
3363 {
3364 	int ret = 0;
3365 
3366 	net_if_lock(iface);
3367 
3368 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3369 		goto out;
3370 	}
3371 
3372 	if (!iface->config.ip.ipv4) {
3373 		goto out;
3374 	}
3375 
3376 	ret = iface->config.ip.ipv4->ttl;
3377 out:
3378 	net_if_unlock(iface);
3379 
3380 	return ret;
3381 }
3382 
3383 void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl)
3384 {
3385 	net_if_lock(iface);
3386 
3387 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3388 		goto out;
3389 	}
3390 
3391 	if (!iface->config.ip.ipv4) {
3392 		goto out;
3393 	}
3394 
3395 	iface->config.ip.ipv4->ttl = ttl;
3396 out:
3397 	net_if_unlock(iface);
3398 }
3399 
3400 uint8_t net_if_ipv4_get_mcast_ttl(struct net_if *iface)
3401 {
3402 	int ret = 0;
3403 
3404 	net_if_lock(iface);
3405 
3406 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3407 		goto out;
3408 	}
3409 
3410 	if (!iface->config.ip.ipv4) {
3411 		goto out;
3412 	}
3413 
3414 	ret = iface->config.ip.ipv4->mcast_ttl;
3415 out:
3416 	net_if_unlock(iface);
3417 
3418 	return ret;
3419 }
3420 
3421 void net_if_ipv4_set_mcast_ttl(struct net_if *iface, uint8_t ttl)
3422 {
3423 	net_if_lock(iface);
3424 
3425 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3426 		goto out;
3427 	}
3428 
3429 	if (!iface->config.ip.ipv4) {
3430 		goto out;
3431 	}
3432 
3433 	iface->config.ip.ipv4->mcast_ttl = ttl;
3434 out:
3435 	net_if_unlock(iface);
3436 }
3437 
3438 struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
3439 						struct in_addr *addr)
3440 {
3441 	return iface_router_lookup(iface, AF_INET, addr);
3442 }
3443 
3444 struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
3445 						      struct in_addr *addr)
3446 {
3447 	return iface_router_find_default(iface, AF_INET, addr);
3448 }
3449 
3450 struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
3451 					     struct in_addr *addr,
3452 					     bool is_default,
3453 					     uint16_t lifetime)
3454 {
3455 	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
3456 }
3457 
3458 bool net_if_ipv4_router_rm(struct net_if_router *router)
3459 {
3460 	return iface_router_rm(router);
3461 }
3462 
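/* Worked example (illustrative): for an interface address 192.0.2.10 with
 * netmask 255.255.255.0, the destination 192.0.2.200 matches because
 *
 *	192.0.2.200 & 255.255.255.0 == 192.0.2.0
 *	192.0.2.10  & 255.255.255.0 == 192.0.2.0
 *
 * whereas 198.51.100.1 does not. The loop below applies this check against
 * every configured unicast address of the interface.
 */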
3463 bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
3464 			       const struct in_addr *addr)
3465 {
3466 	bool ret = false;
3467 	struct net_if_ipv4 *ipv4;
3468 	uint32_t subnet;
3469 
3470 	net_if_lock(iface);
3471 
3472 	ipv4 = iface->config.ip.ipv4;
3473 	if (!ipv4) {
3474 		goto out;
3475 	}
3476 
3477 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3478 		if (!ipv4->unicast[i].ipv4.is_used ||
3479 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3480 			continue;
3481 		}
3482 
3483 		subnet = UNALIGNED_GET(&addr->s_addr) &
3484 			 ipv4->unicast[i].netmask.s_addr;
3485 
3486 		if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3487 		     ipv4->unicast[i].netmask.s_addr) == subnet) {
3488 			ret = true;
3489 			goto out;
3490 		}
3491 	}
3492 
3493 out:
3494 	net_if_unlock(iface);
3495 
3496 	return ret;
3497 }
3498 
3499 static bool ipv4_is_broadcast_address(struct net_if *iface,
3500 				      const struct in_addr *addr)
3501 {
3502 	struct net_if_ipv4 *ipv4;
3503 	bool ret = false;
3504 	struct in_addr bcast;
3505 
3506 	net_if_lock(iface);
3507 
3508 	ipv4 = iface->config.ip.ipv4;
3509 	if (!ipv4) {
3510 		ret = false;
3511 		goto out;
3512 	}
3513 
3514 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3515 		if (!ipv4->unicast[i].ipv4.is_used ||
3516 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3517 			continue;
3518 		}
3519 
3520 		bcast.s_addr = ipv4->unicast[i].ipv4.address.in_addr.s_addr |
3521 			       ~ipv4->unicast[i].netmask.s_addr;
3522 
3523 		if (bcast.s_addr == UNALIGNED_GET(&addr->s_addr)) {
3524 			ret = true;
3525 			goto out;
3526 		}
3527 	}
3528 
3529 out:
3530 	net_if_unlock(iface);
3531 	return ret;
3532 }
3533 
3534 bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
3535 			       const struct in_addr *addr)
3536 {
3537 	bool ret = false;
3538 
3539 	if (iface) {
3540 		ret = ipv4_is_broadcast_address(iface, addr);
3541 		goto out;
3542 	}
3543 
3544 	STRUCT_SECTION_FOREACH(net_if, one_iface) {
3545 		ret = ipv4_is_broadcast_address(one_iface, addr);
3546 		if (ret) {
3547 			goto out;
3548 		}
3549 	}
3550 
3551 out:
3552 	return ret;
3553 }
3554 
3555 struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
3556 {
3557 	struct net_if *selected = NULL;
3558 
3559 	STRUCT_SECTION_FOREACH(net_if, iface) {
3560 		bool ret;
3561 
3562 		ret = net_if_ipv4_addr_mask_cmp(iface, dst);
3563 		if (ret) {
3564 			selected = iface;
3565 			goto out;
3566 		}
3567 	}
3568 
3569 	if (selected == NULL) {
3570 		selected = net_if_get_default();
3571 	}
3572 
3573 out:
3574 	return selected;
3575 }
3576 
3577 static uint8_t get_diff_ipv4(const struct in_addr *src,
3578 			  const struct in_addr *dst)
3579 {
3580 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 4);
3581 }
3582 
3583 static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
3584 {
3585 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3586 	    addr->address.family == AF_INET &&
3587 	    !net_ipv4_is_ll_addr(&addr->address.in_addr)) {
3588 		return true;
3589 	}
3590 
3591 	return false;
3592 }
3593 
3594 static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
3595 						  const struct in_addr *dst,
3596 						  uint8_t *best_so_far)
3597 {
3598 	struct net_if_ipv4 *ipv4;
3599 	struct in_addr *src = NULL;
3600 	uint8_t len;
3601 
3602 	net_if_lock(iface);
3603 
3604 	ipv4 = iface->config.ip.ipv4;
3605 	if (!ipv4) {
3606 		goto out;
3607 	}
3608 
3609 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3610 		if (!is_proper_ipv4_address(&ipv4->unicast[i].ipv4)) {
3611 			continue;
3612 		}
3613 
3614 		len = get_diff_ipv4(dst, &ipv4->unicast[i].ipv4.address.in_addr);
3615 		if (len >= *best_so_far) {
3616 			*best_so_far = len;
3617 			src = &ipv4->unicast[i].ipv4.address.in_addr;
3618 		}
3619 	}
3620 
3621 out:
3622 	net_if_unlock(iface);
3623 
3624 	return src;
3625 }
3626 
3627 static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
3628 					enum net_addr_state addr_state, bool ll)
3629 {
3630 	struct in_addr *addr = NULL;
3631 	struct net_if_ipv4 *ipv4;
3632 
3633 	if (!iface) {
3634 		return NULL;
3635 	}
3636 
3637 	net_if_lock(iface);
3638 
3639 	ipv4 = iface->config.ip.ipv4;
3640 	if (!ipv4) {
3641 		goto out;
3642 	}
3643 
3644 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3645 		if (!ipv4->unicast[i].ipv4.is_used ||
3646 		    (addr_state != NET_ADDR_ANY_STATE &&
3647 		     ipv4->unicast[i].ipv4.addr_state != addr_state) ||
3648 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3649 			continue;
3650 		}
3651 
3652 		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].ipv4.address.in_addr)) {
3653 			if (!ll) {
3654 				continue;
3655 			}
3656 		} else {
3657 			if (ll) {
3658 				continue;
3659 			}
3660 		}
3661 
3662 		addr = &ipv4->unicast[i].ipv4.address.in_addr;
3663 		goto out;
3664 	}
3665 
3666 out:
3667 	net_if_unlock(iface);
3668 
3669 	return addr;
3670 }
3671 
3672 struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
3673 				   enum net_addr_state addr_state)
3674 {
3675 	return if_ipv4_get_addr(iface, addr_state, true);
3676 }
3677 
3678 struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
3679 					    enum net_addr_state addr_state)
3680 {
3681 	return if_ipv4_get_addr(iface, addr_state, false);
3682 }
3683 
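/* Usage sketch (illustrative only): picking a source address for an IPv4
 * destination; the literal address is just an example.
 *
 *	struct in_addr dst;
 *	const struct in_addr *src;
 *
 *	if (net_addr_pton(AF_INET, "192.0.2.1", &dst) == 0) {
 *		src = net_if_ipv4_select_src_addr(NULL, &dst);
 *	}
 *
 * When nothing better is available the unspecified address (0.0.0.0) is
 * returned.
 */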
3684 const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
3685 						  const struct in_addr *dst)
3686 {
3687 	const struct in_addr *src = NULL;
3688 	uint8_t best_match = 0U;
3689 
3690 	NET_ASSERT(dst);
3691 
3692 	if (!net_ipv4_is_ll_addr(dst)) {
3693 
3694 		/* If caller has supplied interface, then use that */
3695 		if (dst_iface) {
3696 			src = net_if_ipv4_get_best_match(dst_iface, dst,
3697 							 &best_match);
3698 		} else {
3699 			STRUCT_SECTION_FOREACH(net_if, iface) {
3700 				struct in_addr *addr;
3701 
3702 				addr = net_if_ipv4_get_best_match(iface, dst,
3703 								  &best_match);
3704 				if (addr) {
3705 					src = addr;
3706 				}
3707 			}
3708 		}
3709 
3710 	} else {
3711 		if (dst_iface) {
3712 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3713 		} else {
3714 			struct in_addr *addr;
3715 
3716 			addr = net_if_ipv4_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
3717 			if (addr) {
3718 				src = addr;
3719 				goto out;
3720 			}
3721 
3722 			STRUCT_SECTION_FOREACH(net_if, iface) {
3723 				addr = net_if_ipv4_get_ll(iface,
3724 							  NET_ADDR_PREFERRED);
3725 				if (addr) {
3726 					src = addr;
3727 					break;
3728 				}
3729 			}
3730 		}
3731 	}
3732 
3733 	if (!src) {
3734 		src = net_if_ipv4_get_global_addr(dst_iface,
3735 						  NET_ADDR_PREFERRED);
3736 
3737 		if (IS_ENABLED(CONFIG_NET_IPV4_AUTO) && !src) {
3738 			/* Try to use LL address if there's really no other
3739 			 * address available.
3740 			 */
3741 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3742 		}
3743 
3744 		if (!src) {
3745 			src = net_ipv4_unspecified_address();
3746 		}
3747 	}
3748 
3749 out:
3750 	return src;
3751 }
3752 
3753 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
3754 					    struct net_if **ret)
3755 {
3756 	struct net_if_addr *ifaddr = NULL;
3757 
3758 	STRUCT_SECTION_FOREACH(net_if, iface) {
3759 		struct net_if_ipv4 *ipv4;
3760 
3761 		net_if_lock(iface);
3762 
3763 		ipv4 = iface->config.ip.ipv4;
3764 		if (!ipv4) {
3765 			net_if_unlock(iface);
3766 			continue;
3767 		}
3768 
3769 		ARRAY_FOR_EACH(ipv4->unicast, i) {
3770 			if (!ipv4->unicast[i].ipv4.is_used ||
3771 			    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3772 				continue;
3773 			}
3774 
3775 			if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
3776 			    ipv4->unicast[i].ipv4.address.in_addr.s_addr) {
3777 
3778 				if (ret) {
3779 					*ret = iface;
3780 				}
3781 
3782 				ifaddr = &ipv4->unicast[i].ipv4;
3783 				net_if_unlock(iface);
3784 				goto out;
3785 			}
3786 		}
3787 
3788 		net_if_unlock(iface);
3789 	}
3790 
3791 out:
3792 	return ifaddr;
3793 }
3794 
3795 int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
3796 {
3797 	struct net_if_addr *if_addr;
3798 	struct net_if *iface = NULL;
3799 
3800 	if_addr = net_if_ipv4_addr_lookup(addr, &iface);
3801 	if (!if_addr) {
3802 		return 0;
3803 	}
3804 
3805 	return net_if_get_by_iface(iface);
3806 }
3807 
3808 #ifdef CONFIG_USERSPACE
3809 static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
3810 					  const struct in_addr *addr)
3811 {
3812 	struct in_addr addr_v4;
3813 
3814 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3815 
3816 	return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
3817 }
3818 #include <zephyr/syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
3819 #endif
3820 
3821 struct in_addr net_if_ipv4_get_netmask_by_addr(struct net_if *iface,
3822 					       const struct in_addr *addr)
3823 {
3824 	struct in_addr netmask = { 0 };
3825 	struct net_if_ipv4 *ipv4;
3826 	uint32_t subnet;
3827 
3828 	net_if_lock(iface);
3829 
3830 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3831 		goto out;
3832 	}
3833 
3834 	ipv4 = iface->config.ip.ipv4;
3835 	if (ipv4 == NULL) {
3836 		goto out;
3837 	}
3838 
3839 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3840 		if (!ipv4->unicast[i].ipv4.is_used ||
3841 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3842 			continue;
3843 		}
3844 
3845 		subnet = UNALIGNED_GET(&addr->s_addr) &
3846 			 ipv4->unicast[i].netmask.s_addr;
3847 
3848 		if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3849 		     ipv4->unicast[i].netmask.s_addr) == subnet) {
3850 			netmask = ipv4->unicast[i].netmask;
3851 			goto out;
3852 		}
3853 	}
3854 
3855 out:
3856 	net_if_unlock(iface);
3857 
3858 	return netmask;
3859 }
3860 
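/* Usage sketch (illustrative only): updating the netmask associated with a
 * configured address (matched by subnet); both literals are examples.
 *
 *	struct in_addr addr, netmask;
 *
 *	net_addr_pton(AF_INET, "192.0.2.10", &addr);
 *	net_addr_pton(AF_INET, "255.255.255.0", &netmask);
 *
 *	if (!net_if_ipv4_set_netmask_by_addr(iface, &addr, &netmask)) {
 *		NET_DBG("Address not found on this interface");
 *	}
 */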
3861 bool net_if_ipv4_set_netmask_by_addr(struct net_if *iface,
3862 				     const struct in_addr *addr,
3863 				     const struct in_addr *netmask)
3864 {
3865 	struct net_if_ipv4 *ipv4;
3866 	uint32_t subnet;
3867 	bool ret = false;
3868 
3869 	net_if_lock(iface);
3870 
3871 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3872 		goto out;
3873 	}
3874 
3875 	ipv4 = iface->config.ip.ipv4;
3876 	if (ipv4 == NULL) {
3877 		goto out;
3878 	}
3879 
3880 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3881 		if (!ipv4->unicast[i].ipv4.is_used ||
3882 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3883 			continue;
3884 		}
3885 
3886 		subnet = UNALIGNED_GET(&addr->s_addr) &
3887 			 ipv4->unicast[i].netmask.s_addr;
3888 
3889 		if ((ipv4->unicast[i].ipv4.address.in_addr.s_addr &
3890 		     ipv4->unicast[i].netmask.s_addr) == subnet) {
3891 			ipv4->unicast[i].netmask = *netmask;
3892 			ret = true;
3893 			goto out;
3894 		}
3895 	}
3896 
3897 out:
3898 	net_if_unlock(iface);
3899 
3900 	return ret;
3901 }
3902 
3903 /* Using this function is problematic because, if multiple addresses are
3904  * configured, it is ambiguous which netmask to return. As a heuristic,
3905  * return the first one found. Please use
3906  * net_if_ipv4_get_netmask_by_addr() instead.
3907  */
3908 struct in_addr net_if_ipv4_get_netmask(struct net_if *iface)
3909 {
3910 	struct in_addr netmask = { 0 };
3911 	struct net_if_ipv4 *ipv4;
3912 
3913 	net_if_lock(iface);
3914 
3915 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3916 		goto out;
3917 	}
3918 
3919 	ipv4 = iface->config.ip.ipv4;
3920 	if (ipv4 == NULL) {
3921 		goto out;
3922 	}
3923 
3924 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3925 		if (!ipv4->unicast[i].ipv4.is_used ||
3926 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3927 			continue;
3928 		}
3929 
3930 		netmask = iface->config.ip.ipv4->unicast[i].netmask;
3931 		break;
3932 	}
3933 
3934 out:
3935 	net_if_unlock(iface);
3936 
3937 	return netmask;
3938 }
3939 
3940 /* Using this function is problematic because, if multiple addresses are
3941  * configured, it is ambiguous which netmask to set. As a heuristic,
3942  * set the first one found. Please use
3943  * net_if_ipv4_set_netmask_by_addr() instead.
3944  */
3945 static void net_if_ipv4_set_netmask_deprecated(struct net_if *iface,
3946 					       const struct in_addr *netmask)
3947 {
3948 	struct net_if_ipv4 *ipv4;
3949 
3950 	net_if_lock(iface);
3951 
3952 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3953 		goto out;
3954 	}
3955 
3956 	ipv4 = iface->config.ip.ipv4;
3957 	if (ipv4 == NULL) {
3958 		goto out;
3959 	}
3960 
3961 	ARRAY_FOR_EACH(ipv4->unicast, i) {
3962 		if (!ipv4->unicast[i].ipv4.is_used ||
3963 		    ipv4->unicast[i].ipv4.address.family != AF_INET) {
3964 			continue;
3965 		}
3966 
3967 		net_ipaddr_copy(&ipv4->unicast[i].netmask, netmask);
3968 		break;
3969 	}
3970 
3971 out:
3972 	net_if_unlock(iface);
3973 }
3974 
3975 void net_if_ipv4_set_netmask(struct net_if *iface,
3976 			     const struct in_addr *netmask)
3977 {
3978 	net_if_ipv4_set_netmask_deprecated(iface, netmask);
3979 }
3980 
3981 bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
3982 					     const struct in_addr *netmask)
3983 {
3984 	struct net_if *iface;
3985 
3986 	iface = net_if_get_by_index(index);
3987 	if (!iface) {
3988 		return false;
3989 	}
3990 
3991 	net_if_ipv4_set_netmask_deprecated(iface, netmask);
3992 
3993 	return true;
3994 }
3995 
3996 bool z_impl_net_if_ipv4_set_netmask_by_addr_by_index(int index,
3997 						     const struct in_addr *addr,
3998 						     const struct in_addr *netmask)
3999 {
4000 	struct net_if *iface;
4001 
4002 	iface = net_if_get_by_index(index);
4003 	if (!iface) {
4004 		return false;
4005 	}
4006 
4007 	net_if_ipv4_set_netmask_by_addr(iface, addr, netmask);
4008 
4009 	return true;
4010 }
4011 
4012 #ifdef CONFIG_USERSPACE
4013 bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
4014 					     const struct in_addr *netmask)
4015 {
4016 	struct in_addr netmask_addr;
4017 	struct net_if *iface;
4018 
4019 	iface = z_vrfy_net_if_get_by_index(index);
4020 	if (!iface) {
4021 		return false;
4022 	}
4023 
4024 	K_OOPS(k_usermode_from_copy(&netmask_addr, (void *)netmask,
4025 				sizeof(netmask_addr)));
4026 
4027 	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
4028 }
4029 
4030 #include <zephyr/syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
4031 
4032 bool z_vrfy_net_if_ipv4_set_netmask_by_addr_by_index(int index,
4033 						     const struct in_addr *addr,
4034 						     const struct in_addr *netmask)
4035 {
4036 	struct in_addr ipv4_addr, netmask_addr;
4037 	struct net_if *iface;
4038 
4039 	iface = z_vrfy_net_if_get_by_index(index);
4040 	if (!iface) {
4041 		return false;
4042 	}
4043 
4044 	K_OOPS(k_usermode_from_copy(&ipv4_addr, (void *)addr,
4045 				    sizeof(ipv4_addr)));
4046 	K_OOPS(k_usermode_from_copy(&netmask_addr, (void *)netmask,
4047 				    sizeof(netmask_addr)));
4048 
4049 	return z_impl_net_if_ipv4_set_netmask_by_addr_by_index(index,
4050 							       &ipv4_addr,
4051 							       &netmask_addr);
4052 }
4053 
4054 #include <zephyr/syscalls/net_if_ipv4_set_netmask_by_addr_by_index_mrsh.c>
4055 #endif /* CONFIG_USERSPACE */
4056 
4057 struct in_addr net_if_ipv4_get_gw(struct net_if *iface)
4058 {
4059 	struct in_addr gw = { 0 };
4060 
4061 	net_if_lock(iface);
4062 
4063 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4064 		goto out;
4065 	}
4066 
4067 	if (!iface->config.ip.ipv4) {
4068 		goto out;
4069 	}
4070 
4071 	gw = iface->config.ip.ipv4->gw;
4072 out:
4073 	net_if_unlock(iface);
4074 
4075 	return gw;
4076 }
4077 
4078 void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
4079 {
4080 	net_if_lock(iface);
4081 
4082 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4083 		goto out;
4084 	}
4085 
4086 	if (!iface->config.ip.ipv4) {
4087 		goto out;
4088 	}
4089 
4090 	net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
4091 out:
4092 	net_if_unlock(iface);
4093 }
4094 
4095 bool z_impl_net_if_ipv4_set_gw_by_index(int index,
4096 					const struct in_addr *gw)
4097 {
4098 	struct net_if *iface;
4099 
4100 	iface = net_if_get_by_index(index);
4101 	if (!iface) {
4102 		return false;
4103 	}
4104 
4105 	net_if_ipv4_set_gw(iface, gw);
4106 
4107 	return true;
4108 }
4109 
4110 #ifdef CONFIG_USERSPACE
4111 bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
4112 					const struct in_addr *gw)
4113 {
4114 	struct in_addr gw_addr;
4115 	struct net_if *iface;
4116 
4117 	iface = z_vrfy_net_if_get_by_index(index);
4118 	if (!iface) {
4119 		return false;
4120 	}
4121 
4122 	K_OOPS(k_usermode_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));
4123 
4124 	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
4125 }
4126 
4127 #include <zephyr/syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
4128 #endif /* CONFIG_USERSPACE */
4129 
4130 static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
4131 					  struct in_addr *addr)
4132 {
4133 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4134 
4135 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4136 		if (!ipv4->unicast[i].ipv4.is_used) {
4137 			continue;
4138 		}
4139 
4140 		if (net_ipv4_addr_cmp(addr,
4141 				      &ipv4->unicast[i].ipv4.address.in_addr)) {
4142 			return &ipv4->unicast[i].ipv4;
4143 		}
4144 	}
4145 
4146 	return NULL;
4147 }
4148 
4149 #if defined(CONFIG_NET_IPV4_ACD)
4150 void net_if_ipv4_acd_succeeded(struct net_if *iface, struct net_if_addr *ifaddr)
4151 {
4152 	net_if_lock(iface);
4153 
4154 	NET_DBG("ACD succeeded for %s at interface %d",
4155 		net_sprint_ipv4_addr(&ifaddr->address.in_addr),
4156 		ifaddr->ifindex);
4157 
4158 	ifaddr->addr_state = NET_ADDR_PREFERRED;
4159 
4160 	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ACD_SUCCEED, iface,
4161 					&ifaddr->address.in_addr,
4162 					sizeof(struct in_addr));
4163 
4164 	net_if_unlock(iface);
4165 }
4166 
4167 void net_if_ipv4_acd_failed(struct net_if *iface, struct net_if_addr *ifaddr)
4168 {
4169 	net_if_lock(iface);
4170 
4171 	NET_DBG("ACD failed for %s at interface %d",
4172 		net_sprint_ipv4_addr(&ifaddr->address.in_addr),
4173 		ifaddr->ifindex);
4174 
4175 	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ACD_FAILED, iface,
4176 					&ifaddr->address.in_addr,
4177 					sizeof(struct in_addr));
4178 
4179 	net_if_ipv4_addr_rm(iface, &ifaddr->address.in_addr);
4180 
4181 	net_if_unlock(iface);
4182 }
4183 
4184 void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
4185 {
4186 	ifaddr->addr_state = NET_ADDR_TENTATIVE;
4187 
4188 	if (net_if_is_up(iface)) {
4189 		NET_DBG("Interface %p ll addr %s tentative IPv4 addr %s",
4190 			iface,
4191 			net_sprint_ll_addr(net_if_get_link_addr(iface)->addr,
4192 					   net_if_get_link_addr(iface)->len),
4193 			net_sprint_ipv4_addr(&ifaddr->address.in_addr));
4194 
4195 		if (net_ipv4_acd_start(iface, ifaddr) != 0) {
4196 			NET_DBG("Failed to start ACD for %s on iface %p.",
4197 				net_sprint_ipv4_addr(&ifaddr->address.in_addr),
4198 				iface);
4199 
4200 			/* Just act as if no conflict was detected. */
4201 			net_if_ipv4_acd_succeeded(iface, ifaddr);
4202 		}
4203 	} else {
4204 		NET_DBG("Interface %p is down, starting ACD for %s later.",
4205 			iface, net_sprint_ipv4_addr(&ifaddr->address.in_addr));
4206 	}
4207 }
4208 
4209 void net_if_start_acd(struct net_if *iface)
4210 {
4211 	struct net_if_ipv4 *ipv4;
4212 	int ret;
4213 
4214 	net_if_lock(iface);
4215 
4216 	NET_DBG("Starting ACD for iface %p", iface);
4217 
4218 	ret = net_if_config_ipv4_get(iface, &ipv4);
4219 	if (ret < 0) {
4220 		if (ret != -ENOTSUP) {
4221 			NET_WARN("Cannot do ACD as IPv4 config is not valid.");
4222 		}
4223 
4224 		goto out;
4225 	}
4226 
4227 	if (!ipv4) {
4228 		goto out;
4229 	}
4230 
4231 	ipv4->conflict_cnt = 0;
4232 
4233 	/* Start ACD for all the addresses that were added earlier when
4234 	 * the interface was down.
4235 	 */
4236 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4237 		if (!ipv4->unicast[i].ipv4.is_used ||
4238 		    ipv4->unicast[i].ipv4.address.family != AF_INET ||
4239 		    net_ipv4_is_addr_loopback(
4240 			    &ipv4->unicast[i].ipv4.address.in_addr)) {
4241 			continue;
4242 		}
4243 
4244 		net_if_ipv4_start_acd(iface, &ipv4->unicast[i].ipv4);
4245 	}
4246 
4247 out:
4248 	net_if_unlock(iface);
4249 }
4250 #else
4251 void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
4252 {
4253 	ARG_UNUSED(iface);
4254 
4255 	ifaddr->addr_state = NET_ADDR_PREFERRED;
4256 }
4257 
4258 #define net_if_start_acd(...)
4259 #endif /* CONFIG_NET_IPV4_ACD */
4260 
4261 struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
4262 					 struct in_addr *addr,
4263 					 enum net_addr_type addr_type,
4264 					 uint32_t vlifetime)
4265 {
4266 	struct net_if_addr *ifaddr = NULL;
4267 	struct net_if_ipv4 *ipv4;
4268 	int idx;
4269 
4270 	net_if_lock(iface);
4271 
4272 	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
4273 		goto out;
4274 	}
4275 
4276 	ifaddr = ipv4_addr_find(iface, addr);
4277 	if (ifaddr) {
4278 		/* TODO: should set addr_type/vlifetime */
4279 		goto out;
4280 	}
4281 
4282 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4283 		struct net_if_addr *cur = &ipv4->unicast[i].ipv4;
4284 
4285 		if (addr_type == NET_ADDR_DHCP
4286 		    && cur->addr_type == NET_ADDR_OVERRIDABLE) {
4287 			ifaddr = cur;
4288 			idx = i;
4289 			break;
4290 		}
4291 
4292 		if (!ipv4->unicast[i].ipv4.is_used) {
4293 			ifaddr = cur;
4294 			idx = i;
4295 			break;
4296 		}
4297 	}
4298 
4299 	if (ifaddr) {
4300 		ifaddr->is_used = true;
4301 		ifaddr->address.family = AF_INET;
4302 		ifaddr->address.in_addr.s4_addr32[0] =
4303 						addr->s4_addr32[0];
4304 		ifaddr->addr_type = addr_type;
4305 		ifaddr->atomic_ref = ATOMIC_INIT(1);
4306 
4307 		/* Caller has to take care of timers and their expiry */
4308 		if (vlifetime) {
4309 			ifaddr->is_infinite = false;
4310 		} else {
4311 			ifaddr->is_infinite = true;
4312 		}
4313 
4314 		/**
4315 		 *  TODO: Properly handle the PREFERRED/DEPRECATED states when
4316 		 *  the address is in use, expired, or being renewed.
4317 		 */
4318 
4319 		NET_DBG("[%d] interface %d (%p) address %s type %s added",
4320 			idx, net_if_get_by_iface(iface), iface,
4321 			net_sprint_ipv4_addr(addr),
4322 			net_addr_type2str(addr_type));
4323 
4324 		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
4325 		    !net_ipv4_is_addr_loopback(addr)) {
4326 			net_if_ipv4_start_acd(iface, ifaddr);
4327 		} else {
4328 			ifaddr->addr_state = NET_ADDR_PREFERRED;
4329 		}
4330 
4331 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
4332 						&ifaddr->address.in_addr,
4333 						sizeof(struct in_addr));
4334 		goto out;
4335 	}
4336 
4337 out:
4338 	net_if_unlock(iface);
4339 
4340 	return ifaddr;
4341 }
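
/* Illustrative usage sketch (added for clarity, not part of the upstream
 * sources): a typical caller adds a manually configured address with an
 * infinite lifetime (vlifetime of 0). The address literal and the use of
 * the default interface are examples only.
 *
 *	struct in_addr my_addr;
 *
 *	net_addr_pton(AF_INET, "192.0.2.10", &my_addr);
 *	(void)net_if_ipv4_addr_add(net_if_get_default(), &my_addr,
 *				   NET_ADDR_MANUAL, 0);
 */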
4342 
4343 bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
4344 {
4345 	struct net_if_ipv4 *ipv4;
4346 	int ret;
4347 
4348 	NET_ASSERT(addr);
4349 
4350 	ipv4 = iface->config.ip.ipv4;
4351 	if (!ipv4) {
4352 		return false;
4353 	}
4354 
4355 	ret = net_if_addr_unref(iface, AF_INET, addr);
4356 	if (ret > 0) {
4357 		NET_DBG("Address %s still in use (ref %d)",
4358 			net_sprint_ipv4_addr(addr), ret);
4359 		return false;
4360 
4361 	} else if (ret < 0) {
4362 		NET_DBG("Address %s not found (%d)",
4363 			net_sprint_ipv4_addr(addr), ret);
4364 	}
4365 
4366 	return true;
4367 }
4368 
4369 bool z_impl_net_if_ipv4_addr_add_by_index(int index,
4370 					  struct in_addr *addr,
4371 					  enum net_addr_type addr_type,
4372 					  uint32_t vlifetime)
4373 {
4374 	struct net_if *iface;
4375 	struct net_if_addr *if_addr;
4376 
4377 	iface = net_if_get_by_index(index);
4378 	if (!iface) {
4379 		return false;
4380 	}
4381 
4382 	if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
4383 	return if_addr ? true : false;
4384 }
4385 
4386 #ifdef CONFIG_USERSPACE
4387 bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
4388 					  struct in_addr *addr,
4389 					  enum net_addr_type addr_type,
4390 					  uint32_t vlifetime)
4391 {
4392 	struct in_addr addr_v4;
4393 	struct net_if *iface;
4394 
4395 	iface = z_vrfy_net_if_get_by_index(index);
4396 	if (!iface) {
4397 		return false;
4398 	}
4399 
4400 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
4401 
4402 	return z_impl_net_if_ipv4_addr_add_by_index(index,
4403 						    &addr_v4,
4404 						    addr_type,
4405 						    vlifetime);
4406 }
4407 
4408 #include <zephyr/syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
4409 #endif /* CONFIG_USERSPACE */
4410 
4411 bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
4412 					 const struct in_addr *addr)
4413 {
4414 	struct net_if *iface;
4415 
4416 	iface = net_if_get_by_index(index);
4417 	if (!iface) {
4418 		return false;
4419 	}
4420 
4421 	return net_if_ipv4_addr_rm(iface, addr);
4422 }
4423 
4424 #ifdef CONFIG_USERSPACE
4425 bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
4426 					 const struct in_addr *addr)
4427 {
4428 	struct in_addr addr_v4;
4429 	struct net_if *iface;
4430 
4431 	iface = z_vrfy_net_if_get_by_index(index);
4432 	if (!iface) {
4433 		return false;
4434 	}
4435 
4436 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
4437 
4438 	return z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
4439 }
4440 
4441 #include <zephyr/syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
4442 #endif /* CONFIG_USERSPACE */
4443 
4444 void net_if_ipv4_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
4445 			      void *user_data)
4446 {
4447 	struct net_if_ipv4 *ipv4;
4448 
4449 	if (iface == NULL) {
4450 		return;
4451 	}
4452 
4453 	net_if_lock(iface);
4454 
4455 	ipv4 = iface->config.ip.ipv4;
4456 	if (ipv4 == NULL) {
4457 		goto out;
4458 	}
4459 
4460 	ARRAY_FOR_EACH(ipv4->unicast, i) {
4461 		struct net_if_addr *if_addr = &ipv4->unicast[i].ipv4;
4462 
4463 		if (!if_addr->is_used) {
4464 			continue;
4465 		}
4466 
4467 		cb(iface, if_addr, user_data);
4468 	}
4469 
4470 out:
4471 	net_if_unlock(iface);
4472 }
4473 
4474 static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
4475 						 bool is_used,
4476 						 const struct in_addr *addr)
4477 {
4478 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4479 
4480 	if (!ipv4) {
4481 		return NULL;
4482 	}
4483 
4484 	ARRAY_FOR_EACH(ipv4->mcast, i) {
4485 		if ((is_used && !ipv4->mcast[i].is_used) ||
4486 		    (!is_used && ipv4->mcast[i].is_used)) {
4487 			continue;
4488 		}
4489 
4490 		if (addr) {
4491 			if (!net_ipv4_addr_cmp(&ipv4->mcast[i].address.in_addr,
4492 					       addr)) {
4493 				continue;
4494 			}
4495 		}
4496 
4497 		return &ipv4->mcast[i];
4498 	}
4499 
4500 	return NULL;
4501 }
4502 struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
4503 						const struct in_addr *addr)
4504 {
4505 	struct net_if_mcast_addr *maddr = NULL;
4506 
4507 	net_if_lock(iface);
4508 
4509 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4510 		goto out;
4511 	}
4512 
4513 	if (!net_ipv4_is_addr_mcast(addr)) {
4514 		NET_DBG("Address %s is not a multicast address.",
4515 			net_sprint_ipv4_addr(addr));
4516 		goto out;
4517 	}
4518 
4519 	maddr = ipv4_maddr_find(iface, false, NULL);
4520 	if (maddr) {
4521 		maddr->is_used = true;
4522 		maddr->address.family = AF_INET;
4523 		maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];
4524 
4525 		NET_DBG("interface %d (%p) address %s added",
4526 			net_if_get_by_iface(iface), iface,
4527 			net_sprint_ipv4_addr(addr));
4528 
4529 		net_mgmt_event_notify_with_info(
4530 			NET_EVENT_IPV4_MADDR_ADD, iface,
4531 			&maddr->address.in_addr,
4532 			sizeof(struct in_addr));
4533 	}
4534 
4535 out:
4536 	net_if_unlock(iface);
4537 
4538 	return maddr;
4539 }
4540 
4541 bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
4542 {
4543 	struct net_if_mcast_addr *maddr;
4544 	bool ret = false;
4545 
4546 	net_if_lock(iface);
4547 
4548 	maddr = ipv4_maddr_find(iface, true, addr);
4549 	if (maddr) {
4550 		maddr->is_used = false;
4551 
4552 		NET_DBG("interface %d (%p) address %s removed",
4553 			net_if_get_by_iface(iface), iface,
4554 			net_sprint_ipv4_addr(addr));
4555 
4556 		net_mgmt_event_notify_with_info(
4557 			NET_EVENT_IPV4_MADDR_DEL, iface,
4558 			&maddr->address.in_addr,
4559 			sizeof(struct in_addr));
4560 
4561 		ret = true;
4562 	}
4563 
4564 	net_if_unlock(iface);
4565 
4566 	return ret;
4567 }
4568 
4569 void net_if_ipv4_maddr_foreach(struct net_if *iface, net_if_ip_maddr_cb_t cb,
4570 			       void *user_data)
4571 {
4572 	struct net_if_ipv4 *ipv4;
4573 
4574 	NET_ASSERT(iface);
4575 	NET_ASSERT(cb);
4576 
4577 	net_if_lock(iface);
4578 
4579 	ipv4 = iface->config.ip.ipv4;
4580 	if (!ipv4) {
4581 		goto out;
4582 	}
4583 
4584 	for (int i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
4585 		if (!ipv4->mcast[i].is_used) {
4586 			continue;
4587 		}
4588 
4589 		cb(iface, &ipv4->mcast[i], user_data);
4590 	}
4591 
4592 out:
4593 	net_if_unlock(iface);
4594 }
4595 
4596 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
4597 						   struct net_if **ret)
4598 {
4599 	struct net_if_mcast_addr *addr = NULL;
4600 
4601 	STRUCT_SECTION_FOREACH(net_if, iface) {
4602 		if (ret && *ret && iface != *ret) {
4603 			continue;
4604 		}
4605 
4606 		net_if_lock(iface);
4607 
4608 		addr = ipv4_maddr_find(iface, true, maddr);
4609 		if (addr) {
4610 			if (ret) {
4611 				*ret = iface;
4612 			}
4613 
4614 			net_if_unlock(iface);
4615 			goto out;
4616 		}
4617 
4618 		net_if_unlock(iface);
4619 	}
4620 
4621 out:
4622 	return addr;
4623 }
4624 
4625 void net_if_ipv4_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
4626 {
4627 	NET_ASSERT(iface);
4628 	NET_ASSERT(addr);
4629 
4630 	net_if_lock(iface);
4631 	addr->is_joined = false;
4632 	net_if_unlock(iface);
4633 }
4634 
4635 void net_if_ipv4_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
4636 {
4637 	NET_ASSERT(iface);
4638 	NET_ASSERT(addr);
4639 
4640 	net_if_lock(iface);
4641 	addr->is_joined = true;
4642 	net_if_unlock(iface);
4643 }
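
/* Illustrative usage sketch (added for clarity, not part of the upstream
 * sources): a common pattern is to register the group address first and
 * mark it joined once the IGMP code or the driver has actually joined the
 * group. The group address is an example only.
 *
 *	struct in_addr group;
 *	struct net_if_mcast_addr *maddr;
 *
 *	net_addr_pton(AF_INET, "239.1.2.3", &group);
 *	maddr = net_if_ipv4_maddr_add(iface, &group);
 *	if (maddr != NULL) {
 *		net_if_ipv4_maddr_join(iface, maddr);
 *	}
 */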
4644 
4645 static void iface_ipv4_init(int if_count)
4646 {
4647 	int i;
4648 
4649 	if (if_count > ARRAY_SIZE(ipv4_addresses)) {
4650 		NET_WARN("You have %zu IPv4 net_if addresses but %d "
4651 			 "network interfaces", ARRAY_SIZE(ipv4_addresses),
4652 			 if_count);
4653 		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
4654 			 "value.");
4655 	}
4656 
4657 	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
4658 		ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
4659 		ipv4_addresses[i].ipv4.mcast_ttl = CONFIG_NET_INITIAL_MCAST_TTL;
4660 	}
4661 }
4662 
4663 static void leave_ipv4_mcast_all(struct net_if *iface)
4664 {
4665 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4666 
4667 	if (!ipv4) {
4668 		return;
4669 	}
4670 
4671 	ARRAY_FOR_EACH(ipv4->mcast, i) {
4672 		if (!ipv4->mcast[i].is_used ||
4673 		    !ipv4->mcast[i].is_joined) {
4674 			continue;
4675 		}
4676 
4677 		net_ipv4_igmp_leave(iface, &ipv4->mcast[i].address.in_addr);
4678 	}
4679 }
4680 
4681 static void iface_ipv4_start(struct net_if *iface)
4682 {
4683 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
4684 		return;
4685 	}
4686 
4687 	if (IS_ENABLED(CONFIG_NET_IPV4_ACD)) {
4688 		net_if_start_acd(iface);
4689 	}
4690 }
4691 
4692 
4693 #else /* CONFIG_NET_NATIVE_IPV4 */
4694 #define leave_ipv4_mcast_all(...)
4695 #define iface_ipv4_init(...)
4696 #define iface_ipv4_start(...)
4697 
4698 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
4699 						   struct net_if **iface)
4700 {
4701 	ARG_UNUSED(addr);
4702 	ARG_UNUSED(iface);
4703 
4704 	return NULL;
4705 }
4706 
4707 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
4708 					    struct net_if **ret)
4709 {
4710 	ARG_UNUSED(addr);
4711 	ARG_UNUSED(ret);
4712 
4713 	return NULL;
4714 }
4715 
4716 struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
4717 					    enum net_addr_state addr_state)
4718 {
4719 	ARG_UNUSED(addr_state);
4720 	ARG_UNUSED(iface);
4721 
4722 	return NULL;
4723 }
4724 #endif /* CONFIG_NET_NATIVE_IPV4 */
4725 
4726 struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
4727 {
4728 	struct net_if *iface = NULL;
4729 
4730 	if (!dst) {
4731 		goto out;
4732 	}
4733 
4734 	if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
4735 		iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
4736 		goto out;
4737 	}
4738 
4739 	if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
4740 		iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
4741 		goto out;
4742 	}
4743 
4744 out:
4745 	if (iface == NULL) {
4746 		iface = net_if_get_default();
4747 	}
4748 
4749 	return iface;
4750 }
4751 
4752 static struct net_if_addr *get_ifaddr(struct net_if *iface,
4753 				      sa_family_t family,
4754 				      const void *addr,
4755 				      unsigned int *mcast_addr_count)
4756 {
4757 	struct net_if_addr *ifaddr = NULL;
4758 
4759 	net_if_lock(iface);
4760 
4761 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
4762 		struct net_if_ipv6 *ipv6 =
4763 			COND_CODE_1(CONFIG_NET_NATIVE_IPV6, (iface->config.ip.ipv6), (NULL));
4764 
4765 		struct in6_addr maddr;
4766 		unsigned int maddr_count = 0;
4767 		int found = -1;
4768 
4769 		net_ipv6_addr_create_solicited_node((struct in6_addr *)addr,
4770 						    &maddr);
4771 
4772 		ARRAY_FOR_EACH(ipv6->unicast, i) {
4773 			struct in6_addr unicast_maddr;
4774 
4775 			if (!ipv6->unicast[i].is_used) {
4776 				continue;
4777 			}
4778 
4779 			/* Count how many of the used unicast addresses map to
4780 			 * this same solicited-node multicast address.
4781 			 */
4782 			net_ipv6_addr_create_solicited_node(
4783 				&ipv6->unicast[i].address.in6_addr,
4784 				&unicast_maddr);
4785 
4786 			if (net_ipv6_addr_cmp(&maddr, &unicast_maddr)) {
4787 				maddr_count++;
4788 			}
4789 
4790 			if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr, addr)) {
4791 				continue;
4792 			}
4793 
4794 			found = i;
4795 		}
4796 
4797 		if (found >= 0) {
4798 			ifaddr = &ipv6->unicast[found];
4799 
4800 			if (mcast_addr_count != NULL) {
4801 				*mcast_addr_count = maddr_count;
4802 			}
4803 		}
4804 
4805 		goto out;
4806 	}
4807 
4808 	if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
4809 		struct net_if_ipv4 *ipv4 =
4810 			COND_CODE_1(CONFIG_NET_NATIVE_IPV4, (iface->config.ip.ipv4), (NULL));
4811 
4812 		ARRAY_FOR_EACH(ipv4->unicast, i) {
4813 			if (!ipv4->unicast[i].ipv4.is_used) {
4814 				continue;
4815 			}
4816 
4817 			if (!net_ipv4_addr_cmp(&ipv4->unicast[i].ipv4.address.in_addr,
4818 					       addr)) {
4819 				continue;
4820 			}
4821 
4822 			ifaddr = &ipv4->unicast[i].ipv4;
4823 
4824 			goto out;
4825 		}
4826 	}
4827 
4828 out:
4829 	net_if_unlock(iface);
4830 
4831 	return ifaddr;
4832 }
4833 
4834 static void remove_ipv6_ifaddr(struct net_if *iface,
4835 			       struct net_if_addr *ifaddr,
4836 			       unsigned int maddr_count)
4837 {
4838 	struct net_if_ipv6 *ipv6;
4839 
4840 	net_if_lock(iface);
4841 
4842 	ipv6 = COND_CODE_1(CONFIG_NET_NATIVE_IPV6, (iface->config.ip.ipv6), (NULL));
4843 	if (!ipv6) {
4844 		goto out;
4845 	}
4846 
4847 	if (!ifaddr->is_infinite) {
4848 		k_mutex_lock(&lock, K_FOREVER);
4849 
4850 #if defined(CONFIG_NET_NATIVE_IPV6)
4851 		sys_slist_find_and_remove(&active_address_lifetime_timers,
4852 					  &ifaddr->lifetime.node);
4853 
4854 		if (sys_slist_is_empty(&active_address_lifetime_timers)) {
4855 			k_work_cancel_delayable(&address_lifetime_timer);
4856 		}
4857 #endif
4858 		k_mutex_unlock(&lock);
4859 	}
4860 
4861 #if defined(CONFIG_NET_IPV6_DAD)
4862 	if (!net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
4863 		k_mutex_lock(&lock, K_FOREVER);
4864 		sys_slist_find_and_remove(&active_dad_timers,
4865 					  &ifaddr->dad_node);
4866 		k_mutex_unlock(&lock);
4867 	}
4868 #endif
4869 
4870 	if (maddr_count == 1) {
4871 		/* Remove the solicited-node multicast address only if no other
4872 		 * unicast address is also using it
4873 		 */
4874 		struct in6_addr maddr;
4875 
4876 		net_ipv6_addr_create_solicited_node(&ifaddr->address.in6_addr,
4877 						    &maddr);
4878 		net_if_ipv6_maddr_rm(iface, &maddr);
4879 	}
4880 
4881 	/* Using the IPv6 address pointer here can give false
4882 	 * info if someone adds a new IP address into this position
4883 	 * in the address array. This is quite unlikely though.
4884 	 */
4885 	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ADDR_DEL,
4886 					iface,
4887 					&ifaddr->address.in6_addr,
4888 					sizeof(struct in6_addr));
4889 out:
4890 	net_if_unlock(iface);
4891 }
4892 
4893 static void remove_ipv4_ifaddr(struct net_if *iface,
4894 			       struct net_if_addr *ifaddr)
4895 {
4896 	struct net_if_ipv4 *ipv4;
4897 
4898 	net_if_lock(iface);
4899 
4900 	ipv4 = COND_CODE_1(CONFIG_NET_NATIVE_IPV4, (iface->config.ip.ipv4), (NULL));
4901 	if (!ipv4) {
4902 		goto out;
4903 	}
4904 
4905 #if defined(CONFIG_NET_IPV4_ACD)
4906 	net_ipv4_acd_cancel(iface, ifaddr);
4907 #endif
4908 
4909 	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_DEL,
4910 					iface,
4911 					&ifaddr->address.in_addr,
4912 					sizeof(struct in_addr));
4913 out:
4914 	net_if_unlock(iface);
4915 }
4916 
4917 #if defined(CONFIG_NET_IF_LOG_LEVEL)
4918 #define NET_LOG_LEVEL CONFIG_NET_IF_LOG_LEVEL
4919 #else
4920 #define NET_LOG_LEVEL 0
4921 #endif
4922 
4923 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
4924 struct net_if_addr *net_if_addr_ref_debug(struct net_if *iface,
4925 					  sa_family_t family,
4926 					  const void *addr,
4927 					  const char *caller,
4928 					  int line)
4929 #else
4930 struct net_if_addr *net_if_addr_ref(struct net_if *iface,
4931 				    sa_family_t family,
4932 				    const void *addr)
4933 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
4934 {
4935 	struct net_if_addr *ifaddr;
4936 	atomic_val_t ref;
4937 
4938 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
4939 	char addr_str[IS_ENABLED(CONFIG_NET_IPV6) ?
4940 		      INET6_ADDRSTRLEN : INET_ADDRSTRLEN];
4941 
4942 	__ASSERT(iface, "iface is NULL (%s():%d)", caller, line);
4943 #endif
4944 
4945 	ifaddr = get_ifaddr(iface, family, addr, NULL);
4946 
4947 	do {
4948 		ref = ifaddr ? atomic_get(&ifaddr->atomic_ref) : 0;
4949 		if (!ref) {
4950 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
4951 			NET_ERR("iface %d addr %s (%s():%d)",
4952 				net_if_get_by_iface(iface),
4953 				net_addr_ntop(family,
4954 					      addr,
4955 					      addr_str, sizeof(addr_str)),
4956 				caller, line);
4957 #endif
4958 			return NULL;
4959 		}
4960 	} while (!atomic_cas(&ifaddr->atomic_ref, ref, ref + 1));
4961 
4962 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
4963 	NET_DBG("[%d] ifaddr %s state %d ref %ld (%s():%d)",
4964 		net_if_get_by_iface(iface),
4965 		net_addr_ntop(ifaddr->address.family,
4966 			      (void *)&ifaddr->address.in_addr,
4967 			      addr_str, sizeof(addr_str)),
4968 		ifaddr->addr_state,
4969 		ref + 1,
4970 		caller, line);
4971 #endif
4972 
4973 	return ifaddr;
4974 }
4975 
4976 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
4977 int net_if_addr_unref_debug(struct net_if *iface,
4978 			    sa_family_t family,
4979 			    const void *addr,
4980 			    const char *caller, int line)
4981 #else
4982 int net_if_addr_unref(struct net_if *iface,
4983 		      sa_family_t family,
4984 		      const void *addr)
4985 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
4986 {
4987 	struct net_if_addr *ifaddr;
4988 	unsigned int maddr_count = 0;
4989 	atomic_val_t ref;
4990 
4991 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
4992 	char addr_str[IS_ENABLED(CONFIG_NET_IPV6) ?
4993 		      INET6_ADDRSTRLEN : INET_ADDRSTRLEN];
4994 
4995 	__ASSERT(iface, "iface is NULL (%s():%d)", caller, line);
4996 #endif
4997 
4998 	ifaddr = get_ifaddr(iface, family, addr, &maddr_count);
4999 
5000 	if (!ifaddr) {
5001 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5002 		NET_ERR("iface %d addr %s (%s():%d)",
5003 			net_if_get_by_iface(iface),
5004 			net_addr_ntop(family,
5005 				      addr,
5006 				      addr_str, sizeof(addr_str)),
5007 			caller, line);
5008 #endif
5009 		return -EINVAL;
5010 	}
5011 
5012 	do {
5013 		ref = atomic_get(&ifaddr->atomic_ref);
5014 		if (!ref) {
5015 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5016 			NET_ERR("*** ERROR *** iface %d ifaddr %p "
5017 				"is freed already (%s():%d)",
5018 				net_if_get_by_iface(iface),
5019 				ifaddr,
5020 				caller, line);
5021 #endif
5022 			return -EINVAL;
5023 		}
5024 
5025 	} while (!atomic_cas(&ifaddr->atomic_ref, ref, ref - 1));
5026 
5027 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
5028 	NET_DBG("[%d] ifaddr %s state %d ref %ld (%s():%d)",
5029 		net_if_get_by_iface(iface),
5030 		net_addr_ntop(ifaddr->address.family,
5031 			      (void *)&ifaddr->address.in_addr,
5032 			      addr_str, sizeof(addr_str)),
5033 		ifaddr->addr_state,
5034 		ref - 1, caller, line);
5035 #endif
5036 
5037 	if (ref > 1) {
5038 		return ref - 1;
5039 	}
5040 
5041 	ifaddr->is_used = false;
5042 
5043 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 && addr != NULL) {
5044 		remove_ipv6_ifaddr(iface, ifaddr, maddr_count);
5045 	}
5046 
5047 	if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET && addr != NULL) {
5048 		remove_ipv4_ifaddr(iface, ifaddr);
5049 	}
5050 
5051 	return 0;
5052 }
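
/* Illustrative usage sketch (added for clarity, not part of the upstream
 * sources): each successful net_if_addr_ref() is expected to be balanced
 * by a net_if_addr_unref(); when the last reference is dropped the entry
 * is marked unused above and the matching ADDR_DEL event is emitted.
 * "v4_addr" stands for a struct in_addr owned by the caller.
 *
 *	struct net_if_addr *ifaddr;
 *
 *	ifaddr = net_if_addr_ref(iface, AF_INET, &v4_addr);
 *	if (ifaddr != NULL) {
 *		... use the address entry ...
 *		net_if_addr_unref(iface, AF_INET, &v4_addr);
 *	}
 */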
5053 
5054 enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
5055 {
5056 	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
5057 	    net_if_is_promisc(iface)) {
5058 		struct net_pkt *new_pkt;
5059 
5060 		new_pkt = net_pkt_clone(pkt, K_NO_WAIT);
5061 
5062 		if (net_promisc_mode_input(new_pkt) == NET_DROP) {
5063 			net_pkt_unref(new_pkt);
5064 		}
5065 	}
5066 
5067 	return net_if_l2(iface)->recv(iface, pkt);
5068 }
5069 
5070 void net_if_register_link_cb(struct net_if_link_cb *link,
5071 			     net_if_link_callback_t cb)
5072 {
5073 	k_mutex_lock(&lock, K_FOREVER);
5074 
5075 	sys_slist_find_and_remove(&link_callbacks, &link->node);
5076 	sys_slist_prepend(&link_callbacks, &link->node);
5077 
5078 	link->cb = cb;
5079 
5080 	k_mutex_unlock(&lock);
5081 }
5082 
5083 void net_if_unregister_link_cb(struct net_if_link_cb *link)
5084 {
5085 	k_mutex_lock(&lock, K_FOREVER);
5086 
5087 	sys_slist_find_and_remove(&link_callbacks, &link->node);
5088 
5089 	k_mutex_unlock(&lock);
5090 }
5091 
5092 void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
5093 			 int status)
5094 {
5095 	struct net_if_link_cb *link, *tmp;
5096 
5097 	k_mutex_lock(&lock, K_FOREVER);
5098 
5099 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
5100 		link->cb(iface, lladdr, status);
5101 	}
5102 
5103 	k_mutex_unlock(&lock);
5104 }
5105 
5106 static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps,
5107 			      enum net_if_checksum_type chksum_type)
5108 {
5109 #if defined(CONFIG_NET_L2_ETHERNET)
5110 	struct ethernet_config config;
5111 	enum ethernet_config_type config_type;
5112 
5113 	if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
5114 		return true;
5115 	}
5116 
5117 	if (!(net_eth_get_hw_capabilities(iface) & caps)) {
5118 		return true; /* No checksum offload */
5119 	}
5120 
5121 	if (caps == ETHERNET_HW_RX_CHKSUM_OFFLOAD) {
5122 		config_type = ETHERNET_CONFIG_TYPE_RX_CHECKSUM_SUPPORT;
5123 	} else {
5124 		config_type = ETHERNET_CONFIG_TYPE_TX_CHECKSUM_SUPPORT;
5125 	}
5126 
5127 	if (net_eth_get_hw_config(iface, config_type, &config) != 0) {
5128 		return false; /* No extra info, assume all offloaded. */
5129 	}
5130 
5131 	/* chksum_support uses the same bit encoding as chksum_type, so this checks that all requested types are offloaded */
5132 	return !((config.chksum_support & chksum_type) == chksum_type);
5133 #else
5134 	ARG_UNUSED(iface);
5135 	ARG_UNUSED(caps);
5136 
5137 	return true;
5138 #endif
5139 }
5140 
5141 bool net_if_need_calc_tx_checksum(struct net_if *iface, enum net_if_checksum_type chksum_type)
5142 {
5143 	return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD, chksum_type);
5144 }
5145 
5146 bool net_if_need_calc_rx_checksum(struct net_if *iface, enum net_if_checksum_type chksum_type)
5147 {
5148 	return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD, chksum_type);
5149 }
5150 
5151 int net_if_get_by_iface(struct net_if *iface)
5152 {
5153 	if (!(iface >= _net_if_list_start && iface < _net_if_list_end)) {
5154 		return -1;
5155 	}
5156 
5157 	return (iface - _net_if_list_start) + 1;
5158 }
5159 
5160 void net_if_foreach(net_if_cb_t cb, void *user_data)
5161 {
5162 	STRUCT_SECTION_FOREACH(net_if, iface) {
5163 		cb(iface, user_data);
5164 	}
5165 }
5166 
5167 bool net_if_is_offloaded(struct net_if *iface)
5168 {
5169 	return (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
5170 		net_if_is_ip_offloaded(iface)) ||
5171 	       (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
5172 		net_if_is_socket_offloaded(iface));
5173 }
5174 
5175 static void rejoin_multicast_groups(struct net_if *iface)
5176 {
5177 #if defined(CONFIG_NET_NATIVE_IPV6)
5178 	rejoin_ipv6_mcast_groups(iface);
5179 	if (l2_flags_get(iface) & NET_L2_MULTICAST) {
5180 		join_mcast_allnodes(iface);
5181 	}
5182 #else
5183 	ARG_UNUSED(iface);
5184 #endif
5185 }
5186 
5187 static void notify_iface_up(struct net_if *iface)
5188 {
5189 	/* In many places it is assumed that the link address has been set
5190 	 * with net_if_set_link_addr(), so verify that now.
5191 	 */
5192 	if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW) &&
5193 	    IS_ENABLED(CONFIG_NET_SOCKETS_CAN) &&
5194 	    (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW)))	{
5195 		/* CAN does not require link address. */
5196 	} else {
5197 		if (!net_if_is_offloaded(iface)) {
5198 			NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);
5199 		}
5200 	}
5201 
5202 	net_if_flag_set(iface, NET_IF_RUNNING);
5203 	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);
5204 	net_virtual_enable(iface);
5205 
5206 	/* If the interface only carries point-to-point traffic, there is no
5207 	 * need to run DAD etc. for it.
5208 	 */
5209 	if (!net_if_is_offloaded(iface) &&
5210 	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
5211 		/* Make sure that we update the IPv6 addresses and join the
5212 		 * multicast groups.
5213 		 */
5214 		rejoin_multicast_groups(iface);
5215 		iface_ipv6_start(iface);
5216 		iface_ipv4_start(iface);
5217 		net_ipv4_autoconf_start(iface);
5218 	}
5219 }
5220 
5221 static void notify_iface_down(struct net_if *iface)
5222 {
5223 	net_if_flag_clear(iface, NET_IF_RUNNING);
5224 	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
5225 	net_virtual_disable(iface);
5226 
5227 	if (!net_if_is_offloaded(iface) &&
5228 	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
5229 		iface_ipv6_stop(iface);
5230 		clear_joined_ipv6_mcast_groups(iface);
5231 		net_ipv4_autoconf_reset(iface);
5232 	}
5233 }
5234 
5235 static inline const char *net_if_oper_state2str(enum net_if_oper_state state)
5236 {
5237 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
5238 	switch (state) {
5239 	case NET_IF_OPER_UNKNOWN:
5240 		return "UNKNOWN";
5241 	case NET_IF_OPER_NOTPRESENT:
5242 		return "NOTPRESENT";
5243 	case NET_IF_OPER_DOWN:
5244 		return "DOWN";
5245 	case NET_IF_OPER_LOWERLAYERDOWN:
5246 		return "LOWERLAYERDOWN";
5247 	case NET_IF_OPER_TESTING:
5248 		return "TESTING";
5249 	case NET_IF_OPER_DORMANT:
5250 		return "DORMANT";
5251 	case NET_IF_OPER_UP:
5252 		return "UP";
5253 	default:
5254 		break;
5255 	}
5256 
5257 	return "<invalid>";
5258 #else
5259 	ARG_UNUSED(state);
5260 
5261 	return "";
5262 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
5263 }
5264 
5265 static void update_operational_state(struct net_if *iface)
5266 {
5267 	enum net_if_oper_state prev_state = iface->if_dev->oper_state;
5268 	enum net_if_oper_state new_state = NET_IF_OPER_UNKNOWN;
5269 
5270 	if (!net_if_is_admin_up(iface)) {
5271 		new_state = NET_IF_OPER_DOWN;
5272 		goto exit;
5273 	}
5274 
5275 	if (!device_is_ready(net_if_get_device(iface))) {
5276 		new_state = NET_IF_OPER_LOWERLAYERDOWN;
5277 		goto exit;
5278 	}
5279 
5280 	if (!net_if_is_carrier_ok(iface)) {
5281 #if defined(CONFIG_NET_L2_VIRTUAL)
5282 		if (net_if_l2(iface) == &NET_L2_GET_NAME(VIRTUAL)) {
5283 			new_state = NET_IF_OPER_LOWERLAYERDOWN;
5284 		} else
5285 #endif /* CONFIG_NET_L2_VIRTUAL */
5286 		{
5287 			new_state = NET_IF_OPER_DOWN;
5288 		}
5289 
5290 		goto exit;
5291 	}
5292 
5293 	if (net_if_is_dormant(iface)) {
5294 		new_state = NET_IF_OPER_DORMANT;
5295 		goto exit;
5296 	}
5297 
5298 	new_state = NET_IF_OPER_UP;
5299 
5300 exit:
5301 	if (net_if_oper_state_set(iface, new_state) != new_state) {
5302 		NET_ERR("Failed to update oper state to %d", new_state);
5303 		return;
5304 	}
5305 
5306 	NET_DBG("iface %d (%p), oper state %s admin %s carrier %s dormant %s",
5307 		net_if_get_by_iface(iface), iface,
5308 		net_if_oper_state2str(net_if_oper_state(iface)),
5309 		net_if_is_admin_up(iface) ? "UP" : "DOWN",
5310 		net_if_is_carrier_ok(iface) ? "ON" : "OFF",
5311 		net_if_is_dormant(iface) ? "ON" : "OFF");
5312 
5313 	if (net_if_oper_state(iface) == NET_IF_OPER_UP) {
5314 		if (prev_state != NET_IF_OPER_UP) {
5315 			notify_iface_up(iface);
5316 		}
5317 	} else {
5318 		if (prev_state == NET_IF_OPER_UP) {
5319 			notify_iface_down(iface);
5320 		}
5321 	}
5322 }
5323 
5324 static void init_igmp(struct net_if *iface)
5325 {
5326 #if defined(CONFIG_NET_IPV4_IGMP)
5327 	/* Ensure IPv4 is enabled for this interface. */
5328 	if (net_if_config_ipv4_get(iface, NULL)) {
5329 		return;
5330 	}
5331 
5332 	net_ipv4_igmp_init(iface);
5333 #else
5334 	ARG_UNUSED(iface);
5335 	return;
5336 #endif
5337 }
5338 
5339 int net_if_up(struct net_if *iface)
5340 {
5341 	int status = 0;
5342 
5343 	NET_DBG("iface %d (%p)", net_if_get_by_iface(iface), iface);
5344 
5345 	net_if_lock(iface);
5346 
5347 	if (net_if_flag_is_set(iface, NET_IF_UP)) {
5348 		status = -EALREADY;
5349 		goto out;
5350 	}
5351 
5352 	/* If the L2 does not support enable(), just set the flag */
5353 	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
5354 		goto done;
5355 	} else {
5356 		/* If the L2 does not implement enable(), then the network
5357 		 * device driver cannot implement start(), in which case
5358 		 * we can do a simple check here and not try to bring the
5359 		 * interface up as the device is not ready.
5360 		 *
5361 		 * If the network device driver does implement start(), then
5362 		 * it could bring the interface up when enable() is called
5363 		 * a few lines below.
5364 		 */
5365 		const struct device *dev;
5366 
5367 		dev = net_if_get_device(iface);
5368 		NET_ASSERT(dev);
5369 
5370 		/* If the device is not ready it is pointless trying to take it up. */
5371 		if (!device_is_ready(dev)) {
5372 			NET_DBG("Device %s (%p) is not ready", dev->name, dev);
5373 			status = -ENXIO;
5374 			goto out;
5375 		}
5376 	}
5377 
5378 	/* Notify L2 to enable the interface. Note that the interface is still down
5379 	 * at this point from network interface point of view i.e., the NET_IF_UP
5380 	 * flag has not been set yet.
5381 	 */
5382 	status = net_if_l2(iface)->enable(iface, true);
5383 	if (status < 0) {
5384 		NET_DBG("Cannot take interface %d up (%d)",
5385 			net_if_get_by_iface(iface), status);
5386 		goto out;
5387 	}
5388 
5389 	init_igmp(iface);
5390 
5391 done:
5392 	net_if_flag_set(iface, NET_IF_UP);
5393 	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_UP, iface);
5394 	update_operational_state(iface);
5395 
5396 out:
5397 	net_if_unlock(iface);
5398 
5399 	return status;
5400 }
5401 
5402 int net_if_down(struct net_if *iface)
5403 {
5404 	int status = 0;
5405 
5406 	NET_DBG("iface %p", iface);
5407 
5408 	net_if_lock(iface);
5409 
5410 	if (!net_if_flag_is_set(iface, NET_IF_UP)) {
5411 		status = -EALREADY;
5412 		goto out;
5413 	}
5414 
5415 	leave_mcast_all(iface);
5416 	leave_ipv4_mcast_all(iface);
5417 
5418 	/* If the L2 does not support enable(), just clear the flag */
5419 	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
5420 		goto done;
5421 	}
5422 
5423 	/* Notify L2 to disable the interface */
5424 	status = net_if_l2(iface)->enable(iface, false);
5425 	if (status < 0) {
5426 		goto out;
5427 	}
5428 
5429 done:
5430 	net_if_flag_clear(iface, NET_IF_UP);
5431 	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_DOWN, iface);
5432 	update_operational_state(iface);
5433 
5434 out:
5435 	net_if_unlock(iface);
5436 
5437 	return status;
5438 }
5439 
5440 void net_if_carrier_on(struct net_if *iface)
5441 {
5442 	NET_ASSERT(iface);
5443 
5444 	net_if_lock(iface);
5445 
5446 	if (!net_if_flag_test_and_set(iface, NET_IF_LOWER_UP)) {
5447 		update_operational_state(iface);
5448 	}
5449 
5450 	net_if_unlock(iface);
5451 }
5452 
5453 void net_if_carrier_off(struct net_if *iface)
5454 {
5455 	NET_ASSERT(iface);
5456 
5457 	net_if_lock(iface);
5458 
5459 	if (net_if_flag_test_and_clear(iface, NET_IF_LOWER_UP)) {
5460 		update_operational_state(iface);
5461 	}
5462 
5463 	net_if_unlock(iface);
5464 }
5465 
5466 void net_if_dormant_on(struct net_if *iface)
5467 {
5468 	NET_ASSERT(iface);
5469 
5470 	net_if_lock(iface);
5471 
5472 	if (!net_if_flag_test_and_set(iface, NET_IF_DORMANT)) {
5473 		update_operational_state(iface);
5474 	}
5475 
5476 	net_if_unlock(iface);
5477 }
5478 
5479 void net_if_dormant_off(struct net_if *iface)
5480 {
5481 	NET_ASSERT(iface);
5482 
5483 	net_if_lock(iface);
5484 
5485 	if (net_if_flag_test_and_clear(iface, NET_IF_DORMANT)) {
5486 		update_operational_state(iface);
5487 	}
5488 
5489 	net_if_unlock(iface);
5490 }
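
/* Illustrative usage sketch (added for clarity, not part of the upstream
 * sources): drivers typically feed the operational state machine above via
 * the carrier and dormant helpers, e.g. a Wi-Fi driver reporting the link
 * first and clearing the dormant state only once association completes:
 *
 *	net_if_dormant_on(iface);
 *	net_if_carrier_on(iface);
 *	...
 *	net_if_dormant_off(iface);
 */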
5491 
5492 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
5493 static int promisc_mode_set(struct net_if *iface, bool enable)
5494 {
5495 	enum net_l2_flags l2_flags = 0;
5496 
5497 	NET_ASSERT(iface);
5498 
5499 	l2_flags = l2_flags_get(iface);
5500 	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
5501 		return -ENOTSUP;
5502 	}
5503 
5504 #if defined(CONFIG_NET_L2_ETHERNET)
5505 	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
5506 		int ret = net_eth_promisc_mode(iface, enable);
5507 
5508 		if (ret < 0) {
5509 			return ret;
5510 		}
5511 	}
5512 #else
5513 	ARG_UNUSED(enable);
5514 
5515 	return -ENOTSUP;
5516 #endif
5517 
5518 	return 0;
5519 }
5520 
5521 int net_if_set_promisc(struct net_if *iface)
5522 {
5523 	int ret;
5524 
5525 	net_if_lock(iface);
5526 
5527 	ret = promisc_mode_set(iface, true);
5528 	if (ret < 0) {
5529 		goto out;
5530 	}
5531 
5532 	ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
5533 	if (ret) {
5534 		ret = -EALREADY;
5535 		goto out;
5536 	}
5537 
5538 out:
5539 	net_if_unlock(iface);
5540 
5541 	return ret;
5542 }
5543 
5544 void net_if_unset_promisc(struct net_if *iface)
5545 {
5546 	int ret;
5547 
5548 	net_if_lock(iface);
5549 
5550 	ret = promisc_mode_set(iface, false);
5551 	if (ret < 0) {
5552 		goto out;
5553 	}
5554 
5555 	net_if_flag_clear(iface, NET_IF_PROMISC);
5556 
5557 out:
5558 	net_if_unlock(iface);
5559 }
5560 
5561 bool net_if_is_promisc(struct net_if *iface)
5562 {
5563 	NET_ASSERT(iface);
5564 
5565 	return net_if_flag_is_set(iface, NET_IF_PROMISC);
5566 }
5567 #endif /* CONFIG_NET_PROMISCUOUS_MODE */
5568 
5569 #ifdef CONFIG_NET_POWER_MANAGEMENT
5570 
5571 int net_if_suspend(struct net_if *iface)
5572 {
5573 	int ret = 0;
5574 
5575 	net_if_lock(iface);
5576 
5577 	if (net_if_are_pending_tx_packets(iface)) {
5578 		ret = -EBUSY;
5579 		goto out;
5580 	}
5581 
5582 	if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
5583 		ret = -EALREADY;
5584 		goto out;
5585 	}
5586 
5587 	net_stats_add_suspend_start_time(iface, k_cycle_get_32());
5588 
5589 out:
5590 	net_if_unlock(iface);
5591 
5592 	return ret;
5593 }
5594 
5595 int net_if_resume(struct net_if *iface)
5596 {
5597 	int ret = 0;
5598 
5599 	net_if_lock(iface);
5600 
5601 	if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
5602 		ret = -EALREADY;
5603 		goto out;
5604 	}
5605 
5606 	net_if_flag_clear(iface, NET_IF_SUSPENDED);
5607 
5608 	net_stats_add_suspend_end_time(iface, k_cycle_get_32());
5609 
5610 out:
5611 	net_if_unlock(iface);
5612 
5613 	return ret;
5614 }
5615 
5616 bool net_if_is_suspended(struct net_if *iface)
5617 {
5618 	return net_if_flag_is_set(iface, NET_IF_SUSPENDED);
5619 }
5620 
5621 #endif /* CONFIG_NET_POWER_MANAGEMENT */
5622 
5623 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
5624 static void net_tx_ts_thread(void *p1, void *p2, void *p3)
5625 {
5626 	ARG_UNUSED(p1);
5627 	ARG_UNUSED(p2);
5628 	ARG_UNUSED(p3);
5629 
5630 	struct net_pkt *pkt;
5631 
5632 	NET_DBG("Starting TX timestamp callback thread");
5633 
5634 	while (1) {
5635 		pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
5636 		if (pkt) {
5637 			net_if_call_timestamp_cb(pkt);
5638 		}
5639 		net_pkt_unref(pkt);
5640 	}
5641 }
5642 
5643 void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
5644 				  struct net_pkt *pkt,
5645 				  struct net_if *iface,
5646 				  net_if_timestamp_callback_t cb)
5647 {
5648 	k_mutex_lock(&lock, K_FOREVER);
5649 
5650 	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
5651 	sys_slist_prepend(&timestamp_callbacks, &handle->node);
5652 
5653 	handle->iface = iface;
5654 	handle->cb = cb;
5655 	handle->pkt = pkt;
5656 
5657 	k_mutex_unlock(&lock);
5658 }
5659 
5660 void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
5661 {
5662 	k_mutex_lock(&lock, K_FOREVER);
5663 
5664 	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
5665 
5666 	k_mutex_unlock(&lock);
5667 }
5668 
5669 void net_if_call_timestamp_cb(struct net_pkt *pkt)
5670 {
5671 	sys_snode_t *sn, *sns;
5672 
5673 	k_mutex_lock(&lock, K_FOREVER);
5674 
5675 	SYS_SLIST_FOR_EACH_NODE_SAFE(&timestamp_callbacks, sn, sns) {
5676 		struct net_if_timestamp_cb *handle =
5677 			CONTAINER_OF(sn, struct net_if_timestamp_cb, node);
5678 
5679 		if (((handle->iface == NULL) ||
5680 		     (handle->iface == net_pkt_iface(pkt))) &&
5681 		    (handle->pkt == NULL || handle->pkt == pkt)) {
5682 			handle->cb(pkt);
5683 		}
5684 	}
5685 
5686 	k_mutex_unlock(&lock);
5687 }
5688 
5689 void net_if_add_tx_timestamp(struct net_pkt *pkt)
5690 {
5691 	k_fifo_put(&tx_ts_queue, pkt);
5692 	net_pkt_ref(pkt);
5693 }
5694 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
5695 
5696 bool net_if_is_wifi(struct net_if *iface)
5697 {
5698 	if (net_if_is_offloaded(iface)) {
5699 		return net_off_is_wifi_offloaded(iface);
5700 	}
5701 
5702 	if (IS_ENABLED(CONFIG_NET_L2_ETHERNET)) {
5703 		return net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) &&
5704 			net_eth_type_is_wifi(iface);
5705 	}
5706 
5707 	return false;
5708 }
5709 
5710 struct net_if *net_if_get_first_wifi(void)
5711 {
5712 	STRUCT_SECTION_FOREACH(net_if, iface) {
5713 		if (net_if_is_wifi(iface)) {
5714 			return iface;
5715 		}
5716 	}
5717 	return NULL;
5718 }
5719 
5720 struct net_if *net_if_get_wifi_sta(void)
5721 {
5722 	STRUCT_SECTION_FOREACH(net_if, iface) {
5723 		if (net_if_is_wifi(iface)
5724 #ifdef CONFIG_WIFI_NM
5725 			&& wifi_nm_iface_is_sta(iface)
5726 #endif
5727 			) {
5728 			return iface;
5729 		}
5730 	}
5731 
5732 	/* If no STA interface is found, return the first WiFi interface */
5733 	return net_if_get_first_wifi();
5734 }
5735 
5736 struct net_if *net_if_get_wifi_sap(void)
5737 {
5738 	STRUCT_SECTION_FOREACH(net_if, iface) {
5739 		if (net_if_is_wifi(iface)
5740 #ifdef CONFIG_WIFI_NM
5741 			&& wifi_nm_iface_is_sap(iface)
5742 #endif
5743 			) {
5744 			return iface;
5745 		}
5746 	}
5747 
5748 	/* If no SAP interface is found, return the first WiFi interface */
5749 	return net_if_get_first_wifi();
5750 }
5751 
5752 int net_if_get_name(struct net_if *iface, char *buf, int len)
5753 {
5754 #if defined(CONFIG_NET_INTERFACE_NAME)
5755 	int name_len;
5756 
5757 	if (iface == NULL || buf == NULL || len <= 0) {
5758 		return -EINVAL;
5759 	}
5760 
5761 	name_len = strlen(net_if_get_config(iface)->name);
5762 	if (name_len >= len) {
5763 		return -ERANGE;
5764 	}
5765 
5766 	/* Copy string and null terminator */
5767 	memcpy(buf, net_if_get_config(iface)->name, name_len + 1);
5768 
5769 	return name_len;
5770 #else
5771 	return -ENOTSUP;
5772 #endif
5773 }
5774 
5775 int net_if_set_name(struct net_if *iface, const char *buf)
5776 {
5777 #if defined(CONFIG_NET_INTERFACE_NAME)
5778 	int name_len;
5779 
5780 	if (iface == NULL || buf == NULL) {
5781 		return -EINVAL;
5782 	}
5783 
5784 	name_len = strlen(buf);
5785 	if (name_len >= sizeof(iface->config.name)) {
5786 		return -ENAMETOOLONG;
5787 	}
5788 
5789 	STRUCT_SECTION_FOREACH(net_if, iface_check) {
5790 		if (iface_check == iface) {
5791 			continue;
5792 		}
5793 
5794 		if (memcmp(net_if_get_config(iface_check)->name,
5795 			   buf,
5796 			   name_len + 1) == 0) {
5797 			return -EALREADY;
5798 		}
5799 	}
5800 
5801 	/* Copy string and null terminator */
5802 	memcpy(net_if_get_config(iface)->name, buf, name_len + 1);
5803 
5804 	return 0;
5805 #else
5806 	return -ENOTSUP;
5807 #endif
5808 }
5809 
5810 int net_if_get_by_name(const char *name)
5811 {
5812 #if defined(CONFIG_NET_INTERFACE_NAME)
5813 	if (name == NULL) {
5814 		return -EINVAL;
5815 	}
5816 
5817 	STRUCT_SECTION_FOREACH(net_if, iface) {
5818 		if (strncmp(net_if_get_config(iface)->name, name, strlen(name)) == 0) {
5819 			return net_if_get_by_iface(iface);
5820 		}
5821 	}
5822 
5823 	return -ENOENT;
5824 #else
5825 	return -ENOTSUP;
5826 #endif
5827 }
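
/* Illustrative usage sketch (added for clarity, not part of the upstream
 * sources): resolving an interface from its name and back. "eth0" follows
 * the default naming scheme used below and is only an example.
 *
 *	int idx = net_if_get_by_name("eth0");
 *
 *	if (idx > 0) {
 *		struct net_if *iface = net_if_get_by_index(idx);
 *		...
 *	}
 */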
5828 
5829 #if defined(CONFIG_NET_INTERFACE_NAME)
5830 static void set_default_name(struct net_if *iface)
5831 {
5832 	char name[CONFIG_NET_INTERFACE_NAME_LEN + 1];
5833 	int ret;
5834 
5835 	if (net_if_is_wifi(iface)) {
5836 		static int count;
5837 
5838 		snprintk(name, sizeof(name), "wlan%d", count++);
5839 
5840 	} else if (IS_ENABLED(CONFIG_NET_L2_ETHERNET) &&
5841 		   (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET))) {
5842 		static int count;
5843 
5844 		snprintk(name, sizeof(name), "eth%d", count++);
5845 	} else if (IS_ENABLED(CONFIG_NET_L2_IEEE802154) &&
5846 		   (net_if_l2(iface) == &NET_L2_GET_NAME(IEEE802154))) {
5847 		static int count;
5848 
5849 		snprintk(name, sizeof(name), "ieee%d", count++);
5850 	} else if (IS_ENABLED(CONFIG_NET_L2_DUMMY) &&
5851 		   (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY))) {
5852 		static int count;
5853 
5854 		snprintk(name, sizeof(name), "dummy%d", count++);
5855 	} else if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW) &&
5856 		   (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW))) {
5857 		static int count;
5858 
5859 		snprintk(name, sizeof(name), "can%d", count++);
5860 	} else if (IS_ENABLED(CONFIG_NET_L2_PPP) &&
5861 		   (net_if_l2(iface) == &NET_L2_GET_NAME(PPP))) {
5862 		static int count;
5863 
5864 		snprintk(name, sizeof(name) - 1, "ppp%d", count++);
5865 	} else if (IS_ENABLED(CONFIG_NET_L2_OPENTHREAD) &&
5866 		   (net_if_l2(iface) == &NET_L2_GET_NAME(OPENTHREAD))) {
5867 		static int count;
5868 
5869 		snprintk(name, sizeof(name), "thread%d", count++);
5870 	} else {
5871 		static int count;
5872 
5873 		snprintk(name, sizeof(name), "net%d", count++);
5874 	}
5875 
5876 	ret = net_if_set_name(iface, name);
5877 	if (ret < 0) {
5878 		NET_WARN("Cannot set default name for interface %d (%p) (%d)",
5879 			 net_if_get_by_iface(iface), iface, ret);
5880 	}
5881 }
5882 #endif /* CONFIG_NET_INTERFACE_NAME */
5883 
5884 void net_if_init(void)
5885 {
5886 	int if_count = 0;
5887 
5888 	NET_DBG("");
5889 
5890 	k_mutex_lock(&lock, K_FOREVER);
5891 
5892 	net_tc_tx_init();
5893 
5894 	STRUCT_SECTION_FOREACH(net_if, iface) {
5895 #if defined(CONFIG_NET_INTERFACE_NAME)
5896 		memset(net_if_get_config(iface)->name, 0,
5897 		       sizeof(iface->config.name));
5898 #endif
5899 
5900 		init_iface(iface);
5901 
5902 #if defined(CONFIG_NET_INTERFACE_NAME)
5903 		/* If the driver did not set the name, then set
5904 		 * a default name for the network interface.
5905 		 */
5906 		if (net_if_get_config(iface)->name[0] == '\0') {
5907 			set_default_name(iface);
5908 		}
5909 #endif
5910 
5911 		if_count++;
5912 	}
5913 
5914 	if (if_count == 0) {
5915 		NET_ERR("There is no network interface to work with!");
5916 		goto out;
5917 	}
5918 
5919 #if defined(CONFIG_ASSERT)
5920 	/* Do an extra check to verify that the interface count is
5921 	 * correct.
5922 	 */
5923 	int count_if;
5924 
5925 	NET_IFACE_COUNT(&count_if);
5926 	NET_ASSERT(count_if == if_count);
5927 #endif
5928 
5929 	iface_ipv6_init(if_count);
5930 	iface_ipv4_init(if_count);
5931 	iface_router_init();
5932 
5933 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
5934 	k_thread_create(&tx_thread_ts, tx_ts_stack,
5935 			K_KERNEL_STACK_SIZEOF(tx_ts_stack),
5936 			net_tx_ts_thread,
5937 			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
5938 	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
5939 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
5940 
5941 out:
5942 	k_mutex_unlock(&lock);
5943 }
5944 
5945 void net_if_post_init(void)
5946 {
5947 	NET_DBG("");
5948 
5949 	/* After TX is running, attempt to bring the interface up */
5950 	STRUCT_SECTION_FOREACH(net_if, iface) {
5951 		if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
5952 			net_if_up(iface);
5953 		}
5954 	}
5955 }
5956