1 /*
2  * Copyright (c) 2016 Intel Corporation.
3  * Copyright (c) 2023 Nordic Semiconductor ASA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/logging/log.h>
9 LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
10 
11 #include <zephyr/init.h>
12 #include <zephyr/kernel.h>
13 #include <zephyr/linker/sections.h>
14 #include <zephyr/random/random.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <zephyr/net/igmp.h>
19 #include <zephyr/net/net_core.h>
20 #include <zephyr/net/net_event.h>
21 #include <zephyr/net/net_pkt.h>
22 #include <zephyr/net/net_if.h>
23 #include <zephyr/net/net_mgmt.h>
24 #include <zephyr/net/ethernet.h>
25 #include <zephyr/net/offloaded_netdev.h>
26 #include <zephyr/net/virtual.h>
27 #include <zephyr/sys/iterable_sections.h>
28 
29 #include "net_private.h"
30 #include "ipv4.h"
31 #include "ipv6.h"
32 #include "ipv4_autoconf_internal.h"
33 
34 #include "net_stats.h"
35 
36 #define REACHABLE_TIME (MSEC_PER_SEC * 30) /* in ms */
37 /*
38  * split the min/max random reachable factors into numerator/denominator
39  * so that integer-based math works better
40  */
41 #define MIN_RANDOM_NUMER (1)
42 #define MIN_RANDOM_DENOM (2)
43 #define MAX_RANDOM_NUMER (3)
44 #define MAX_RANDOM_DENOM (2)
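/*
 * Rough sketch of how these factors are meant to be applied (the actual
 * reachable-time computation lives elsewhere; the names below are only
 * illustrative):
 *
 *   min = base_reachable_time * MIN_RANDOM_NUMER / MIN_RANDOM_DENOM;
 *   max = base_reachable_time * MAX_RANDOM_NUMER / MAX_RANDOM_DENOM;
 *   reachable_time = min + random % (max - min);
 *
 * i.e. the RFC 4861 MIN_RANDOM_FACTOR (0.5) and MAX_RANDOM_FACTOR (1.5)
 * expressed as integer fractions, giving e.g. 15000..45000 ms for a 30 s
 * base reachable time.
 */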
45 
46 static K_MUTEX_DEFINE(lock);
47 
48 /* net_if dedicated section limiters */
49 extern struct net_if _net_if_list_start[];
50 extern struct net_if _net_if_list_end[];
51 
52 static struct net_if *default_iface;
53 
54 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
55 static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
56 static struct k_work_delayable router_timer;
57 static sys_slist_t active_router_timers;
58 #endif
59 
60 #if defined(CONFIG_NET_NATIVE_IPV6)
61 /* Timer that triggers network address renewal */
62 static struct k_work_delayable address_lifetime_timer;
63 
64 /* Track currently active address lifetime timers */
65 static sys_slist_t active_address_lifetime_timers;
66 
67 /* Timer that triggers IPv6 prefix lifetime expiration */
68 static struct k_work_delayable prefix_lifetime_timer;
69 
70 /* Track currently active IPv6 prefix lifetime timers */
71 static sys_slist_t active_prefix_lifetime_timers;
72 
73 #if defined(CONFIG_NET_IPV6_DAD)
74 /** Duplicate address detection (DAD) timer */
75 static struct k_work_delayable dad_timer;
76 static sys_slist_t active_dad_timers;
77 #endif
78 
79 #if defined(CONFIG_NET_IPV6_ND)
80 static struct k_work_delayable rs_timer;
81 static sys_slist_t active_rs_timers;
82 #endif
83 
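/* Pool of per-interface IPv6 configurations. An entry is bound to an
 * interface on demand by net_if_config_ipv6_get() and released again by
 * net_if_config_ipv6_put() (see below).
 */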
84 static struct {
85 	struct net_if_ipv6 ipv6;
86 	struct net_if *iface;
87 } ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
88 #endif /* CONFIG_NET_NATIVE_IPV6 */
89 
90 #if defined(CONFIG_NET_NATIVE_IPV4)
91 static struct {
92 	struct net_if_ipv4 ipv4;
93 	struct net_if *iface;
94 } ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
95 #endif /* CONFIG_NET_NATIVE_IPV4 */
96 
97 /* We keep track of the link callbacks in this list.
98  */
99 static sys_slist_t link_callbacks;
100 
101 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
102 /* Multicast join/leave tracking.
103  */
104 static sys_slist_t mcast_monitor_callbacks;
105 #endif
106 
107 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
108 #if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
109 #define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
110 #endif
111 
112 K_KERNEL_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
113 K_FIFO_DEFINE(tx_ts_queue);
114 
115 static struct k_thread tx_thread_ts;
116 
117 /* We keep track of the timestamp callbacks in this list.
118  */
119 static sys_slist_t timestamp_callbacks;
120 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
121 
122 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
123 #define debug_check_packet(pkt)						\
124 	do {								\
125 		NET_DBG("Processing (pkt %p, prio %d) network packet "	\
126 			"iface %p/%d",					\
127 			pkt, net_pkt_priority(pkt),			\
128 			net_pkt_iface(pkt),				\
129 			net_if_get_by_iface(net_pkt_iface(pkt)));	\
130 									\
131 		NET_ASSERT(pkt->frags);					\
132 	} while (0)
133 #else
134 #define debug_check_packet(...)
135 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
136 
137 struct net_if *z_impl_net_if_get_by_index(int index)
138 {
139 	if (index <= 0) {
140 		return NULL;
141 	}
142 
143 	if (&_net_if_list_start[index - 1] >= _net_if_list_end) {
144 		NET_DBG("Index %d is too large", index);
145 		return NULL;
146 	}
147 
148 	return &_net_if_list_start[index - 1];
149 }
150 
151 #ifdef CONFIG_USERSPACE
152 struct net_if *z_vrfy_net_if_get_by_index(int index)
153 {
154 	struct net_if *iface;
155 
156 	iface = net_if_get_by_index(index);
157 	if (!iface) {
158 		return NULL;
159 	}
160 
161 	if (!k_object_is_valid(iface, K_OBJ_NET_IF)) {
162 		return NULL;
163 	}
164 
165 	return iface;
166 }
167 
168 #include <syscalls/net_if_get_by_index_mrsh.c>
169 #endif
170 
171 static inline void net_context_send_cb(struct net_context *context,
172 				       int status)
173 {
174 	if (!context) {
175 		return;
176 	}
177 
178 	if (context->send_cb) {
179 		context->send_cb(context, status, context->user_data);
180 	}
181 
182 	if (IS_ENABLED(CONFIG_NET_UDP) &&
183 	    net_context_get_proto(context) == IPPROTO_UDP) {
184 		net_stats_update_udp_sent(net_context_get_iface(context));
185 	} else if (IS_ENABLED(CONFIG_NET_TCP) &&
186 		   net_context_get_proto(context) == IPPROTO_TCP) {
187 		net_stats_update_tcp_seg_sent(net_context_get_iface(context));
188 	}
189 }
190 
191 static void update_txtime_stats_detail(struct net_pkt *pkt,
192 				       uint32_t start_time, uint32_t stop_time)
193 {
194 	uint32_t val, prev = start_time;
195 	int i;
196 
197 	for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
198 		if (!net_pkt_stats_tick(pkt)[i]) {
199 			break;
200 		}
201 
202 		val = net_pkt_stats_tick(pkt)[i] - prev;
203 		prev = net_pkt_stats_tick(pkt)[i];
204 		net_pkt_stats_tick(pkt)[i] = val;
205 	}
206 }
207 
208 static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
209 {
210 	struct net_linkaddr ll_dst = {
211 		.addr = NULL
212 	};
213 	struct net_linkaddr_storage ll_dst_storage;
214 	struct net_context *context;
215 	uint32_t create_time;
216 	int status;
217 
218 	/* We collect send statistics for each socket priority if enabled */
219 	uint8_t pkt_priority;
220 
221 	if (!pkt) {
222 		return false;
223 	}
224 
225 	create_time = net_pkt_create_time(pkt);
226 
227 	debug_check_packet(pkt);
228 
229 	/* If there are any link callbacks, copy the destination link-layer
230 	 * address out of the packet here (the callbacks receive it), in case
231 	 * the packet is freed before the callbacks are invoked.
232 	 */
233 	if (!sys_slist_is_empty(&link_callbacks)) {
234 		if (net_linkaddr_set(&ll_dst_storage,
235 				     net_pkt_lladdr_dst(pkt)->addr,
236 				     net_pkt_lladdr_dst(pkt)->len) == 0) {
237 			ll_dst.addr = ll_dst_storage.addr;
238 			ll_dst.len = ll_dst_storage.len;
239 			ll_dst.type = net_pkt_lladdr_dst(pkt)->type;
240 		}
241 	}
242 
243 	context = net_pkt_context(pkt);
244 
245 	if (net_if_flag_is_set(iface, NET_IF_LOWER_UP)) {
246 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
247 			pkt_priority = net_pkt_priority(pkt);
248 
249 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
250 				/* Make sure the statistics information is not
251 				 * lost by keeping the net_pkt over L2 send.
252 				 */
253 				net_pkt_ref(pkt);
254 			}
255 		}
256 
257 		net_if_tx_lock(iface);
258 		status = net_if_l2(iface)->send(iface, pkt);
259 		net_if_tx_unlock(iface);
260 
261 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
262 			uint32_t end_tick = k_cycle_get_32();
263 
264 			net_pkt_set_tx_stats_tick(pkt, end_tick);
265 
266 			net_stats_update_tc_tx_time(iface,
267 						    pkt_priority,
268 						    create_time,
269 						    end_tick);
270 
271 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
272 				update_txtime_stats_detail(
273 					pkt,
274 					create_time,
275 					end_tick);
276 
277 				net_stats_update_tc_tx_time_detail(
278 					iface, pkt_priority,
279 					net_pkt_stats_tick(pkt));
280 
281 				/* For TCP connections, we might keep the pkt
282 				 * longer so that we can resend it if needed.
283 				 * Because of that we need to clear the
284 				 * statistics here.
285 				 */
286 				net_pkt_stats_tick_reset(pkt);
287 
288 				net_pkt_unref(pkt);
289 			}
290 		}
291 
292 	} else {
293 		/* Drop packet if interface is not up */
294 		NET_WARN("iface %p is down", iface);
295 		status = -ENETDOWN;
296 	}
297 
298 	if (status < 0) {
299 		net_pkt_unref(pkt);
300 	} else {
301 		net_stats_update_bytes_sent(iface, status);
302 	}
303 
304 	if (context) {
305 		NET_DBG("Calling context send cb %p status %d",
306 			context, status);
307 
308 		net_context_send_cb(context, status);
309 	}
310 
311 	if (ll_dst.addr) {
312 		net_if_call_link_cb(iface, &ll_dst, status);
313 	}
314 
315 	return true;
316 }
317 
318 void net_process_tx_packet(struct net_pkt *pkt)
319 {
320 	struct net_if *iface;
321 
322 	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
323 
324 	iface = net_pkt_iface(pkt);
325 
326 	net_if_tx(iface, pkt);
327 
328 #if defined(CONFIG_NET_POWER_MANAGEMENT)
329 	iface->tx_pending--;
330 #endif
331 }
332 
333 void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
334 {
335 	if (!net_pkt_filter_send_ok(pkt)) {
336 		/* silently drop the packet */
337 		net_pkt_unref(pkt);
338 		return;
339 	}
340 
341 	uint8_t prio = net_pkt_priority(pkt);
342 	uint8_t tc = net_tx_priority2tc(prio);
343 
344 	net_stats_update_tc_sent_pkt(iface, tc);
345 	net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
346 	net_stats_update_tc_sent_priority(iface, tc, prio);
347 
348 	/* For the highest priority packets, skip the TX queue and push
349 	 * directly to the driver. Also, if there is no TX queue/thread,
350 	 * push the packet directly to the driver.
351 	 */
352 	if ((IS_ENABLED(CONFIG_NET_TC_SKIP_FOR_HIGH_PRIO) &&
353 	     prio >= NET_PRIORITY_CA) || NET_TC_TX_COUNT == 0) {
354 		net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
355 
356 		net_if_tx(net_pkt_iface(pkt), pkt);
357 		return;
358 	}
359 
360 #if NET_TC_TX_COUNT > 1
361 	NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
362 #endif
363 
364 #if defined(CONFIG_NET_POWER_MANAGEMENT)
365 	iface->tx_pending++;
366 #endif
367 
368 	if (!net_tc_submit_to_tx_queue(tc, pkt)) {
369 #if defined(CONFIG_NET_POWER_MANAGEMENT)
370 		iface->tx_pending--
371 #endif
372 			;
373 	}
374 }
375 
376 void net_if_stats_reset(struct net_if *iface)
377 {
378 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
379 	STRUCT_SECTION_FOREACH(net_if, tmp) {
380 		if (iface == tmp) {
381 			net_if_lock(iface);
382 			memset(&iface->stats, 0, sizeof(iface->stats));
383 			net_if_unlock(iface);
384 			return;
385 		}
386 	}
387 #else
388 	ARG_UNUSED(iface);
389 #endif
390 }
391 
392 void net_if_stats_reset_all(void)
393 {
394 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
395 	STRUCT_SECTION_FOREACH(net_if, iface) {
396 		net_if_lock(iface);
397 		memset(&iface->stats, 0, sizeof(iface->stats));
398 		net_if_unlock(iface);
399 	}
400 #endif
401 }
402 
403 static inline void init_iface(struct net_if *iface)
404 {
405 	const struct net_if_api *api = net_if_get_device(iface)->api;
406 
407 	if (!api || !api->init) {
408 		NET_ERR("Iface %p driver API init NULL", iface);
409 		return;
410 	}
411 
412 	/* By default IPv4 and IPv6 are enabled for a given network interface.
413 	 * These can be turned off later if needed.
414 	 */
415 #if defined(CONFIG_NET_NATIVE_IPV4)
416 	net_if_flag_set(iface, NET_IF_IPV4);
417 #endif
418 #if defined(CONFIG_NET_NATIVE_IPV6)
419 	net_if_flag_set(iface, NET_IF_IPV6);
420 #endif
421 
422 	net_virtual_init(iface);
423 
424 	NET_DBG("On iface %p", iface);
425 
426 #ifdef CONFIG_USERSPACE
427 	k_object_init(iface);
428 #endif
429 
430 	k_mutex_init(&iface->lock);
431 	k_mutex_init(&iface->tx_lock);
432 
433 	api->init(iface);
434 }
435 
436 enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
437 {
438 	struct net_context *context = net_pkt_context(pkt);
439 	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
440 	enum net_verdict verdict = NET_OK;
441 	int status = -EIO;
442 
443 	if (!net_if_flag_is_set(iface, NET_IF_LOWER_UP) ||
444 	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
445 		/* Drop packet if interface is not up */
446 		NET_WARN("iface %p is down", iface);
447 		verdict = NET_DROP;
448 		status = -ENETDOWN;
449 		goto done;
450 	}
451 
452 	if (IS_ENABLED(CONFIG_NET_OFFLOAD) && !net_if_l2(iface)) {
453 		NET_WARN("no l2 for iface %p, discard pkt", iface);
454 		verdict = NET_DROP;
455 		goto done;
456 	}
457 
458 	/* If the ll address is not set at all, then we must set
459 	 * it here.
460 	 * Workaround Linux bug, see:
461 	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
462 	 */
463 	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
464 	    !net_pkt_lladdr_src(pkt)->addr) {
465 		net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
466 		net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
467 	}
468 
469 #if defined(CONFIG_NET_LOOPBACK)
470 	/* If the packet is destined back to us, then there is no need to do
471 	 * additional checks, so let the packet through.
472 	 */
473 	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
474 		goto done;
475 	}
476 #endif
477 
478 	/* Bypass the IP stack with SOCK_RAW/IPPROTO_RAW sockets */
479 	if (IS_ENABLED(CONFIG_NET_SOCKETS_PACKET) &&
480 	    context && net_context_get_type(context) == SOCK_RAW &&
481 	    net_context_get_proto(context) == IPPROTO_RAW) {
482 		goto done;
483 	}
484 
485 	/* If the ll dst address is not set, check if it is present in the
486 	 * neighbor cache.
487 	 */
488 	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
489 		verdict = net_ipv6_prepare_for_send(pkt);
490 	}
491 
492 #if defined(CONFIG_NET_IPV4_FRAGMENT)
493 	if (net_pkt_family(pkt) == AF_INET) {
494 		verdict = net_ipv4_prepare_for_send(pkt);
495 	}
496 #endif
497 
498 done:
499 	/*   NET_OK in which case the packet was checked successfully. In this
500 	 *   case the net_context callback is called after successful delivery
501 	 *   in net_if_tx_thread().
502 	 *
503 	 *   NET_DROP in which case we call the net_context callback that will
504 	 *   give the status to the user application.
505 	 *
506 	 *   NET_CONTINUE in which case the sending of the packet is delayed.
507 	 *   This can happen for example if we need to do IPv6 ND to figure
508 	 *   out link layer address.
509 	 */
510 	if (verdict == NET_DROP) {
511 		if (context) {
512 			NET_DBG("Calling ctx send cb %p verdict %d",
513 				context, verdict);
514 			net_context_send_cb(context, status);
515 		}
516 
517 		if (dst->addr) {
518 			net_if_call_link_cb(iface, dst, status);
519 		}
520 	} else if (verdict == NET_OK) {
521 		/* Packet is ready to be sent by L2, let's queue */
522 		net_if_queue_tx(iface, pkt);
523 	}
524 
525 	return verdict;
526 }
527 
528 int net_if_set_link_addr_locked(struct net_if *iface,
529 				uint8_t *addr, uint8_t len,
530 				enum net_link_type type)
531 {
532 	int ret;
533 
534 	net_if_lock(iface);
535 
536 	ret = net_if_set_link_addr_unlocked(iface, addr, len, type);
537 
538 	net_if_unlock(iface);
539 
540 	return ret;
541 }
542 
543 struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
544 {
545 	STRUCT_SECTION_FOREACH(net_if, iface) {
546 		net_if_lock(iface);
547 		if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
548 			    ll_addr->len)) {
549 			net_if_unlock(iface);
550 			return iface;
551 		}
552 		net_if_unlock(iface);
553 	}
554 
555 	return NULL;
556 }
557 
558 struct net_if *net_if_lookup_by_dev(const struct device *dev)
559 {
560 	STRUCT_SECTION_FOREACH(net_if, iface) {
561 		if (net_if_get_device(iface) == dev) {
562 			return iface;
563 		}
564 	}
565 
566 	return NULL;
567 }
568 
569 void net_if_set_default(struct net_if *iface)
570 {
571 	default_iface = iface;
572 }
573 
574 struct net_if *net_if_get_default(void)
575 {
576 	struct net_if *iface = NULL;
577 
578 	if (&_net_if_list_start[0] == &_net_if_list_end[0]) {
579 		return NULL;
580 	}
581 
582 	if (default_iface != NULL) {
583 		return default_iface;
584 	}
585 
586 #if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
587 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
588 #endif
589 #if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
590 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
591 #endif
592 #if defined(CONFIG_NET_DEFAULT_IF_BLUETOOTH)
593 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(BLUETOOTH));
594 #endif
595 #if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
596 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
597 #endif
598 #if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
599 	iface = net_if_get_first_by_type(NULL);
600 #endif
601 #if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
602 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
603 #endif
604 #if defined(CONFIG_NET_DEFAULT_IF_PPP)
605 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
606 #endif
607 #if defined(CONFIG_NET_DEFAULT_IF_UP)
608 	iface = net_if_get_first_up();
609 #endif
610 #if defined(CONFIG_NET_DEFAULT_IF_WIFI)
611 	iface = net_if_get_first_wifi();
612 #endif
613 	return iface ? iface : _net_if_list_start;
614 }
615 
616 struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
617 {
618 	STRUCT_SECTION_FOREACH(net_if, iface) {
619 		if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
620 		    !l2 && net_if_offload(iface)) {
621 			return iface;
622 		}
623 
624 		if (net_if_l2(iface) == l2) {
625 			return iface;
626 		}
627 	}
628 
629 	return NULL;
630 }
631 
632 struct net_if *net_if_get_first_up(void)
633 {
634 	STRUCT_SECTION_FOREACH(net_if, iface) {
635 		if (net_if_flag_is_set(iface, NET_IF_UP)) {
636 			return iface;
637 		}
638 	}
639 
640 	return NULL;
641 }
642 
643 static enum net_l2_flags l2_flags_get(struct net_if *iface)
644 {
645 	enum net_l2_flags flags = 0;
646 
647 	if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
648 		flags = net_if_l2(iface)->get_flags(iface);
649 	}
650 
651 	return flags;
652 }
653 
654 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
655 /* Return how many bits are shared between two IP addresses */
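/* For example: 192.168.1.1 vs 192.168.1.100 share the first three octets plus
 * the leading bit of 0x01/0x64, so the function returns 25.
 */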
656 static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len)
657 {
658 	uint8_t j, k, xor;
659 	uint8_t len = 0U;
660 
661 	for (j = 0U; j < addr_len; j++) {
662 		if (src[j] == dst[j]) {
663 			len += 8U;
664 		} else {
665 			xor = src[j] ^ dst[j];
666 			for (k = 0U; k < 8; k++) {
667 				if (!(xor & 0x80)) {
668 					len++;
669 					xor <<= 1;
670 				} else {
671 					break;
672 				}
673 			}
674 			break;
675 		}
676 	}
677 
678 	return len;
679 }
680 
681 static struct net_if_router *iface_router_lookup(struct net_if *iface,
682 						 uint8_t family, void *addr)
683 {
684 	struct net_if_router *router = NULL;
685 	int i;
686 
687 	k_mutex_lock(&lock, K_FOREVER);
688 
689 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
690 		if (!routers[i].is_used ||
691 		    routers[i].address.family != family ||
692 		    routers[i].iface != iface) {
693 			continue;
694 		}
695 
696 		if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
697 		     net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
698 				       (struct in6_addr *)addr)) ||
699 		    (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
700 		     net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
701 				       (struct in_addr *)addr))) {
702 			router = &routers[i];
703 			goto out;
704 		}
705 	}
706 
707 out:
708 	k_mutex_unlock(&lock);
709 
710 	return router;
711 }
712 
713 static void iface_router_notify_deletion(struct net_if_router *router,
714 					 const char *delete_reason)
715 {
716 	if (IS_ENABLED(CONFIG_NET_IPV6) &&
717 	    router->address.family == AF_INET6) {
718 		NET_DBG("IPv6 router %s %s",
719 			net_sprint_ipv6_addr(net_if_router_ipv6(router)),
720 			delete_reason);
721 
722 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
723 						router->iface,
724 						&router->address.in6_addr,
725 						sizeof(struct in6_addr));
726 	} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
727 		   router->address.family == AF_INET) {
728 		NET_DBG("IPv4 router %s %s",
729 			net_sprint_ipv4_addr(net_if_router_ipv4(router)),
730 			delete_reason);
731 
732 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
733 						router->iface,
734 						&router->address.in_addr,
735 						sizeof(struct in_addr));
736 	}
737 }
738 
739 static inline int32_t iface_router_ends(const struct net_if_router *router,
740 					uint32_t now)
741 {
742 	uint32_t ends = router->life_start;
743 
744 	ends += MSEC_PER_SEC * router->lifetime;
745 
746 	/* Signed number of ms until router lifetime ends */
747 	return (int32_t)(ends - now);
748 }
749 
750 static void iface_router_update_timer(uint32_t now)
751 {
752 	struct net_if_router *router, *next;
753 	uint32_t new_delay = UINT32_MAX;
754 
755 	k_mutex_lock(&lock, K_FOREVER);
756 
757 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
758 					 router, next, node) {
759 		int32_t ends = iface_router_ends(router, now);
760 
761 		if (ends <= 0) {
762 			new_delay = 0;
763 			break;
764 		}
765 
766 		new_delay = MIN((uint32_t)ends, new_delay);
767 	}
768 
769 	if (new_delay == UINT32_MAX) {
770 		k_work_cancel_delayable(&router_timer);
771 	} else {
772 		k_work_reschedule(&router_timer, K_MSEC(new_delay));
773 	}
774 
775 	k_mutex_unlock(&lock);
776 }
777 
778 static void iface_router_expired(struct k_work *work)
779 {
780 	uint32_t current_time = k_uptime_get_32();
781 	struct net_if_router *router, *next;
782 	sys_snode_t *prev_node = NULL;
783 
784 	ARG_UNUSED(work);
785 
786 	k_mutex_lock(&lock, K_FOREVER);
787 
788 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
789 					  router, next, node) {
790 		int32_t ends = iface_router_ends(router, current_time);
791 
792 		if (ends > 0) {
793 			/* We have to loop on all active routers as their
794 			 * lifetimes differ from each other.
795 			 */
796 			prev_node = &router->node;
797 			continue;
798 		}
799 
800 		iface_router_notify_deletion(router, "has expired");
801 		sys_slist_remove(&active_router_timers,
802 				 prev_node, &router->node);
803 		router->is_used = false;
804 	}
805 
806 	iface_router_update_timer(current_time);
807 
808 	k_mutex_unlock(&lock);
809 }
810 
811 static struct net_if_router *iface_router_add(struct net_if *iface,
812 					      uint8_t family, void *addr,
813 					      bool is_default,
814 					      uint16_t lifetime)
815 {
816 	struct net_if_router *router = NULL;
817 	int i;
818 
819 	k_mutex_lock(&lock, K_FOREVER);
820 
821 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
822 		if (routers[i].is_used) {
823 			continue;
824 		}
825 
826 		routers[i].is_used = true;
827 		routers[i].iface = iface;
828 		routers[i].address.family = family;
829 
830 		if (lifetime) {
831 			routers[i].is_default = true;
832 			routers[i].is_infinite = false;
833 			routers[i].lifetime = lifetime;
834 			routers[i].life_start = k_uptime_get_32();
835 
836 			sys_slist_append(&active_router_timers,
837 					 &routers[i].node);
838 
839 			iface_router_update_timer(routers[i].life_start);
840 		} else {
841 			routers[i].is_default = false;
842 			routers[i].is_infinite = true;
843 			routers[i].lifetime = 0;
844 		}
845 
846 		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
847 			memcpy(net_if_router_ipv6(&routers[i]), addr,
848 			       sizeof(struct in6_addr));
849 			net_mgmt_event_notify_with_info(
850 					NET_EVENT_IPV6_ROUTER_ADD, iface,
851 					&routers[i].address.in6_addr,
852 					sizeof(struct in6_addr));
853 
854 			NET_DBG("interface %p router %s lifetime %u default %d "
855 				"added", iface,
856 				net_sprint_ipv6_addr((struct in6_addr *)addr),
857 				lifetime, routers[i].is_default);
858 		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
859 			memcpy(net_if_router_ipv4(&routers[i]), addr,
860 			       sizeof(struct in_addr));
861 			routers[i].is_default = is_default;
862 
863 			net_mgmt_event_notify_with_info(
864 					NET_EVENT_IPV4_ROUTER_ADD, iface,
865 					&routers[i].address.in_addr,
866 					sizeof(struct in_addr));
867 
868 			NET_DBG("interface %p router %s lifetime %u default %d "
869 				"added", iface,
870 				net_sprint_ipv4_addr((struct in_addr *)addr),
871 				lifetime, is_default);
872 		}
873 
874 		router = &routers[i];
875 		goto out;
876 	}
877 
878 out:
879 	k_mutex_unlock(&lock);
880 
881 	return router;
882 }
883 
884 static bool iface_router_rm(struct net_if_router *router)
885 {
886 	bool ret = false;
887 
888 	k_mutex_lock(&lock, K_FOREVER);
889 
890 	if (!router->is_used) {
891 		goto out;
892 	}
893 
894 	iface_router_notify_deletion(router, "has been removed");
895 
896 	/* We recompute the timer only if the router was time limited */
897 	if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
898 		iface_router_update_timer(k_uptime_get_32());
899 	}
900 
901 	router->is_used = false;
902 	ret = true;
903 
904 out:
905 	k_mutex_unlock(&lock);
906 
907 	return ret;
908 }
909 
910 void net_if_router_rm(struct net_if_router *router)
911 {
912 	k_mutex_lock(&lock, K_FOREVER);
913 
914 	router->is_used = false;
915 
916 	/* FIXME - remove timer */
917 
918 	k_mutex_unlock(&lock);
919 }
920 
921 static struct net_if_router *iface_router_find_default(struct net_if *iface,
922 						       uint8_t family, void *addr)
923 {
924 	struct net_if_router *router = NULL;
925 	int i;
926 
927 	/* Todo: addr will need to be handled */
928 	ARG_UNUSED(addr);
929 
930 	k_mutex_lock(&lock, K_FOREVER);
931 
932 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
933 		if (!routers[i].is_used ||
934 		    !routers[i].is_default ||
935 		    routers[i].address.family != family) {
936 			continue;
937 		}
938 
939 		if (iface && iface != routers[i].iface) {
940 			continue;
941 		}
942 
943 		router = &routers[i];
944 		goto out;
945 	}
946 
947 out:
948 	k_mutex_unlock(&lock);
949 
950 	return router;
951 }
952 
953 static void iface_router_init(void)
954 {
955 	k_work_init_delayable(&router_timer, iface_router_expired);
956 	sys_slist_init(&active_router_timers);
957 }
958 #else
959 #define iface_router_init(...)
960 #endif
961 
962 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
963 void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
964 			       struct net_if *iface,
965 			       net_if_mcast_callback_t cb)
966 {
967 	k_mutex_lock(&lock, K_FOREVER);
968 
969 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
970 	sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);
971 
972 	mon->iface = iface;
973 	mon->cb = cb;
974 
975 	k_mutex_unlock(&lock);
976 }
977 
978 void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
979 {
980 	k_mutex_lock(&lock, K_FOREVER);
981 
982 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
983 
984 	k_mutex_unlock(&lock);
985 }
986 
987 void net_if_mcast_monitor(struct net_if *iface,
988 			  const struct net_addr *addr,
989 			  bool is_joined)
990 {
991 	struct net_if_mcast_monitor *mon, *tmp;
992 
993 	k_mutex_lock(&lock, K_FOREVER);
994 
995 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
996 					  mon, tmp, node) {
997 		if (iface == mon->iface) {
998 			mon->cb(iface, addr, is_joined);
999 		}
1000 	}
1001 
1002 	k_mutex_unlock(&lock);
1003 }
1004 #else
1005 #define net_if_mcast_mon_register(...)
1006 #define net_if_mcast_mon_unregister(...)
1007 #define net_if_mcast_monitor(...)
1008 #endif
1009 
1010 #if defined(CONFIG_NET_NATIVE_IPV6)
1011 int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
1012 {
1013 	int ret = 0;
1014 	int i;
1015 
1016 	net_if_lock(iface);
1017 
1018 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1019 		ret = -ENOTSUP;
1020 		goto out;
1021 	}
1022 
1023 	if (iface->config.ip.ipv6) {
1024 		if (ipv6) {
1025 			*ipv6 = iface->config.ip.ipv6;
1026 		}
1027 
1028 		goto out;
1029 	}
1030 
1031 	k_mutex_lock(&lock, K_FOREVER);
1032 
1033 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1034 		if (ipv6_addresses[i].iface) {
1035 			continue;
1036 		}
1037 
1038 		iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
1039 		ipv6_addresses[i].iface = iface;
1040 
1041 		if (ipv6) {
1042 			*ipv6 = &ipv6_addresses[i].ipv6;
1043 		}
1044 
1045 		k_mutex_unlock(&lock);
1046 		goto out;
1047 	}
1048 
1049 	k_mutex_unlock(&lock);
1050 
1051 	ret = -ESRCH;
1052 out:
1053 	net_if_unlock(iface);
1054 
1055 	return ret;
1056 }
1057 
1058 int net_if_config_ipv6_put(struct net_if *iface)
1059 {
1060 	int ret = 0;
1061 	int i;
1062 
1063 	net_if_lock(iface);
1064 
1065 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1066 		ret = -ENOTSUP;
1067 		goto out;
1068 	}
1069 
1070 	if (!iface->config.ip.ipv6) {
1071 		ret = -EALREADY;
1072 		goto out;
1073 	}
1074 
1075 	k_mutex_lock(&lock, K_FOREVER);
1076 
1077 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1078 		if (ipv6_addresses[i].iface != iface) {
1079 			continue;
1080 		}
1081 
1082 		iface->config.ip.ipv6 = NULL;
1083 		ipv6_addresses[i].iface = NULL;
1084 
1085 		k_mutex_unlock(&lock);
1086 		goto out;
1087 	}
1088 
1089 	k_mutex_unlock(&lock);
1090 
1091 	ret = -ESRCH;
1092 out:
1093 	net_if_unlock(iface);
1094 
1095 	return ret;
1096 }
1097 
1098 #if defined(CONFIG_NET_IPV6_MLD)
1099 static void join_mcast_allnodes(struct net_if *iface)
1100 {
1101 	struct in6_addr addr;
1102 	int ret;
1103 
1104 	net_ipv6_addr_create_ll_allnodes_mcast(&addr);
1105 
1106 	ret = net_ipv6_mld_join(iface, &addr);
1107 	if (ret < 0 && ret != -EALREADY) {
1108 		NET_ERR("Cannot join all nodes address %s (%d)",
1109 			net_sprint_ipv6_addr(&addr), ret);
1110 	}
1111 }
1112 
1113 static void join_mcast_solicit_node(struct net_if *iface,
1114 				    struct in6_addr *my_addr)
1115 {
1116 	struct in6_addr addr;
1117 	int ret;
1118 
1119 	/* Join to needed multicast groups, RFC 4291 ch 2.8 */
1120 	net_ipv6_addr_create_solicited_node(my_addr, &addr);
1121 
1122 	ret = net_ipv6_mld_join(iface, &addr);
1123 	if (ret < 0 && ret != -EALREADY) {
1124 		NET_ERR("Cannot join solicit node address %s (%d)",
1125 			net_sprint_ipv6_addr(&addr), ret);
1126 	}
1127 }
1128 
1129 static void leave_mcast_all(struct net_if *iface)
1130 {
1131 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1132 	int i;
1133 
1134 	if (!ipv6) {
1135 		return;
1136 	}
1137 
1138 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1139 		if (!ipv6->mcast[i].is_used ||
1140 		    !ipv6->mcast[i].is_joined) {
1141 			continue;
1142 		}
1143 
1144 		net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
1145 	}
1146 }
1147 
1148 static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
1149 {
1150 	enum net_l2_flags flags = 0;
1151 
1152 	flags = l2_flags_get(iface);
1153 	if (flags & NET_L2_MULTICAST) {
1154 		join_mcast_allnodes(iface);
1155 
1156 		if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
1157 			join_mcast_solicit_node(iface, addr);
1158 		}
1159 	}
1160 }
1161 #else
1162 #define join_mcast_allnodes(...)
1163 #define join_mcast_solicit_node(...)
1164 #define leave_mcast_all(...)
1165 #define join_mcast_nodes(...)
1166 #endif /* CONFIG_NET_IPV6_MLD */
1167 
1168 #if defined(CONFIG_NET_IPV6_DAD)
1169 #define DAD_TIMEOUT 100U /* ms */
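/* A tentative address for which no conflict has been reported within
 * DAD_TIMEOUT of net_ipv6_start_dad() is promoted to NET_ADDR_PREFERRED
 * by dad_timeout() below.
 */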
1170 
1171 static void dad_timeout(struct k_work *work)
1172 {
1173 	uint32_t current_time = k_uptime_get_32();
1174 	struct net_if_addr *ifaddr, *next;
1175 	int32_t delay = -1;
1176 	sys_slist_t expired_list;
1177 
1178 	ARG_UNUSED(work);
1179 
1180 	sys_slist_init(&expired_list);
1181 
1182 	k_mutex_lock(&lock, K_FOREVER);
1183 
1184 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
1185 					  ifaddr, next, dad_node) {
1186 		/* DAD entries are ordered by construction.  Stop when
1187 		 * we find one that hasn't expired.
1188 		 */
1189 		delay = (int32_t)(ifaddr->dad_start +
1190 				  DAD_TIMEOUT - current_time);
1191 		if (delay > 0) {
1192 			break;
1193 		}
1194 
1195 		/* Removing the ifaddr from active_dad_timers list */
1196 		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);
1197 		sys_slist_append(&expired_list, &ifaddr->dad_node);
1198 
1199 		ifaddr = NULL;
1200 	}
1201 
1202 	if ((ifaddr != NULL) && (delay > 0)) {
1203 		k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
1204 	}
1205 
1206 	k_mutex_unlock(&lock);
1207 
1208 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ifaddr, dad_node) {
1209 		struct net_if_addr *tmp;
1210 		struct net_if *iface;
1211 
1212 		NET_DBG("DAD succeeded for %s",
1213 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1214 
1215 		ifaddr->addr_state = NET_ADDR_PREFERRED;
1216 
1217 		/* Because we do not know the interface at this point,
1218 		 * we need to look it up.
1219 		 */
1220 		iface = NULL;
1221 		tmp = net_if_ipv6_addr_lookup(&ifaddr->address.in6_addr,
1222 					      &iface);
1223 		if (tmp == ifaddr) {
1224 			net_mgmt_event_notify_with_info(
1225 					NET_EVENT_IPV6_DAD_SUCCEED,
1226 					iface, &ifaddr->address.in6_addr,
1227 					sizeof(struct in6_addr));
1228 
1229 			/* The address gets added to the neighbor cache, which
1230 			 * is not needed in this case as the address is our own.
1231 			 */
1232 			net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
1233 		}
1234 	}
1235 }
1236 
1237 static void net_if_ipv6_start_dad(struct net_if *iface,
1238 				  struct net_if_addr *ifaddr)
1239 {
1240 	ifaddr->addr_state = NET_ADDR_TENTATIVE;
1241 
1242 	if (net_if_is_up(iface)) {
1243 		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
1244 			iface,
1245 			net_sprint_ll_addr(
1246 					   net_if_get_link_addr(iface)->addr,
1247 					   net_if_get_link_addr(iface)->len),
1248 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1249 
1250 		ifaddr->dad_count = 1U;
1251 
1252 		if (!net_ipv6_start_dad(iface, ifaddr)) {
1253 			ifaddr->dad_start = k_uptime_get_32();
1254 
1255 			k_mutex_lock(&lock, K_FOREVER);
1256 			sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
1257 			k_mutex_unlock(&lock);
1258 
1259 			/* FUTURE: use schedule, not reschedule. */
1260 			if (!k_work_delayable_remaining_get(&dad_timer)) {
1261 				k_work_reschedule(&dad_timer,
1262 						  K_MSEC(DAD_TIMEOUT));
1263 			}
1264 		}
1265 	} else {
1266 		NET_DBG("Interface %p is down, starting DAD for %s later.",
1267 			iface,
1268 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1269 	}
1270 }
1271 
1272 void net_if_start_dad(struct net_if *iface)
1273 {
1274 	struct net_if_addr *ifaddr;
1275 	struct net_if_ipv6 *ipv6;
1276 	struct in6_addr addr = { };
1277 	int ret, i;
1278 
1279 	net_if_lock(iface);
1280 
1281 	NET_DBG("Starting DAD for iface %p", iface);
1282 
1283 	ret = net_if_config_ipv6_get(iface, &ipv6);
1284 	if (ret < 0) {
1285 		if (ret != -ENOTSUP) {
1286 			NET_WARN("Cannot do DAD, IPv6 config is not valid.");
1287 		}
1288 
1289 		goto out;
1290 	}
1291 
1292 	if (!ipv6) {
1293 		goto out;
1294 	}
1295 
1296 	net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));
1297 
1298 	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
1299 	if (!ifaddr) {
1300 		NET_ERR("Cannot add %s address to interface %p, DAD fails",
1301 			net_sprint_ipv6_addr(&addr), iface);
1302 	}
1303 
1304 	/* Start DAD for all the addresses that were added earlier when
1305 	 * the interface was down.
1306 	 */
1307 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1308 		if (!ipv6->unicast[i].is_used ||
1309 		    ipv6->unicast[i].address.family != AF_INET6 ||
1310 		    &ipv6->unicast[i] == ifaddr ||
1311 		    net_ipv6_is_addr_loopback(
1312 			    &ipv6->unicast[i].address.in6_addr)) {
1313 			continue;
1314 		}
1315 
1316 		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1317 	}
1318 
1319 out:
1320 	net_if_unlock(iface);
1321 }
1322 
1323 void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
1324 {
1325 	struct net_if_addr *ifaddr;
1326 
1327 	net_if_lock(iface);
1328 
1329 	ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
1330 	if (!ifaddr) {
1331 		NET_ERR("Cannot find %s address in interface %p",
1332 			net_sprint_ipv6_addr(addr), iface);
1333 		goto out;
1334 	}
1335 
1336 
1337 	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
1338 					&ifaddr->address.in6_addr,
1339 					sizeof(struct in6_addr));
1340 
1341 	net_if_ipv6_addr_rm(iface, addr);
1342 
1343 out:
1344 	net_if_unlock(iface);
1345 }
1346 
1347 static inline void iface_ipv6_dad_init(void)
1348 {
1349 	k_work_init_delayable(&dad_timer, dad_timeout);
1350 	sys_slist_init(&active_dad_timers);
1351 }
1352 
1353 #else
1354 static inline void net_if_ipv6_start_dad(struct net_if *iface,
1355 					 struct net_if_addr *ifaddr)
1356 {
1357 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1358 }
1359 
1360 #define iface_ipv6_dad_init(...)
1361 #endif /* CONFIG_NET_IPV6_DAD */
1362 
1363 #if defined(CONFIG_NET_IPV6_ND)
1364 #define RS_TIMEOUT (1U * MSEC_PER_SEC)
1365 #define RS_COUNT 3
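/* i.e. send up to RS_COUNT router solicitations, RS_TIMEOUT apart, and give
 * up if none of them is answered with a router advertisement (see
 * rs_timeout() below).
 */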
1366 
1367 static void rs_timeout(struct k_work *work)
1368 {
1369 	uint32_t current_time = k_uptime_get_32();
1370 	struct net_if_ipv6 *ipv6, *next;
1371 	int32_t delay = -1;
1372 	sys_slist_t expired_list;
1373 
1374 	ARG_UNUSED(work);
1375 
1376 	sys_slist_init(&expired_list);
1377 
1378 	k_mutex_lock(&lock, K_FOREVER);
1379 
1380 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
1381 					  ipv6, next, rs_node) {
1382 		/* RS entries are ordered by construction.  Stop when
1383 		 * we find one that hasn't expired.
1384 		 */
1385 		delay = (int32_t)(ipv6->rs_start + RS_TIMEOUT - current_time);
1386 		if (delay > 0) {
1387 			break;
1388 		}
1389 
1390 		/* Removing the ipv6 from active_rs_timers list */
1391 		sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);
1392 		sys_slist_append(&expired_list, &ipv6->rs_node);
1393 
1394 		ipv6 = NULL;
1395 	}
1396 
1397 	if ((ipv6 != NULL) && (delay > 0)) {
1398 		k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
1399 						    RS_TIMEOUT - current_time));
1400 	}
1401 
1402 	k_mutex_unlock(&lock);
1403 
1404 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ipv6, rs_node) {
1405 		struct net_if *iface = NULL;
1406 
1407 		/* Did not receive RA yet. */
1408 		ipv6->rs_count++;
1409 
1410 		STRUCT_SECTION_FOREACH(net_if, tmp) {
1411 			if (tmp->config.ip.ipv6 == ipv6) {
1412 				iface = tmp;
1413 				break;
1414 			}
1415 		}
1416 
1417 		if (iface) {
1418 			NET_DBG("RS no response iface %p count %d",
1419 				iface, ipv6->rs_count);
1420 			if (ipv6->rs_count < RS_COUNT) {
1421 				net_if_start_rs(iface);
1422 			}
1423 		} else {
1424 			NET_DBG("Interface IPv6 config %p not found", ipv6);
1425 		}
1426 	}
1427 }
1428 
1429 void net_if_start_rs(struct net_if *iface)
1430 {
1431 	struct net_if_ipv6 *ipv6;
1432 
1433 	net_if_lock(iface);
1434 
1435 	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1436 		goto out;
1437 	}
1438 
1439 	ipv6 = iface->config.ip.ipv6;
1440 	if (!ipv6) {
1441 		goto out;
1442 	}
1443 
1444 	NET_DBG("Starting ND/RS for iface %p", iface);
1445 
1446 	if (!net_ipv6_start_rs(iface)) {
1447 		ipv6->rs_start = k_uptime_get_32();
1448 
1449 		k_mutex_lock(&lock, K_FOREVER);
1450 		sys_slist_append(&active_rs_timers, &ipv6->rs_node);
1451 		k_mutex_unlock(&lock);
1452 
1453 		/* FUTURE: use schedule, not reschedule. */
1454 		if (!k_work_delayable_remaining_get(&rs_timer)) {
1455 			k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
1456 		}
1457 	}
1458 
1459 out:
1460 	net_if_unlock(iface);
1461 }
1462 
1463 void net_if_stop_rs(struct net_if *iface)
1464 {
1465 	struct net_if_ipv6 *ipv6;
1466 
1467 	net_if_lock(iface);
1468 
1469 	ipv6 = iface->config.ip.ipv6;
1470 	if (!ipv6) {
1471 		goto out;
1472 	}
1473 
1474 	NET_DBG("Stopping ND/RS for iface %p", iface);
1475 
1476 	k_mutex_lock(&lock, K_FOREVER);
1477 	sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
1478 	k_mutex_unlock(&lock);
1479 
1480 out:
1481 	net_if_unlock(iface);
1482 }
1483 
1484 static inline void iface_ipv6_nd_init(void)
1485 {
1486 	k_work_init_delayable(&rs_timer, rs_timeout);
1487 	sys_slist_init(&active_rs_timers);
1488 }
1489 
1490 #else
1491 #define net_if_start_rs(...)
1492 #define net_if_stop_rs(...)
1493 #define iface_ipv6_nd_init(...)
1494 #endif /* CONFIG_NET_IPV6_ND */
1495 
1496 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
1497 					    struct net_if **ret)
1498 {
1499 	struct net_if_addr *ifaddr = NULL;
1500 
1501 	STRUCT_SECTION_FOREACH(net_if, iface) {
1502 		struct net_if_ipv6 *ipv6;
1503 		int i;
1504 
1505 		net_if_lock(iface);
1506 
1507 		ipv6 = iface->config.ip.ipv6;
1508 		if (!ipv6) {
1509 			net_if_unlock(iface);
1510 			continue;
1511 		}
1512 
1513 		for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1514 			if (!ipv6->unicast[i].is_used ||
1515 			    ipv6->unicast[i].address.family != AF_INET6) {
1516 				continue;
1517 			}
1518 
1519 			if (net_ipv6_is_prefix(
1520 				    addr->s6_addr,
1521 				    ipv6->unicast[i].address.in6_addr.s6_addr,
1522 				    128)) {
1523 
1524 				if (ret) {
1525 					*ret = iface;
1526 				}
1527 
1528 				ifaddr = &ipv6->unicast[i];
1529 				net_if_unlock(iface);
1530 				goto out;
1531 			}
1532 		}
1533 
1534 		net_if_unlock(iface);
1535 	}
1536 
1537 out:
1538 	return ifaddr;
1539 }
1540 
1541 struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
1542 						     struct in6_addr *addr)
1543 {
1544 	struct net_if_addr *ifaddr = NULL;
1545 	struct net_if_ipv6 *ipv6;
1546 	int i;
1547 
1548 	net_if_lock(iface);
1549 
1550 	ipv6 = iface->config.ip.ipv6;
1551 	if (!ipv6) {
1552 		goto out;
1553 	}
1554 
1555 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1556 		if (!ipv6->unicast[i].is_used ||
1557 		    ipv6->unicast[i].address.family != AF_INET6) {
1558 			continue;
1559 		}
1560 
1561 		if (net_ipv6_is_prefix(
1562 			    addr->s6_addr,
1563 			    ipv6->unicast[i].address.in6_addr.s6_addr,
1564 			    128)) {
1565 			ifaddr = &ipv6->unicast[i];
1566 			goto out;
1567 		}
1568 	}
1569 
1570 out:
1571 	net_if_unlock(iface);
1572 
1573 	return ifaddr;
1574 }
1575 
1576 int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
1577 {
1578 	struct net_if *iface = NULL;
1579 	struct net_if_addr *if_addr;
1580 
1581 	if_addr = net_if_ipv6_addr_lookup(addr, &iface);
1582 	if (!if_addr) {
1583 		return 0;
1584 	}
1585 
1586 	return net_if_get_by_iface(iface);
1587 }
1588 
1589 #ifdef CONFIG_USERSPACE
1590 static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
1591 					  const struct in6_addr *addr)
1592 {
1593 	struct in6_addr addr_v6;
1594 
1595 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1596 
1597 	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
1598 }
1599 #include <syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
1600 #endif
1601 
1602 static void address_expired(struct net_if_addr *ifaddr)
1603 {
1604 	NET_DBG("IPv6 address %s is deprecated",
1605 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1606 
1607 	ifaddr->addr_state = NET_ADDR_DEPRECATED;
1608 
1609 	sys_slist_find_and_remove(&active_address_lifetime_timers,
1610 				  &ifaddr->lifetime.node);
1611 
1612 	net_timeout_set(&ifaddr->lifetime, 0, 0);
1613 }
1614 
1615 static void address_lifetime_timeout(struct k_work *work)
1616 {
1617 	uint32_t next_update = UINT32_MAX;
1618 	uint32_t current_time = k_uptime_get_32();
1619 	struct net_if_addr *current, *next;
1620 
1621 	ARG_UNUSED(work);
1622 
1623 	k_mutex_lock(&lock, K_FOREVER);
1624 
1625 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
1626 					  current, next, lifetime.node) {
1627 		struct net_timeout *timeout = &current->lifetime;
1628 		uint32_t this_update = net_timeout_evaluate(timeout,
1629 							     current_time);
1630 
1631 		if (this_update == 0U) {
1632 			address_expired(current);
1633 			continue;
1634 		}
1635 
1636 		if (this_update < next_update) {
1637 			next_update = this_update;
1638 		}
1639 
1640 		if (current == next) {
1641 			break;
1642 		}
1643 	}
1644 
1645 	if (next_update != UINT32_MAX) {
1646 		NET_DBG("Waiting for %d ms", (int32_t)next_update);
1647 
1648 		k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
1649 	}
1650 
1651 	k_mutex_unlock(&lock);
1652 }
1653 
1654 #if defined(CONFIG_NET_TEST)
1655 void net_address_lifetime_timeout(void)
1656 {
1657 	address_lifetime_timeout(NULL);
1658 }
1659 #endif
1660 
1661 static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
1662 {
1663 	sys_slist_append(&active_address_lifetime_timers,
1664 			 &ifaddr->lifetime.node);
1665 
1666 	net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
1667 	k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
1668 }
1669 
1670 void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
1671 				      uint32_t vlifetime)
1672 {
1673 	k_mutex_lock(&lock, K_FOREVER);
1674 
1675 	NET_DBG("Updating expire time of %s by %u secs",
1676 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
1677 		vlifetime);
1678 
1679 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1680 
1681 	address_start_timer(ifaddr, vlifetime);
1682 
1683 	k_mutex_unlock(&lock);
1684 }
1685 
1686 static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
1687 					  struct in6_addr *addr)
1688 {
1689 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1690 	int i;
1691 
1692 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1693 		if (!ipv6->unicast[i].is_used) {
1694 			continue;
1695 		}
1696 
1697 		if (net_ipv6_addr_cmp(
1698 			    addr, &ipv6->unicast[i].address.in6_addr)) {
1699 
1700 			return &ipv6->unicast[i];
1701 		}
1702 	}
1703 
1704 	return NULL;
1705 }
1706 
1707 static inline void net_if_addr_init(struct net_if_addr *ifaddr,
1708 				    struct in6_addr *addr,
1709 				    enum net_addr_type addr_type,
1710 				    uint32_t vlifetime)
1711 {
1712 	ifaddr->is_used = true;
1713 	ifaddr->address.family = AF_INET6;
1714 	ifaddr->addr_type = addr_type;
1715 	net_ipaddr_copy(&ifaddr->address.in6_addr, addr);
1716 
1717 	/* FIXME - set the mcast addr for this node */
1718 
1719 	if (vlifetime) {
1720 		ifaddr->is_infinite = false;
1721 
1722 		NET_DBG("Expiring %s in %u secs",
1723 			net_sprint_ipv6_addr(addr),
1724 			vlifetime);
1725 
1726 		net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
1727 	} else {
1728 		ifaddr->is_infinite = true;
1729 	}
1730 }
1731 
1732 struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
1733 					 struct in6_addr *addr,
1734 					 enum net_addr_type addr_type,
1735 					 uint32_t vlifetime)
1736 {
1737 	struct net_if_addr *ifaddr = NULL;
1738 	struct net_if_ipv6 *ipv6;
1739 	int i;
1740 
1741 	net_if_lock(iface);
1742 
1743 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1744 		goto out;
1745 	}
1746 
1747 	ifaddr = ipv6_addr_find(iface, addr);
1748 	if (ifaddr) {
1749 		goto out;
1750 	}
1751 
1752 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1753 		if (ipv6->unicast[i].is_used) {
1754 			continue;
1755 		}
1756 
1757 		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
1758 				 vlifetime);
1759 
1760 		NET_DBG("[%d] interface %p address %s type %s added", i,
1761 			iface, net_sprint_ipv6_addr(addr),
1762 			net_addr_type2str(addr_type));
1763 
1764 		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
1765 		    !net_ipv6_is_addr_loopback(addr) &&
1766 		    !net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1767 			/* RFC 4862 5.4.2
1768 			 * Before sending a Neighbor Solicitation, an interface
1769 			 * MUST join the all-nodes multicast address and the
1770 			 * solicited-node multicast address of the tentative
1771 			 * address.
1772 			 */
1773 			/* The allnodes multicast group is only joined once as
1774 			 * net_ipv6_mcast_join() checks if we have already
1775 			 * joined.
1776 			 */
1777 			join_mcast_nodes(iface,
1778 					 &ipv6->unicast[i].address.in6_addr);
1779 
1780 			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1781 		} else {
1782 			/* If DAD is not done for point-to-point links, then
1783 			 * the address is usable immediately.
1784 			 */
1785 			ipv6->unicast[i].addr_state = NET_ADDR_PREFERRED;
1786 		}
1787 
1788 		net_mgmt_event_notify_with_info(
1789 			NET_EVENT_IPV6_ADDR_ADD, iface,
1790 			&ipv6->unicast[i].address.in6_addr,
1791 			sizeof(struct in6_addr));
1792 
1793 		ifaddr = &ipv6->unicast[i];
1794 		goto out;
1795 	}
1796 
1797 out:
1798 	net_if_unlock(iface);
1799 
1800 	return ifaddr;
1801 }
1802 
1803 bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
1804 {
1805 	bool ret = false;
1806 	struct net_if_ipv6 *ipv6;
1807 	struct in6_addr maddr;
1808 	int found = -1;
1809 	unsigned int maddr_count = 0;
1810 
1811 	NET_ASSERT(addr);
1812 
1813 	net_if_lock(iface);
1814 
1815 	ipv6 = iface->config.ip.ipv6;
1816 	if (!ipv6) {
1817 		goto out;
1818 	}
1819 
1820 	net_ipv6_addr_create_solicited_node(addr, &maddr);
1821 
1822 	for (int i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1823 		struct in6_addr unicast_maddr;
1824 
1825 		if (!ipv6->unicast[i].is_used) {
1826 			continue;
1827 		}
1828 
1829 		/* Count how many of the used unicast addresses map to this
1830 		 * same solicited-node multicast address.
1831 		 */
1832 		net_ipv6_addr_create_solicited_node(&ipv6->unicast[i].address.in6_addr,
1833 						    &unicast_maddr);
1834 		if (net_ipv6_addr_cmp(&maddr, &unicast_maddr)) {
1835 			maddr_count++;
1836 		}
1837 
1838 		if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr,
1839 				       addr)) {
1840 			continue;
1841 		}
1842 
1843 		found = i;
1844 	}
1845 
1846 	if (found >= 0) {
1847 		if (!ipv6->unicast[found].is_infinite) {
1848 			k_mutex_lock(&lock, K_FOREVER);
1849 
1850 			sys_slist_find_and_remove(
1851 				&active_address_lifetime_timers,
1852 				&ipv6->unicast[found].lifetime.node);
1853 
1854 			if (sys_slist_is_empty(
1855 				    &active_address_lifetime_timers)) {
1856 				k_work_cancel_delayable(
1857 					&address_lifetime_timer);
1858 			}
1859 
1860 			k_mutex_unlock(&lock);
1861 		}
1862 
1863 #if defined(CONFIG_NET_IPV6_DAD)
1864 		if (!net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1865 			k_mutex_lock(&lock, K_FOREVER);
1866 			sys_slist_find_and_remove(&active_dad_timers,
1867 						  &ipv6->unicast[found].dad_node);
1868 			k_mutex_unlock(&lock);
1869 		}
1870 #endif
1871 
1872 		ipv6->unicast[found].is_used = false;
1873 
1874 		if (maddr_count == 1) {
1875 			/* remove the solicited-node multicast address only if no other
1876 			 * unicast address is also using it
1877 			 */
1878 			net_if_ipv6_maddr_rm(iface, &maddr);
1879 		}
1880 
1881 		NET_DBG("[%d] interface %p address %s type %s removed",
1882 			found, iface, net_sprint_ipv6_addr(addr),
1883 			net_addr_type2str(ipv6->unicast[found].addr_type));
1884 
1885 		/* Using the IPv6 address pointer here can give false
1886 		 * info if someone adds a new IP address into this position
1887 		 * in the address array. This is quite unlikely, though.
1888 		 */
1889 		net_mgmt_event_notify_with_info(
1890 			NET_EVENT_IPV6_ADDR_DEL,
1891 			iface,
1892 			&ipv6->unicast[found].address.in6_addr,
1893 			sizeof(struct in6_addr));
1894 
1895 		ret = true;
1896 		goto out;
1897 	}
1898 
1899 out:
1900 	net_if_unlock(iface);
1901 
1902 	return ret;
1903 }
1904 
1905 bool z_impl_net_if_ipv6_addr_add_by_index(int index,
1906 					  struct in6_addr *addr,
1907 					  enum net_addr_type addr_type,
1908 					  uint32_t vlifetime)
1909 {
1910 	struct net_if *iface;
1911 
1912 	iface = net_if_get_by_index(index);
1913 	if (!iface) {
1914 		return false;
1915 	}
1916 
1917 	return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
1918 		true : false;
1919 }
1920 
1921 #ifdef CONFIG_USERSPACE
1922 bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
1923 					  struct in6_addr *addr,
1924 					  enum net_addr_type addr_type,
1925 					  uint32_t vlifetime)
1926 {
1927 	struct in6_addr addr_v6;
1928 	struct net_if *iface;
1929 
1930 	iface = z_vrfy_net_if_get_by_index(index);
1931 	if (!iface) {
1932 		return false;
1933 	}
1934 
1935 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1936 
1937 	return z_impl_net_if_ipv6_addr_add_by_index(index,
1938 						    &addr_v6,
1939 						    addr_type,
1940 						    vlifetime);
1941 }
1942 
1943 #include <syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
1944 #endif /* CONFIG_USERSPACE */
1945 
1946 bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
1947 					 const struct in6_addr *addr)
1948 {
1949 	struct net_if *iface;
1950 
1951 	iface = net_if_get_by_index(index);
1952 	if (!iface) {
1953 		return false;
1954 	}
1955 
1956 	return net_if_ipv6_addr_rm(iface, addr);
1957 }
1958 
1959 #ifdef CONFIG_USERSPACE
1960 bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
1961 					 const struct in6_addr *addr)
1962 {
1963 	struct in6_addr addr_v6;
1964 	struct net_if *iface;
1965 
1966 	iface = z_vrfy_net_if_get_by_index(index);
1967 	if (!iface) {
1968 		return false;
1969 	}
1970 
1971 	K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1972 
1973 	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
1974 }
1975 
1976 #include <syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
1977 #endif /* CONFIG_USERSPACE */
1978 
1979 void net_if_ipv6_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
1980 			      void *user_data)
1981 {
1982 	struct net_if_ipv6 *ipv6;
1983 
1984 	if (iface == NULL) {
1985 		return;
1986 	}
1987 
1988 	net_if_lock(iface);
1989 
1990 	ipv6 = iface->config.ip.ipv6;
1991 	if (ipv6 == NULL) {
1992 		goto out;
1993 	}
1994 
1995 	for (int i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1996 		struct net_if_addr *if_addr = &ipv6->unicast[i];
1997 
1998 		if (!if_addr->is_used) {
1999 			continue;
2000 		}
2001 
2002 		cb(iface, if_addr, user_data);
2003 	}
2004 
2005 out:
2006 	net_if_unlock(iface);
2007 }
2008 
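/* Register an IPv6 multicast address on the interface. The address
 * must be a multicast address and must not already be registered on
 * any interface; on success a NET_EVENT_IPV6_MADDR_ADD management
 * event is generated.
 */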
2009 struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
2010 						const struct in6_addr *addr)
2011 {
2012 	struct net_if_mcast_addr *ifmaddr = NULL;
2013 	struct net_if_ipv6 *ipv6;
2014 	int i;
2015 
2016 	net_if_lock(iface);
2017 
2018 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2019 		goto out;
2020 	}
2021 
2022 	if (!net_ipv6_is_addr_mcast(addr)) {
2023 		NET_DBG("Address %s is not a multicast address.",
2024 			net_sprint_ipv6_addr(addr));
2025 		goto out;
2026 	}
2027 
2028 	if (net_if_ipv6_maddr_lookup(addr, &iface)) {
2029 		NET_WARN("Multicast address %s is already registered.",
2030 			net_sprint_ipv6_addr(addr));
2031 		goto out;
2032 	}
2033 
2034 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2035 		if (ipv6->mcast[i].is_used) {
2036 			continue;
2037 		}
2038 
2039 		ipv6->mcast[i].is_used = true;
2040 		ipv6->mcast[i].address.family = AF_INET6;
2041 		memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
2042 
2043 		NET_DBG("[%d] interface %p address %s added", i, iface,
2044 			net_sprint_ipv6_addr(addr));
2045 
2046 		net_mgmt_event_notify_with_info(
2047 			NET_EVENT_IPV6_MADDR_ADD, iface,
2048 			&ipv6->mcast[i].address.in6_addr,
2049 			sizeof(struct in6_addr));
2050 
2051 		ifmaddr = &ipv6->mcast[i];
2052 		goto out;
2053 	}
2054 
2055 out:
2056 	net_if_unlock(iface);
2057 
2058 	return ifmaddr;
2059 }
2060 
2061 bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
2062 {
2063 	bool ret = false;
2064 	struct net_if_ipv6 *ipv6;
2065 	int i;
2066 
2067 	net_if_lock(iface);
2068 
2069 	ipv6 = iface->config.ip.ipv6;
2070 	if (!ipv6) {
2071 		goto out;
2072 	}
2073 
2074 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2075 		if (!ipv6->mcast[i].is_used) {
2076 			continue;
2077 		}
2078 
2079 		if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
2080 				       addr)) {
2081 			continue;
2082 		}
2083 
2084 		ipv6->mcast[i].is_used = false;
2085 
2086 		NET_DBG("[%d] interface %p address %s removed",
2087 			i, iface, net_sprint_ipv6_addr(addr));
2088 
2089 		net_mgmt_event_notify_with_info(
2090 			NET_EVENT_IPV6_MADDR_DEL, iface,
2091 			&ipv6->mcast[i].address.in6_addr,
2092 			sizeof(struct in6_addr));
2093 
2094 		ret = true;
2095 		goto out;
2096 	}
2097 
2098 out:
2099 	net_if_unlock(iface);
2100 
2101 	return ret;
2102 }
2103 
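/* Look up an IPv6 multicast address. If the caller passes a non-NULL
 * *ret, the search is restricted to that interface; on a match *ret is
 * updated to point at the interface owning the address.
 */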
2104 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
2105 						   struct net_if **ret)
2106 {
2107 	struct net_if_mcast_addr *ifmaddr = NULL;
2108 
2109 	STRUCT_SECTION_FOREACH(net_if, iface) {
2110 		struct net_if_ipv6 *ipv6;
2111 		int i;
2112 
2113 		if (ret && *ret && iface != *ret) {
2114 			continue;
2115 		}
2116 
2117 		net_if_lock(iface);
2118 
2119 		ipv6 = iface->config.ip.ipv6;
2120 		if (!ipv6) {
2121 			net_if_unlock(iface);
2122 			continue;
2123 		}
2124 
2125 		for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2126 			if (!ipv6->mcast[i].is_used ||
2127 			    ipv6->mcast[i].address.family != AF_INET6) {
2128 				continue;
2129 			}
2130 
2131 			if (net_ipv6_is_prefix(
2132 				    maddr->s6_addr,
2133 				    ipv6->mcast[i].address.in6_addr.s6_addr,
2134 				    128)) {
2135 				if (ret) {
2136 					*ret = iface;
2137 				}
2138 
2139 				ifmaddr = &ipv6->mcast[i];
2140 				net_if_unlock(iface);
2141 				goto out;
2142 			}
2143 		}
2144 
2145 		net_if_unlock(iface);
2146 	}
2147 
2148 out:
2149 	return ifmaddr;
2150 }
2151 
2152 void net_if_ipv6_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
2153 {
2154 	NET_ASSERT(iface);
2155 	NET_ASSERT(addr);
2156 
2157 	net_if_lock(iface);
2158 	addr->is_joined = false;
2159 	net_if_unlock(iface);
2160 }
2161 
2162 void net_if_ipv6_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
2163 {
2164 	NET_ASSERT(iface);
2165 	NET_ASSERT(addr);
2166 
2167 	net_if_lock(iface);
2168 	addr->is_joined = true;
2169 	net_if_unlock(iface);
2170 }
2171 
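/* Remove all autoconfigured (NET_ADDR_AUTOCONF) unicast addresses that
 * fall within the given prefix. Used when a prefix expires or is
 * removed from the interface.
 */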
2172 static void remove_prefix_addresses(struct net_if *iface,
2173 				    struct net_if_ipv6 *ipv6,
2174 				    struct in6_addr *addr,
2175 				    uint8_t len)
2176 {
2177 	int i;
2178 
2179 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2180 		if (!ipv6->unicast[i].is_used ||
2181 		    ipv6->unicast[i].address.family != AF_INET6 ||
2182 		    ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) {
2183 			continue;
2184 		}
2185 
2186 		if (net_ipv6_is_prefix(
2187 				addr->s6_addr,
2188 				ipv6->unicast[i].address.in6_addr.s6_addr,
2189 				len)) {
2190 			net_if_ipv6_addr_rm(iface,
2191 					    &ipv6->unicast[i].address.in6_addr);
2192 		}
2193 	}
2194 }
2195 
2196 static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
2197 {
2198 	struct net_if_ipv6 *ipv6;
2199 
2200 	net_if_lock(ifprefix->iface);
2201 
2202 	NET_DBG("Prefix %s/%d expired",
2203 		net_sprint_ipv6_addr(&ifprefix->prefix),
2204 		ifprefix->len);
2205 
2206 	ifprefix->is_used = false;
2207 
2208 	if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
2209 		net_if_unlock(ifprefix->iface);
2210 		return;
	}
2211 
2212 	/* Also remove all autoconfigured addresses that use the same prefix.
2213 	 */
2214 	remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
2215 				ifprefix->len);
2216 
2217 	if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2218 		struct net_event_ipv6_prefix info;
2219 
2220 		net_ipaddr_copy(&info.addr, &ifprefix->prefix);
2221 		info.len = ifprefix->len;
2222 		info.lifetime = 0;
2223 
2224 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2225 						ifprefix->iface,
2226 						(const void *) &info,
2227 						sizeof(struct net_event_ipv6_prefix));
2228 	} else {
2229 		net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface);
2230 	}
2231 
2232 	net_if_unlock(ifprefix->iface);
2233 }
2234 
2235 static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
2236 {
2237 	k_mutex_lock(&lock, K_FOREVER);
2238 
2239 	NET_DBG("IPv6 prefix %s/%d removed",
2240 		net_sprint_ipv6_addr(&ifprefix->prefix),
2241 		ifprefix->len);
2242 
2243 	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2244 				  &ifprefix->lifetime.node);
2245 
2246 	net_timeout_set(&ifprefix->lifetime, 0, 0);
2247 
2248 	k_mutex_unlock(&lock);
2249 }
2250 
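/* Work handler for IPv6 prefix lifetimes. Expired prefixes are moved
 * from the active timer list to a local list while the global lock
 * mutex is held; the actual expiry handling runs only after the mutex
 * is released, since prefix_lifetime_expired() takes the per-interface
 * lock.
 */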
2251 static void prefix_lifetime_timeout(struct k_work *work)
2252 {
2253 	uint32_t next_update = UINT32_MAX;
2254 	uint32_t current_time = k_uptime_get_32();
2255 	struct net_if_ipv6_prefix *current, *next;
2256 	sys_slist_t expired_list;
2257 
2258 	ARG_UNUSED(work);
2259 
2260 	sys_slist_init(&expired_list);
2261 
2262 	k_mutex_lock(&lock, K_FOREVER);
2263 
2264 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
2265 					  current, next, lifetime.node) {
2266 		struct net_timeout *timeout = &current->lifetime;
2267 		uint32_t this_update = net_timeout_evaluate(timeout,
2268 							    current_time);
2269 
2270 		if (this_update == 0U) {
2271 			sys_slist_find_and_remove(
2272 				&active_prefix_lifetime_timers,
2273 				&current->lifetime.node);
2274 			sys_slist_append(&expired_list,
2275 					 &current->lifetime.node);
2276 			continue;
2277 		}
2278 
2279 		if (this_update < next_update) {
2280 			next_update = this_update;
2281 		}
2282 
2283 		if (current == next) {
2284 			break;
2285 		}
2286 	}
2287 
2288 	if (next_update != UINT32_MAX) {
2289 		k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
2290 	}
2291 
2292 	k_mutex_unlock(&lock);
2293 
2294 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, current, lifetime.node) {
2295 		prefix_lifetime_expired(current);
2296 	}
2297 }
2298 
2299 static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
2300 			       uint32_t lifetime)
2301 {
2302 	k_mutex_lock(&lock, K_FOREVER);
2303 
2304 	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2305 					&ifprefix->lifetime.node);
2306 	sys_slist_append(&active_prefix_lifetime_timers,
2307 			 &ifprefix->lifetime.node);
2308 
2309 	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
2310 	k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);
2311 
2312 	k_mutex_unlock(&lock);
2313 }
2314 
2315 static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
2316 						   struct in6_addr *prefix,
2317 						   uint8_t prefix_len)
2318 {
2319 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2320 	int i;
2321 
2322 	if (!ipv6) {
2323 		return NULL;
2324 	}
2325 
2326 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2327 		if (!ipv6->prefix[i].is_used) {
2328 			continue;
2329 		}
2330 
2331 		if (net_ipv6_addr_cmp(prefix, &ipv6->prefix[i].prefix) &&
2332 		    prefix_len == ipv6->prefix[i].len) {
2333 			return &ipv6->prefix[i];
2334 		}
2335 	}
2336 
2337 	return NULL;
2338 }
2339 
2340 static void net_if_ipv6_prefix_init(struct net_if *iface,
2341 				    struct net_if_ipv6_prefix *ifprefix,
2342 				    struct in6_addr *addr, uint8_t len,
2343 				    uint32_t lifetime)
2344 {
2345 	ifprefix->is_used = true;
2346 	ifprefix->len = len;
2347 	ifprefix->iface = iface;
2348 	net_ipaddr_copy(&ifprefix->prefix, addr);
2349 
2350 	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
2351 		ifprefix->is_infinite = true;
2352 	} else {
2353 		ifprefix->is_infinite = false;
2354 	}
2355 }
2356 
2357 struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
2358 						  struct in6_addr *prefix,
2359 						  uint8_t len,
2360 						  uint32_t lifetime)
2361 {
2362 	struct net_if_ipv6_prefix *ifprefix = NULL;
2363 	struct net_if_ipv6 *ipv6;
2364 	int i;
2365 
2366 	net_if_lock(iface);
2367 
2368 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2369 		goto out;
2370 	}
2371 
2372 	ifprefix = ipv6_prefix_find(iface, prefix, len);
2373 	if (ifprefix) {
2374 		goto out;
2375 	}
2376 
2377 	if (!ipv6) {
2378 		goto out;
2379 	}
2380 
2381 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2382 		if (ipv6->prefix[i].is_used) {
2383 			continue;
2384 		}
2385 
2386 		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
2387 					len, lifetime);
2388 
2389 		NET_DBG("[%d] interface %p prefix %s/%d added", i, iface,
2390 			net_sprint_ipv6_addr(prefix), len);
2391 
2392 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2393 			struct net_event_ipv6_prefix info;
2394 
2395 			net_ipaddr_copy(&info.addr, prefix);
2396 			info.len = len;
2397 			info.lifetime = lifetime;
2398 
2399 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_ADD,
2400 							iface, (const void *) &info,
2401 							sizeof(struct net_event_ipv6_prefix));
2402 		} else {
2403 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_ADD, iface);
2404 		}
2405 
2406 		ifprefix = &ipv6->prefix[i];
2407 		goto out;
2408 	}
2409 
2410 out:
2411 	net_if_unlock(iface);
2412 
2413 	return ifprefix;
2414 }
2415 
2416 bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
2417 			   uint8_t len)
2418 {
2419 	bool ret = false;
2420 	struct net_if_ipv6 *ipv6;
2421 	int i;
2422 
2423 	net_if_lock(iface);
2424 
2425 	ipv6 = iface->config.ip.ipv6;
2426 	if (!ipv6) {
2427 		goto out;
2428 	}
2429 
2430 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2431 		if (!ipv6->prefix[i].is_used) {
2432 			continue;
2433 		}
2434 
2435 		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
2436 		    ipv6->prefix[i].len != len) {
2437 			continue;
2438 		}
2439 
2440 		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);
2441 
2442 		ipv6->prefix[i].is_used = false;
2443 
2444 		/* Also remove all autoconfigured addresses that use the
2445 		 * same prefix.
2446 		 */
2447 		remove_prefix_addresses(iface, ipv6, addr, len);
2448 
2449 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2450 			struct net_event_ipv6_prefix info;
2451 
2452 			net_ipaddr_copy(&info.addr, addr);
2453 			info.len = len;
2454 			info.lifetime = 0;
2455 
2456 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2457 							iface, (const void *) &info,
2458 							sizeof(struct net_event_ipv6_prefix));
2459 		} else {
2460 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, iface);
2461 		}
2462 
2463 		ret = true;
2464 		goto out;
2465 	}
2466 
2467 out:
2468 	net_if_unlock(iface);
2469 
2470 	return ret;
2471 }
2472 
2473 struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
2474 						  struct in6_addr *addr)
2475 {
2476 	struct net_if_ipv6_prefix *prefix = NULL;
2477 	struct net_if_ipv6 *ipv6;
2478 	int i;
2479 
2480 	if (!iface) {
2481 		iface = net_if_get_default();
2482 	}
2483 
2484 	net_if_lock(iface);
2485 
2486 	ipv6 = iface->config.ip.ipv6;
2487 	if (!ipv6) {
2488 		goto out;
2489 	}
2490 
2491 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2492 		if (!ipv6->prefix[i].is_used) {
2493 			continue;
2494 		}
2495 
2496 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2497 				       addr->s6_addr,
2498 				       ipv6->prefix[i].len)) {
2499 			if (!prefix || prefix->len > ipv6->prefix[i].len) {
2500 				prefix = &ipv6->prefix[i];
2501 			}
2502 		}
2503 	}
2504 
2505 out:
2506 	net_if_unlock(iface);
2507 
2508 	return prefix;
2509 }
2510 
2511 struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
2512 						     struct in6_addr *addr,
2513 						     uint8_t len)
2514 {
2515 	struct net_if_ipv6_prefix *prefix = NULL;
2516 	struct net_if_ipv6 *ipv6;
2517 	int i;
2518 
2519 	net_if_lock(iface);
2520 
2521 	ipv6 = iface->config.ip.ipv6;
2522 	if (!ipv6) {
2523 		goto out;
2524 	}
2525 
2526 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2527 		if (!ipv6->prefix[i].is_used) {
2528 			continue;
2529 		}
2530 
2531 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2532 				       addr->s6_addr, len)) {
2533 			prefix = &ipv6->prefix[i];
2534 			goto out;
2535 		}
2536 	}
2537 
2538 out:
2539 	net_if_unlock(iface);
2540 
2541 	return prefix;
2542 }
2543 
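/* Check whether the address is covered by an on-link prefix. If *iface
 * is non-NULL only that interface is considered; otherwise all
 * interfaces are scanned and, on a match, *iface (when given) is set
 * to the matching interface.
 */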
2544 bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
2545 {
2546 	bool ret = false;
2547 
2548 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2549 		struct net_if_ipv6 *ipv6;
2550 		int i;
2551 
2552 		if (iface && *iface && *iface != tmp) {
2553 			continue;
2554 		}
2555 
2556 		net_if_lock(tmp);
2557 
2558 		ipv6 = tmp->config.ip.ipv6;
2559 		if (!ipv6) {
2560 			net_if_unlock(tmp);
2561 			continue;
2562 		}
2563 
2564 		for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2565 			if (ipv6->prefix[i].is_used &&
2566 			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2567 					       addr->s6_addr,
2568 					       ipv6->prefix[i].len)) {
2569 				if (iface) {
2570 					*iface = tmp;
2571 				}
2572 
2573 				ret = true;
2574 				net_if_unlock(tmp);
2575 				goto out;
2576 			}
2577 		}
2578 
2579 		net_if_unlock(tmp);
2580 	}
2581 
2582 out:
2583 	return ret;
2584 }
2585 
2586 void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
2587 				  uint32_t lifetime)
2588 {
2589 	/* No need to set a timer for infinite timeout */
2590 	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
2591 		return;
2592 	}
2593 
2594 	NET_DBG("Prefix lifetime %u sec", lifetime);
2595 
2596 	prefix_start_timer(prefix, lifetime);
2597 }
2598 
2599 void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
2600 {
2601 	if (!prefix->is_used) {
2602 		return;
2603 	}
2604 
2605 	prefix_timer_remove(prefix);
2606 }
2607 
2608 struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
2609 						struct in6_addr *addr)
2610 {
2611 	return iface_router_lookup(iface, AF_INET6, addr);
2612 }
2613 
2614 struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
2615 						      struct in6_addr *addr)
2616 {
2617 	return iface_router_find_default(iface, AF_INET6, addr);
2618 }
2619 
2620 void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
2621 					uint16_t lifetime)
2622 {
2623 	NET_DBG("Updating expire time of %s by %u secs",
2624 		net_sprint_ipv6_addr(&router->address.in6_addr),
2625 		lifetime);
2626 
2627 	router->life_start = k_uptime_get_32();
2628 	router->lifetime = lifetime;
2629 
2630 	iface_router_update_timer(router->life_start);
2631 }
2632 
2633 struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
2634 					     struct in6_addr *addr,
2635 					     uint16_t lifetime)
2636 {
2637 	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
2638 }
2639 
2640 bool net_if_ipv6_router_rm(struct net_if_router *router)
2641 {
2642 	return iface_router_rm(router);
2643 }
2644 
2645 uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface)
2646 {
2647 #if defined(CONFIG_NET_NATIVE_IPV6)
2648 	int ret = 0;
2649 
2650 	net_if_lock(iface);
2651 
2652 	if (!iface->config.ip.ipv6) {
2653 		goto out;
2654 	}
2655 
2656 	ret = iface->config.ip.ipv6->mcast_hop_limit;
2657 out:
2658 	net_if_unlock(iface);
2659 
2660 	return ret;
2661 #else
2662 	ARG_UNUSED(iface);
2663 
2664 	return 0;
2665 #endif
2666 }
2667 
2668 void net_if_ipv6_set_mcast_hop_limit(struct net_if *iface, uint8_t hop_limit)
2669 {
2670 #if defined(CONFIG_NET_NATIVE_IPV6)
2671 	net_if_lock(iface);
2672 
2673 	if (!iface->config.ip.ipv6) {
2674 		goto out;
2675 	}
2676 
2677 	iface->config.ip.ipv6->mcast_hop_limit = hop_limit;
2678 out:
2679 	net_if_unlock(iface);
2680 #else
2681 	ARG_UNUSED(iface);
2682 	ARG_UNUSED(hop_limit);
2683 #endif
2684 }
2685 
2686 uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface)
2687 {
2688 #if defined(CONFIG_NET_NATIVE_IPV6)
2689 	int ret = 0;
2690 
2691 	net_if_lock(iface);
2692 
2693 	if (!iface->config.ip.ipv6) {
2694 		goto out;
2695 	}
2696 
2697 	ret = iface->config.ip.ipv6->hop_limit;
2698 out:
2699 	net_if_unlock(iface);
2700 
2701 	return ret;
2702 #else
2703 	ARG_UNUSED(iface);
2704 
2705 	return 0;
2706 #endif
2707 }
2708 
2709 void net_if_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit)
2710 {
2711 #if defined(CONFIG_NET_NATIVE_IPV6)
2712 	net_if_lock(iface);
2713 
2714 	if (!iface->config.ip.ipv6) {
2715 		goto out;
2716 	}
2717 
2718 	iface->config.ip.ipv6->hop_limit = hop_limit;
2719 out:
2720 	net_if_unlock(iface);
2721 #else
2722 	ARG_UNUSED(iface);
2723 	ARG_UNUSED(hop_limit);
2724 #endif
2725 }
2726 
2727 struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
2728 				    enum net_addr_state addr_state)
2729 {
2730 	struct in6_addr *addr = NULL;
2731 	struct net_if_ipv6 *ipv6;
2732 	int i;
2733 
2734 	net_if_lock(iface);
2735 
2736 	ipv6 = iface->config.ip.ipv6;
2737 	if (!ipv6) {
2738 		goto out;
2739 	}
2740 
2741 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2742 		if (!ipv6->unicast[i].is_used ||
2743 		    (addr_state != NET_ADDR_ANY_STATE &&
2744 		     ipv6->unicast[i].addr_state != addr_state) ||
2745 		    ipv6->unicast[i].address.family != AF_INET6) {
2746 			continue;
2747 		}
2748 
2749 		if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2750 			addr = &ipv6->unicast[i].address.in6_addr;
2751 			goto out;
2752 		}
2753 	}
2754 
2755 out:
2756 	net_if_unlock(iface);
2757 
2758 	return addr;
2759 }
2760 
2761 struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
2762 					 struct net_if **iface)
2763 {
2764 	struct in6_addr *addr = NULL;
2765 
2766 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2767 		net_if_lock(tmp);
2768 
2769 		addr = net_if_ipv6_get_ll(tmp, state);
2770 		if (addr) {
2771 			if (iface) {
2772 				*iface = tmp;
2773 			}
2774 
2775 			net_if_unlock(tmp);
2776 			goto out;
2777 		}
2778 
2779 		net_if_unlock(tmp);
2780 	}
2781 
2782 out:
2783 	return addr;
2784 }
2785 
2786 static inline struct in6_addr *check_global_addr(struct net_if *iface,
2787 						 enum net_addr_state state)
2788 {
2789 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2790 	int i;
2791 
2792 	if (!ipv6) {
2793 		return NULL;
2794 	}
2795 
2796 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2797 		if (!ipv6->unicast[i].is_used ||
2798 		    (ipv6->unicast[i].addr_state != state) ||
2799 		    ipv6->unicast[i].address.family != AF_INET6) {
2800 			continue;
2801 		}
2802 
2803 		if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2804 			return &ipv6->unicast[i].address.in6_addr;
2805 		}
2806 	}
2807 
2808 	return NULL;
2809 }
2810 
2811 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
2812 					     struct net_if **iface)
2813 {
2814 	struct in6_addr *addr = NULL;
2815 
2816 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2817 		if (iface && *iface && tmp != *iface) {
2818 			continue;
2819 		}
2820 
2821 		net_if_lock(tmp);
2822 		addr = check_global_addr(tmp, state);
2823 		if (addr) {
2824 			if (iface) {
2825 				*iface = tmp;
2826 			}
2827 
2828 			net_if_unlock(tmp);
2829 			goto out;
2830 		}
2831 
2832 		net_if_unlock(tmp);
2833 	}
2834 
2835 out:
2836 
2837 	return addr;
2838 }
2839 
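/* Return the number of leading bits the two IPv6 addresses have in
 * common. A larger value means a longer prefix match, which is what
 * the source address selection below prefers.
 */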
2840 static uint8_t get_diff_ipv6(const struct in6_addr *src,
2841 			  const struct in6_addr *dst)
2842 {
2843 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 16);
2844 }
2845 
2846 static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
2847 {
2848 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
2849 	    addr->address.family == AF_INET6 &&
2850 	    !net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
2851 		return true;
2852 	}
2853 
2854 	return false;
2855 }
2856 
2857 static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
2858 						   const struct in6_addr *dst,
2859 						   uint8_t *best_so_far)
2860 {
2861 	struct net_if_ipv6 *ipv6;
2862 	struct in6_addr *src = NULL;
2863 	uint8_t len;
2864 	int i;
2865 
2866 	net_if_lock(iface);
2867 
2868 	ipv6 = iface->config.ip.ipv6;
2869 	if (!ipv6) {
2870 		goto out;
2871 	}
2872 
2873 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2874 		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
2875 			continue;
2876 		}
2877 
2878 		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
2879 		if (len >= *best_so_far) {
2880 			/* Mesh local address can only be selected for the same
2881 			 * subnet.
2882 			 */
2883 			if (ipv6->unicast[i].is_mesh_local && len < 64 &&
2884 			    !net_ipv6_is_addr_mcast_mesh(dst)) {
2885 				continue;
2886 			}
2887 
2888 			*best_so_far = len;
2889 			src = &ipv6->unicast[i].address.in6_addr;
2890 		}
2891 	}
2892 
2893 out:
2894 	net_if_unlock(iface);
2895 
2896 	return src;
2897 }
2898 
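/* Select a source address for the given destination. Link-local and
 * link-local multicast destinations get a preferred link-local source;
 * other destinations get the preferred global address with the longest
 * prefix match, searched on dst_iface if given or on all interfaces
 * otherwise. If nothing suitable is found, the unspecified address
 * (::) is returned.
 *
 * Minimal usage sketch (dst_addr is a hypothetical destination already
 * known to the caller):
 *
 *	const struct in6_addr *src;
 *
 *	src = net_if_ipv6_select_src_addr(NULL, &dst_addr);
 *	if (net_ipv6_is_addr_unspecified(src)) {
 *		(no usable source address is configured)
 *	}
 */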
2899 const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
2900 						   const struct in6_addr *dst)
2901 {
2902 	const struct in6_addr *src = NULL;
2903 	uint8_t best_match = 0U;
2904 
2905 	NET_ASSERT(dst);
2906 
2907 	if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast_link(dst)) {
2908 		/* If caller has supplied interface, then use that */
2909 		if (dst_iface) {
2910 			src = net_if_ipv6_get_best_match(dst_iface, dst,
2911 							 &best_match);
2912 		} else {
2913 			STRUCT_SECTION_FOREACH(net_if, iface) {
2914 				struct in6_addr *addr;
2915 
2916 				addr = net_if_ipv6_get_best_match(iface, dst,
2917 								  &best_match);
2918 				if (addr) {
2919 					src = addr;
2920 				}
2921 			}
2922 		}
2923 
2924 	} else {
2925 		if (dst_iface) {
2926 			src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
2927 		} else {
2928 			struct in6_addr *addr;
2929 
2930 			addr = net_if_ipv6_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
2931 			if (addr) {
2932 				src = addr;
2933 				goto out;
2934 			}
2935 
2936 			STRUCT_SECTION_FOREACH(net_if, iface) {
2937 				addr = net_if_ipv6_get_ll(iface,
2938 							  NET_ADDR_PREFERRED);
2939 				if (addr) {
2940 					src = addr;
2941 					break;
2942 				}
2943 			}
2944 		}
2945 	}
2946 
2947 	if (!src) {
2948 		src = net_ipv6_unspecified_address();
2949 	}
2950 
2951 out:
2952 	return src;
2953 }
2954 
2955 struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
2956 {
2957 	struct net_if *iface = NULL;
2958 	const struct in6_addr *src;
2959 
2960 	src = net_if_ipv6_select_src_addr(NULL, dst);
2961 	if (src != net_ipv6_unspecified_address()) {
2962 		net_if_ipv6_addr_lookup(src, &iface);
2963 	}
2964 
2965 	if (iface == NULL) {
2966 		iface = net_if_get_default();
2967 	}
2968 
2969 	return iface;
2970 }
2971 
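/* Pick a randomized reachable time between
 * MIN_RANDOM_NUMER/MIN_RANDOM_DENOM and MAX_RANDOM_NUMER/MAX_RANDOM_DENOM
 * times base_reachable_time, i.e. the 0.5 .. 1.5 random factor that
 * IPv6 Neighbor Discovery (RFC 4861) calls for.
 */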
2972 uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
2973 {
2974 	uint32_t min_reachable, max_reachable;
2975 
2976 	min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
2977 			/ MIN_RANDOM_DENOM;
2978 	max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
2979 			/ MAX_RANDOM_DENOM;
2980 
2981 	NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
2982 		max_reachable);
2983 
2984 	return min_reachable +
2985 	       sys_rand32_get() % (max_reachable - min_reachable);
2986 }
2987 
2988 static void iface_ipv6_start(struct net_if *iface)
2989 {
2990 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
2991 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
2992 		return;
2993 	}
2994 
2995 	if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
2996 		net_if_start_dad(iface);
2997 	} else {
2998 		struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2999 
3000 		if (ipv6 != NULL) {
3001 			join_mcast_nodes(iface,
3002 					 &ipv6->mcast[0].address.in6_addr);
3003 		}
3004 	}
3005 
3006 	net_if_start_rs(iface);
3007 }
3008 
3009 static void iface_ipv6_init(int if_count)
3010 {
3011 	int i;
3012 
3013 	iface_ipv6_dad_init();
3014 	iface_ipv6_nd_init();
3015 
3016 	k_work_init_delayable(&address_lifetime_timer,
3017 			      address_lifetime_timeout);
3018 	k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);
3019 
3020 	if (if_count > ARRAY_SIZE(ipv6_addresses)) {
3021 		NET_WARN("You have %zu IPv6 net_if addresses but %d "
3022 			 "network interfaces", ARRAY_SIZE(ipv6_addresses),
3023 			 if_count);
3024 		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
3025 			 "value.");
3026 	}
3027 
3028 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
3029 		ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
3030 		ipv6_addresses[i].ipv6.mcast_hop_limit = CONFIG_NET_INITIAL_MCAST_HOP_LIMIT;
3031 		ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;
3032 
3033 		net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
3034 	}
3035 }
3036 
3037 #else
3038 #define join_mcast_allnodes(...)
3039 #define join_mcast_solicit_node(...)
3040 #define leave_mcast_all(...)
3041 #define join_mcast_nodes(...)
3042 #define iface_ipv6_start(...)
3043 #define iface_ipv6_init(...)
3044 
3045 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
3046 						   struct net_if **iface)
3047 {
3048 	ARG_UNUSED(addr);
3049 	ARG_UNUSED(iface);
3050 
3051 	return NULL;
3052 }
3053 
3054 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
3055 					    struct net_if **ret)
3056 {
3057 	ARG_UNUSED(addr);
3058 	ARG_UNUSED(ret);
3059 
3060 	return NULL;
3061 }
3062 
3063 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
3064 					     struct net_if **iface)
3065 {
3066 	ARG_UNUSED(state);
3067 	ARG_UNUSED(iface);
3068 
3069 	return NULL;
3070 }
3071 #endif /* CONFIG_NET_IPV6 */
3072 
3073 #if defined(CONFIG_NET_NATIVE_IPV4)
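/* Bind an IPv4 configuration block to the interface on first use. The
 * blocks come from the static ipv4_addresses[] pool; -ENOTSUP is
 * returned if the interface has no IPv4 support and -ESRCH if the pool
 * has no free entries left.
 */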
3074 int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
3075 {
3076 	int ret = 0;
3077 	int i;
3078 
3079 	net_if_lock(iface);
3080 
3081 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3082 		ret = -ENOTSUP;
3083 		goto out;
3084 	}
3085 
3086 	if (iface->config.ip.ipv4) {
3087 		if (ipv4) {
3088 			*ipv4 = iface->config.ip.ipv4;
3089 		}
3090 
3091 		goto out;
3092 	}
3093 
3094 	k_mutex_lock(&lock, K_FOREVER);
3095 
3096 	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
3097 		if (ipv4_addresses[i].iface) {
3098 			continue;
3099 		}
3100 
3101 		iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
3102 		ipv4_addresses[i].iface = iface;
3103 
3104 		if (ipv4) {
3105 			*ipv4 = &ipv4_addresses[i].ipv4;
3106 		}
3107 
3108 		k_mutex_unlock(&lock);
3109 		goto out;
3110 	}
3111 
3112 	k_mutex_unlock(&lock);
3113 
3114 	ret = -ESRCH;
3115 out:
3116 	net_if_unlock(iface);
3117 
3118 	return ret;
3119 }
3120 
3121 int net_if_config_ipv4_put(struct net_if *iface)
3122 {
3123 	int ret = 0;
3124 	int i;
3125 
3126 	net_if_lock(iface);
3127 
3128 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3129 		ret = -ENOTSUP;
3130 		goto out;
3131 	}
3132 
3133 	if (!iface->config.ip.ipv4) {
3134 		ret = -EALREADY;
3135 		goto out;
3136 	}
3137 
3138 	k_mutex_lock(&lock, K_FOREVER);
3139 
3140 	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
3141 		if (ipv4_addresses[i].iface != iface) {
3142 			continue;
3143 		}
3144 
3145 		iface->config.ip.ipv4 = NULL;
3146 		ipv4_addresses[i].iface = NULL;
3147 
3148 		k_mutex_unlock(&lock);
3149 		goto out;
3150 	}
3151 
3152 	k_mutex_unlock(&lock);
3153 
3154 	ret = -ESRCH;
3155 out:
3156 	net_if_unlock(iface);
3157 
3158 	return ret;
3159 }
3160 
3161 uint8_t net_if_ipv4_get_ttl(struct net_if *iface)
3162 {
3163 #if defined(CONFIG_NET_NATIVE_IPV4)
3164 	int ret = 0;
3165 
3166 	net_if_lock(iface);
3167 
3168 	if (!iface->config.ip.ipv4) {
3169 		goto out;
3170 	}
3171 
3172 	ret = iface->config.ip.ipv4->ttl;
3173 out:
3174 	net_if_unlock(iface);
3175 
3176 	return ret;
3177 #else
3178 	ARG_UNUSED(iface);
3179 
3180 	return 0;
3181 #endif
3182 }
3183 
3184 void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl)
3185 {
3186 #if defined(CONFIG_NET_NATIVE_IPV4)
3187 	net_if_lock(iface);
3188 
3189 	if (!iface->config.ip.ipv4) {
3190 		goto out;
3191 	}
3192 
3193 	iface->config.ip.ipv4->ttl = ttl;
3194 out:
3195 	net_if_unlock(iface);
3196 #else
3197 	ARG_UNUSED(iface);
3198 	ARG_UNUSED(ttl);
3199 #endif
3200 }
3201 
3202 uint8_t net_if_ipv4_get_mcast_ttl(struct net_if *iface)
3203 {
3204 #if defined(CONFIG_NET_NATIVE_IPV4)
3205 	int ret = 0;
3206 
3207 	net_if_lock(iface);
3208 
3209 	if (!iface->config.ip.ipv4) {
3210 		goto out;
3211 	}
3212 
3213 	ret = iface->config.ip.ipv4->mcast_ttl;
3214 out:
3215 	net_if_unlock(iface);
3216 
3217 	return ret;
3218 #else
3219 	ARG_UNUSED(iface);
3220 
3221 	return 0;
3222 #endif
3223 }
3224 
3225 void net_if_ipv4_set_mcast_ttl(struct net_if *iface, uint8_t ttl)
3226 {
3227 #if defined(CONFIG_NET_NATIVE_IPV4)
3228 	net_if_lock(iface);
3229 
3230 	if (!iface->config.ip.ipv4) {
3231 		goto out;
3232 	}
3233 
3234 	iface->config.ip.ipv4->mcast_ttl = ttl;
3235 out:
3236 	net_if_unlock(iface);
3237 #else
3238 	ARG_UNUSED(iface);
3239 	ARG_UNUSED(ttl);
3240 #endif
3241 }
3242 
3243 struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
3244 						struct in_addr *addr)
3245 {
3246 	return iface_router_lookup(iface, AF_INET, addr);
3247 }
3248 
3249 struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
3250 						      struct in_addr *addr)
3251 {
3252 	return iface_router_find_default(iface, AF_INET, addr);
3253 }
3254 
3255 struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
3256 					     struct in_addr *addr,
3257 					     bool is_default,
3258 					     uint16_t lifetime)
3259 {
3260 	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
3261 }
3262 
3263 bool net_if_ipv4_router_rm(struct net_if_router *router)
3264 {
3265 	return iface_router_rm(router);
3266 }
3267 
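/* Return true if the given address is in the same IPv4 subnet as any
 * unicast address configured on the interface, according to the
 * interface netmask.
 */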
3268 bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
3269 			       const struct in_addr *addr)
3270 {
3271 	bool ret = false;
3272 	struct net_if_ipv4 *ipv4;
3273 	uint32_t subnet;
3274 	int i;
3275 
3276 	net_if_lock(iface);
3277 
3278 	ipv4 = iface->config.ip.ipv4;
3279 	if (!ipv4) {
3280 		goto out;
3281 	}
3282 
3283 	subnet = UNALIGNED_GET(&addr->s_addr) & ipv4->netmask.s_addr;
3284 
3285 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3286 		if (!ipv4->unicast[i].is_used ||
3287 		    ipv4->unicast[i].address.family != AF_INET) {
3288 			continue;
3289 		}
3290 
3291 		if ((ipv4->unicast[i].address.in_addr.s_addr &
3292 		     ipv4->netmask.s_addr) == subnet) {
3293 			ret = true;
3294 			goto out;
3295 		}
3296 	}
3297 
3298 out:
3299 	net_if_unlock(iface);
3300 
3301 	return ret;
3302 }
3303 
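/* An address counts as a broadcast address for the interface when it
 * falls within the interface subnet and all of its host bits are set
 * (directed/subnet broadcast).
 */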
3304 static bool ipv4_is_broadcast_address(struct net_if *iface,
3305 				      const struct in_addr *addr)
3306 {
3307 	struct net_if_ipv4 *ipv4;
3308 	bool ret = false;
3309 
3310 	net_if_lock(iface);
3311 
3312 	ipv4 = iface->config.ip.ipv4;
3313 	if (!ipv4) {
3314 		ret = false;
3315 		goto out;
3316 	}
3317 
3318 	if (!net_if_ipv4_addr_mask_cmp(iface, addr)) {
3319 		ret = false;
3320 		goto out;
3321 	}
3322 
3323 	if ((UNALIGNED_GET(&addr->s_addr) & ~ipv4->netmask.s_addr) ==
3324 	    ~ipv4->netmask.s_addr) {
3325 		ret = true;
3326 		goto out;
3327 	}
3328 
3329 out:
3330 	net_if_unlock(iface);
3331 	return ret;
3332 }
3333 
3334 bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
3335 			       const struct in_addr *addr)
3336 {
3337 	bool ret = false;
3338 
3339 	if (iface) {
3340 		ret = ipv4_is_broadcast_address(iface, addr);
3341 		goto out;
3342 	}
3343 
3344 	STRUCT_SECTION_FOREACH(net_if, one_iface) {
3345 		ret = ipv4_is_broadcast_address(one_iface, addr);
3346 		if (ret) {
3347 			goto out;
3348 		}
3349 	}
3350 
3351 out:
3352 	return ret;
3353 }
3354 
3355 struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
3356 {
3357 	struct net_if *selected = NULL;
3358 
3359 	STRUCT_SECTION_FOREACH(net_if, iface) {
3360 		bool ret;
3361 
3362 		ret = net_if_ipv4_addr_mask_cmp(iface, dst);
3363 		if (ret) {
3364 			selected = iface;
3365 			goto out;
3366 		}
3367 	}
3368 
3369 	if (selected == NULL) {
3370 		selected = net_if_get_default();
3371 	}
3372 
3373 out:
3374 	return selected;
3375 }
3376 
3377 static uint8_t get_diff_ipv4(const struct in_addr *src,
3378 			  const struct in_addr *dst)
3379 {
3380 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 4);
3381 }
3382 
3383 static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
3384 {
3385 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3386 	    addr->address.family == AF_INET &&
3387 	    !net_ipv4_is_ll_addr(&addr->address.in_addr)) {
3388 		return true;
3389 	}
3390 
3391 	return false;
3392 }
3393 
3394 static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
3395 						  const struct in_addr *dst,
3396 						  uint8_t *best_so_far)
3397 {
3398 	struct net_if_ipv4 *ipv4;
3399 	struct in_addr *src = NULL;
3400 	uint8_t len;
3401 	int i;
3402 
3403 	net_if_lock(iface);
3404 
3405 	ipv4 = iface->config.ip.ipv4;
3406 	if (!ipv4) {
3407 		goto out;
3408 	}
3409 
3410 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3411 		if (!is_proper_ipv4_address(&ipv4->unicast[i])) {
3412 			continue;
3413 		}
3414 
3415 		len = get_diff_ipv4(dst, &ipv4->unicast[i].address.in_addr);
3416 		if (len >= *best_so_far) {
3417 			*best_so_far = len;
3418 			src = &ipv4->unicast[i].address.in_addr;
3419 		}
3420 	}
3421 
3422 out:
3423 	net_if_unlock(iface);
3424 
3425 	return src;
3426 }
3427 
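/* Return the first unicast address in the wanted state. The ll flag
 * selects between link-local (true) and global (false) addresses;
 * NET_ADDR_ANY_STATE matches any address state.
 */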
3428 static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
3429 					enum net_addr_state addr_state, bool ll)
3430 {
3431 	struct in_addr *addr = NULL;
3432 	struct net_if_ipv4 *ipv4;
3433 	int i;
3434 
3435 	if (!iface) {
3436 		return NULL;
3437 	}
3438 
3439 	net_if_lock(iface);
3440 
3441 	ipv4 = iface->config.ip.ipv4;
3442 	if (!ipv4) {
3443 		goto out;
3444 	}
3445 
3446 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3447 		if (!ipv4->unicast[i].is_used ||
3448 		    (addr_state != NET_ADDR_ANY_STATE &&
3449 		     ipv4->unicast[i].addr_state != addr_state) ||
3450 		    ipv4->unicast[i].address.family != AF_INET) {
3451 			continue;
3452 		}
3453 
3454 		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].address.in_addr)) {
3455 			if (!ll) {
3456 				continue;
3457 			}
3458 		} else {
3459 			if (ll) {
3460 				continue;
3461 			}
3462 		}
3463 
3464 		addr = &ipv4->unicast[i].address.in_addr;
3465 		goto out;
3466 	}
3467 
3468 out:
3469 	net_if_unlock(iface);
3470 
3471 	return addr;
3472 }
3473 
3474 struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
3475 				   enum net_addr_state addr_state)
3476 {
3477 	return if_ipv4_get_addr(iface, addr_state, true);
3478 }
3479 
3480 struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
3481 					    enum net_addr_state addr_state)
3482 {
3483 	return if_ipv4_get_addr(iface, addr_state, false);
3484 }
3485 
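/* IPv4 source address selection. For link-local destinations a
 * preferred link-local source is tried first; otherwise the unicast
 * address with the longest prefix match against the destination is
 * used. Failing that, fall back to a preferred global address, then
 * (when CONFIG_NET_IPV4_AUTO is enabled) to a link-local address, and
 * finally to the unspecified address 0.0.0.0.
 */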
3486 const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
3487 						  const struct in_addr *dst)
3488 {
3489 	const struct in_addr *src = NULL;
3490 	uint8_t best_match = 0U;
3491 
3492 	NET_ASSERT(dst);
3493 
3494 	if (!net_ipv4_is_ll_addr(dst)) {
3495 
3496 		/* If caller has supplied interface, then use that */
3497 		if (dst_iface) {
3498 			src = net_if_ipv4_get_best_match(dst_iface, dst,
3499 							 &best_match);
3500 		} else {
3501 			STRUCT_SECTION_FOREACH(net_if, iface) {
3502 				struct in_addr *addr;
3503 
3504 				addr = net_if_ipv4_get_best_match(iface, dst,
3505 								  &best_match);
3506 				if (addr) {
3507 					src = addr;
3508 				}
3509 			}
3510 		}
3511 
3512 	} else {
3513 		if (dst_iface) {
3514 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3515 		} else {
3516 			struct in_addr *addr;
3517 
3518 			addr = net_if_ipv4_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
3519 			if (addr) {
3520 				src = addr;
3521 				goto out;
3522 			}
3523 
3524 			STRUCT_SECTION_FOREACH(net_if, iface) {
3525 				addr = net_if_ipv4_get_ll(iface,
3526 							  NET_ADDR_PREFERRED);
3527 				if (addr) {
3528 					src = addr;
3529 					break;
3530 				}
3531 			}
3532 		}
3533 	}
3534 
3535 	if (!src) {
3536 		src = net_if_ipv4_get_global_addr(dst_iface,
3537 						  NET_ADDR_PREFERRED);
3538 
3539 		if (IS_ENABLED(CONFIG_NET_IPV4_AUTO) && !src) {
3540 			/* Try to use LL address if there's really no other
3541 			 * address available.
3542 			 */
3543 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3544 		}
3545 
3546 		if (!src) {
3547 			src = net_ipv4_unspecified_address();
3548 		}
3549 	}
3550 
3551 out:
3552 	return src;
3553 }
3554 
3555 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
3556 					    struct net_if **ret)
3557 {
3558 	struct net_if_addr *ifaddr = NULL;
3559 
3560 	STRUCT_SECTION_FOREACH(net_if, iface) {
3561 		struct net_if_ipv4 *ipv4;
3562 		int i;
3563 
3564 		net_if_lock(iface);
3565 
3566 		ipv4 = iface->config.ip.ipv4;
3567 		if (!ipv4) {
3568 			net_if_unlock(iface);
3569 			continue;
3570 		}
3571 
3572 		for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3573 			if (!ipv4->unicast[i].is_used ||
3574 			    ipv4->unicast[i].address.family != AF_INET) {
3575 				continue;
3576 			}
3577 
3578 			if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
3579 			    ipv4->unicast[i].address.in_addr.s_addr) {
3580 
3581 				if (ret) {
3582 					*ret = iface;
3583 				}
3584 
3585 				ifaddr = &ipv4->unicast[i];
3586 				net_if_unlock(iface);
3587 				goto out;
3588 			}
3589 		}
3590 
3591 		net_if_unlock(iface);
3592 	}
3593 
3594 out:
3595 	return ifaddr;
3596 }
3597 
3598 int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
3599 {
3600 	struct net_if_addr *if_addr;
3601 	struct net_if *iface = NULL;
3602 
3603 	if_addr = net_if_ipv4_addr_lookup(addr, &iface);
3604 	if (!if_addr) {
3605 		return 0;
3606 	}
3607 
3608 	return net_if_get_by_iface(iface);
3609 }
3610 
3611 #ifdef CONFIG_USERSPACE
3612 static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
3613 					  const struct in_addr *addr)
3614 {
3615 	struct in_addr addr_v4;
3616 
3617 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3618 
3619 	return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
3620 }
3621 #include <syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
3622 #endif
3623 
3624 struct in_addr net_if_ipv4_get_netmask(struct net_if *iface)
3625 {
3626 	struct in_addr netmask = { 0 };
3627 
3628 	net_if_lock(iface);
3629 
3630 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3631 		goto out;
3632 	}
3633 
3634 	if (!iface->config.ip.ipv4) {
3635 		goto out;
3636 	}
3637 
3638 	netmask = iface->config.ip.ipv4->netmask;
3639 out:
3640 	net_if_unlock(iface);
3641 
3642 	return netmask;
3643 }
3644 
3645 void net_if_ipv4_set_netmask(struct net_if *iface,
3646 			     const struct in_addr *netmask)
3647 {
3648 	net_if_lock(iface);
3649 
3650 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3651 		goto out;
3652 	}
3653 
3654 	if (!iface->config.ip.ipv4) {
3655 		goto out;
3656 	}
3657 
3658 	net_ipaddr_copy(&iface->config.ip.ipv4->netmask, netmask);
3659 out:
3660 	net_if_unlock(iface);
3661 }
3662 
3663 bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
3664 					     const struct in_addr *netmask)
3665 {
3666 	struct net_if *iface;
3667 
3668 	iface = net_if_get_by_index(index);
3669 	if (!iface) {
3670 		return false;
3671 	}
3672 
3673 	net_if_ipv4_set_netmask(iface, netmask);
3674 
3675 	return true;
3676 }
3677 
3678 #ifdef CONFIG_USERSPACE
3679 bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
3680 					     const struct in_addr *netmask)
3681 {
3682 	struct in_addr netmask_addr;
3683 	struct net_if *iface;
3684 
3685 	iface = z_vrfy_net_if_get_by_index(index);
3686 	if (!iface) {
3687 		return false;
3688 	}
3689 
3690 	K_OOPS(k_usermode_from_copy(&netmask_addr, (void *)netmask,
3691 				sizeof(netmask_addr)));
3692 
3693 	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
3694 }
3695 
3696 #include <syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
3697 #endif /* CONFIG_USERSPACE */
3698 
3699 void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
3700 {
3701 	net_if_lock(iface);
3702 
3703 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3704 		goto out;
3705 	}
3706 
3707 	if (!iface->config.ip.ipv4) {
3708 		goto out;
3709 	}
3710 
3711 	net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
3712 out:
3713 	net_if_unlock(iface);
3714 }
3715 
3716 bool z_impl_net_if_ipv4_set_gw_by_index(int index,
3717 					const struct in_addr *gw)
3718 {
3719 	struct net_if *iface;
3720 
3721 	iface = net_if_get_by_index(index);
3722 	if (!iface) {
3723 		return false;
3724 	}
3725 
3726 	net_if_ipv4_set_gw(iface, gw);
3727 
3728 	return true;
3729 }
3730 
3731 #ifdef CONFIG_USERSPACE
3732 bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
3733 					const struct in_addr *gw)
3734 {
3735 	struct in_addr gw_addr;
3736 	struct net_if *iface;
3737 
3738 	iface = z_vrfy_net_if_get_by_index(index);
3739 	if (!iface) {
3740 		return false;
3741 	}
3742 
3743 	K_OOPS(k_usermode_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));
3744 
3745 	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
3746 }
3747 
3748 #include <syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
3749 #endif /* CONFIG_USERSPACE */
3750 
3751 static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
3752 					  struct in_addr *addr)
3753 {
3754 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
3755 	int i;
3756 
3757 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3758 		if (!ipv4->unicast[i].is_used) {
3759 			continue;
3760 		}
3761 
3762 		if (net_ipv4_addr_cmp(addr,
3763 				      &ipv4->unicast[i].address.in_addr)) {
3764 			return &ipv4->unicast[i];
3765 		}
3766 	}
3767 
3768 	return NULL;
3769 }
3770 
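/* Add an IPv4 unicast address to the interface. An existing entry for
 * the same address is returned as-is. A DHCP-assigned address may take
 * over a slot holding a NET_ADDR_OVERRIDABLE address. A vlifetime of
 * zero means an infinite lifetime; otherwise the caller is expected to
 * manage the lifetime timer and its expiry.
 */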
3771 struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
3772 					 struct in_addr *addr,
3773 					 enum net_addr_type addr_type,
3774 					 uint32_t vlifetime)
3775 {
3776 	struct net_if_addr *ifaddr = NULL;
3777 	struct net_if_ipv4 *ipv4;
3778 	int i;
3779 
3780 	net_if_lock(iface);
3781 
3782 	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
3783 		goto out;
3784 	}
3785 
3786 	ifaddr = ipv4_addr_find(iface, addr);
3787 	if (ifaddr) {
3788 		/* TODO: should set addr_type/vlifetime */
3789 		goto out;
3790 	}
3791 
3792 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3793 		struct net_if_addr *cur = &ipv4->unicast[i];
3794 
3795 		if (addr_type == NET_ADDR_DHCP
3796 		    && cur->addr_type == NET_ADDR_OVERRIDABLE) {
3797 			ifaddr = cur;
3798 			break;
3799 		}
3800 
3801 		if (!ipv4->unicast[i].is_used) {
3802 			ifaddr = cur;
3803 			break;
3804 		}
3805 	}
3806 
3807 	if (ifaddr) {
3808 		ifaddr->is_used = true;
3809 		ifaddr->address.family = AF_INET;
3810 		ifaddr->address.in_addr.s4_addr32[0] =
3811 						addr->s4_addr32[0];
3812 		ifaddr->addr_type = addr_type;
3813 
3814 		/* Caller has to take care of timers and their expiry */
3815 		if (vlifetime) {
3816 			ifaddr->is_infinite = false;
3817 		} else {
3818 			ifaddr->is_infinite = true;
3819 		}
3820 
3821 		/**
3822 		 *  TODO: Handle properly PREFERRED/DEPRECATED state when
3823 		 *  address in use, expired and renewal state.
3824 		 */
3825 		ifaddr->addr_state = NET_ADDR_PREFERRED;
3826 
3827 		NET_DBG("[%d] interface %p address %s type %s added", i, iface,
3828 			net_sprint_ipv4_addr(addr),
3829 			net_addr_type2str(addr_type));
3830 
3831 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
3832 						&ifaddr->address.in_addr,
3833 						sizeof(struct in_addr));
3834 		goto out;
3835 	}
3836 
3837 out:
3838 	net_if_unlock(iface);
3839 
3840 	return ifaddr;
3841 }
3842 
3843 bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
3844 {
3845 	struct net_if_ipv4 *ipv4;
3846 	bool ret = false;
3847 	int i;
3848 
3849 	net_if_lock(iface);
3850 
3851 	ipv4 = iface->config.ip.ipv4;
3852 	if (!ipv4) {
3853 		goto out;
3854 	}
3855 
3856 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3857 		if (!ipv4->unicast[i].is_used) {
3858 			continue;
3859 		}
3860 
3861 		if (!net_ipv4_addr_cmp(&ipv4->unicast[i].address.in_addr,
3862 				       addr)) {
3863 			continue;
3864 		}
3865 
3866 		ipv4->unicast[i].is_used = false;
3867 
3868 		NET_DBG("[%d] interface %p address %s removed",
3869 			i, iface, net_sprint_ipv4_addr(addr));
3870 
3871 		net_mgmt_event_notify_with_info(
3872 			NET_EVENT_IPV4_ADDR_DEL, iface,
3873 			&ipv4->unicast[i].address.in_addr,
3874 			sizeof(struct in_addr));
3875 
3876 		ret = true;
3877 		goto out;
3878 	}
3879 
3880 out:
3881 	net_if_unlock(iface);
3882 
3883 	return ret;
3884 }
3885 
3886 bool z_impl_net_if_ipv4_addr_add_by_index(int index,
3887 					  struct in_addr *addr,
3888 					  enum net_addr_type addr_type,
3889 					  uint32_t vlifetime)
3890 {
3891 	struct net_if *iface;
3892 	struct net_if_addr *if_addr;
3893 
3894 	iface = net_if_get_by_index(index);
3895 	if (!iface) {
3896 		return false;
3897 	}
3898 
3899 	if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
3900 	return if_addr ? true : false;
3901 }
3902 
3903 #ifdef CONFIG_USERSPACE
3904 bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
3905 					  struct in_addr *addr,
3906 					  enum net_addr_type addr_type,
3907 					  uint32_t vlifetime)
3908 {
3909 	struct in_addr addr_v4;
3910 	struct net_if *iface;
3911 
3912 	iface = z_vrfy_net_if_get_by_index(index);
3913 	if (!iface) {
3914 		return false;
3915 	}
3916 
3917 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3918 
3919 	return z_impl_net_if_ipv4_addr_add_by_index(index,
3920 						    &addr_v4,
3921 						    addr_type,
3922 						    vlifetime);
3923 }
3924 
3925 #include <syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
3926 #endif /* CONFIG_USERSPACE */
3927 
3928 bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
3929 					 const struct in_addr *addr)
3930 {
3931 	struct net_if *iface;
3932 
3933 	iface = net_if_get_by_index(index);
3934 	if (!iface) {
3935 		return false;
3936 	}
3937 
3938 	return net_if_ipv4_addr_rm(iface, addr);
3939 }
3940 
3941 #ifdef CONFIG_USERSPACE
3942 bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
3943 					 const struct in_addr *addr)
3944 {
3945 	struct in_addr addr_v4;
3946 	struct net_if *iface;
3947 
3948 	iface = z_vrfy_net_if_get_by_index(index);
3949 	if (!iface) {
3950 		return false;
3951 	}
3952 
3953 	K_OOPS(k_usermode_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3954 
3955 	return z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
3956 }
3957 
3958 #include <syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
3959 #endif /* CONFIG_USERSPACE */
3960 
3961 void net_if_ipv4_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
3962 			      void *user_data)
3963 {
3964 	struct net_if_ipv4 *ipv4;
3965 
3966 	if (iface == NULL) {
3967 		return;
3968 	}
3969 
3970 	net_if_lock(iface);
3971 
3972 	ipv4 = iface->config.ip.ipv4;
3973 	if (ipv4 == NULL) {
3974 		goto out;
3975 	}
3976 
3977 	for (int i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3978 		struct net_if_addr *if_addr = &ipv4->unicast[i];
3979 
3980 		if (!if_addr->is_used) {
3981 			continue;
3982 		}
3983 
3984 		cb(iface, if_addr, user_data);
3985 	}
3986 
3987 out:
3988 	net_if_unlock(iface);
3989 }
3990 
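/* Find an IPv4 multicast address entry. With is_used set to true the
 * lookup matches registered entries (optionally filtered by addr);
 * with is_used set to false it returns the first unused slot (callers
 * pass a NULL addr in that case).
 */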
3991 static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
3992 						 bool is_used,
3993 						 const struct in_addr *addr)
3994 {
3995 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
3996 	int i;
3997 
3998 	if (!ipv4) {
3999 		return NULL;
4000 	}
4001 
4002 	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
4003 		if ((is_used && !ipv4->mcast[i].is_used) ||
4004 		    (!is_used && ipv4->mcast[i].is_used)) {
4005 			continue;
4006 		}
4007 
4008 		if (addr) {
4009 			if (!net_ipv4_addr_cmp(&ipv4->mcast[i].address.in_addr,
4010 					       addr)) {
4011 				continue;
4012 			}
4013 		}
4014 
4015 		return &ipv4->mcast[i];
4016 	}
4017 
4018 	return NULL;
4019 }

4020 struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
4021 						const struct in_addr *addr)
4022 {
4023 	struct net_if_mcast_addr *maddr = NULL;
4024 
4025 	net_if_lock(iface);
4026 
4027 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
4028 		goto out;
4029 	}
4030 
4031 	if (!net_ipv4_is_addr_mcast(addr)) {
4032 		NET_DBG("Address %s is not a multicast address.",
4033 			net_sprint_ipv4_addr(addr));
4034 		goto out;
4035 	}
4036 
4037 	maddr = ipv4_maddr_find(iface, false, NULL);
4038 	if (maddr) {
4039 		maddr->is_used = true;
4040 		maddr->address.family = AF_INET;
4041 		maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];
4042 
4043 		NET_DBG("interface %p address %s added", iface,
4044 			net_sprint_ipv4_addr(addr));
4045 
4046 		net_mgmt_event_notify_with_info(
4047 			NET_EVENT_IPV4_MADDR_ADD, iface,
4048 			&maddr->address.in_addr,
4049 			sizeof(struct in_addr));
4050 	}
4051 
4052 out:
4053 	net_if_unlock(iface);
4054 
4055 	return maddr;
4056 }

bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
{
	struct net_if_mcast_addr *maddr;
	bool ret = false;

	net_if_lock(iface);

	maddr = ipv4_maddr_find(iface, true, addr);
	if (maddr) {
		maddr->is_used = false;

		NET_DBG("interface %p address %s removed",
			iface, net_sprint_ipv4_addr(addr));

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV4_MADDR_DEL, iface,
			&maddr->address.in_addr,
			sizeof(struct in_addr));

		ret = true;
	}

	net_if_unlock(iface);

	return ret;
}

struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
						   struct net_if **ret)
{
	struct net_if_mcast_addr *addr = NULL;

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (ret && *ret && iface != *ret) {
			continue;
		}

		net_if_lock(iface);

		addr = ipv4_maddr_find(iface, true, maddr);
		if (addr) {
			if (ret) {
				*ret = iface;
			}

			net_if_unlock(iface);
			goto out;
		}

		net_if_unlock(iface);
	}

out:
	return addr;
}

void net_if_ipv4_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
{
	NET_ASSERT(iface);
	NET_ASSERT(addr);

	net_if_lock(iface);
	addr->is_joined = false;
	net_if_unlock(iface);
}

void net_if_ipv4_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
{
	NET_ASSERT(iface);
	NET_ASSERT(addr);

	net_if_lock(iface);
	addr->is_joined = true;
	net_if_unlock(iface);
}

static void iface_ipv4_init(int if_count)
{
	int i;

	if (if_count > ARRAY_SIZE(ipv4_addresses)) {
		NET_WARN("You have %zu IPv4 net_if addresses but %d "
			 "network interfaces", ARRAY_SIZE(ipv4_addresses),
			 if_count);
		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
			 "value.");
	}

	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
		ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
		ipv4_addresses[i].ipv4.mcast_ttl = CONFIG_NET_INITIAL_MCAST_TTL;
	}
}

static void leave_ipv4_mcast_all(struct net_if *iface)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	if (!ipv4) {
		return;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
		if (!ipv4->mcast[i].is_used ||
		    !ipv4->mcast[i].is_joined) {
			continue;
		}

		net_ipv4_igmp_leave(iface, &ipv4->mcast[i].address.in_addr);
	}
}

#else
#define leave_ipv4_mcast_all(...)
#define iface_ipv4_init(...)

struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
						   struct net_if **iface)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(iface);

	return NULL;
}

struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
					    struct net_if **ret)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(ret);

	return NULL;
}

struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
					    enum net_addr_state addr_state)
{
	ARG_UNUSED(addr_state);
	ARG_UNUSED(iface);

	return NULL;
}
#endif /* CONFIG_NET_IPV4 */

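/* Pick the network interface to use when sending to the given destination,
 * based on the destination address family. If no better match is found the
 * default interface is returned, so the result is non-NULL as long as at
 * least one interface exists.
 *
 * Illustrative usage (destination value is just an example):
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *	};
 *	struct net_if *iface = net_if_select_src_iface((struct sockaddr *)&dst);
 */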
struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
{
	struct net_if *iface = NULL;

	if (!dst) {
		goto out;
	}

	if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
		iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
		goto out;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
		iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
		goto out;
	}

out:
	if (iface == NULL) {
		iface = net_if_get_default();
	}

	return iface;
}

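/* Hand a received packet to the interface's L2 for processing. When
 * promiscuous mode is enabled on the interface, a clone of the packet is
 * also fed to the promiscuous mode input handler so that raw listeners see
 * the traffic as well.
 */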
enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
{
	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
	    net_if_is_promisc(iface)) {
		struct net_pkt *new_pkt;

		new_pkt = net_pkt_clone(pkt, K_NO_WAIT);

		if (net_promisc_mode_input(new_pkt) == NET_DROP) {
			net_pkt_unref(new_pkt);
		}
	}

	return net_if_l2(iface)->recv(iface, pkt);
}

void net_if_register_link_cb(struct net_if_link_cb *link,
			     net_if_link_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&link_callbacks, &link->node);
	sys_slist_prepend(&link_callbacks, &link->node);

	link->cb = cb;

	k_mutex_unlock(&lock);
}

void net_if_unregister_link_cb(struct net_if_link_cb *link)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&link_callbacks, &link->node);

	k_mutex_unlock(&lock);
}

void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
			 int status)
{
	struct net_if_link_cb *link, *tmp;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
		link->cb(iface, lladdr, status);
	}

	k_mutex_unlock(&lock);
}

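/* Checksum offloading helper: returns true when the stack must compute the
 * checksum in software, i.e. when the interface is not Ethernet or the
 * Ethernet driver does not advertise the requested hardware capability.
 */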
static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps)
{
#if defined(CONFIG_NET_L2_ETHERNET)
	if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
		return true;
	}

	return !(net_eth_get_hw_capabilities(iface) & caps);
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(caps);

	return true;
#endif
}

bool net_if_need_calc_tx_checksum(struct net_if *iface)
{
	return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD);
}

bool net_if_need_calc_rx_checksum(struct net_if *iface)
{
	return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD);
}

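/* Convert an interface pointer to its 1-based index, or -1 if the pointer
 * does not belong to the net_if iterable section. The inverse operation is
 * net_if_get_by_index().
 *
 * Illustrative usage:
 *
 *	int idx = net_if_get_by_iface(net_if_get_default());
 */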
int net_if_get_by_iface(struct net_if *iface)
{
	if (!(iface >= _net_if_list_start && iface < _net_if_list_end)) {
		return -1;
	}

	return (iface - _net_if_list_start) + 1;
}

void net_if_foreach(net_if_cb_t cb, void *user_data)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		cb(iface, user_data);
	}
}

bool net_if_is_offloaded(struct net_if *iface)
{
	return (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
		net_if_is_ip_offloaded(iface)) ||
	       (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
		net_if_is_socket_offloaded(iface));
}

static void notify_iface_up(struct net_if *iface)
{
	/* In many places it's assumed that link address was set with
	 * net_if_set_link_addr(). Better check that now.
	 */
#if defined(CONFIG_NET_L2_CANBUS_RAW)
	if (IS_ENABLED(CONFIG_NET_SOCKETS_CAN) &&
	    (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW))) {
		/* CAN does not require link address. */
	} else
#endif	/* CONFIG_NET_L2_CANBUS_RAW */
	{
		if (!net_if_is_offloaded(iface)) {
			NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);
		}
	}

	net_if_flag_set(iface, NET_IF_RUNNING);
	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);
	net_virtual_enable(iface);

	/* If the interface only carries point-to-point traffic then we do
	 * not need to run DAD etc. for it.
	 */
	if (!net_if_is_offloaded(iface) &&
	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
		iface_ipv6_start(iface);
		net_ipv4_autoconf_start(iface);
	}
}

static void notify_iface_down(struct net_if *iface)
{
	net_if_flag_clear(iface, NET_IF_RUNNING);
	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
	net_virtual_disable(iface);

	if (!net_if_is_offloaded(iface) &&
	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
		net_ipv4_autoconf_reset(iface);
	}
}

static inline const char *net_if_oper_state2str(enum net_if_oper_state state)
{
#if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
	switch (state) {
	case NET_IF_OPER_UNKNOWN:
		return "UNKNOWN";
	case NET_IF_OPER_NOTPRESENT:
		return "NOTPRESENT";
	case NET_IF_OPER_DOWN:
		return "DOWN";
	case NET_IF_OPER_LOWERLAYERDOWN:
		return "LOWERLAYERDOWN";
	case NET_IF_OPER_TESTING:
		return "TESTING";
	case NET_IF_OPER_DORMANT:
		return "DORMANT";
	case NET_IF_OPER_UP:
		return "UP";
	default:
		break;
	}

	return "<invalid>";
#else
	ARG_UNUSED(state);

	return "";
#endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
}

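/* Re-evaluate the RFC 2863 style operational state of the interface from its
 * admin state, device readiness, carrier and dormant flags (checked in that
 * order of precedence), then emit the matching up/down notifications when
 * the state transitions to or from NET_IF_OPER_UP.
 */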
static void update_operational_state(struct net_if *iface)
{
	enum net_if_oper_state prev_state = iface->if_dev->oper_state;
	enum net_if_oper_state new_state = NET_IF_OPER_UNKNOWN;

	if (!net_if_is_admin_up(iface)) {
		new_state = NET_IF_OPER_DOWN;
		goto exit;
	}

	if (!device_is_ready(net_if_get_device(iface))) {
		new_state = NET_IF_OPER_LOWERLAYERDOWN;
		goto exit;
	}

	if (!net_if_is_carrier_ok(iface)) {
#if defined(CONFIG_NET_L2_VIRTUAL)
		if (net_if_l2(iface) == &NET_L2_GET_NAME(VIRTUAL)) {
			new_state = NET_IF_OPER_LOWERLAYERDOWN;
		} else
#endif /* CONFIG_NET_L2_VIRTUAL */
		{
			new_state = NET_IF_OPER_DOWN;
		}

		goto exit;
	}

	if (net_if_is_dormant(iface)) {
		new_state = NET_IF_OPER_DORMANT;
		goto exit;
	}

	new_state = NET_IF_OPER_UP;

exit:
	if (net_if_oper_state_set(iface, new_state) != new_state) {
		NET_ERR("Failed to update oper state to %d", new_state);
		return;
	}

	NET_DBG("iface %p, oper state %s admin %s carrier %s dormant %s",
		iface, net_if_oper_state2str(net_if_oper_state(iface)),
		net_if_is_admin_up(iface) ? "UP" : "DOWN",
		net_if_is_carrier_ok(iface) ? "ON" : "OFF",
		net_if_is_dormant(iface) ? "ON" : "OFF");

	if (net_if_oper_state(iface) == NET_IF_OPER_UP) {
		if (prev_state != NET_IF_OPER_UP) {
			notify_iface_up(iface);
		}
	} else {
		if (prev_state == NET_IF_OPER_UP) {
			notify_iface_down(iface);
		}
	}
}

static void init_igmp(struct net_if *iface)
{
#if defined(CONFIG_NET_IPV4_IGMP)
	/* Ensure IPv4 is enabled for this interface. */
	if (net_if_config_ipv4_get(iface, NULL)) {
		return;
	}

	net_ipv4_igmp_init(iface);
#else
	ARG_UNUSED(iface);
	return;
#endif
}

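/* Bring the interface administratively up. The underlying device must be
 * ready and, if the L2 implements enable(), it is asked to enable the
 * interface before the NET_IF_UP flag is set and NET_EVENT_IF_ADMIN_UP is
 * emitted. Returns 0 on success, -EALREADY if already up, -ENXIO if the
 * device is not ready, or a negative L2 specific error.
 *
 * Illustrative usage:
 *
 *	int ret = net_if_up(net_if_get_default());
 *
 *	if (ret < 0 && ret != -EALREADY) {
 *		NET_DBG("Cannot bring interface up (%d)", ret);
 *	}
 */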
int net_if_up(struct net_if *iface)
{
	int status = 0;

	NET_DBG("iface %p", iface);

	net_if_lock(iface);

	if (net_if_flag_is_set(iface, NET_IF_UP)) {
		status = -EALREADY;
		goto out;
	}

	/* If the L2 does not support enable, just set the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	} else {
		/* If the L2 does not implement enable(), then the network
		 * device driver cannot implement start(), in which case
		 * we can do a simple check here and not try to bring the
		 * interface up as the device is not ready.
		 *
		 * If the network device driver does implement start(), then
		 * it could bring the interface up when enable() is called
		 * a few lines below.
		 */
		const struct device *dev;

		dev = net_if_get_device(iface);
		NET_ASSERT(dev);

		/* If the device is not ready it is pointless trying to take it up. */
		if (!device_is_ready(dev)) {
			NET_DBG("Device %s (%p) is not ready", dev->name, dev);
			status = -ENXIO;
			goto out;
		}
	}

	/* Notify L2 to enable the interface */
	status = net_if_l2(iface)->enable(iface, true);
	if (status < 0) {
		goto out;
	}

	init_igmp(iface);

done:
	net_if_flag_set(iface, NET_IF_UP);
	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_UP, iface);
	update_operational_state(iface);

out:
	net_if_unlock(iface);

	return status;
}

int net_if_down(struct net_if *iface)
{
	int status = 0;

	NET_DBG("iface %p", iface);

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_UP)) {
		status = -EALREADY;
		goto out;
	}

	leave_mcast_all(iface);
	leave_ipv4_mcast_all(iface);

	/* If the L2 does not support enable, just clear the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	}

	/* Notify L2 to disable the interface */
	status = net_if_l2(iface)->enable(iface, false);
	if (status < 0) {
		goto out;
	}

done:
	net_if_flag_clear(iface, NET_IF_UP);
	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_DOWN, iface);
	update_operational_state(iface);

out:
	net_if_unlock(iface);

	return status;
}

void net_if_carrier_on(struct net_if *iface)
{
	NET_ASSERT(iface);

	net_if_lock(iface);

	if (!net_if_flag_test_and_set(iface, NET_IF_LOWER_UP)) {
		update_operational_state(iface);
	}

	net_if_unlock(iface);
}

void net_if_carrier_off(struct net_if *iface)
{
	NET_ASSERT(iface);

	net_if_lock(iface);

	if (net_if_flag_test_and_clear(iface, NET_IF_LOWER_UP)) {
		update_operational_state(iface);
	}

	net_if_unlock(iface);
}

void net_if_dormant_on(struct net_if *iface)
{
	NET_ASSERT(iface);

	net_if_lock(iface);

	if (!net_if_flag_test_and_set(iface, NET_IF_DORMANT)) {
		update_operational_state(iface);
	}

	net_if_unlock(iface);
}

void net_if_dormant_off(struct net_if *iface)
{
	NET_ASSERT(iface);

	net_if_lock(iface);

	if (net_if_flag_test_and_clear(iface, NET_IF_DORMANT)) {
		update_operational_state(iface);
	}

	net_if_unlock(iface);
}

#if defined(CONFIG_NET_PROMISCUOUS_MODE)
static int promisc_mode_set(struct net_if *iface, bool enable)
{
	enum net_l2_flags l2_flags = 0;

	NET_ASSERT(iface);

	l2_flags = l2_flags_get(iface);
	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_NET_L2_ETHERNET)
	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
		int ret = net_eth_promisc_mode(iface, enable);

		if (ret < 0) {
			return ret;
		}
	}
#else
	ARG_UNUSED(enable);

	return -ENOTSUP;
#endif

	return 0;
}

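/* Enable promiscuous mode on the interface. The L2 must advertise
 * NET_L2_PROMISC_MODE support, otherwise -ENOTSUP is returned; -EALREADY is
 * returned if the mode was already enabled. Pair with net_if_unset_promisc()
 * to restore normal filtering.
 *
 * Illustrative usage:
 *
 *	int ret = net_if_set_promisc(iface);
 *
 *	if (ret < 0 && ret != -EALREADY) {
 *		NET_DBG("Cannot enable promiscuous mode (%d)", ret);
 *	}
 */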
int net_if_set_promisc(struct net_if *iface)
{
	int ret;

	net_if_lock(iface);

	ret = promisc_mode_set(iface, true);
	if (ret < 0) {
		goto out;
	}

	ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
	if (ret) {
		ret = -EALREADY;
		goto out;
	}

out:
	net_if_unlock(iface);

	return ret;
}

void net_if_unset_promisc(struct net_if *iface)
{
	int ret;

	net_if_lock(iface);

	ret = promisc_mode_set(iface, false);
	if (ret < 0) {
		goto out;
	}

	net_if_flag_clear(iface, NET_IF_PROMISC);

out:
	net_if_unlock(iface);
}

bool net_if_is_promisc(struct net_if *iface)
{
	NET_ASSERT(iface);

	return net_if_flag_is_set(iface, NET_IF_PROMISC);
}
#endif /* CONFIG_NET_PROMISCUOUS_MODE */

#ifdef CONFIG_NET_POWER_MANAGEMENT

int net_if_suspend(struct net_if *iface)
{
	int ret = 0;

	net_if_lock(iface);

	if (net_if_are_pending_tx_packets(iface)) {
		ret = -EBUSY;
		goto out;
	}

	if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
		ret = -EALREADY;
		goto out;
	}

	net_stats_add_suspend_start_time(iface, k_cycle_get_32());

out:
	net_if_unlock(iface);

	return ret;
}

int net_if_resume(struct net_if *iface)
{
	int ret = 0;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
		ret = -EALREADY;
		goto out;
	}

	net_if_flag_clear(iface, NET_IF_SUSPENDED);

	net_stats_add_suspend_end_time(iface, k_cycle_get_32());

out:
	net_if_unlock(iface);

	return ret;
}

bool net_if_is_suspended(struct net_if *iface)
{
	return net_if_flag_is_set(iface, NET_IF_SUSPENDED);
}

#endif /* CONFIG_NET_POWER_MANAGEMENT */

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
static void net_tx_ts_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	struct net_pkt *pkt;

	NET_DBG("Starting TX timestamp callback thread");

	while (1) {
		pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
		if (pkt) {
			net_if_call_timestamp_cb(pkt);
		}
	}
}

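/* Register a TX timestamp callback. Passing NULL for pkt and/or iface makes
 * the callback match any packet and/or any interface; otherwise it only
 * fires for the given packet on the given interface. The callback runs from
 * the tx_tstamp thread via net_if_call_timestamp_cb().
 */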
void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
				  struct net_pkt *pkt,
				  struct net_if *iface,
				  net_if_timestamp_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
	sys_slist_prepend(&timestamp_callbacks, &handle->node);

	handle->iface = iface;
	handle->cb = cb;
	handle->pkt = pkt;

	k_mutex_unlock(&lock);
}

void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);

	k_mutex_unlock(&lock);
}

void net_if_call_timestamp_cb(struct net_pkt *pkt)
{
	sys_snode_t *sn, *sns;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_NODE_SAFE(&timestamp_callbacks, sn, sns) {
		struct net_if_timestamp_cb *handle =
			CONTAINER_OF(sn, struct net_if_timestamp_cb, node);

		if (((handle->iface == NULL) ||
		     (handle->iface == net_pkt_iface(pkt))) &&
		    (handle->pkt == NULL || handle->pkt == pkt)) {
			handle->cb(pkt);
		}
	}

	k_mutex_unlock(&lock);
}

void net_if_add_tx_timestamp(struct net_pkt *pkt)
{
	k_fifo_put(&tx_ts_queue, pkt);
}
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

bool net_if_is_wifi(struct net_if *iface)
{
	if (net_if_is_offloaded(iface)) {
		return net_off_is_wifi_offloaded(iface);
	}
#if defined(CONFIG_NET_L2_ETHERNET)
	return net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) &&
		net_eth_type_is_wifi(iface);
#endif
	return false;
}

struct net_if *net_if_get_first_wifi(void)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_is_wifi(iface)) {
			return iface;
		}
	}
	return NULL;
}

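/* Interface name accessors, available when CONFIG_NET_INTERFACE_NAME is
 * enabled. net_if_get_name() copies the NUL terminated name into the buffer
 * and returns its length, net_if_set_name() stores a new name, and
 * net_if_get_by_name() resolves a name back to an interface index.
 *
 * Illustrative usage:
 *
 *	char name[CONFIG_NET_INTERFACE_NAME_LEN + 1];
 *
 *	if (net_if_get_name(iface, name, sizeof(name)) >= 0) {
 *		NET_DBG("iface %p is named %s", iface, name);
 *	}
 */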
int net_if_get_name(struct net_if *iface, char *buf, int len)
{
#if defined(CONFIG_NET_INTERFACE_NAME)
	int name_len;

	if (iface == NULL || buf == NULL || len <= 0) {
		return -EINVAL;
	}

	name_len = strlen(net_if_get_config(iface)->name);
	if (name_len >= len) {
		return -ERANGE;
	}

	/* Copy string and null terminator */
	memcpy(buf, net_if_get_config(iface)->name, name_len + 1);

	return name_len;
#else
	return -ENOTSUP;
#endif
}

int net_if_set_name(struct net_if *iface, const char *buf)
{
#if defined(CONFIG_NET_INTERFACE_NAME)
	int name_len;

	if (iface == NULL || buf == NULL) {
		return -EINVAL;
	}

	name_len = strlen(buf);
	if (name_len >= sizeof(iface->config.name)) {
		return -ENAMETOOLONG;
	}

	/* Copy string and null terminator */
	memcpy(net_if_get_config(iface)->name, buf, name_len + 1);

	return 0;
#else
	return -ENOTSUP;
#endif
}

int net_if_get_by_name(const char *name)
{
#if defined(CONFIG_NET_INTERFACE_NAME)
	if (name == NULL) {
		return -EINVAL;
	}

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (strncmp(net_if_get_config(iface)->name, name, strlen(name)) == 0) {
			return net_if_get_by_iface(iface);
		}
	}

	return -ENOENT;
#else
	return -ENOTSUP;
#endif
}

#if defined(CONFIG_NET_INTERFACE_NAME)
static void set_default_name(struct net_if *iface)
{
	char name[CONFIG_NET_INTERFACE_NAME_LEN + 1] = { 0 };
	int ret;

	if (net_if_is_wifi(iface)) {
		static int count;

		snprintk(name, sizeof(name) - 1, "wlan%d", count++);

	} else if (IS_ENABLED(CONFIG_NET_L2_ETHERNET)) {
#if defined(CONFIG_NET_L2_ETHERNET)
		if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
			static int count;

			snprintk(name, sizeof(name) - 1, "eth%d", count++);
		}
#endif /* CONFIG_NET_L2_ETHERNET */
	}

	if (IS_ENABLED(CONFIG_NET_L2_IEEE802154)) {
#if defined(CONFIG_NET_L2_IEEE802154)
		if (net_if_l2(iface) == &NET_L2_GET_NAME(IEEE802154)) {
			static int count;

			snprintk(name, sizeof(name) - 1, "ieee%d", count++);
		}
#endif /* CONFIG_NET_L2_IEEE802154 */
	}

	if (IS_ENABLED(CONFIG_NET_L2_DUMMY)) {
#if defined(CONFIG_NET_L2_DUMMY)
		if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
			static int count;

			snprintk(name, sizeof(name) - 1, "dummy%d", count++);
		}
#endif /* CONFIG_NET_L2_DUMMY */
	}

	if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW)) {
#if defined(CONFIG_NET_L2_CANBUS_RAW)
		if (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW)) {
			static int count;

			snprintk(name, sizeof(name) - 1, "can%d", count++);
		}
#endif /* CONFIG_NET_L2_CANBUS_RAW */
	}

	if (IS_ENABLED(CONFIG_NET_L2_PPP)) {
#if defined(CONFIG_NET_L2_PPP)
		if (net_if_l2(iface) == &NET_L2_GET_NAME(PPP)) {
			static int count;

			snprintk(name, sizeof(name) - 1, "ppp%d", count++);
		}
#endif /* CONFIG_NET_L2_PPP */
	}

	if (name[0] == '\0') {
		static int count;

		snprintk(name, sizeof(name) - 1, "net%d", count++);
	}

	ret = net_if_set_name(iface, name);
	if (ret < 0) {
		NET_WARN("Cannot set default name for interface %d (%p) (%d)",
			 net_if_get_by_iface(iface), iface, ret);
	}
}
#endif /* CONFIG_NET_INTERFACE_NAME */

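/* One-time initialization of all statically defined network interfaces:
 * assign default names, initialize each interface, set up the IPv6/IPv4
 * address bookkeeping and router handling, and start the optional TX
 * timestamp thread. Called by the network core during startup;
 * net_if_post_init() later brings the interfaces up once TX processing
 * is running.
 */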
void net_if_init(void)
{
	int if_count = 0;

	NET_DBG("");

	k_mutex_lock(&lock, K_FOREVER);

	net_tc_tx_init();

	STRUCT_SECTION_FOREACH(net_if, iface) {

#if defined(CONFIG_NET_INTERFACE_NAME)
		memset(net_if_get_config(iface)->name, 0,
		       sizeof(iface->config.name));

		set_default_name(iface);
#endif

		init_iface(iface);
		if_count++;
	}

	if (if_count == 0) {
		NET_ERR("There is no network interface to work with!");
		goto out;
	}

#if defined(CONFIG_ASSERT)
	/* Do an extra check that verifies that the interface count is
	 * correct.
	 */
	int count_if;

	NET_IFACE_COUNT(&count_if);
	NET_ASSERT(count_if == if_count);
#endif

	iface_ipv6_init(if_count);
	iface_ipv4_init(if_count);
	iface_router_init();

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
	k_thread_create(&tx_thread_ts, tx_ts_stack,
			K_KERNEL_STACK_SIZEOF(tx_ts_stack),
			net_tx_ts_thread,
			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

#if defined(CONFIG_NET_VLAN)
	/* Make sure that we do not have too many network interfaces
	 * compared to the number of VLAN interfaces.
	 */
	if_count = 0;

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
			if_count++;
		}
	}

	if (if_count > CONFIG_NET_VLAN_COUNT) {
		NET_WARN("You have configured only %d VLAN interfaces"
			 " but you have %d network interfaces.",
			 CONFIG_NET_VLAN_COUNT, if_count);
	}
#endif

out:
	k_mutex_unlock(&lock);
}


void net_if_post_init(void)
{
	NET_DBG("");

	/* After TX is running, attempt to bring the interface up */
	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
			net_if_up(iface);
		}
	}
}