1 /*
2  * Copyright (c) 2016 Intel Corporation.
3  * Copyright (c) 2023 Nordic Semiconductor ASA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/logging/log.h>
9 LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
10 
11 #include <zephyr/init.h>
12 #include <zephyr/kernel.h>
13 #include <zephyr/linker/sections.h>
14 #include <zephyr/random/random.h>
15 #include <zephyr/syscall_handler.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <zephyr/net/igmp.h>
19 #include <zephyr/net/net_core.h>
20 #include <zephyr/net/net_event.h>
21 #include <zephyr/net/net_pkt.h>
22 #include <zephyr/net/net_if.h>
23 #include <zephyr/net/net_mgmt.h>
24 #include <zephyr/net/ethernet.h>
25 #include <zephyr/net/offloaded_netdev.h>
26 #include <zephyr/net/virtual.h>
27 #include <zephyr/sys/iterable_sections.h>
28 
29 #include "net_private.h"
30 #include "ipv4.h"
31 #include "ipv6.h"
32 #include "ipv4_autoconf_internal.h"
33 
34 #include "net_stats.h"
35 
36 #define REACHABLE_TIME (MSEC_PER_SEC * 30) /* in ms */
37 /*
38  * split the min/max random reachable factors into numerator/denominator
39  * so that integer-based math works better
40  */
41 #define MIN_RANDOM_NUMER (1)
42 #define MIN_RANDOM_DENOM (2)
43 #define MAX_RANDOM_NUMER (3)
44 #define MAX_RANDOM_DENOM (2)
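
/* Illustrative sketch only (assumption, not code used in this file): RFC 4861
 * picks the neighbor reachable time uniformly between MIN_RANDOM_FACTOR (1/2)
 * and MAX_RANDOM_FACTOR (3/2) of the base value. With the integer factors
 * above that could be computed roughly as:
 *
 *	uint32_t min_r = base * MIN_RANDOM_NUMER / MIN_RANDOM_DENOM;
 *	uint32_t max_r = base * MAX_RANDOM_NUMER / MAX_RANDOM_DENOM;
 *	uint32_t reachable_ms = min_r + sys_rand32_get() % (max_r - min_r);
 *
 * where "base" stands for a per-interface base reachable time in ms.
 */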
45 
46 static K_MUTEX_DEFINE(lock);
47 
48 /* net_if dedicated section limiters */
49 extern struct net_if _net_if_list_start[];
50 extern struct net_if _net_if_list_end[];
51 
52 static struct net_if *default_iface;
53 
54 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
55 static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
56 static struct k_work_delayable router_timer;
57 static sys_slist_t active_router_timers;
58 #endif
59 
60 #if defined(CONFIG_NET_NATIVE_IPV6)
61 /* Timer that triggers network address renewal */
62 static struct k_work_delayable address_lifetime_timer;
63 
64 /* Track currently active address lifetime timers */
65 static sys_slist_t active_address_lifetime_timers;
66 
67 /* Timer that triggers IPv6 prefix lifetime expiry */
68 static struct k_work_delayable prefix_lifetime_timer;
69 
70 /* Track currently active IPv6 prefix lifetime timers */
71 static sys_slist_t active_prefix_lifetime_timers;
72 
73 #if defined(CONFIG_NET_IPV6_DAD)
74 /** Duplicate address detection (DAD) timer */
75 static struct k_work_delayable dad_timer;
76 static sys_slist_t active_dad_timers;
77 #endif
78 
79 #if defined(CONFIG_NET_IPV6_ND)
80 static struct k_work_delayable rs_timer;
81 static sys_slist_t active_rs_timers;
82 #endif
83 
84 static struct {
85 	struct net_if_ipv6 ipv6;
86 	struct net_if *iface;
87 } ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
88 #endif /* CONFIG_NET_NATIVE_IPV6 */
89 
90 #if defined(CONFIG_NET_NATIVE_IPV4)
91 static struct {
92 	struct net_if_ipv4 ipv4;
93 	struct net_if *iface;
94 } ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
95 #endif /* CONFIG_NET_NATIVE_IPV4 */
96 
97 /* We keep track of the link callbacks in this list.
98  */
99 static sys_slist_t link_callbacks;
100 
101 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
102 /* Multicast join/leave tracking.
103  */
104 static sys_slist_t mcast_monitor_callbacks;
105 #endif
106 
107 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
108 #if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
109 #define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
110 #endif
111 
112 K_KERNEL_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
113 K_FIFO_DEFINE(tx_ts_queue);
114 
115 static struct k_thread tx_thread_ts;
116 
117 /* We keep track of the timestamp callbacks in this list.
118  */
119 static sys_slist_t timestamp_callbacks;
120 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
121 
122 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
123 #define debug_check_packet(pkt)						\
124 	do {								\
125 		NET_DBG("Processing (pkt %p, prio %d) network packet "	\
126 			"iface %p/%d",					\
127 			pkt, net_pkt_priority(pkt),			\
128 			net_pkt_iface(pkt),				\
129 			net_if_get_by_iface(net_pkt_iface(pkt)));	\
130 									\
131 		NET_ASSERT(pkt->frags);					\
132 	} while (0)
133 #else
134 #define debug_check_packet(...)
135 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
136 
137 struct net_if *z_impl_net_if_get_by_index(int index)
138 {
139 	if (index <= 0) {
140 		return NULL;
141 	}
142 
143 	if (&_net_if_list_start[index - 1] >= _net_if_list_end) {
144 		NET_DBG("Index %d is too large", index);
145 		return NULL;
146 	}
147 
148 	return &_net_if_list_start[index - 1];
149 }
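
/* Usage sketch (illustrative only): resolve a 1-based interface index, for
 * example one coming from user configuration, back to a net_if pointer.
 *
 *	struct net_if *iface = net_if_get_by_index(1);
 *
 * A NULL return means the index was zero, negative, or past the last
 * registered interface.
 */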
150 
151 #ifdef CONFIG_USERSPACE
152 struct net_if *z_vrfy_net_if_get_by_index(int index)
153 {
154 	struct net_if *iface;
155 
156 	iface = net_if_get_by_index(index);
157 	if (!iface) {
158 		return NULL;
159 	}
160 
161 	if (!k_object_is_valid(iface, K_OBJ_NET_IF)) {
162 		return NULL;
163 	}
164 
165 	return iface;
166 }
167 
168 #include <syscalls/net_if_get_by_index_mrsh.c>
169 #endif
170 
171 static inline void net_context_send_cb(struct net_context *context,
172 				       int status)
173 {
174 	if (!context) {
175 		return;
176 	}
177 
178 	if (context->send_cb) {
179 		context->send_cb(context, status, context->user_data);
180 	}
181 
182 	if (IS_ENABLED(CONFIG_NET_UDP) &&
183 	    net_context_get_proto(context) == IPPROTO_UDP) {
184 		net_stats_update_udp_sent(net_context_get_iface(context));
185 	} else if (IS_ENABLED(CONFIG_NET_TCP) &&
186 		   net_context_get_proto(context) == IPPROTO_TCP) {
187 		net_stats_update_tcp_seg_sent(net_context_get_iface(context));
188 	}
189 }
190 
191 static void update_txtime_stats_detail(struct net_pkt *pkt,
192 				       uint32_t start_time, uint32_t stop_time)
193 {
194 	uint32_t val, prev = start_time;
195 	int i;
196 
197 	for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
198 		if (!net_pkt_stats_tick(pkt)[i]) {
199 			break;
200 		}
201 
202 		val = net_pkt_stats_tick(pkt)[i] - prev;
203 		prev = net_pkt_stats_tick(pkt)[i];
204 		net_pkt_stats_tick(pkt)[i] = val;
205 	}
206 }
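
/* Worked example of the in-place conversion above (illustrative values):
 * with start_time = 100 and recorded ticks {110, 125, 160}, the array
 * becomes {10, 15, 35}, i.e. the time spent in each detailed TX stage
 * rather than absolute cycle counts.
 */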
207 
208 static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
209 {
210 	struct net_linkaddr ll_dst = {
211 		.addr = NULL
212 	};
213 	struct net_linkaddr_storage ll_dst_storage;
214 	struct net_context *context;
215 	uint32_t create_time;
216 	int status;
217 
218 	/* We collect send statistics for each socket priority if enabled */
219 	uint8_t pkt_priority;
220 
221 	if (!pkt) {
222 		return false;
223 	}
224 
225 	create_time = net_pkt_create_time(pkt);
226 
227 	debug_check_packet(pkt);
228 
229 	/* If there are any link callbacks, copy the destination address out
230 	 * of the packet now, in case the packet is freed before the callback
231 	 * is called.
232 	 */
233 	if (!sys_slist_is_empty(&link_callbacks)) {
234 		if (net_linkaddr_set(&ll_dst_storage,
235 				     net_pkt_lladdr_dst(pkt)->addr,
236 				     net_pkt_lladdr_dst(pkt)->len) == 0) {
237 			ll_dst.addr = ll_dst_storage.addr;
238 			ll_dst.len = ll_dst_storage.len;
239 			ll_dst.type = net_pkt_lladdr_dst(pkt)->type;
240 		}
241 	}
242 
243 	context = net_pkt_context(pkt);
244 
245 	if (net_if_flag_is_set(iface, NET_IF_LOWER_UP)) {
246 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
247 			pkt_priority = net_pkt_priority(pkt);
248 
249 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
250 				/* Make sure the statistics information is not
251 				 * lost by keeping the net_pkt over L2 send.
252 				 */
253 				net_pkt_ref(pkt);
254 			}
255 		}
256 
257 		status = net_if_l2(iface)->send(iface, pkt);
258 
259 		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
260 			uint32_t end_tick = k_cycle_get_32();
261 
262 			net_pkt_set_tx_stats_tick(pkt, end_tick);
263 
264 			net_stats_update_tc_tx_time(iface,
265 						    pkt_priority,
266 						    create_time,
267 						    end_tick);
268 
269 			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
270 				update_txtime_stats_detail(
271 					pkt,
272 					create_time,
273 					end_tick);
274 
275 				net_stats_update_tc_tx_time_detail(
276 					iface, pkt_priority,
277 					net_pkt_stats_tick(pkt));
278 
279 				/* For TCP connections, we might keep the pkt
280 				 * longer so that we can resend it if needed.
281 				 * Because of that we need to clear the
282 				 * statistics here.
283 				 */
284 				net_pkt_stats_tick_reset(pkt);
285 
286 				net_pkt_unref(pkt);
287 			}
288 		}
289 
290 	} else {
291 		/* Drop packet if interface is not up */
292 		NET_WARN("iface %p is down", iface);
293 		status = -ENETDOWN;
294 	}
295 
296 	if (status < 0) {
297 		net_pkt_unref(pkt);
298 	} else {
299 		net_stats_update_bytes_sent(iface, status);
300 	}
301 
302 	if (context) {
303 		NET_DBG("Calling context send cb %p status %d",
304 			context, status);
305 
306 		net_context_send_cb(context, status);
307 	}
308 
309 	if (ll_dst.addr) {
310 		net_if_call_link_cb(iface, &ll_dst, status);
311 	}
312 
313 	return true;
314 }
315 
316 void net_process_tx_packet(struct net_pkt *pkt)
317 {
318 	struct net_if *iface;
319 
320 	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
321 
322 	iface = net_pkt_iface(pkt);
323 
324 	net_if_tx(iface, pkt);
325 
326 #if defined(CONFIG_NET_POWER_MANAGEMENT)
327 	iface->tx_pending--;
328 #endif
329 }
330 
331 void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
332 {
333 	if (!net_pkt_filter_send_ok(pkt)) {
334 		/* silently drop the packet */
335 		net_pkt_unref(pkt);
336 		return;
337 	}
338 
339 	uint8_t prio = net_pkt_priority(pkt);
340 	uint8_t tc = net_tx_priority2tc(prio);
341 
342 	net_stats_update_tc_sent_pkt(iface, tc);
343 	net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
344 	net_stats_update_tc_sent_priority(iface, tc, prio);
345 
346 	/* For the highest priority packets, skip the TX queue and push the
347 	 * packet directly to the driver. Do the same if there is no TX
348 	 * queue/thread at all.
349 	 */
350 	if ((IS_ENABLED(CONFIG_NET_TC_SKIP_FOR_HIGH_PRIO) &&
351 	     prio == NET_PRIORITY_CA) || NET_TC_TX_COUNT == 0) {
352 		net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
353 
354 		net_if_tx(net_pkt_iface(pkt), pkt);
355 		return;
356 	}
357 
358 #if NET_TC_TX_COUNT > 1
359 	NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
360 #endif
361 
362 #if defined(CONFIG_NET_POWER_MANAGEMENT)
363 	iface->tx_pending++;
364 #endif
365 
366 	if (!net_tc_submit_to_tx_queue(tc, pkt)) {
367 #if defined(CONFIG_NET_POWER_MANAGEMENT)
368 		iface->tx_pending--
369 #endif
370 			;
371 	}
372 }
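
/* Usage sketch (illustrative only, assumes CONFIG_NET_TC_SKIP_FOR_HIGH_PRIO
 * is enabled): a packet marked with the highest priority class bypasses the
 * TX traffic class queue and is pushed straight to the driver.
 *
 *	net_pkt_set_priority(pkt, NET_PRIORITY_CA);
 *	net_if_queue_tx(iface, pkt);
 */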
373 
374 void net_if_stats_reset(struct net_if *iface)
375 {
376 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
377 	STRUCT_SECTION_FOREACH(net_if, tmp) {
378 		if (iface == tmp) {
379 			net_if_lock(iface);
380 			memset(&iface->stats, 0, sizeof(iface->stats));
381 			net_if_unlock(iface);
382 			return;
383 		}
384 	}
385 #else
386 	ARG_UNUSED(iface);
387 #endif
388 }
389 
390 void net_if_stats_reset_all(void)
391 {
392 #if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
393 	STRUCT_SECTION_FOREACH(net_if, iface) {
394 		net_if_lock(iface);
395 		memset(&iface->stats, 0, sizeof(iface->stats));
396 		net_if_unlock(iface);
397 	}
398 #endif
399 }
400 
401 static inline void init_iface(struct net_if *iface)
402 {
403 	const struct net_if_api *api = net_if_get_device(iface)->api;
404 
405 	if (!api || !api->init) {
406 		NET_ERR("Iface %p driver API init NULL", iface);
407 		return;
408 	}
409 
410 	/* By default IPv4 and IPv6 are enabled for a given network interface.
411 	 * These can be turned off later if needed.
412 	 */
413 #if defined(CONFIG_NET_NATIVE_IPV4)
414 	net_if_flag_set(iface, NET_IF_IPV4);
415 #endif
416 #if defined(CONFIG_NET_NATIVE_IPV6)
417 	net_if_flag_set(iface, NET_IF_IPV6);
418 #endif
419 
420 	net_virtual_init(iface);
421 
422 	NET_DBG("On iface %p", iface);
423 
424 #ifdef CONFIG_USERSPACE
425 	z_object_init(iface);
426 #endif
427 
428 	k_mutex_init(&iface->lock);
429 
430 	api->init(iface);
431 }
432 
433 enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
434 {
435 	struct net_context *context = net_pkt_context(pkt);
436 	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
437 	enum net_verdict verdict = NET_OK;
438 	int status = -EIO;
439 
440 	net_if_lock(iface);
441 
442 	if (!net_if_flag_is_set(iface, NET_IF_LOWER_UP) ||
443 	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
444 		/* Drop packet if interface is not up */
445 		NET_WARN("iface %p is down", iface);
446 		verdict = NET_DROP;
447 		status = -ENETDOWN;
448 		goto done;
449 	}
450 
451 	if (IS_ENABLED(CONFIG_NET_OFFLOAD) && !net_if_l2(iface)) {
452 		NET_WARN("no l2 for iface %p, discard pkt", iface);
453 		verdict = NET_DROP;
454 		goto done;
455 	}
456 
457 	/* If the ll address is not set at all, then we must set
458 	 * it here.
459 	 * Workaround Linux bug, see:
460 	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
461 	 */
462 	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
463 	    !net_pkt_lladdr_src(pkt)->addr) {
464 		net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
465 		net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
466 	}
467 
468 #if defined(CONFIG_NET_LOOPBACK)
469 	/* If the packet is destined back to us, then there is no need to do
470 	 * additional checks, so let the packet through.
471 	 */
472 	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
473 		goto done;
474 	}
475 #endif
476 
477 	/* Bypass the IP stack with SOCK_RAW/IPPROTO_RAW sockets */
478 	if (IS_ENABLED(CONFIG_NET_SOCKETS_PACKET) &&
479 	    context && net_context_get_type(context) == SOCK_RAW &&
480 	    net_context_get_proto(context) == IPPROTO_RAW) {
481 		goto done;
482 	}
483 
484 	/* If the ll dst address is not set, check if it is present in the
485 	 * nbr cache.
486 	 */
487 	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
488 		verdict = net_ipv6_prepare_for_send(pkt);
489 	}
490 
491 #if defined(CONFIG_NET_IPV4_FRAGMENT)
492 	if (net_pkt_family(pkt) == AF_INET) {
493 		verdict = net_ipv4_prepare_for_send(pkt);
494 	}
495 #endif
496 
497 done:
498 	/*   NET_OK in which case the packet was checked successfully. In this
499 	 *   case the net_context callback is called after successful delivery
500 	 *   in net_if_tx_thread().
501 	 *
502 	 *   NET_DROP in which case we call the net_context callback that will
503 	 *   report the status to the user application.
504 	 *
505 	 *   NET_CONTINUE in which case the sending of the packet is delayed.
506 	 *   This can happen for example if we need to do IPv6 ND to figure
507 	 *   out the link layer address.
508 	 */
509 	if (verdict == NET_DROP) {
510 		if (context) {
511 			NET_DBG("Calling ctx send cb %p verdict %d",
512 				context, verdict);
513 			net_context_send_cb(context, status);
514 		}
515 
516 		if (dst->addr) {
517 			net_if_call_link_cb(iface, dst, status);
518 		}
519 	} else if (verdict == NET_OK) {
520 		/* Packet is ready to be sent by L2, let's queue */
521 		net_if_queue_tx(iface, pkt);
522 	}
523 
524 	net_if_unlock(iface);
525 
526 	return verdict;
527 }
528 
529 int net_if_set_link_addr_locked(struct net_if *iface,
530 				uint8_t *addr, uint8_t len,
531 				enum net_link_type type)
532 {
533 	int ret;
534 
535 	net_if_lock(iface);
536 
537 	ret = net_if_set_link_addr_unlocked(iface, addr, len, type);
538 
539 	net_if_unlock(iface);
540 
541 	return ret;
542 }
543 
544 struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
545 {
546 	STRUCT_SECTION_FOREACH(net_if, iface) {
547 		net_if_lock(iface);
548 		if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
549 			    ll_addr->len)) {
550 			net_if_unlock(iface);
551 			return iface;
552 		}
553 		net_if_unlock(iface);
554 	}
555 
556 	return NULL;
557 }
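
/* Usage sketch (illustrative only, "mac" is a hypothetical 6 byte Ethernet
 * address): find the interface that owns a given link-layer address.
 *
 *	struct net_linkaddr lladdr = {
 *		.addr = mac,
 *		.len = 6U,
 *		.type = NET_LINK_ETHERNET,
 *	};
 *	struct net_if *iface = net_if_get_by_link_addr(&lladdr);
 */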
558 
559 struct net_if *net_if_lookup_by_dev(const struct device *dev)
560 {
561 	STRUCT_SECTION_FOREACH(net_if, iface) {
562 		if (net_if_get_device(iface) == dev) {
563 			return iface;
564 		}
565 	}
566 
567 	return NULL;
568 }
569 
570 void net_if_set_default(struct net_if *iface)
571 {
572 	default_iface = iface;
573 }
574 
575 struct net_if *net_if_get_default(void)
576 {
577 	struct net_if *iface = NULL;
578 
579 	if (&_net_if_list_start[0] == &_net_if_list_end[0]) {
580 		return NULL;
581 	}
582 
583 	if (default_iface != NULL) {
584 		return default_iface;
585 	}
586 
587 #if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
588 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
589 #endif
590 #if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
591 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
592 #endif
593 #if defined(CONFIG_NET_DEFAULT_IF_BLUETOOTH)
594 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(BLUETOOTH));
595 #endif
596 #if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
597 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
598 #endif
599 #if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
600 	iface = net_if_get_first_by_type(NULL);
601 #endif
602 #if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
603 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
604 #endif
605 #if defined(CONFIG_NET_DEFAULT_IF_PPP)
606 	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
607 #endif
608 #if defined(CONFIG_NET_DEFAULT_IF_UP)
609 	iface = net_if_get_first_up();
610 #endif
611 #if defined(CONFIG_NET_DEFAULT_IF_WIFI)
612 	iface = net_if_get_first_wifi();
613 #endif
614 	return iface ? iface : _net_if_list_start;
615 }
616 
617 struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
618 {
619 	STRUCT_SECTION_FOREACH(net_if, iface) {
620 		if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
621 		    !l2 && net_if_offload(iface)) {
622 			return iface;
623 		}
624 
625 		if (net_if_l2(iface) == l2) {
626 			return iface;
627 		}
628 	}
629 
630 	return NULL;
631 }
632 
633 struct net_if *net_if_get_first_up(void)
634 {
635 	STRUCT_SECTION_FOREACH(net_if, iface) {
636 		if (net_if_flag_is_set(iface, NET_IF_UP)) {
637 			return iface;
638 		}
639 	}
640 
641 	return NULL;
642 }
643 
644 static enum net_l2_flags l2_flags_get(struct net_if *iface)
645 {
646 	enum net_l2_flags flags = 0;
647 
648 	if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
649 		flags = net_if_l2(iface)->get_flags(iface);
650 	}
651 
652 	return flags;
653 }
654 
655 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
656 /* Return how many bits are shared between two IP addresses */
657 static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len)
658 {
659 	uint8_t j, k, xor;
660 	uint8_t len = 0U;
661 
662 	for (j = 0U; j < addr_len; j++) {
663 		if (src[j] == dst[j]) {
664 			len += 8U;
665 		} else {
666 			xor = src[j] ^ dst[j];
667 			for (k = 0U; k < 8; k++) {
668 				if (!(xor & 0x80)) {
669 					len++;
670 					xor <<= 1;
671 				} else {
672 					break;
673 				}
674 			}
675 			break;
676 		}
677 	}
678 
679 	return len;
680 }
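
/* Worked example: for src 2001:db8::1 and dst 2001:db8::8000:0:0:1 the
 * first 8 bytes match (64 bits); the 9th bytes are 0x00 vs 0x80, whose XOR
 * has the top bit set, so no further bits are counted and the result is 64.
 */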
681 
682 static struct net_if_router *iface_router_lookup(struct net_if *iface,
683 						 uint8_t family, void *addr)
684 {
685 	struct net_if_router *router = NULL;
686 	int i;
687 
688 	k_mutex_lock(&lock, K_FOREVER);
689 
690 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
691 		if (!routers[i].is_used ||
692 		    routers[i].address.family != family ||
693 		    routers[i].iface != iface) {
694 			continue;
695 		}
696 
697 		if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
698 		     net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
699 				       (struct in6_addr *)addr)) ||
700 		    (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
701 		     net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
702 				       (struct in_addr *)addr))) {
703 			router = &routers[i];
704 			goto out;
705 		}
706 	}
707 
708 out:
709 	k_mutex_unlock(&lock);
710 
711 	return router;
712 }
713 
714 static void iface_router_notify_deletion(struct net_if_router *router,
715 					 const char *delete_reason)
716 {
717 	if (IS_ENABLED(CONFIG_NET_IPV6) &&
718 	    router->address.family == AF_INET6) {
719 		NET_DBG("IPv6 router %s %s",
720 			net_sprint_ipv6_addr(net_if_router_ipv6(router)),
721 			delete_reason);
722 
723 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
724 						router->iface,
725 						&router->address.in6_addr,
726 						sizeof(struct in6_addr));
727 	} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
728 		   router->address.family == AF_INET) {
729 		NET_DBG("IPv4 router %s %s",
730 			net_sprint_ipv4_addr(net_if_router_ipv4(router)),
731 			delete_reason);
732 
733 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
734 						router->iface,
735 						&router->address.in_addr,
736 						sizeof(struct in_addr));
737 	}
738 }
739 
740 static inline int32_t iface_router_ends(const struct net_if_router *router,
741 					uint32_t now)
742 {
743 	uint32_t ends = router->life_start;
744 
745 	ends += MSEC_PER_SEC * router->lifetime;
746 
747 	/* Signed number of ms until router lifetime ends */
748 	return (int32_t)(ends - now);
749 }
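
/* Example of the wrap-safe arithmetic above (illustrative values): with
 * life_start = 0xFFFFF000, lifetime = 10 s and now = 0x00001000, "ends"
 * wraps to 0x00001710 and the signed difference is 0x710 = 1808 ms left,
 * so the 32-bit wrap of k_uptime_get_32() is handled correctly.
 */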
750 
751 static void iface_router_update_timer(uint32_t now)
752 {
753 	struct net_if_router *router, *next;
754 	uint32_t new_delay = UINT32_MAX;
755 
756 	k_mutex_lock(&lock, K_FOREVER);
757 
758 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
759 					 router, next, node) {
760 		int32_t ends = iface_router_ends(router, now);
761 
762 		if (ends <= 0) {
763 			new_delay = 0;
764 			break;
765 		}
766 
767 		new_delay = MIN((uint32_t)ends, new_delay);
768 	}
769 
770 	if (new_delay == UINT32_MAX) {
771 		k_work_cancel_delayable(&router_timer);
772 	} else {
773 		k_work_reschedule(&router_timer, K_MSEC(new_delay));
774 	}
775 
776 	k_mutex_unlock(&lock);
777 }
778 
779 static void iface_router_expired(struct k_work *work)
780 {
781 	uint32_t current_time = k_uptime_get_32();
782 	struct net_if_router *router, *next;
783 	sys_snode_t *prev_node = NULL;
784 
785 	ARG_UNUSED(work);
786 
787 	k_mutex_lock(&lock, K_FOREVER);
788 
789 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
790 					  router, next, node) {
791 		int32_t ends = iface_router_ends(router, current_time);
792 
793 		if (ends > 0) {
794 			/* We have to loop over all active routers as their
795 			 * lifetimes differ from each other.
796 			 */
797 			prev_node = &router->node;
798 			continue;
799 		}
800 
801 		iface_router_notify_deletion(router, "has expired");
802 		sys_slist_remove(&active_router_timers,
803 				 prev_node, &router->node);
804 		router->is_used = false;
805 	}
806 
807 	iface_router_update_timer(current_time);
808 
809 	k_mutex_unlock(&lock);
810 }
811 
812 static struct net_if_router *iface_router_add(struct net_if *iface,
813 					      uint8_t family, void *addr,
814 					      bool is_default,
815 					      uint16_t lifetime)
816 {
817 	struct net_if_router *router = NULL;
818 	int i;
819 
820 	k_mutex_lock(&lock, K_FOREVER);
821 
822 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
823 		if (routers[i].is_used) {
824 			continue;
825 		}
826 
827 		routers[i].is_used = true;
828 		routers[i].iface = iface;
829 		routers[i].address.family = family;
830 
831 		if (lifetime) {
832 			routers[i].is_default = true;
833 			routers[i].is_infinite = false;
834 			routers[i].lifetime = lifetime;
835 			routers[i].life_start = k_uptime_get_32();
836 
837 			sys_slist_append(&active_router_timers,
838 					 &routers[i].node);
839 
840 			iface_router_update_timer(routers[i].life_start);
841 		} else {
842 			routers[i].is_default = false;
843 			routers[i].is_infinite = true;
844 			routers[i].lifetime = 0;
845 		}
846 
847 		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
848 			memcpy(net_if_router_ipv6(&routers[i]), addr,
849 			       sizeof(struct in6_addr));
850 			net_mgmt_event_notify_with_info(
851 					NET_EVENT_IPV6_ROUTER_ADD, iface,
852 					&routers[i].address.in6_addr,
853 					sizeof(struct in6_addr));
854 
855 			NET_DBG("interface %p router %s lifetime %u default %d "
856 				"added", iface,
857 				net_sprint_ipv6_addr((struct in6_addr *)addr),
858 				lifetime, routers[i].is_default);
859 		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
860 			memcpy(net_if_router_ipv4(&routers[i]), addr,
861 			       sizeof(struct in_addr));
862 			routers[i].is_default = is_default;
863 
864 			net_mgmt_event_notify_with_info(
865 					NET_EVENT_IPV4_ROUTER_ADD, iface,
866 					&routers[i].address.in_addr,
867 					sizeof(struct in_addr));
868 
869 			NET_DBG("interface %p router %s lifetime %u default %d "
870 				"added", iface,
871 				net_sprint_ipv4_addr((struct in_addr *)addr),
872 				lifetime, is_default);
873 		}
874 
875 		router = &routers[i];
876 		goto out;
877 	}
878 
879 out:
880 	k_mutex_unlock(&lock);
881 
882 	return router;
883 }
884 
885 static bool iface_router_rm(struct net_if_router *router)
886 {
887 	bool ret = false;
888 
889 	k_mutex_lock(&lock, K_FOREVER);
890 
891 	if (!router->is_used) {
892 		goto out;
893 	}
894 
895 	iface_router_notify_deletion(router, "has been removed");
896 
897 	/* We recompute the timer only if the router was time limited */
898 	if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
899 		iface_router_update_timer(k_uptime_get_32());
900 	}
901 
902 	router->is_used = false;
903 	ret = true;
904 
905 out:
906 	k_mutex_unlock(&lock);
907 
908 	return ret;
909 }
910 
911 void net_if_router_rm(struct net_if_router *router)
912 {
913 	k_mutex_lock(&lock, K_FOREVER);
914 
915 	router->is_used = false;
916 
917 	/* FIXME - remove timer */
918 
919 	k_mutex_unlock(&lock);
920 }
921 
922 static struct net_if_router *iface_router_find_default(struct net_if *iface,
923 						       uint8_t family, void *addr)
924 {
925 	struct net_if_router *router = NULL;
926 	int i;
927 
928 	/* Todo: addr will need to be handled */
929 	ARG_UNUSED(addr);
930 
931 	k_mutex_lock(&lock, K_FOREVER);
932 
933 	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
934 		if (!routers[i].is_used ||
935 		    !routers[i].is_default ||
936 		    routers[i].address.family != family) {
937 			continue;
938 		}
939 
940 		if (iface && iface != routers[i].iface) {
941 			continue;
942 		}
943 
944 		router = &routers[i];
945 		goto out;
946 	}
947 
948 out:
949 	k_mutex_unlock(&lock);
950 
951 	return router;
952 }
953 
954 static void iface_router_init(void)
955 {
956 	k_work_init_delayable(&router_timer, iface_router_expired);
957 	sys_slist_init(&active_router_timers);
958 }
959 #else
960 #define iface_router_init(...)
961 #endif
962 
963 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
964 void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
965 			       struct net_if *iface,
966 			       net_if_mcast_callback_t cb)
967 {
968 	k_mutex_lock(&lock, K_FOREVER);
969 
970 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
971 	sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);
972 
973 	mon->iface = iface;
974 	mon->cb = cb;
975 
976 	k_mutex_unlock(&lock);
977 }
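
/* Usage sketch (illustrative only, mcast_cb is a hypothetical callback
 * matching net_if_mcast_callback_t): get notified whenever this interface
 * joins or leaves a multicast group.
 *
 *	static struct net_if_mcast_monitor monitor;
 *
 *	static void mcast_cb(struct net_if *iface,
 *			     const struct net_addr *addr, bool is_joined)
 *	{
 *		NET_DBG("iface %p %s group", iface,
 *			is_joined ? "joined" : "left");
 *	}
 *
 *	net_if_mcast_mon_register(&monitor, iface, mcast_cb);
 */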
978 
979 void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
980 {
981 	k_mutex_lock(&lock, K_FOREVER);
982 
983 	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
984 
985 	k_mutex_unlock(&lock);
986 }
987 
988 void net_if_mcast_monitor(struct net_if *iface,
989 			  const struct net_addr *addr,
990 			  bool is_joined)
991 {
992 	struct net_if_mcast_monitor *mon, *tmp;
993 
994 	k_mutex_lock(&lock, K_FOREVER);
995 
996 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
997 					  mon, tmp, node) {
998 		if (iface == mon->iface) {
999 			mon->cb(iface, addr, is_joined);
1000 		}
1001 	}
1002 
1003 	k_mutex_unlock(&lock);
1004 }
1005 #endif
1006 
1007 #if defined(CONFIG_NET_NATIVE_IPV6)
1008 int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
1009 {
1010 	int ret = 0;
1011 	int i;
1012 
1013 	net_if_lock(iface);
1014 
1015 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1016 		ret = -ENOTSUP;
1017 		goto out;
1018 	}
1019 
1020 	if (iface->config.ip.ipv6) {
1021 		if (ipv6) {
1022 			*ipv6 = iface->config.ip.ipv6;
1023 		}
1024 
1025 		goto out;
1026 	}
1027 
1028 	k_mutex_lock(&lock, K_FOREVER);
1029 
1030 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1031 		if (ipv6_addresses[i].iface) {
1032 			continue;
1033 		}
1034 
1035 		iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
1036 		ipv6_addresses[i].iface = iface;
1037 
1038 		if (ipv6) {
1039 			*ipv6 = &ipv6_addresses[i].ipv6;
1040 		}
1041 
1042 		k_mutex_unlock(&lock);
1043 		goto out;
1044 	}
1045 
1046 	k_mutex_unlock(&lock);
1047 
1048 	ret = -ESRCH;
1049 out:
1050 	net_if_unlock(iface);
1051 
1052 	return ret;
1053 }
1054 
1055 int net_if_config_ipv6_put(struct net_if *iface)
1056 {
1057 	int ret = 0;
1058 	int i;
1059 
1060 	net_if_lock(iface);
1061 
1062 	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
1063 		ret = -ENOTSUP;
1064 		goto out;
1065 	}
1066 
1067 	if (!iface->config.ip.ipv6) {
1068 		ret = -EALREADY;
1069 		goto out;
1070 	}
1071 
1072 	k_mutex_lock(&lock, K_FOREVER);
1073 
1074 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
1075 		if (ipv6_addresses[i].iface != iface) {
1076 			continue;
1077 		}
1078 
1079 		iface->config.ip.ipv6 = NULL;
1080 		ipv6_addresses[i].iface = NULL;
1081 
1082 		k_mutex_unlock(&lock);
1083 		goto out;
1084 	}
1085 
1086 	k_mutex_unlock(&lock);
1087 
1088 	ret = -ESRCH;
1089 out:
1090 	net_if_unlock(iface);
1091 
1092 	return ret;
1093 }
1094 
1095 #if defined(CONFIG_NET_IPV6_MLD)
1096 static void join_mcast_allnodes(struct net_if *iface)
1097 {
1098 	struct in6_addr addr;
1099 	int ret;
1100 
1101 	net_ipv6_addr_create_ll_allnodes_mcast(&addr);
1102 
1103 	ret = net_ipv6_mld_join(iface, &addr);
1104 	if (ret < 0 && ret != -EALREADY) {
1105 		NET_ERR("Cannot join all nodes address %s (%d)",
1106 			net_sprint_ipv6_addr(&addr), ret);
1107 	}
1108 }
1109 
1110 static void join_mcast_solicit_node(struct net_if *iface,
1111 				    struct in6_addr *my_addr)
1112 {
1113 	struct in6_addr addr;
1114 	int ret;
1115 
1116 	/* Join to needed multicast groups, RFC 4291 ch 2.8 */
1117 	net_ipv6_addr_create_solicited_node(my_addr, &addr);
1118 
1119 	ret = net_ipv6_mld_join(iface, &addr);
1120 	if (ret < 0 && ret != -EALREADY) {
1121 		NET_ERR("Cannot join solicit node address %s (%d)",
1122 			net_sprint_ipv6_addr(&addr), ret);
1123 	}
1124 }
1125 
1126 static void leave_mcast_all(struct net_if *iface)
1127 {
1128 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1129 	int i;
1130 
1131 	if (!ipv6) {
1132 		return;
1133 	}
1134 
1135 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1136 		if (!ipv6->mcast[i].is_used ||
1137 		    !ipv6->mcast[i].is_joined) {
1138 			continue;
1139 		}
1140 
1141 		net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
1142 	}
1143 }
1144 
1145 static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
1146 {
1147 	enum net_l2_flags flags = 0;
1148 
1149 	flags = l2_flags_get(iface);
1150 	if (flags & NET_L2_MULTICAST) {
1151 		join_mcast_allnodes(iface);
1152 
1153 		if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
1154 			join_mcast_solicit_node(iface, addr);
1155 		}
1156 	}
1157 }
1158 #else
1159 #define join_mcast_allnodes(...)
1160 #define join_mcast_solicit_node(...)
1161 #define leave_mcast_all(...)
1162 #define join_mcast_nodes(...)
1163 #endif /* CONFIG_NET_IPV6_MLD */
1164 
1165 #if defined(CONFIG_NET_IPV6_DAD)
1166 #define DAD_TIMEOUT 100U /* ms */
1167 
1168 static void dad_timeout(struct k_work *work)
1169 {
1170 	uint32_t current_time = k_uptime_get_32();
1171 	struct net_if_addr *ifaddr, *next;
1172 	int32_t delay = -1;
1173 	sys_slist_t expired_list;
1174 
1175 	ARG_UNUSED(work);
1176 
1177 	sys_slist_init(&expired_list);
1178 
1179 	k_mutex_lock(&lock, K_FOREVER);
1180 
1181 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
1182 					  ifaddr, next, dad_node) {
1183 		/* DAD entries are ordered by construction.  Stop when
1184 		 * we find one that hasn't expired.
1185 		 */
1186 		delay = (int32_t)(ifaddr->dad_start +
1187 				  DAD_TIMEOUT - current_time);
1188 		if (delay > 0) {
1189 			break;
1190 		}
1191 
1192 		/* Removing the ifaddr from active_dad_timers list */
1193 		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);
1194 		sys_slist_append(&expired_list, &ifaddr->dad_node);
1195 
1196 		ifaddr = NULL;
1197 	}
1198 
1199 	if ((ifaddr != NULL) && (delay > 0)) {
1200 		k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
1201 	}
1202 
1203 	k_mutex_unlock(&lock);
1204 
1205 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ifaddr, dad_node) {
1206 		struct net_if_addr *tmp;
1207 		struct net_if *iface;
1208 
1209 		NET_DBG("DAD succeeded for %s",
1210 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1211 
1212 		ifaddr->addr_state = NET_ADDR_PREFERRED;
1213 
1214 		/* Because we do not know the interface at this point,
1215 		 * we need to look it up.
1216 		 */
1217 		iface = NULL;
1218 		tmp = net_if_ipv6_addr_lookup(&ifaddr->address.in6_addr,
1219 					      &iface);
1220 		if (tmp == ifaddr) {
1221 			net_mgmt_event_notify_with_info(
1222 					NET_EVENT_IPV6_DAD_SUCCEED,
1223 					iface, &ifaddr->address.in6_addr,
1224 					sizeof(struct in6_addr));
1225 
1226 			/* The address gets added to the neighbor cache, which
1227 			 * is not needed in this case as the address is our own.
1228 			 */
1229 			net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
1230 		}
1231 	}
1232 }
1233 
1234 static void net_if_ipv6_start_dad(struct net_if *iface,
1235 				  struct net_if_addr *ifaddr)
1236 {
1237 	ifaddr->addr_state = NET_ADDR_TENTATIVE;
1238 
1239 	if (net_if_is_up(iface)) {
1240 		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
1241 			iface,
1242 			net_sprint_ll_addr(
1243 					   net_if_get_link_addr(iface)->addr,
1244 					   net_if_get_link_addr(iface)->len),
1245 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1246 
1247 		ifaddr->dad_count = 1U;
1248 
1249 		if (!net_ipv6_start_dad(iface, ifaddr)) {
1250 			ifaddr->dad_start = k_uptime_get_32();
1251 
1252 			k_mutex_lock(&lock, K_FOREVER);
1253 			sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
1254 			k_mutex_unlock(&lock);
1255 
1256 			/* FUTURE: use schedule, not reschedule. */
1257 			if (!k_work_delayable_remaining_get(&dad_timer)) {
1258 				k_work_reschedule(&dad_timer,
1259 						  K_MSEC(DAD_TIMEOUT));
1260 			}
1261 		}
1262 	} else {
1263 		NET_DBG("Interface %p is down, starting DAD for %s later.",
1264 			iface,
1265 			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1266 	}
1267 }
1268 
1269 void net_if_start_dad(struct net_if *iface)
1270 {
1271 	struct net_if_addr *ifaddr;
1272 	struct net_if_ipv6 *ipv6;
1273 	struct in6_addr addr = { };
1274 	int ret, i;
1275 
1276 	net_if_lock(iface);
1277 
1278 	NET_DBG("Starting DAD for iface %p", iface);
1279 
1280 	ret = net_if_config_ipv6_get(iface, &ipv6);
1281 	if (ret < 0) {
1282 		if (ret != -ENOTSUP) {
1283 			NET_WARN("Cannot do DAD, IPv6 config is not valid.");
1284 		}
1285 
1286 		goto out;
1287 	}
1288 
1289 	if (!ipv6) {
1290 		goto out;
1291 	}
1292 
1293 	net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));
1294 
1295 	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
1296 	if (!ifaddr) {
1297 		NET_ERR("Cannot add %s address to interface %p, DAD fails",
1298 			net_sprint_ipv6_addr(&addr), iface);
1299 	}
1300 
1301 	/* Start DAD for all the addresses that were added earlier when
1302 	 * the interface was down.
1303 	 */
1304 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1305 		if (!ipv6->unicast[i].is_used ||
1306 		    ipv6->unicast[i].address.family != AF_INET6 ||
1307 		    &ipv6->unicast[i] == ifaddr ||
1308 		    net_ipv6_is_addr_loopback(
1309 			    &ipv6->unicast[i].address.in6_addr)) {
1310 			continue;
1311 		}
1312 
1313 		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1314 	}
1315 
1316 out:
1317 	net_if_unlock(iface);
1318 }
1319 
1320 void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
1321 {
1322 	struct net_if_addr *ifaddr;
1323 
1324 	net_if_lock(iface);
1325 
1326 	ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
1327 	if (!ifaddr) {
1328 		NET_ERR("Cannot find %s address in interface %p",
1329 			net_sprint_ipv6_addr(addr), iface);
1330 		goto out;
1331 	}
1332 
1333 
1334 	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
1335 					&ifaddr->address.in6_addr,
1336 					sizeof(struct in6_addr));
1337 
1338 	net_if_ipv6_addr_rm(iface, addr);
1339 
1340 out:
1341 	net_if_unlock(iface);
1342 }
1343 
1344 static inline void iface_ipv6_dad_init(void)
1345 {
1346 	k_work_init_delayable(&dad_timer, dad_timeout);
1347 	sys_slist_init(&active_dad_timers);
1348 }
1349 
1350 #else
1351 static inline void net_if_ipv6_start_dad(struct net_if *iface,
1352 					 struct net_if_addr *ifaddr)
1353 {
1354 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1355 }
1356 
1357 #define iface_ipv6_dad_init(...)
1358 #endif /* CONFIG_NET_IPV6_DAD */
1359 
1360 #if defined(CONFIG_NET_IPV6_ND)
1361 #define RS_TIMEOUT (1U * MSEC_PER_SEC)
1362 #define RS_COUNT 3
1363 
1364 static void rs_timeout(struct k_work *work)
1365 {
1366 	uint32_t current_time = k_uptime_get_32();
1367 	struct net_if_ipv6 *ipv6, *next;
1368 	int32_t delay = -1;
1369 	sys_slist_t expired_list;
1370 
1371 	ARG_UNUSED(work);
1372 
1373 	sys_slist_init(&expired_list);
1374 
1375 	k_mutex_lock(&lock, K_FOREVER);
1376 
1377 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
1378 					  ipv6, next, rs_node) {
1379 		/* RS entries are ordered by construction.  Stop when
1380 		 * we find one that hasn't expired.
1381 		 */
1382 		delay = (int32_t)(ipv6->rs_start + RS_TIMEOUT - current_time);
1383 		if (delay > 0) {
1384 			break;
1385 		}
1386 
1387 		/* Removing the ipv6 from active_rs_timers list */
1388 		sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);
1389 		sys_slist_append(&expired_list, &ipv6->rs_node);
1390 
1391 		ipv6 = NULL;
1392 	}
1393 
1394 	if ((ipv6 != NULL) && (delay > 0)) {
1395 		k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
1396 						    RS_TIMEOUT - current_time));
1397 	}
1398 
1399 	k_mutex_unlock(&lock);
1400 
1401 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ipv6, rs_node) {
1402 		struct net_if *iface = NULL;
1403 
1404 		/* Did not receive RA yet. */
1405 		ipv6->rs_count++;
1406 
1407 		STRUCT_SECTION_FOREACH(net_if, tmp) {
1408 			if (tmp->config.ip.ipv6 == ipv6) {
1409 				iface = tmp;
1410 				break;
1411 			}
1412 		}
1413 
1414 		if (iface) {
1415 			NET_DBG("RS no response, iface %p count %d",
1416 				iface, ipv6->rs_count);
1417 			if (ipv6->rs_count < RS_COUNT) {
1418 				net_if_start_rs(iface);
1419 			}
1420 		} else {
1421 			NET_DBG("Interface IPv6 config %p not found", ipv6);
1422 		}
1423 	}
1424 }
1425 
1426 void net_if_start_rs(struct net_if *iface)
1427 {
1428 	struct net_if_ipv6 *ipv6;
1429 
1430 	net_if_lock(iface);
1431 
1432 	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1433 		goto out;
1434 	}
1435 
1436 	ipv6 = iface->config.ip.ipv6;
1437 	if (!ipv6) {
1438 		goto out;
1439 	}
1440 
1441 	NET_DBG("Starting ND/RS for iface %p", iface);
1442 
1443 	if (!net_ipv6_start_rs(iface)) {
1444 		ipv6->rs_start = k_uptime_get_32();
1445 
1446 		k_mutex_lock(&lock, K_FOREVER);
1447 		sys_slist_append(&active_rs_timers, &ipv6->rs_node);
1448 		k_mutex_unlock(&lock);
1449 
1450 		/* FUTURE: use schedule, not reschedule. */
1451 		if (!k_work_delayable_remaining_get(&rs_timer)) {
1452 			k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
1453 		}
1454 	}
1455 
1456 out:
1457 	net_if_unlock(iface);
1458 }
1459 
1460 void net_if_stop_rs(struct net_if *iface)
1461 {
1462 	struct net_if_ipv6 *ipv6;
1463 
1464 	net_if_lock(iface);
1465 
1466 	ipv6 = iface->config.ip.ipv6;
1467 	if (!ipv6) {
1468 		goto out;
1469 	}
1470 
1471 	NET_DBG("Stopping ND/RS for iface %p", iface);
1472 
1473 	k_mutex_lock(&lock, K_FOREVER);
1474 	sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
1475 	k_mutex_unlock(&lock);
1476 
1477 out:
1478 	net_if_unlock(iface);
1479 }
1480 
1481 static inline void iface_ipv6_nd_init(void)
1482 {
1483 	k_work_init_delayable(&rs_timer, rs_timeout);
1484 	sys_slist_init(&active_rs_timers);
1485 }
1486 
1487 #else
1488 #define net_if_start_rs(...)
1489 #define net_if_stop_rs(...)
1490 #define iface_ipv6_nd_init(...)
1491 #endif /* CONFIG_NET_IPV6_ND */
1492 
1493 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
1494 					    struct net_if **ret)
1495 {
1496 	struct net_if_addr *ifaddr = NULL;
1497 
1498 	STRUCT_SECTION_FOREACH(net_if, iface) {
1499 		struct net_if_ipv6 *ipv6;
1500 		int i;
1501 
1502 		net_if_lock(iface);
1503 
1504 		ipv6 = iface->config.ip.ipv6;
1505 		if (!ipv6) {
1506 			net_if_unlock(iface);
1507 			continue;
1508 		}
1509 
1510 		for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1511 			if (!ipv6->unicast[i].is_used ||
1512 			    ipv6->unicast[i].address.family != AF_INET6) {
1513 				continue;
1514 			}
1515 
1516 			if (net_ipv6_is_prefix(
1517 				    addr->s6_addr,
1518 				    ipv6->unicast[i].address.in6_addr.s6_addr,
1519 				    128)) {
1520 
1521 				if (ret) {
1522 					*ret = iface;
1523 				}
1524 
1525 				ifaddr = &ipv6->unicast[i];
1526 				net_if_unlock(iface);
1527 				goto out;
1528 			}
1529 		}
1530 
1531 		net_if_unlock(iface);
1532 	}
1533 
1534 out:
1535 	return ifaddr;
1536 }
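
/* Usage sketch (illustrative only, hypothetical address): check whether an
 * address is configured on any interface, and on which one.
 *
 *	struct net_if *iface = NULL;
 *	struct in6_addr addr;
 *	struct net_if_addr *ifaddr;
 *
 *	net_addr_pton(AF_INET6, "2001:db8::1", &addr);
 *	ifaddr = net_if_ipv6_addr_lookup(&addr, &iface);
 */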
1537 
1538 struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
1539 						     struct in6_addr *addr)
1540 {
1541 	struct net_if_addr *ifaddr = NULL;
1542 	struct net_if_ipv6 *ipv6;
1543 	int i;
1544 
1545 	net_if_lock(iface);
1546 
1547 	ipv6 = iface->config.ip.ipv6;
1548 	if (!ipv6) {
1549 		goto out;
1550 	}
1551 
1552 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1553 		if (!ipv6->unicast[i].is_used ||
1554 		    ipv6->unicast[i].address.family != AF_INET6) {
1555 			continue;
1556 		}
1557 
1558 		if (net_ipv6_is_prefix(
1559 			    addr->s6_addr,
1560 			    ipv6->unicast[i].address.in6_addr.s6_addr,
1561 			    128)) {
1562 			ifaddr = &ipv6->unicast[i];
1563 			goto out;
1564 		}
1565 	}
1566 
1567 out:
1568 	net_if_unlock(iface);
1569 
1570 	return ifaddr;
1571 }
1572 
1573 int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
1574 {
1575 	struct net_if *iface = NULL;
1576 	struct net_if_addr *if_addr;
1577 
1578 	if_addr = net_if_ipv6_addr_lookup(addr, &iface);
1579 	if (!if_addr) {
1580 		return 0;
1581 	}
1582 
1583 	return net_if_get_by_iface(iface);
1584 }
1585 
1586 #ifdef CONFIG_USERSPACE
1587 static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
1588 					  const struct in6_addr *addr)
1589 {
1590 	struct in6_addr addr_v6;
1591 
1592 	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1593 
1594 	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
1595 }
1596 #include <syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
1597 #endif
1598 
1599 static void address_expired(struct net_if_addr *ifaddr)
1600 {
1601 	NET_DBG("IPv6 address %s is deprecated",
1602 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
1603 
1604 	ifaddr->addr_state = NET_ADDR_DEPRECATED;
1605 
1606 	sys_slist_find_and_remove(&active_address_lifetime_timers,
1607 				  &ifaddr->lifetime.node);
1608 
1609 	net_timeout_set(&ifaddr->lifetime, 0, 0);
1610 }
1611 
1612 static void address_lifetime_timeout(struct k_work *work)
1613 {
1614 	uint32_t next_update = UINT32_MAX;
1615 	uint32_t current_time = k_uptime_get_32();
1616 	struct net_if_addr *current, *next;
1617 
1618 	ARG_UNUSED(work);
1619 
1620 	k_mutex_lock(&lock, K_FOREVER);
1621 
1622 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
1623 					  current, next, lifetime.node) {
1624 		struct net_timeout *timeout = &current->lifetime;
1625 		uint32_t this_update = net_timeout_evaluate(timeout,
1626 							     current_time);
1627 
1628 		if (this_update == 0U) {
1629 			address_expired(current);
1630 			continue;
1631 		}
1632 
1633 		if (this_update < next_update) {
1634 			next_update = this_update;
1635 		}
1636 
1637 		if (current == next) {
1638 			break;
1639 		}
1640 	}
1641 
1642 	if (next_update != UINT32_MAX) {
1643 		NET_DBG("Waiting for %d ms", (int32_t)next_update);
1644 
1645 		k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
1646 	}
1647 
1648 	k_mutex_unlock(&lock);
1649 }
1650 
1651 #if defined(CONFIG_NET_TEST)
1652 void net_address_lifetime_timeout(void)
1653 {
1654 	address_lifetime_timeout(NULL);
1655 }
1656 #endif
1657 
1658 static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
1659 {
1660 	sys_slist_append(&active_address_lifetime_timers,
1661 			 &ifaddr->lifetime.node);
1662 
1663 	net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
1664 	k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
1665 }
1666 
1667 void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
1668 				      uint32_t vlifetime)
1669 {
1670 	k_mutex_lock(&lock, K_FOREVER);
1671 
1672 	NET_DBG("Updating expire time of %s by %u secs",
1673 		net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
1674 		vlifetime);
1675 
1676 	ifaddr->addr_state = NET_ADDR_PREFERRED;
1677 
1678 	address_start_timer(ifaddr, vlifetime);
1679 
1680 	k_mutex_unlock(&lock);
1681 }
1682 
1683 static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
1684 					  struct in6_addr *addr)
1685 {
1686 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1687 	int i;
1688 
1689 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1690 		if (!ipv6->unicast[i].is_used) {
1691 			continue;
1692 		}
1693 
1694 		if (net_ipv6_addr_cmp(
1695 			    addr, &ipv6->unicast[i].address.in6_addr)) {
1696 
1697 			return &ipv6->unicast[i];
1698 		}
1699 	}
1700 
1701 	return NULL;
1702 }
1703 
1704 static inline void net_if_addr_init(struct net_if_addr *ifaddr,
1705 				    struct in6_addr *addr,
1706 				    enum net_addr_type addr_type,
1707 				    uint32_t vlifetime)
1708 {
1709 	ifaddr->is_used = true;
1710 	ifaddr->address.family = AF_INET6;
1711 	ifaddr->addr_type = addr_type;
1712 	net_ipaddr_copy(&ifaddr->address.in6_addr, addr);
1713 
1714 	/* FIXME - set the mcast addr for this node */
1715 
1716 	if (vlifetime) {
1717 		ifaddr->is_infinite = false;
1718 
1719 		NET_DBG("Expiring %s in %u secs",
1720 			net_sprint_ipv6_addr(addr),
1721 			vlifetime);
1722 
1723 		net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
1724 	} else {
1725 		ifaddr->is_infinite = true;
1726 	}
1727 }
1728 
1729 struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
1730 					 struct in6_addr *addr,
1731 					 enum net_addr_type addr_type,
1732 					 uint32_t vlifetime)
1733 {
1734 	struct net_if_addr *ifaddr = NULL;
1735 	struct net_if_ipv6 *ipv6;
1736 	int i;
1737 
1738 	net_if_lock(iface);
1739 
1740 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1741 		goto out;
1742 	}
1743 
1744 	ifaddr = ipv6_addr_find(iface, addr);
1745 	if (ifaddr) {
1746 		goto out;
1747 	}
1748 
1749 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1750 		if (ipv6->unicast[i].is_used) {
1751 			continue;
1752 		}
1753 
1754 		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
1755 				 vlifetime);
1756 
1757 		NET_DBG("[%d] interface %p address %s type %s added", i,
1758 			iface, net_sprint_ipv6_addr(addr),
1759 			net_addr_type2str(addr_type));
1760 
1761 		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
1762 		    !net_ipv6_is_addr_loopback(addr) &&
1763 		    !net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1764 			/* RFC 4862 5.4.2
1765 			 * Before sending a Neighbor Solicitation, an interface
1766 			 * MUST join the all-nodes multicast address and the
1767 			 * solicited-node multicast address of the tentative
1768 			 * address.
1769 			 */
1770 			/* The allnodes multicast group is only joined once as
1771 			 * net_ipv6_mcast_join() checks if we have already
1772 			 * joined.
1773 			 */
1774 			join_mcast_nodes(iface,
1775 					 &ipv6->unicast[i].address.in6_addr);
1776 
1777 			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
1778 		} else {
1779 			/* If DAD is not done for point-to-point links, then
1780 			 * the address is usable immediately.
1781 			 */
1782 			ipv6->unicast[i].addr_state = NET_ADDR_PREFERRED;
1783 		}
1784 
1785 		net_mgmt_event_notify_with_info(
1786 			NET_EVENT_IPV6_ADDR_ADD, iface,
1787 			&ipv6->unicast[i].address.in6_addr,
1788 			sizeof(struct in6_addr));
1789 
1790 		ifaddr = &ipv6->unicast[i];
1791 		goto out;
1792 	}
1793 
1794 out:
1795 	net_if_unlock(iface);
1796 
1797 	return ifaddr;
1798 }
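
/* Usage sketch (illustrative only, hypothetical address): add a manually
 * configured address with a 2 hour valid lifetime; a lifetime of 0 makes
 * the address infinite.
 *
 *	struct in6_addr addr;
 *
 *	net_addr_pton(AF_INET6, "2001:db8::2", &addr);
 *	(void)net_if_ipv6_addr_add(iface, &addr, NET_ADDR_MANUAL, 7200);
 */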
1799 
1800 bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
1801 {
1802 	bool ret = false;
1803 	struct net_if_ipv6 *ipv6;
1804 	struct in6_addr maddr;
1805 	int found = -1;
1806 	unsigned int maddr_count = 0;
1807 
1808 	NET_ASSERT(addr);
1809 
1810 	net_if_lock(iface);
1811 
1812 	ipv6 = iface->config.ip.ipv6;
1813 	if (!ipv6) {
1814 		goto out;
1815 	}
1816 
1817 	net_ipv6_addr_create_solicited_node(addr, &maddr);
1818 
1819 	for (int i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1820 		struct in6_addr unicast_maddr;
1821 
1822 		if (!ipv6->unicast[i].is_used) {
1823 			continue;
1824 		}
1825 
1826 		/* count how many of the used unicast addresses share this
1827 		 * solicited-node multicast address
1828 		 */
1829 		net_ipv6_addr_create_solicited_node(&ipv6->unicast[i].address.in6_addr,
1830 						    &unicast_maddr);
1831 		if (net_ipv6_addr_cmp(&maddr, &unicast_maddr)) {
1832 			maddr_count++;
1833 		}
1834 
1835 		if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr,
1836 				       addr)) {
1837 			continue;
1838 		}
1839 
1840 		found = i;
1841 	}
1842 
1843 	if (found >= 0) {
1844 		if (!ipv6->unicast[found].is_infinite) {
1845 			k_mutex_lock(&lock, K_FOREVER);
1846 
1847 			sys_slist_find_and_remove(
1848 				&active_address_lifetime_timers,
1849 				&ipv6->unicast[found].lifetime.node);
1850 
1851 			if (sys_slist_is_empty(
1852 				    &active_address_lifetime_timers)) {
1853 				k_work_cancel_delayable(
1854 					&address_lifetime_timer);
1855 			}
1856 
1857 			k_mutex_unlock(&lock);
1858 		}
1859 
1860 #if defined(CONFIG_NET_IPV6_DAD)
1861 		if (!net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
1862 			k_mutex_lock(&lock, K_FOREVER);
1863 			sys_slist_find_and_remove(&active_dad_timers,
1864 						  &ipv6->unicast[found].dad_node);
1865 			k_mutex_unlock(&lock);
1866 		}
1867 #endif
1868 
1869 		ipv6->unicast[found].is_used = false;
1870 
1871 		if (maddr_count == 1) {
1872 			/* remove the solicited-node multicast address only if no other
1873 			 * unicast address is also using it
1874 			 */
1875 			net_if_ipv6_maddr_rm(iface, &maddr);
1876 		}
1877 
1878 		NET_DBG("[%d] interface %p address %s type %s removed",
1879 			found, iface, net_sprint_ipv6_addr(addr),
1880 			net_addr_type2str(ipv6->unicast[found].addr_type));
1881 
1882 		/* Using the IPv6 address pointer here can give false
1883 		 * info if someone adds a new IP address into this position
1884 		 * in the address array. This is quite unlikely though.
1885 		 */
1886 		net_mgmt_event_notify_with_info(
1887 			NET_EVENT_IPV6_ADDR_DEL,
1888 			iface,
1889 			&ipv6->unicast[found].address.in6_addr,
1890 			sizeof(struct in6_addr));
1891 
1892 		ret = true;
1893 		goto out;
1894 	}
1895 
1896 out:
1897 	net_if_unlock(iface);
1898 
1899 	return ret;
1900 }
1901 
1902 bool z_impl_net_if_ipv6_addr_add_by_index(int index,
1903 					  struct in6_addr *addr,
1904 					  enum net_addr_type addr_type,
1905 					  uint32_t vlifetime)
1906 {
1907 	struct net_if *iface;
1908 
1909 	iface = net_if_get_by_index(index);
1910 	if (!iface) {
1911 		return false;
1912 	}
1913 
1914 	return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
1915 		true : false;
1916 }
1917 
1918 #ifdef CONFIG_USERSPACE
1919 bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
1920 					  struct in6_addr *addr,
1921 					  enum net_addr_type addr_type,
1922 					  uint32_t vlifetime)
1923 {
1924 	struct in6_addr addr_v6;
1925 	struct net_if *iface;
1926 
1927 	iface = z_vrfy_net_if_get_by_index(index);
1928 	if (!iface) {
1929 		return false;
1930 	}
1931 
1932 	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1933 
1934 	return z_impl_net_if_ipv6_addr_add_by_index(index,
1935 						    &addr_v6,
1936 						    addr_type,
1937 						    vlifetime);
1938 }
1939 
1940 #include <syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
1941 #endif /* CONFIG_USERSPACE */
1942 
1943 bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
1944 					 const struct in6_addr *addr)
1945 {
1946 	struct net_if *iface;
1947 
1948 	iface = net_if_get_by_index(index);
1949 	if (!iface) {
1950 		return false;
1951 	}
1952 
1953 	return net_if_ipv6_addr_rm(iface, addr);
1954 }
1955 
1956 #ifdef CONFIG_USERSPACE
1957 bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
1958 					 const struct in6_addr *addr)
1959 {
1960 	struct in6_addr addr_v6;
1961 	struct net_if *iface;
1962 
1963 	iface = z_vrfy_net_if_get_by_index(index);
1964 	if (!iface) {
1965 		return false;
1966 	}
1967 
1968 	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));
1969 
1970 	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
1971 }
1972 
1973 #include <syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
1974 #endif /* CONFIG_USERSPACE */
1975 
void net_if_ipv6_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
1977 			      void *user_data)
1978 {
1979 	struct net_if_ipv6 *ipv6;
1980 
1981 	if (iface == NULL) {
1982 		return;
1983 	}
1984 
1985 	net_if_lock(iface);
1986 
1987 	ipv6 = iface->config.ip.ipv6;
1988 	if (ipv6 == NULL) {
1989 		goto out;
1990 	}
1991 
1992 	for (int i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1993 		struct net_if_addr *if_addr = &ipv6->unicast[i];
1994 
1995 		if (!if_addr->is_used) {
1996 			continue;
1997 		}
1998 
1999 		cb(iface, if_addr, user_data);
2000 	}
2001 
2002 out:
2003 	net_if_unlock(iface);
2004 }
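
/* Illustrative sketch (not part of the build): visiting every unicast
 * address of an interface with net_if_ipv6_addr_foreach(). The callback name
 * and the use of the default interface are assumptions made only for this
 * example.
 *
 *   static void print_ipv6_addr(struct net_if *iface,
 *                               struct net_if_addr *if_addr, void *user_data)
 *   {
 *           ARG_UNUSED(user_data);
 *
 *           NET_DBG("iface %p has %s", iface,
 *                   net_sprint_ipv6_addr(&if_addr->address.in6_addr));
 *   }
 *
 *   net_if_ipv6_addr_foreach(net_if_get_default(), print_ipv6_addr, NULL);
 */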
2005 
struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
2007 						const struct in6_addr *addr)
2008 {
2009 	struct net_if_mcast_addr *ifmaddr = NULL;
2010 	struct net_if_ipv6 *ipv6;
2011 	int i;
2012 
2013 	net_if_lock(iface);
2014 
2015 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2016 		goto out;
2017 	}
2018 
2019 	if (!net_ipv6_is_addr_mcast(addr)) {
2020 		NET_DBG("Address %s is not a multicast address.",
2021 			net_sprint_ipv6_addr(addr));
2022 		goto out;
2023 	}
2024 
2025 	if (net_if_ipv6_maddr_lookup(addr, &iface)) {
			NET_WARN("Multicast address %s is already registered.",
2027 			net_sprint_ipv6_addr(addr));
2028 		goto out;
2029 	}
2030 
2031 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2032 		if (ipv6->mcast[i].is_used) {
2033 			continue;
2034 		}
2035 
2036 		ipv6->mcast[i].is_used = true;
2037 		ipv6->mcast[i].address.family = AF_INET6;
2038 		memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
2039 
2040 		NET_DBG("[%d] interface %p address %s added", i, iface,
2041 			net_sprint_ipv6_addr(addr));
2042 
2043 		net_mgmt_event_notify_with_info(
2044 			NET_EVENT_IPV6_MADDR_ADD, iface,
2045 			&ipv6->mcast[i].address.in6_addr,
2046 			sizeof(struct in6_addr));
2047 
2048 		ifmaddr = &ipv6->mcast[i];
2049 		goto out;
2050 	}
2051 
2052 out:
2053 	net_if_unlock(iface);
2054 
2055 	return ifmaddr;
2056 }
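
/* Illustrative sketch (not part of the build): registering an IPv6 multicast
 * group with net_if_ipv6_maddr_add() and marking it joined. The group
 * address literal and the use of the default interface are assumptions made
 * only for this example.
 *
 *   struct in6_addr group;
 *   struct net_if_mcast_addr *maddr;
 *
 *   net_addr_pton(AF_INET6, "ff02::1:ff00:1", &group);
 *
 *   maddr = net_if_ipv6_maddr_add(net_if_get_default(), &group);
 *   if (maddr != NULL) {
 *           net_if_ipv6_maddr_join(net_if_get_default(), maddr);
 *   }
 */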
2057 
bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
2059 {
2060 	bool ret = false;
2061 	struct net_if_ipv6 *ipv6;
2062 	int i;
2063 
2064 	net_if_lock(iface);
2065 
2066 	ipv6 = iface->config.ip.ipv6;
2067 	if (!ipv6) {
2068 		goto out;
2069 	}
2070 
2071 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2072 		if (!ipv6->mcast[i].is_used) {
2073 			continue;
2074 		}
2075 
2076 		if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
2077 				       addr)) {
2078 			continue;
2079 		}
2080 
2081 		ipv6->mcast[i].is_used = false;
2082 
2083 		NET_DBG("[%d] interface %p address %s removed",
2084 			i, iface, net_sprint_ipv6_addr(addr));
2085 
2086 		net_mgmt_event_notify_with_info(
2087 			NET_EVENT_IPV6_MADDR_DEL, iface,
2088 			&ipv6->mcast[i].address.in6_addr,
2089 			sizeof(struct in6_addr));
2090 
2091 		ret = true;
2092 		goto out;
2093 	}
2094 
2095 out:
2096 	net_if_unlock(iface);
2097 
2098 	return ret;
2099 }
2100 
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
2102 						   struct net_if **ret)
2103 {
2104 	struct net_if_mcast_addr *ifmaddr = NULL;
2105 
2106 	STRUCT_SECTION_FOREACH(net_if, iface) {
2107 		struct net_if_ipv6 *ipv6;
2108 		int i;
2109 
2110 		if (ret && *ret && iface != *ret) {
2111 			continue;
2112 		}
2113 
2114 		net_if_lock(iface);
2115 
2116 		ipv6 = iface->config.ip.ipv6;
2117 		if (!ipv6) {
2118 			net_if_unlock(iface);
2119 			continue;
2120 		}
2121 
2122 		for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2123 			if (!ipv6->mcast[i].is_used ||
2124 			    ipv6->mcast[i].address.family != AF_INET6) {
2125 				continue;
2126 			}
2127 
2128 			if (net_ipv6_is_prefix(
2129 				    maddr->s6_addr,
2130 				    ipv6->mcast[i].address.in6_addr.s6_addr,
2131 				    128)) {
2132 				if (ret) {
2133 					*ret = iface;
2134 				}
2135 
2136 				ifmaddr = &ipv6->mcast[i];
2137 				net_if_unlock(iface);
2138 				goto out;
2139 			}
2140 		}
2141 
2142 		net_if_unlock(iface);
2143 	}
2144 
2145 out:
2146 	return ifmaddr;
2147 }
2148 
void net_if_ipv6_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
2150 {
2151 	NET_ASSERT(iface);
2152 	NET_ASSERT(addr);
2153 
2154 	net_if_lock(iface);
2155 	addr->is_joined = false;
2156 	net_if_unlock(iface);
2157 }
2158 
void net_if_ipv6_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
2160 {
2161 	NET_ASSERT(iface);
2162 	NET_ASSERT(addr);
2163 
2164 	net_if_lock(iface);
2165 	addr->is_joined = true;
2166 	net_if_unlock(iface);
2167 }
2168 
static void remove_prefix_addresses(struct net_if *iface,
2170 				    struct net_if_ipv6 *ipv6,
2171 				    struct in6_addr *addr,
2172 				    uint8_t len)
2173 {
2174 	int i;
2175 
2176 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2177 		if (!ipv6->unicast[i].is_used ||
2178 		    ipv6->unicast[i].address.family != AF_INET6 ||
2179 		    ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) {
2180 			continue;
2181 		}
2182 
2183 		if (net_ipv6_is_prefix(
2184 				addr->s6_addr,
2185 				ipv6->unicast[i].address.in6_addr.s6_addr,
2186 				len)) {
2187 			net_if_ipv6_addr_rm(iface,
2188 					    &ipv6->unicast[i].address.in6_addr);
2189 		}
2190 	}
2191 }
2192 
static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
2194 {
2195 	struct net_if_ipv6 *ipv6;
2196 
2197 	net_if_lock(ifprefix->iface);
2198 
2199 	NET_DBG("Prefix %s/%d expired",
2200 		net_sprint_ipv6_addr(&ifprefix->prefix),
2201 		ifprefix->len);
2202 
2203 	ifprefix->is_used = false;
2204 
	if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
		/* Release the interface lock taken above before bailing out. */
		goto out;
	}
2208 
	/* Also remove all autoconfigured addresses if they have the same
	 * prefix.
	 */
2211 	remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
2212 				ifprefix->len);
2213 
2214 	if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2215 		struct net_event_ipv6_prefix info;
2216 
2217 		net_ipaddr_copy(&info.addr, &ifprefix->prefix);
2218 		info.len = ifprefix->len;
2219 		info.lifetime = 0;
2220 
2221 		net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2222 						ifprefix->iface,
2223 						(const void *) &info,
2224 						sizeof(struct net_event_ipv6_prefix));
2225 	} else {
2226 		net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface);
2227 	}
2228 
out:
	net_if_unlock(ifprefix->iface);
2230 }
2231 
static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
2233 {
2234 	k_mutex_lock(&lock, K_FOREVER);
2235 
2236 	NET_DBG("IPv6 prefix %s/%d removed",
2237 		net_sprint_ipv6_addr(&ifprefix->prefix),
2238 		ifprefix->len);
2239 
2240 	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2241 				  &ifprefix->lifetime.node);
2242 
2243 	net_timeout_set(&ifprefix->lifetime, 0, 0);
2244 
2245 	k_mutex_unlock(&lock);
2246 }
2247 
static void prefix_lifetime_timeout(struct k_work *work)
2249 {
2250 	uint32_t next_update = UINT32_MAX;
2251 	uint32_t current_time = k_uptime_get_32();
2252 	struct net_if_ipv6_prefix *current, *next;
2253 	sys_slist_t expired_list;
2254 
2255 	ARG_UNUSED(work);
2256 
2257 	sys_slist_init(&expired_list);
2258 
2259 	k_mutex_lock(&lock, K_FOREVER);
2260 
2261 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
2262 					  current, next, lifetime.node) {
2263 		struct net_timeout *timeout = &current->lifetime;
2264 		uint32_t this_update = net_timeout_evaluate(timeout,
2265 							    current_time);
2266 
2267 		if (this_update == 0U) {
2268 			sys_slist_find_and_remove(
2269 				&active_prefix_lifetime_timers,
2270 				&current->lifetime.node);
2271 			sys_slist_append(&expired_list,
2272 					 &current->lifetime.node);
2273 			continue;
2274 		}
2275 
2276 		if (this_update < next_update) {
2277 			next_update = this_update;
2278 		}
2279 
2280 		if (current == next) {
2281 			break;
2282 		}
2283 	}
2284 
2285 	if (next_update != UINT32_MAX) {
2286 		k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
2287 	}
2288 
2289 	k_mutex_unlock(&lock);
2290 
2291 	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, current, lifetime.node) {
2292 		prefix_lifetime_expired(current);
2293 	}
2294 }
2295 
static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
2297 			       uint32_t lifetime)
2298 {
2299 	k_mutex_lock(&lock, K_FOREVER);
2300 
2301 	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
2302 					&ifprefix->lifetime.node);
2303 	sys_slist_append(&active_prefix_lifetime_timers,
2304 			 &ifprefix->lifetime.node);
2305 
2306 	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
2307 	k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);
2308 
2309 	k_mutex_unlock(&lock);
2310 }
2311 
static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
2313 						   struct in6_addr *prefix,
2314 						   uint8_t prefix_len)
2315 {
2316 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2317 	int i;
2318 
2319 	if (!ipv6) {
2320 		return NULL;
2321 	}
2322 
2323 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2324 		if (!ipv6->prefix[i].is_used) {
2325 			continue;
2326 		}
2327 
2328 		if (net_ipv6_addr_cmp(prefix, &ipv6->prefix[i].prefix) &&
2329 		    prefix_len == ipv6->prefix[i].len) {
2330 			return &ipv6->prefix[i];
2331 		}
2332 	}
2333 
2334 	return NULL;
2335 }
2336 
static void net_if_ipv6_prefix_init(struct net_if *iface,
2338 				    struct net_if_ipv6_prefix *ifprefix,
2339 				    struct in6_addr *addr, uint8_t len,
2340 				    uint32_t lifetime)
2341 {
2342 	ifprefix->is_used = true;
2343 	ifprefix->len = len;
2344 	ifprefix->iface = iface;
2345 	net_ipaddr_copy(&ifprefix->prefix, addr);
2346 
2347 	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
2348 		ifprefix->is_infinite = true;
2349 	} else {
2350 		ifprefix->is_infinite = false;
2351 	}
2352 }
2353 
struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
2355 						  struct in6_addr *prefix,
2356 						  uint8_t len,
2357 						  uint32_t lifetime)
2358 {
2359 	struct net_if_ipv6_prefix *ifprefix = NULL;
2360 	struct net_if_ipv6 *ipv6;
2361 	int i;
2362 
2363 	net_if_lock(iface);
2364 
2365 	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
2366 		goto out;
2367 	}
2368 
2369 	ifprefix = ipv6_prefix_find(iface, prefix, len);
2370 	if (ifprefix) {
2371 		goto out;
2372 	}
2373 
2374 	if (!ipv6) {
2375 		goto out;
2376 	}
2377 
2378 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2379 		if (ipv6->prefix[i].is_used) {
2380 			continue;
2381 		}
2382 
2383 		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
2384 					len, lifetime);
2385 
2386 		NET_DBG("[%d] interface %p prefix %s/%d added", i, iface,
2387 			net_sprint_ipv6_addr(prefix), len);
2388 
2389 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2390 			struct net_event_ipv6_prefix info;
2391 
2392 			net_ipaddr_copy(&info.addr, prefix);
2393 			info.len = len;
2394 			info.lifetime = lifetime;
2395 
2396 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_ADD,
2397 							iface, (const void *) &info,
2398 							sizeof(struct net_event_ipv6_prefix));
2399 		} else {
2400 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_ADD, iface);
2401 		}
2402 
2403 		ifprefix = &ipv6->prefix[i];
2404 		goto out;
2405 	}
2406 
2407 out:
2408 	net_if_unlock(iface);
2409 
2410 	return ifprefix;
2411 }
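
/* Illustrative sketch (not part of the build): adding an on-link prefix with
 * a finite valid lifetime and arming its expiry timer. The prefix, length
 * and lifetime values are assumptions made only for this example.
 *
 *   struct in6_addr prefix;
 *   struct net_if_ipv6_prefix *ifprefix;
 *
 *   net_addr_pton(AF_INET6, "2001:db8::", &prefix);
 *
 *   ifprefix = net_if_ipv6_prefix_add(net_if_get_default(), &prefix, 64,
 *                                     3600);
 *   if (ifprefix != NULL) {
 *           net_if_ipv6_prefix_set_timer(ifprefix, 3600);
 *   }
 */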
2412 
bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
2414 			   uint8_t len)
2415 {
2416 	bool ret = false;
2417 	struct net_if_ipv6 *ipv6;
2418 	int i;
2419 
2420 	net_if_lock(iface);
2421 
2422 	ipv6 = iface->config.ip.ipv6;
2423 	if (!ipv6) {
2424 		goto out;
2425 	}
2426 
2427 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2428 		if (!ipv6->prefix[i].is_used) {
2429 			continue;
2430 		}
2431 
2432 		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
2433 		    ipv6->prefix[i].len != len) {
2434 			continue;
2435 		}
2436 
2437 		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);
2438 
2439 		ipv6->prefix[i].is_used = false;
2440 
		/* Also remove all autoconfigured addresses if they have the
		 * same prefix.
		 */
2444 		remove_prefix_addresses(iface, ipv6, addr, len);
2445 
2446 		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
2447 			struct net_event_ipv6_prefix info;
2448 
2449 			net_ipaddr_copy(&info.addr, addr);
2450 			info.len = len;
2451 			info.lifetime = 0;
2452 
2453 			net_mgmt_event_notify_with_info(NET_EVENT_IPV6_PREFIX_DEL,
2454 							iface, (const void *) &info,
2455 							sizeof(struct net_event_ipv6_prefix));
2456 		} else {
2457 			net_mgmt_event_notify(NET_EVENT_IPV6_PREFIX_DEL, iface);
2458 		}
2459 
2460 		ret = true;
2461 		goto out;
2462 	}
2463 
2464 out:
2465 	net_if_unlock(iface);
2466 
2467 	return ret;
2468 }
2469 
struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
2471 						  struct in6_addr *addr)
2472 {
2473 	struct net_if_ipv6_prefix *prefix = NULL;
2474 	struct net_if_ipv6 *ipv6;
2475 	int i;
2476 
2477 	if (!iface) {
2478 		iface = net_if_get_default();
2479 	}
2480 
2481 	net_if_lock(iface);
2482 
2483 	ipv6 = iface->config.ip.ipv6;
2484 	if (!ipv6) {
2485 		goto out;
2486 	}
2487 
2488 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2489 		if (!ipv6->prefix[i].is_used) {
2490 			continue;
2491 		}
2492 
2493 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2494 				       addr->s6_addr,
2495 				       ipv6->prefix[i].len)) {
2496 			if (!prefix || prefix->len > ipv6->prefix[i].len) {
2497 				prefix = &ipv6->prefix[i];
2498 			}
2499 		}
2500 	}
2501 
2502 out:
2503 	net_if_unlock(iface);
2504 
2505 	return prefix;
2506 }
2507 
struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
2509 						     struct in6_addr *addr,
2510 						     uint8_t len)
2511 {
2512 	struct net_if_ipv6_prefix *prefix = NULL;
2513 	struct net_if_ipv6 *ipv6;
2514 	int i;
2515 
2516 	net_if_lock(iface);
2517 
2518 	ipv6 = iface->config.ip.ipv6;
2519 	if (!ipv6) {
2520 		goto out;
2521 	}
2522 
2523 	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2524 		if (!ipv6->prefix[i].is_used) {
2525 			continue;
2526 		}
2527 
2528 		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2529 				       addr->s6_addr, len)) {
2530 			prefix = &ipv6->prefix[i];
2531 			goto out;
2532 		}
2533 	}
2534 
2535 out:
2536 	net_if_unlock(iface);
2537 
2538 	return prefix;
2539 }
2540 
bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
2542 {
2543 	bool ret = false;
2544 
2545 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2546 		struct net_if_ipv6 *ipv6;
2547 		int i;
2548 
2549 		if (iface && *iface && *iface != tmp) {
2550 			continue;
2551 		}
2552 
2553 		net_if_lock(tmp);
2554 
2555 		ipv6 = tmp->config.ip.ipv6;
2556 		if (!ipv6) {
2557 			net_if_unlock(tmp);
2558 			continue;
2559 		}
2560 
2561 		for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
2562 			if (ipv6->prefix[i].is_used &&
2563 			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
2564 					       addr->s6_addr,
2565 					       ipv6->prefix[i].len)) {
2566 				if (iface) {
2567 					*iface = tmp;
2568 				}
2569 
2570 				ret = true;
2571 				net_if_unlock(tmp);
2572 				goto out;
2573 			}
2574 		}
2575 
2576 		net_if_unlock(tmp);
2577 	}
2578 
2579 out:
2580 	return ret;
2581 }
2582 
void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
2584 				  uint32_t lifetime)
2585 {
2586 	/* No need to set a timer for infinite timeout */
2587 	if (lifetime == 0xffffffff) {
2588 		return;
2589 	}
2590 
2591 	NET_DBG("Prefix lifetime %u sec", lifetime);
2592 
2593 	prefix_start_timer(prefix, lifetime);
2594 }
2595 
void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
2597 {
2598 	if (!prefix->is_used) {
2599 		return;
2600 	}
2601 
2602 	prefix_timer_remove(prefix);
2603 }
2604 
struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
2606 						struct in6_addr *addr)
2607 {
2608 	return iface_router_lookup(iface, AF_INET6, addr);
2609 }
2610 
struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
2612 						      struct in6_addr *addr)
2613 {
2614 	return iface_router_find_default(iface, AF_INET6, addr);
2615 }
2616 
void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
2618 					uint16_t lifetime)
2619 {
2620 	NET_DBG("Updating expire time of %s by %u secs",
2621 		net_sprint_ipv6_addr(&router->address.in6_addr),
2622 		lifetime);
2623 
2624 	router->life_start = k_uptime_get_32();
2625 	router->lifetime = lifetime;
2626 
2627 	iface_router_update_timer(router->life_start);
2628 }
2629 
struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
2631 					     struct in6_addr *addr,
2632 					     uint16_t lifetime)
2633 {
2634 	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
2635 }
2636 
bool net_if_ipv6_router_rm(struct net_if_router *router)
2638 {
2639 	return iface_router_rm(router);
2640 }
2641 
uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface)
2643 {
2644 #if defined(CONFIG_NET_NATIVE_IPV6)
2645 	int ret = 0;
2646 
2647 	net_if_lock(iface);
2648 
2649 	if (!iface->config.ip.ipv6) {
2650 		goto out;
2651 	}
2652 
2653 	ret = iface->config.ip.ipv6->hop_limit;
2654 out:
2655 	net_if_unlock(iface);
2656 
2657 	return ret;
2658 #else
2659 	ARG_UNUSED(iface);
2660 
2661 	return 0;
2662 #endif
2663 }
2664 
void net_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit)
2666 {
2667 #if defined(CONFIG_NET_NATIVE_IPV6)
2668 	net_if_lock(iface);
2669 
2670 	if (!iface->config.ip.ipv6) {
2671 		goto out;
2672 	}
2673 
2674 	iface->config.ip.ipv6->hop_limit = hop_limit;
2675 out:
2676 	net_if_unlock(iface);
2677 #else
2678 	ARG_UNUSED(iface);
2679 	ARG_UNUSED(hop_limit);
2680 #endif
2681 }
2682 
struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
2684 				    enum net_addr_state addr_state)
2685 {
2686 	struct in6_addr *addr = NULL;
2687 	struct net_if_ipv6 *ipv6;
2688 	int i;
2689 
2690 	net_if_lock(iface);
2691 
2692 	ipv6 = iface->config.ip.ipv6;
2693 	if (!ipv6) {
2694 		goto out;
2695 	}
2696 
2697 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2698 		if (!ipv6->unicast[i].is_used ||
2699 		    (addr_state != NET_ADDR_ANY_STATE &&
2700 		     ipv6->unicast[i].addr_state != addr_state) ||
2701 		    ipv6->unicast[i].address.family != AF_INET6) {
2702 			continue;
2703 		}
2704 
2705 		if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2706 			addr = &ipv6->unicast[i].address.in6_addr;
2707 			goto out;
2708 		}
2709 	}
2710 
2711 out:
2712 	net_if_unlock(iface);
2713 
2714 	return addr;
2715 }
2716 
struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
2718 					 struct net_if **iface)
2719 {
2720 	struct in6_addr *addr = NULL;
2721 
2722 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2723 		net_if_lock(tmp);
2724 
2725 		addr = net_if_ipv6_get_ll(tmp, state);
2726 		if (addr) {
2727 			if (iface) {
2728 				*iface = tmp;
2729 			}
2730 
2731 			net_if_unlock(tmp);
2732 			goto out;
2733 		}
2734 
2735 		net_if_unlock(tmp);
2736 	}
2737 
2738 out:
2739 	return addr;
2740 }
2741 
static inline struct in6_addr *check_global_addr(struct net_if *iface,
2743 						 enum net_addr_state state)
2744 {
2745 	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2746 	int i;
2747 
2748 	if (!ipv6) {
2749 		return NULL;
2750 	}
2751 
2752 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2753 		if (!ipv6->unicast[i].is_used ||
2754 		    (ipv6->unicast[i].addr_state != state) ||
2755 		    ipv6->unicast[i].address.family != AF_INET6) {
2756 			continue;
2757 		}
2758 
2759 		if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2760 			return &ipv6->unicast[i].address.in6_addr;
2761 		}
2762 	}
2763 
2764 	return NULL;
2765 }
2766 
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
2768 					     struct net_if **iface)
2769 {
2770 	struct in6_addr *addr = NULL;
2771 
2772 	STRUCT_SECTION_FOREACH(net_if, tmp) {
2773 		if (iface && *iface && tmp != *iface) {
2774 			continue;
2775 		}
2776 
2777 		net_if_lock(tmp);
2778 		addr = check_global_addr(tmp, state);
2779 		if (addr) {
2780 			if (iface) {
2781 				*iface = tmp;
2782 			}
2783 
2784 			net_if_unlock(tmp);
2785 			goto out;
2786 		}
2787 
2788 		net_if_unlock(tmp);
2789 	}
2790 
2791 out:
2792 
2793 	return addr;
2794 }
2795 
static uint8_t get_diff_ipv6(const struct in6_addr *src,
2797 			  const struct in6_addr *dst)
2798 {
2799 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 16);
2800 }
2801 
static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
2803 {
2804 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
2805 	    addr->address.family == AF_INET6 &&
2806 	    !net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
2807 		return true;
2808 	}
2809 
2810 	return false;
2811 }
2812 
static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
2814 						   const struct in6_addr *dst,
2815 						   uint8_t *best_so_far)
2816 {
2817 	struct net_if_ipv6 *ipv6;
2818 	struct in6_addr *src = NULL;
2819 	uint8_t len;
2820 	int i;
2821 
2822 	net_if_lock(iface);
2823 
2824 	ipv6 = iface->config.ip.ipv6;
2825 	if (!ipv6) {
2826 		goto out;
2827 	}
2828 
2829 	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2830 		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
2831 			continue;
2832 		}
2833 
2834 		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
2835 		if (len >= *best_so_far) {
2836 			/* Mesh local address can only be selected for the same
2837 			 * subnet.
2838 			 */
2839 			if (ipv6->unicast[i].is_mesh_local && len < 64 &&
2840 			    !net_ipv6_is_addr_mcast_mesh(dst)) {
2841 				continue;
2842 			}
2843 
2844 			*best_so_far = len;
2845 			src = &ipv6->unicast[i].address.in6_addr;
2846 		}
2847 	}
2848 
2849 out:
2850 	net_if_unlock(iface);
2851 
2852 	return src;
2853 }
2854 
const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
2856 						   const struct in6_addr *dst)
2857 {
2858 	const struct in6_addr *src = NULL;
2859 	uint8_t best_match = 0U;
2860 
2861 	NET_ASSERT(dst);
2862 
2863 	if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast_link(dst)) {
2864 		/* If caller has supplied interface, then use that */
2865 		if (dst_iface) {
2866 			src = net_if_ipv6_get_best_match(dst_iface, dst,
2867 							 &best_match);
2868 		} else {
2869 			STRUCT_SECTION_FOREACH(net_if, iface) {
2870 				struct in6_addr *addr;
2871 
2872 				addr = net_if_ipv6_get_best_match(iface, dst,
2873 								  &best_match);
2874 				if (addr) {
2875 					src = addr;
2876 				}
2877 			}
2878 		}
2879 
2880 	} else {
2881 		if (dst_iface) {
2882 			src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
2883 		} else {
2884 			struct in6_addr *addr;
2885 
2886 			addr = net_if_ipv6_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
2887 			if (addr) {
2888 				src = addr;
2889 				goto out;
2890 			}
2891 
2892 			STRUCT_SECTION_FOREACH(net_if, iface) {
2893 				addr = net_if_ipv6_get_ll(iface,
2894 							  NET_ADDR_PREFERRED);
2895 				if (addr) {
2896 					src = addr;
2897 					break;
2898 				}
2899 			}
2900 		}
2901 	}
2902 
2903 	if (!src) {
2904 		src = net_ipv6_unspecified_address();
2905 	}
2906 
2907 out:
2908 	return src;
2909 }
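
/* Illustrative sketch (not part of the build): picking a source address for
 * an outgoing IPv6 packet. The destination literal is an assumption made
 * only for this example; passing NULL as the interface makes the stack
 * search all interfaces.
 *
 *   struct in6_addr dst;
 *   const struct in6_addr *src;
 *
 *   net_addr_pton(AF_INET6, "2001:db8::1", &dst);
 *
 *   src = net_if_ipv6_select_src_addr(NULL, &dst);
 *
 * A return value equal to net_ipv6_unspecified_address() means no suitable
 * source address was found.
 */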
2910 
struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
2912 {
2913 	struct net_if *iface = NULL;
2914 	const struct in6_addr *src;
2915 
2916 	src = net_if_ipv6_select_src_addr(NULL, dst);
2917 	if (src != net_ipv6_unspecified_address()) {
2918 		net_if_ipv6_addr_lookup(src, &iface);
2919 	}
2920 
2921 	if (iface == NULL) {
2922 		iface = net_if_get_default();
2923 	}
2924 
2925 	return iface;
2926 }
2927 
uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
2929 {
2930 	uint32_t min_reachable, max_reachable;
2931 
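	/* Per RFC 4861, ReachableTime is picked uniformly between
	 * MIN_RANDOM_FACTOR (1/2) and MAX_RANDOM_FACTOR (3/2) times the base
	 * value; with the default base_reachable_time of 30000 ms the result
	 * falls in [15000, 45000) ms.
	 */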
2932 	min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
2933 			/ MIN_RANDOM_DENOM;
2934 	max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
2935 			/ MAX_RANDOM_DENOM;
2936 
2937 	NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
2938 		max_reachable);
2939 
2940 	return min_reachable +
2941 	       sys_rand32_get() % (max_reachable - min_reachable);
2942 }
2943 
static void iface_ipv6_start(struct net_if *iface)
2945 {
2946 	if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
2947 	    net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
2948 		return;
2949 	}
2950 
2951 	if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
2952 		net_if_start_dad(iface);
2953 	} else {
2954 		struct net_if_ipv6 *ipv6 __unused = iface->config.ip.ipv6;
2955 
2956 		join_mcast_nodes(iface,
2957 				 &ipv6->mcast[0].address.in6_addr);
2958 	}
2959 
2960 	net_if_start_rs(iface);
2961 }
2962 
static void iface_ipv6_init(int if_count)
2964 {
2965 	int i;
2966 
2967 	iface_ipv6_dad_init();
2968 	iface_ipv6_nd_init();
2969 
2970 	k_work_init_delayable(&address_lifetime_timer,
2971 			      address_lifetime_timeout);
2972 	k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);
2973 
2974 	if (if_count > ARRAY_SIZE(ipv6_addresses)) {
2975 		NET_WARN("You have %zu IPv6 net_if addresses but %d "
2976 			 "network interfaces", ARRAY_SIZE(ipv6_addresses),
2977 			 if_count);
2978 		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
2979 			 "value.");
2980 	}
2981 
2982 	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
2983 		ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
2984 		ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;
2985 
2986 		net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
2987 	}
2988 }
2989 
2990 #else
2991 #define join_mcast_allnodes(...)
2992 #define join_mcast_solicit_node(...)
2993 #define leave_mcast_all(...)
2994 #define join_mcast_nodes(...)
2995 #define iface_ipv6_start(...)
2996 #define iface_ipv6_init(...)
2997 
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
2999 						   struct net_if **iface)
3000 {
3001 	ARG_UNUSED(addr);
3002 	ARG_UNUSED(iface);
3003 
3004 	return NULL;
3005 }
3006 
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
3008 					    struct net_if **ret)
3009 {
3010 	ARG_UNUSED(addr);
3011 	ARG_UNUSED(ret);
3012 
3013 	return NULL;
3014 }
3015 
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
3017 					     struct net_if **iface)
3018 {
3019 	ARG_UNUSED(state);
3020 	ARG_UNUSED(iface);
3021 
3022 	return NULL;
3023 }
3024 #endif /* CONFIG_NET_IPV6 */
3025 
3026 #if defined(CONFIG_NET_NATIVE_IPV4)
int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
3028 {
3029 	int ret = 0;
3030 	int i;
3031 
3032 	net_if_lock(iface);
3033 
3034 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3035 		ret = -ENOTSUP;
3036 		goto out;
3037 	}
3038 
3039 	if (iface->config.ip.ipv4) {
3040 		if (ipv4) {
3041 			*ipv4 = iface->config.ip.ipv4;
3042 		}
3043 
3044 		goto out;
3045 	}
3046 
3047 	k_mutex_lock(&lock, K_FOREVER);
3048 
3049 	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
3050 		if (ipv4_addresses[i].iface) {
3051 			continue;
3052 		}
3053 
3054 		iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
3055 		ipv4_addresses[i].iface = iface;
3056 
3057 		if (ipv4) {
3058 			*ipv4 = &ipv4_addresses[i].ipv4;
3059 		}
3060 
3061 		k_mutex_unlock(&lock);
3062 		goto out;
3063 	}
3064 
3065 	k_mutex_unlock(&lock);
3066 
3067 	ret = -ESRCH;
3068 out:
3069 	net_if_unlock(iface);
3070 
3071 	return ret;
3072 }
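
/* net_if_config_ipv4_get() above binds one entry of the static
 * ipv4_addresses[] pool (sized by CONFIG_NET_IF_MAX_IPV4_COUNT) to the
 * interface on first use; net_if_config_ipv4_put() below releases that
 * binding again.
 */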
3073 
int net_if_config_ipv4_put(struct net_if *iface)
3075 {
3076 	int ret = 0;
3077 	int i;
3078 
3079 	net_if_lock(iface);
3080 
3081 	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
3082 		ret = -ENOTSUP;
3083 		goto out;
3084 	}
3085 
3086 	if (!iface->config.ip.ipv4) {
3087 		ret = -EALREADY;
3088 		goto out;
3089 	}
3090 
3091 	k_mutex_lock(&lock, K_FOREVER);
3092 
3093 	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
3094 		if (ipv4_addresses[i].iface != iface) {
3095 			continue;
3096 		}
3097 
3098 		iface->config.ip.ipv4 = NULL;
3099 		ipv4_addresses[i].iface = NULL;
3100 
3101 		k_mutex_unlock(&lock);
3102 		goto out;
3103 	}
3104 
3105 	k_mutex_unlock(&lock);
3106 
3107 	ret = -ESRCH;
3108 out:
3109 	net_if_unlock(iface);
3110 
3111 	return ret;
3112 }
3113 
uint8_t net_if_ipv4_get_ttl(struct net_if *iface)
3115 {
3116 #if defined(CONFIG_NET_NATIVE_IPV4)
3117 	int ret = 0;
3118 
3119 	net_if_lock(iface);
3120 
3121 	if (!iface->config.ip.ipv4) {
3122 		goto out;
3123 	}
3124 
3125 	ret = iface->config.ip.ipv4->ttl;
3126 out:
3127 	net_if_unlock(iface);
3128 
3129 	return ret;
3130 #else
3131 	ARG_UNUSED(iface);
3132 
3133 	return 0;
3134 #endif
3135 }
3136 
void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl)
3138 {
3139 #if defined(CONFIG_NET_NATIVE_IPV4)
3140 	net_if_lock(iface);
3141 
3142 	if (!iface->config.ip.ipv4) {
3143 		goto out;
3144 	}
3145 
3146 	iface->config.ip.ipv4->ttl = ttl;
3147 out:
3148 	net_if_unlock(iface);
3149 #else
3150 	ARG_UNUSED(iface);
3151 	ARG_UNUSED(ttl);
3152 #endif
3153 }
3154 
struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
3156 						struct in_addr *addr)
3157 {
3158 	return iface_router_lookup(iface, AF_INET, addr);
3159 }
3160 
struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
3162 						      struct in_addr *addr)
3163 {
3164 	return iface_router_find_default(iface, AF_INET, addr);
3165 }
3166 
struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
3168 					     struct in_addr *addr,
3169 					     bool is_default,
3170 					     uint16_t lifetime)
3171 {
3172 	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
3173 }
3174 
bool net_if_ipv4_router_rm(struct net_if_router *router)
3176 {
3177 	return iface_router_rm(router);
3178 }
3179 
bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
3181 			       const struct in_addr *addr)
3182 {
3183 	bool ret = false;
3184 	struct net_if_ipv4 *ipv4;
3185 	uint32_t subnet;
3186 	int i;
3187 
3188 	net_if_lock(iface);
3189 
3190 	ipv4 = iface->config.ip.ipv4;
3191 	if (!ipv4) {
3192 		goto out;
3193 	}
3194 
3195 	subnet = UNALIGNED_GET(&addr->s_addr) & ipv4->netmask.s_addr;
3196 
3197 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3198 		if (!ipv4->unicast[i].is_used ||
3199 		    ipv4->unicast[i].address.family != AF_INET) {
3200 			continue;
3201 		}
3202 
3203 		if ((ipv4->unicast[i].address.in_addr.s_addr &
3204 		     ipv4->netmask.s_addr) == subnet) {
3205 			ret = true;
3206 			goto out;
3207 		}
3208 	}
3209 
3210 out:
3211 	net_if_unlock(iface);
3212 
3213 	return ret;
3214 }
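
/* Worked example of the subnet comparison above (the addresses are
 * illustrative): with an interface address of 192.0.2.10 and a netmask of
 * 255.255.255.0, a destination of 192.0.2.200 masks to the same subnet
 * 192.0.2.0 and the function returns true, while 198.51.100.1 masks to
 * 198.51.100.0 and does not match.
 */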
3215 
static bool ipv4_is_broadcast_address(struct net_if *iface,
3217 				      const struct in_addr *addr)
3218 {
3219 	struct net_if_ipv4 *ipv4;
3220 	bool ret = false;
3221 
3222 	net_if_lock(iface);
3223 
3224 	ipv4 = iface->config.ip.ipv4;
3225 	if (!ipv4) {
3226 		ret = false;
3227 		goto out;
3228 	}
3229 
3230 	if (!net_if_ipv4_addr_mask_cmp(iface, addr)) {
3231 		ret = false;
3232 		goto out;
3233 	}
3234 
3235 	if ((UNALIGNED_GET(&addr->s_addr) & ~ipv4->netmask.s_addr) ==
3236 	    ~ipv4->netmask.s_addr) {
3237 		ret = true;
3238 		goto out;
3239 	}
3240 
3241 out:
3242 	net_if_unlock(iface);
3243 	return ret;
3244 }
3245 
bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
3247 			       const struct in_addr *addr)
3248 {
3249 	bool ret = false;
3250 
3251 	if (iface) {
3252 		ret = ipv4_is_broadcast_address(iface, addr);
3253 		goto out;
3254 	}
3255 
3256 	STRUCT_SECTION_FOREACH(net_if, one_iface) {
3257 		ret = ipv4_is_broadcast_address(one_iface, addr);
3258 		if (ret) {
3259 			goto out;
3260 		}
3261 	}
3262 
3263 out:
3264 	return ret;
3265 }
3266 
struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
3268 {
3269 	struct net_if *selected = NULL;
3270 
3271 	STRUCT_SECTION_FOREACH(net_if, iface) {
3272 		bool ret;
3273 
3274 		ret = net_if_ipv4_addr_mask_cmp(iface, dst);
3275 		if (ret) {
3276 			selected = iface;
3277 			goto out;
3278 		}
3279 	}
3280 
3281 	if (selected == NULL) {
3282 		selected = net_if_get_default();
3283 	}
3284 
3285 out:
3286 	return selected;
3287 }
3288 
static uint8_t get_diff_ipv4(const struct in_addr *src,
3290 			  const struct in_addr *dst)
3291 {
3292 	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 4);
3293 }
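
/* get_diff_ipv4() and get_diff_ipv6() reduce source address selection to a
 * longest-prefix-match metric: get_ipaddr_diff() (defined earlier in this
 * file) returns how many leading bits the candidate shares with the
 * destination, and the callers below keep the candidate with the largest
 * value. For example, 192.0.2.10 shares the first 24 bits with 192.0.2.200,
 * so it beats a candidate that shares only 8 bits.
 */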
3294 
static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
3296 {
3297 	if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3298 	    addr->address.family == AF_INET &&
3299 	    !net_ipv4_is_ll_addr(&addr->address.in_addr)) {
3300 		return true;
3301 	}
3302 
3303 	return false;
3304 }
3305 
static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
3307 						  const struct in_addr *dst,
3308 						  uint8_t *best_so_far)
3309 {
3310 	struct net_if_ipv4 *ipv4;
3311 	struct in_addr *src = NULL;
3312 	uint8_t len;
3313 	int i;
3314 
3315 	net_if_lock(iface);
3316 
3317 	ipv4 = iface->config.ip.ipv4;
3318 	if (!ipv4) {
3319 		goto out;
3320 	}
3321 
3322 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3323 		if (!is_proper_ipv4_address(&ipv4->unicast[i])) {
3324 			continue;
3325 		}
3326 
3327 		len = get_diff_ipv4(dst, &ipv4->unicast[i].address.in_addr);
3328 		if (len >= *best_so_far) {
3329 			*best_so_far = len;
3330 			src = &ipv4->unicast[i].address.in_addr;
3331 		}
3332 	}
3333 
3334 out:
3335 	net_if_unlock(iface);
3336 
3337 	return src;
3338 }
3339 
static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
3341 					enum net_addr_state addr_state, bool ll)
3342 {
3343 	struct in_addr *addr = NULL;
3344 	struct net_if_ipv4 *ipv4;
3345 	int i;
3346 
3347 	if (!iface) {
3348 		return NULL;
3349 	}
3350 
3351 	net_if_lock(iface);
3352 
3353 	ipv4 = iface->config.ip.ipv4;
3354 	if (!ipv4) {
3355 		goto out;
3356 	}
3357 
3358 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3359 		if (!ipv4->unicast[i].is_used ||
3360 		    (addr_state != NET_ADDR_ANY_STATE &&
3361 		     ipv4->unicast[i].addr_state != addr_state) ||
3362 		    ipv4->unicast[i].address.family != AF_INET) {
3363 			continue;
3364 		}
3365 
3366 		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].address.in_addr)) {
3367 			if (!ll) {
3368 				continue;
3369 			}
3370 		} else {
3371 			if (ll) {
3372 				continue;
3373 			}
3374 		}
3375 
3376 		addr = &ipv4->unicast[i].address.in_addr;
3377 		goto out;
3378 	}
3379 
3380 out:
3381 	net_if_unlock(iface);
3382 
3383 	return addr;
3384 }
3385 
struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
3387 				   enum net_addr_state addr_state)
3388 {
3389 	return if_ipv4_get_addr(iface, addr_state, true);
3390 }
3391 
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
3393 					    enum net_addr_state addr_state)
3394 {
3395 	return if_ipv4_get_addr(iface, addr_state, false);
3396 }
3397 
const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
3399 						  const struct in_addr *dst)
3400 {
3401 	const struct in_addr *src = NULL;
3402 	uint8_t best_match = 0U;
3403 
3404 	NET_ASSERT(dst);
3405 
3406 	if (!net_ipv4_is_ll_addr(dst)) {
3407 
3408 		/* If caller has supplied interface, then use that */
3409 		if (dst_iface) {
3410 			src = net_if_ipv4_get_best_match(dst_iface, dst,
3411 							 &best_match);
3412 		} else {
3413 			STRUCT_SECTION_FOREACH(net_if, iface) {
3414 				struct in_addr *addr;
3415 
3416 				addr = net_if_ipv4_get_best_match(iface, dst,
3417 								  &best_match);
3418 				if (addr) {
3419 					src = addr;
3420 				}
3421 			}
3422 		}
3423 
3424 	} else {
3425 		if (dst_iface) {
3426 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3427 		} else {
3428 			struct in_addr *addr;
3429 
3430 			addr = net_if_ipv4_get_ll(net_if_get_default(), NET_ADDR_PREFERRED);
3431 			if (addr) {
3432 				src = addr;
3433 				goto out;
3434 			}
3435 
3436 			STRUCT_SECTION_FOREACH(net_if, iface) {
3437 				addr = net_if_ipv4_get_ll(iface,
3438 							  NET_ADDR_PREFERRED);
3439 				if (addr) {
3440 					src = addr;
3441 					break;
3442 				}
3443 			}
3444 		}
3445 	}
3446 
3447 	if (!src) {
3448 		src = net_if_ipv4_get_global_addr(dst_iface,
3449 						  NET_ADDR_PREFERRED);
3450 
3451 		if (IS_ENABLED(CONFIG_NET_IPV4_AUTO) && !src) {
3452 			/* Try to use LL address if there's really no other
3453 			 * address available.
3454 			 */
3455 			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
3456 		}
3457 
3458 		if (!src) {
3459 			src = net_ipv4_unspecified_address();
3460 		}
3461 	}
3462 
3463 out:
3464 	return src;
3465 }
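
/* Illustrative sketch (not part of the build): selecting an IPv4 source
 * address for a destination. The destination literal is an assumption made
 * only for this example.
 *
 *   struct in_addr dst;
 *   const struct in_addr *src;
 *
 *   net_addr_pton(AF_INET, "192.0.2.1", &dst);
 *
 *   src = net_if_ipv4_select_src_addr(NULL, &dst);
 *
 * A return value equal to net_ipv4_unspecified_address() means no suitable
 * source address was found.
 */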
3466 
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
3468 					    struct net_if **ret)
3469 {
3470 	struct net_if_addr *ifaddr = NULL;
3471 
3472 	STRUCT_SECTION_FOREACH(net_if, iface) {
3473 		struct net_if_ipv4 *ipv4;
3474 		int i;
3475 
3476 		net_if_lock(iface);
3477 
3478 		ipv4 = iface->config.ip.ipv4;
3479 		if (!ipv4) {
3480 			net_if_unlock(iface);
3481 			continue;
3482 		}
3483 
3484 		for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3485 			if (!ipv4->unicast[i].is_used ||
3486 			    ipv4->unicast[i].address.family != AF_INET) {
3487 				continue;
3488 			}
3489 
3490 			if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
3491 			    ipv4->unicast[i].address.in_addr.s_addr) {
3492 
3493 				if (ret) {
3494 					*ret = iface;
3495 				}
3496 
3497 				ifaddr = &ipv4->unicast[i];
3498 				net_if_unlock(iface);
3499 				goto out;
3500 			}
3501 		}
3502 
3503 		net_if_unlock(iface);
3504 	}
3505 
3506 out:
3507 	return ifaddr;
3508 }
3509 
int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
3511 {
3512 	struct net_if_addr *if_addr;
3513 	struct net_if *iface = NULL;
3514 
3515 	if_addr = net_if_ipv4_addr_lookup(addr, &iface);
3516 	if (!if_addr) {
3517 		return 0;
3518 	}
3519 
3520 	return net_if_get_by_iface(iface);
3521 }
3522 
3523 #ifdef CONFIG_USERSPACE
static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
3525 					  const struct in_addr *addr)
3526 {
3527 	struct in_addr addr_v4;
3528 
3529 	Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3530 
3531 	return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
3532 }
3533 #include <syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
3534 #endif
3535 
void net_if_ipv4_set_netmask(struct net_if *iface,
3537 			     const struct in_addr *netmask)
3538 {
3539 	net_if_lock(iface);
3540 
3541 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3542 		goto out;
3543 	}
3544 
3545 	if (!iface->config.ip.ipv4) {
3546 		goto out;
3547 	}
3548 
3549 	net_ipaddr_copy(&iface->config.ip.ipv4->netmask, netmask);
3550 out:
3551 	net_if_unlock(iface);
3552 }
3553 
bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
3555 					     const struct in_addr *netmask)
3556 {
3557 	struct net_if *iface;
3558 
3559 	iface = net_if_get_by_index(index);
3560 	if (!iface) {
3561 		return false;
3562 	}
3563 
3564 	net_if_ipv4_set_netmask(iface, netmask);
3565 
3566 	return true;
3567 }
3568 
3569 #ifdef CONFIG_USERSPACE
bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
3571 					     const struct in_addr *netmask)
3572 {
3573 	struct in_addr netmask_addr;
3574 	struct net_if *iface;
3575 
3576 	iface = z_vrfy_net_if_get_by_index(index);
3577 	if (!iface) {
3578 		return false;
3579 	}
3580 
3581 	Z_OOPS(z_user_from_copy(&netmask_addr, (void *)netmask,
3582 				sizeof(netmask_addr)));
3583 
3584 	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
3585 }
3586 
3587 #include <syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
3588 #endif /* CONFIG_USERSPACE */
3589 
void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
3591 {
3592 	net_if_lock(iface);
3593 
3594 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3595 		goto out;
3596 	}
3597 
3598 	if (!iface->config.ip.ipv4) {
3599 		goto out;
3600 	}
3601 
3602 	net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
3603 out:
3604 	net_if_unlock(iface);
3605 }
3606 
bool z_impl_net_if_ipv4_set_gw_by_index(int index,
3608 					const struct in_addr *gw)
3609 {
3610 	struct net_if *iface;
3611 
3612 	iface = net_if_get_by_index(index);
3613 	if (!iface) {
3614 		return false;
3615 	}
3616 
3617 	net_if_ipv4_set_gw(iface, gw);
3618 
3619 	return true;
3620 }
3621 
3622 #ifdef CONFIG_USERSPACE
bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
3624 					const struct in_addr *gw)
3625 {
3626 	struct in_addr gw_addr;
3627 	struct net_if *iface;
3628 
3629 	iface = z_vrfy_net_if_get_by_index(index);
3630 	if (!iface) {
3631 		return false;
3632 	}
3633 
3634 	Z_OOPS(z_user_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));
3635 
3636 	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
3637 }
3638 
3639 #include <syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
3640 #endif /* CONFIG_USERSPACE */
3641 
static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
3643 					  struct in_addr *addr)
3644 {
3645 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
3646 	int i;
3647 
3648 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3649 		if (!ipv4->unicast[i].is_used) {
3650 			continue;
3651 		}
3652 
3653 		if (net_ipv4_addr_cmp(addr,
3654 				      &ipv4->unicast[i].address.in_addr)) {
3655 			return &ipv4->unicast[i];
3656 		}
3657 	}
3658 
3659 	return NULL;
3660 }
3661 
struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
3663 					 struct in_addr *addr,
3664 					 enum net_addr_type addr_type,
3665 					 uint32_t vlifetime)
3666 {
3667 	struct net_if_addr *ifaddr = NULL;
3668 	struct net_if_ipv4 *ipv4;
3669 	int i;
3670 
3671 	net_if_lock(iface);
3672 
3673 	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
3674 		goto out;
3675 	}
3676 
3677 	ifaddr = ipv4_addr_find(iface, addr);
3678 	if (ifaddr) {
3679 		/* TODO: should set addr_type/vlifetime */
3680 		goto out;
3681 	}
3682 
3683 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3684 		struct net_if_addr *cur = &ipv4->unicast[i];
3685 
3686 		if (addr_type == NET_ADDR_DHCP
3687 		    && cur->addr_type == NET_ADDR_OVERRIDABLE) {
3688 			ifaddr = cur;
3689 			break;
3690 		}
3691 
3692 		if (!ipv4->unicast[i].is_used) {
3693 			ifaddr = cur;
3694 			break;
3695 		}
3696 	}
3697 
3698 	if (ifaddr) {
3699 		ifaddr->is_used = true;
3700 		ifaddr->address.family = AF_INET;
3701 		ifaddr->address.in_addr.s4_addr32[0] =
3702 						addr->s4_addr32[0];
3703 		ifaddr->addr_type = addr_type;
3704 
3705 		/* Caller has to take care of timers and their expiry */
3706 		if (vlifetime) {
3707 			ifaddr->is_infinite = false;
3708 		} else {
3709 			ifaddr->is_infinite = true;
3710 		}
3711 
		/**
		 * TODO: Properly handle the PREFERRED/DEPRECATED states when
		 * the address is in use, expires, or is renewed.
		 */
3716 		ifaddr->addr_state = NET_ADDR_PREFERRED;
3717 
3718 		NET_DBG("[%d] interface %p address %s type %s added", i, iface,
3719 			net_sprint_ipv4_addr(addr),
3720 			net_addr_type2str(addr_type));
3721 
3722 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
3723 						&ifaddr->address.in_addr,
3724 						sizeof(struct in_addr));
3725 		goto out;
3726 	}
3727 
3728 out:
3729 	net_if_unlock(iface);
3730 
3731 	return ifaddr;
3732 }
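
/* Illustrative sketch (not part of the build): configuring a static IPv4
 * address together with its netmask and gateway. The literals are
 * assumptions made only for this example; a lifetime of 0 marks the address
 * as permanent.
 *
 *   struct in_addr addr, mask, gw;
 *
 *   net_addr_pton(AF_INET, "192.0.2.10", &addr);
 *   net_addr_pton(AF_INET, "255.255.255.0", &mask);
 *   net_addr_pton(AF_INET, "192.0.2.1", &gw);
 *
 *   if (net_if_ipv4_addr_add(net_if_get_default(), &addr,
 *                            NET_ADDR_MANUAL, 0) != NULL) {
 *           net_if_ipv4_set_netmask(net_if_get_default(), &mask);
 *           net_if_ipv4_set_gw(net_if_get_default(), &gw);
 *   }
 */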
3733 
bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
3735 {
3736 	struct net_if_ipv4 *ipv4;
3737 	bool ret = false;
3738 	int i;
3739 
3740 	net_if_lock(iface);
3741 
3742 	ipv4 = iface->config.ip.ipv4;
3743 	if (!ipv4) {
3744 		goto out;
3745 	}
3746 
3747 	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3748 		if (!ipv4->unicast[i].is_used) {
3749 			continue;
3750 		}
3751 
3752 		if (!net_ipv4_addr_cmp(&ipv4->unicast[i].address.in_addr,
3753 				       addr)) {
3754 			continue;
3755 		}
3756 
3757 		ipv4->unicast[i].is_used = false;
3758 
3759 		NET_DBG("[%d] interface %p address %s removed",
3760 			i, iface, net_sprint_ipv4_addr(addr));
3761 
3762 		net_mgmt_event_notify_with_info(
3763 			NET_EVENT_IPV4_ADDR_DEL, iface,
3764 			&ipv4->unicast[i].address.in_addr,
3765 			sizeof(struct in_addr));
3766 
3767 		ret = true;
3768 		goto out;
3769 	}
3770 
3771 out:
3772 	net_if_unlock(iface);
3773 
3774 	return ret;
3775 }
3776 
bool z_impl_net_if_ipv4_addr_add_by_index(int index,
3778 					  struct in_addr *addr,
3779 					  enum net_addr_type addr_type,
3780 					  uint32_t vlifetime)
3781 {
3782 	struct net_if *iface;
3783 	struct net_if_addr *if_addr;
3784 
3785 	iface = net_if_get_by_index(index);
3786 	if (!iface) {
3787 		return false;
3788 	}
3789 
3790 	if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
3791 	return if_addr ? true : false;
3792 }
3793 
3794 #ifdef CONFIG_USERSPACE
bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
3796 					  struct in_addr *addr,
3797 					  enum net_addr_type addr_type,
3798 					  uint32_t vlifetime)
3799 {
3800 	struct in_addr addr_v4;
3801 	struct net_if *iface;
3802 
3803 	iface = z_vrfy_net_if_get_by_index(index);
3804 	if (!iface) {
3805 		return false;
3806 	}
3807 
3808 	Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3809 
3810 	return z_impl_net_if_ipv4_addr_add_by_index(index,
3811 						    &addr_v4,
3812 						    addr_type,
3813 						    vlifetime);
3814 }
3815 
3816 #include <syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
3817 #endif /* CONFIG_USERSPACE */
3818 
bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
3820 					 const struct in_addr *addr)
3821 {
3822 	struct net_if *iface;
3823 
3824 	iface = net_if_get_by_index(index);
3825 	if (!iface) {
3826 		return false;
3827 	}
3828 
3829 	return net_if_ipv4_addr_rm(iface, addr);
3830 }
3831 
3832 #ifdef CONFIG_USERSPACE
bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
3834 					 const struct in_addr *addr)
3835 {
3836 	struct in_addr addr_v4;
3837 	struct net_if *iface;
3838 
3839 	iface = z_vrfy_net_if_get_by_index(index);
3840 	if (!iface) {
3841 		return false;
3842 	}
3843 
3844 	Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3845 
	return z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
3847 }
3848 
3849 #include <syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
3850 #endif /* CONFIG_USERSPACE */
3851 
void net_if_ipv4_addr_foreach(struct net_if *iface, net_if_ip_addr_cb_t cb,
3853 			      void *user_data)
3854 {
3855 	struct net_if_ipv4 *ipv4;
3856 
3857 	if (iface == NULL) {
3858 		return;
3859 	}
3860 
3861 	net_if_lock(iface);
3862 
3863 	ipv4 = iface->config.ip.ipv4;
3864 	if (ipv4 == NULL) {
3865 		goto out;
3866 	}
3867 
3868 	for (int i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3869 		struct net_if_addr *if_addr = &ipv4->unicast[i];
3870 
3871 		if (!if_addr->is_used) {
3872 			continue;
3873 		}
3874 
3875 		cb(iface, if_addr, user_data);
3876 	}
3877 
3878 out:
3879 	net_if_unlock(iface);
3880 }
3881 
static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
3883 						 bool is_used,
3884 						 const struct in_addr *addr)
3885 {
3886 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
3887 	int i;
3888 
3889 	if (!ipv4) {
3890 		return NULL;
3891 	}
3892 
3893 	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
3894 		if ((is_used && !ipv4->mcast[i].is_used) ||
3895 		    (!is_used && ipv4->mcast[i].is_used)) {
3896 			continue;
3897 		}
3898 
3899 		if (addr) {
3900 			if (!net_ipv4_addr_cmp(&ipv4->mcast[i].address.in_addr,
3901 					       addr)) {
3902 				continue;
3903 			}
3904 		}
3905 
3906 		return &ipv4->mcast[i];
3907 	}
3908 
3909 	return NULL;
3910 }

struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
3912 						const struct in_addr *addr)
3913 {
3914 	struct net_if_mcast_addr *maddr = NULL;
3915 
3916 	net_if_lock(iface);
3917 
3918 	if (net_if_config_ipv4_get(iface, NULL) < 0) {
3919 		goto out;
3920 	}
3921 
3922 	if (!net_ipv4_is_addr_mcast(addr)) {
3923 		NET_DBG("Address %s is not a multicast address.",
3924 			net_sprint_ipv4_addr(addr));
3925 		goto out;
3926 	}
3927 
3928 	maddr = ipv4_maddr_find(iface, false, NULL);
3929 	if (maddr) {
3930 		maddr->is_used = true;
3931 		maddr->address.family = AF_INET;
3932 		maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];
3933 
3934 		NET_DBG("interface %p address %s added", iface,
3935 			net_sprint_ipv4_addr(addr));
3936 
3937 		net_mgmt_event_notify_with_info(
3938 			NET_EVENT_IPV4_MADDR_ADD, iface,
3939 			&maddr->address.in_addr,
3940 			sizeof(struct in_addr));
3941 	}
3942 
3943 out:
3944 	net_if_unlock(iface);
3945 
3946 	return maddr;
3947 }
3948 
bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
3950 {
3951 	struct net_if_mcast_addr *maddr;
3952 	bool ret = false;
3953 
3954 	net_if_lock(iface);
3955 
3956 	maddr = ipv4_maddr_find(iface, true, addr);
3957 	if (maddr) {
3958 		maddr->is_used = false;
3959 
3960 		NET_DBG("interface %p address %s removed",
3961 			iface, net_sprint_ipv4_addr(addr));
3962 
3963 		net_mgmt_event_notify_with_info(
3964 			NET_EVENT_IPV4_MADDR_DEL, iface,
3965 			&maddr->address.in_addr,
3966 			sizeof(struct in_addr));
3967 
3968 		ret = true;
3969 	}
3970 
3971 	net_if_unlock(iface);
3972 
3973 	return ret;
3974 }
3975 
struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
3977 						   struct net_if **ret)
3978 {
3979 	struct net_if_mcast_addr *addr = NULL;
3980 
3981 	STRUCT_SECTION_FOREACH(net_if, iface) {
3982 		if (ret && *ret && iface != *ret) {
3983 			continue;
3984 		}
3985 
3986 		net_if_lock(iface);
3987 
3988 		addr = ipv4_maddr_find(iface, true, maddr);
3989 		if (addr) {
3990 			if (ret) {
3991 				*ret = iface;
3992 			}
3993 
3994 			net_if_unlock(iface);
3995 			goto out;
3996 		}
3997 
3998 		net_if_unlock(iface);
3999 	}
4000 
4001 out:
4002 	return addr;
4003 }
4004 
void net_if_ipv4_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
4006 {
4007 	NET_ASSERT(iface);
4008 	NET_ASSERT(addr);
4009 
4010 	net_if_lock(iface);
4011 	addr->is_joined = false;
4012 	net_if_unlock(iface);
4013 }
4014 
void net_if_ipv4_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
4016 {
4017 	NET_ASSERT(iface);
4018 	NET_ASSERT(addr);
4019 
4020 	net_if_lock(iface);
4021 	addr->is_joined = true;
4022 	net_if_unlock(iface);
4023 }
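
/* Illustrative usage sketch (not part of the implementation): an application
 * that wants to receive traffic for an IPv4 multicast group would typically
 * add the group address to the interface and then mark it joined. The group
 * address and the use of the default interface below are examples only; in
 * practice the IGMP helpers normally drive these calls.
 *
 *	struct in_addr group = { { { 224, 0, 1, 1 } } };
 *	struct net_if *iface = net_if_get_default();
 *	struct net_if_mcast_addr *maddr;
 *
 *	maddr = net_if_ipv4_maddr_add(iface, &group);
 *	if (maddr != NULL) {
 *		net_if_ipv4_maddr_join(iface, maddr);
 *	}
 */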
4024 
4025 static void iface_ipv4_init(int if_count)
4026 {
4027 	int i;
4028 
4029 	if (if_count > ARRAY_SIZE(ipv4_addresses)) {
4030 		NET_WARN("You have %zu IPv4 net_if addresses but %d "
4031 			 "network interfaces", ARRAY_SIZE(ipv4_addresses),
4032 			 if_count);
4033 		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
4034 			 "value.");
4035 	}
4036 
4037 	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
4038 		ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
4039 	}
4040 }
4041 
4042 static void leave_ipv4_mcast_all(struct net_if *iface)
4043 {
4044 	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
4045 	int i;
4046 
4047 	if (!ipv4) {
4048 		return;
4049 	}
4050 
4051 	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
4052 		if (!ipv4->mcast[i].is_used ||
4053 		    !ipv4->mcast[i].is_joined) {
4054 			continue;
4055 		}
4056 
4057 		net_ipv4_igmp_leave(iface, &ipv4->mcast[i].address.in_addr);
4058 	}
4059 }
4060 
4061 #else
4062 #define leave_ipv4_mcast_all(...)
4063 #define iface_ipv4_init(...)
4064 
4065 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
4066 						   struct net_if **iface)
4067 {
4068 	ARG_UNUSED(addr);
4069 	ARG_UNUSED(iface);
4070 
4071 	return NULL;
4072 }
4073 
4074 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
4075 					    struct net_if **ret)
4076 {
4077 	ARG_UNUSED(addr);
4078 	ARG_UNUSED(ret);
4079 
4080 	return NULL;
4081 }
4082 
4083 struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
4084 					    enum net_addr_state addr_state)
4085 {
4086 	ARG_UNUSED(addr_state);
4087 	ARG_UNUSED(iface);
4088 
4089 	return NULL;
4090 }
4091 #endif /* CONFIG_NET_IPV4 */
4092 
4093 struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
4094 {
4095 	struct net_if *iface = NULL;
4096 
4097 	if (!dst) {
4098 		goto out;
4099 	}
4100 
4101 	if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
4102 		iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
4103 		goto out;
4104 	}
4105 
4106 	if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
4107 		iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
4108 		goto out;
4109 	}
4110 
4111 out:
4112 	if (iface == NULL) {
4113 		iface = net_if_get_default();
4114 	}
4115 
4116 	return iface;
4117 }
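
/* Illustrative usage sketch (assumed application code): selecting the egress
 * interface for a literal IPv4 destination. The address is an example; the
 * function falls back to the default interface when nothing better matches.
 *
 *	struct sockaddr_in dst = { .sin_family = AF_INET };
 *	struct net_if *iface;
 *
 *	net_addr_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	iface = net_if_select_src_iface((struct sockaddr *)&dst);
 */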
4118 
4119 enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
4120 {
4121 	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
4122 	    net_if_is_promisc(iface)) {
4123 		struct net_pkt *new_pkt;
4124 
4125 		new_pkt = net_pkt_clone(pkt, K_NO_WAIT);
4126 
4127 		if (net_promisc_mode_input(new_pkt) == NET_DROP) {
4128 			net_pkt_unref(new_pkt);
4129 		}
4130 	}
4131 
4132 	return net_if_l2(iface)->recv(iface, pkt);
4133 }
4134 
4135 void net_if_register_link_cb(struct net_if_link_cb *link,
4136 			     net_if_link_callback_t cb)
4137 {
4138 	k_mutex_lock(&lock, K_FOREVER);
4139 
4140 	sys_slist_find_and_remove(&link_callbacks, &link->node);
4141 	sys_slist_prepend(&link_callbacks, &link->node);
4142 
4143 	link->cb = cb;
4144 
4145 	k_mutex_unlock(&lock);
4146 }
4147 
4148 void net_if_unregister_link_cb(struct net_if_link_cb *link)
4149 {
4150 	k_mutex_lock(&lock, K_FOREVER);
4151 
4152 	sys_slist_find_and_remove(&link_callbacks, &link->node);
4153 
4154 	k_mutex_unlock(&lock);
4155 }
4156 
4157 void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
4158 			 int status)
4159 {
4160 	struct net_if_link_cb *link, *tmp;
4161 
4162 	k_mutex_lock(&lock, K_FOREVER);
4163 
4164 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
4165 		link->cb(iface, lladdr, status);
4166 	}
4167 
4168 	k_mutex_unlock(&lock);
4169 }
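
/* Illustrative usage sketch (assumed application code): a link callback is a
 * statically allocated struct net_if_link_cb whose handler receives the
 * interface, the link-layer address and the status reported by the lower
 * layer. The handler name below is hypothetical.
 *
 *	static struct net_if_link_cb link_cb;
 *
 *	static void link_event(struct net_if *iface,
 *			       struct net_linkaddr *lladdr, int status)
 *	{
 *		ARG_UNUSED(iface);
 *		ARG_UNUSED(lladdr);
 *		ARG_UNUSED(status);
 *
 *		(react to the link event here)
 *	}
 *
 *	net_if_register_link_cb(&link_cb, link_event);
 */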
4170 
4171 static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps)
4172 {
4173 #if defined(CONFIG_NET_L2_ETHERNET)
4174 	if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
4175 		return true;
4176 	}
4177 
4178 	return !(net_eth_get_hw_capabilities(iface) & caps);
4179 #else
4180 	ARG_UNUSED(iface);
4181 	ARG_UNUSED(caps);
4182 
4183 	return true;
4184 #endif
4185 }
4186 
4187 bool net_if_need_calc_tx_checksum(struct net_if *iface)
4188 {
4189 	return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD);
4190 }
4191 
4192 bool net_if_need_calc_rx_checksum(struct net_if *iface)
4193 {
4194 	return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD);
4195 }
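
/* These helpers return true when the checksum must be computed in software,
 * either because the interface is not Ethernet or because the controller
 * lacks the corresponding offload capability. A minimal sketch of how a
 * transmit path might use them (assumed caller code; the checksum helper
 * named below is hypothetical):
 *
 *	if (net_if_need_calc_tx_checksum(net_pkt_iface(pkt))) {
 *		calculate_and_set_checksum(pkt);
 *	}
 */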
4196 
4197 int net_if_get_by_iface(struct net_if *iface)
4198 {
4199 	if (!(iface >= _net_if_list_start && iface < _net_if_list_end)) {
4200 		return -1;
4201 	}
4202 
4203 	return (iface - _net_if_list_start) + 1;
4204 }
4205 
4206 void net_if_foreach(net_if_cb_t cb, void *user_data)
4207 {
4208 	STRUCT_SECTION_FOREACH(net_if, iface) {
4209 		cb(iface, user_data);
4210 	}
4211 }
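
/* Illustrative usage sketch (assumed application code): counting interfaces
 * with net_if_foreach(). The callback must match net_if_cb_t; the names are
 * examples.
 *
 *	static void count_iface(struct net_if *iface, void *user_data)
 *	{
 *		int *count = user_data;
 *
 *		ARG_UNUSED(iface);
 *		(*count)++;
 *	}
 *
 *	int count = 0;
 *
 *	net_if_foreach(count_iface, &count);
 */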
4212 
4213 bool net_if_is_offloaded(struct net_if *iface)
4214 {
4215 	return (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
4216 		net_if_is_ip_offloaded(iface)) ||
4217 	       (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
4218 		net_if_is_socket_offloaded(iface));
4219 }
4220 
4221 static void notify_iface_up(struct net_if *iface)
4222 {
4223 	/* Many places assume that the link address has been set with
4224 	 * net_if_set_link_addr(), so verify that here.
4225 	 */
4226 #if defined(CONFIG_NET_L2_CANBUS_RAW)
4227 	if (IS_ENABLED(CONFIG_NET_SOCKETS_CAN) &&
4228 	    (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW)))	{
4229 		/* CAN does not require link address. */
4230 	} else
4231 #endif	/* CONFIG_NET_L2_CANBUS_RAW */
4232 	{
4233 		if (!net_if_is_offloaded(iface)) {
4234 			NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);
4235 		}
4236 	}
4237 
4238 	net_if_flag_set(iface, NET_IF_RUNNING);
4239 	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);
4240 	net_virtual_enable(iface);
4241 
4242 	/* If the interface carries only point-to-point traffic, there is no
4243 	 * need to run DAD etc. for it.
4244 	 */
4245 	if (!net_if_is_offloaded(iface) &&
4246 	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
4247 		iface_ipv6_start(iface);
4248 		net_ipv4_autoconf_start(iface);
4249 	}
4250 }
4251 
4252 static void notify_iface_down(struct net_if *iface)
4253 {
4254 	net_if_flag_clear(iface, NET_IF_RUNNING);
4255 	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
4256 	net_virtual_disable(iface);
4257 
4258 	if (!net_if_is_offloaded(iface) &&
4259 	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
4260 		net_ipv4_autoconf_reset(iface);
4261 	}
4262 }
4263 
4264 static inline const char *net_if_oper_state2str(enum net_if_oper_state state)
4265 {
4266 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
4267 	switch (state) {
4268 	case NET_IF_OPER_UNKNOWN:
4269 		return "UNKNOWN";
4270 	case NET_IF_OPER_NOTPRESENT:
4271 		return "NOTPRESENT";
4272 	case NET_IF_OPER_DOWN:
4273 		return "DOWN";
4274 	case NET_IF_OPER_LOWERLAYERDOWN:
4275 		return "LOWERLAYERDOWN";
4276 	case NET_IF_OPER_TESTING:
4277 		return "TESTING";
4278 	case NET_IF_OPER_DORMANT:
4279 		return "DORMANT";
4280 	case NET_IF_OPER_UP:
4281 		return "UP";
4282 	default:
4283 		break;
4284 	}
4285 
4286 	return "<invalid>";
4287 #else
4288 	ARG_UNUSED(state);
4289 
4290 	return "";
4291 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
4292 }
4293 
4294 static void update_operational_state(struct net_if *iface)
4295 {
4296 	enum net_if_oper_state prev_state = iface->if_dev->oper_state;
4297 	enum net_if_oper_state new_state = NET_IF_OPER_UNKNOWN;
4298 
4299 	if (!net_if_is_admin_up(iface)) {
4300 		new_state = NET_IF_OPER_DOWN;
4301 		goto exit;
4302 	}
4303 
4304 	if (!net_if_is_carrier_ok(iface)) {
4305 #if defined(CONFIG_NET_L2_VIRTUAL)
4306 		if (net_if_l2(iface) == &NET_L2_GET_NAME(VIRTUAL)) {
4307 			new_state = NET_IF_OPER_LOWERLAYERDOWN;
4308 		} else
4309 #endif /* CONFIG_NET_L2_VIRTUAL */
4310 		{
4311 			new_state = NET_IF_OPER_DOWN;
4312 		}
4313 
4314 		goto exit;
4315 	}
4316 
4317 	if (net_if_is_dormant(iface)) {
4318 		new_state = NET_IF_OPER_DORMANT;
4319 		goto exit;
4320 	}
4321 
4322 	new_state = NET_IF_OPER_UP;
4323 
4324 exit:
4325 	if (net_if_oper_state_set(iface, new_state) != new_state) {
4326 		NET_ERR("Failed to update oper state to %d", new_state);
4327 		return;
4328 	}
4329 
4330 	NET_DBG("iface %p, oper state %s admin %s carrier %s dormant %s",
4331 		iface, net_if_oper_state2str(net_if_oper_state(iface)),
4332 		net_if_is_admin_up(iface) ? "UP" : "DOWN",
4333 		net_if_is_carrier_ok(iface) ? "ON" : "OFF",
4334 		net_if_is_dormant(iface) ? "ON" : "OFF");
4335 
4336 	if (net_if_oper_state(iface) == NET_IF_OPER_UP) {
4337 		if (prev_state != NET_IF_OPER_UP) {
4338 			notify_iface_up(iface);
4339 		}
4340 	} else {
4341 		if (prev_state == NET_IF_OPER_UP) {
4342 			notify_iface_down(iface);
4343 		}
4344 	}
4345 }
4346 
4347 static void init_igmp(struct net_if *iface)
4348 {
4349 #if defined(CONFIG_NET_IPV4_IGMP)
4350 	/* Ensure IPv4 is enabled for this interface. */
4351 	if (net_if_config_ipv4_get(iface, NULL)) {
4352 		return;
4353 	}
4354 
4355 	net_ipv4_igmp_init(iface);
4356 #else
4357 	ARG_UNUSED(iface);
4358 	return;
4359 #endif
4360 }
4361 
4362 int net_if_up(struct net_if *iface)
4363 {
4364 	int status = 0;
4365 
4366 	NET_DBG("iface %p", iface);
4367 
4368 	net_if_lock(iface);
4369 
4370 	if (net_if_flag_is_set(iface, NET_IF_UP)) {
4371 		status = -EALREADY;
4372 		goto out;
4373 	}
4374 
4375 	/* If the L2 does not support enable, just set the flag */
4376 	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
4377 		goto done;
4378 	}
4379 
4380 	/* Notify L2 to enable the interface */
4381 	status = net_if_l2(iface)->enable(iface, true);
4382 	if (status < 0) {
4383 		goto out;
4384 	}
4385 
4386 	init_igmp(iface);
4387 
4388 done:
4389 	net_if_flag_set(iface, NET_IF_UP);
4390 	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_UP, iface);
4391 	update_operational_state(iface);
4392 
4393 out:
4394 	net_if_unlock(iface);
4395 
4396 	return status;
4397 }
4398 
4399 int net_if_down(struct net_if *iface)
4400 {
4401 	int status = 0;
4402 
4403 	NET_DBG("iface %p", iface);
4404 
4405 	net_if_lock(iface);
4406 
4407 	if (!net_if_flag_is_set(iface, NET_IF_UP)) {
4408 		status = -EALREADY;
4409 		goto out;
4410 	}
4411 
4412 	leave_mcast_all(iface);
4413 	leave_ipv4_mcast_all(iface);
4414 
4415 	/* If the L2 does not support enable, just clear the flag */
4416 	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
4417 		goto done;
4418 	}
4419 
4420 	/* Notify L2 to disable the interface */
4421 	status = net_if_l2(iface)->enable(iface, false);
4422 	if (status < 0) {
4423 		goto out;
4424 	}
4425 
4426 done:
4427 	net_if_flag_clear(iface, NET_IF_UP);
4428 	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_DOWN, iface);
4429 	update_operational_state(iface);
4430 
4431 out:
4432 	net_if_unlock(iface);
4433 
4434 	return status;
4435 }
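
/* Illustrative usage sketch (assumed application code): taking an interface
 * down for reconfiguration and bringing it back up. Both calls return
 * -EALREADY when the interface is already in the requested administrative
 * state.
 *
 *	struct net_if *iface = net_if_get_default();
 *
 *	(void)net_if_down(iface);
 *	(reconfigure addresses, flags, etc. here)
 *	(void)net_if_up(iface);
 */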
4436 
4437 void net_if_carrier_on(struct net_if *iface)
4438 {
4439 	NET_ASSERT(iface);
4440 
4441 	net_if_lock(iface);
4442 
4443 	if (!net_if_flag_test_and_set(iface, NET_IF_LOWER_UP)) {
4444 		update_operational_state(iface);
4445 	}
4446 
4447 	net_if_unlock(iface);
4448 }
4449 
4450 void net_if_carrier_off(struct net_if *iface)
4451 {
4452 	NET_ASSERT(iface);
4453 
4454 	net_if_lock(iface);
4455 
4456 	if (net_if_flag_test_and_clear(iface, NET_IF_LOWER_UP)) {
4457 		update_operational_state(iface);
4458 	}
4459 
4460 	net_if_unlock(iface);
4461 }
4462 
4463 void net_if_dormant_on(struct net_if *iface)
4464 {
4465 	NET_ASSERT(iface);
4466 
4467 	net_if_lock(iface);
4468 
4469 	if (!net_if_flag_test_and_set(iface, NET_IF_DORMANT)) {
4470 		update_operational_state(iface);
4471 	}
4472 
4473 	net_if_unlock(iface);
4474 }
4475 
4476 void net_if_dormant_off(struct net_if *iface)
4477 {
4478 	NET_ASSERT(iface);
4479 
4480 	net_if_lock(iface);
4481 
4482 	if (net_if_flag_test_and_clear(iface, NET_IF_DORMANT)) {
4483 		update_operational_state(iface);
4484 	}
4485 
4486 	net_if_unlock(iface);
4487 }
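
/* Illustrative sketch (assumed driver code): carrier and dormant state are
 * normally reported by the device driver or the L2 once the administrative
 * state is up; link_is_up below is a hypothetical flag taken from the
 * driver's PHY handling.
 *
 *	if (link_is_up) {
 *		net_if_carrier_on(iface);
 *	} else {
 *		net_if_carrier_off(iface);
 *	}
 */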
4488 
4489 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
4490 static int promisc_mode_set(struct net_if *iface, bool enable)
4491 {
4492 	enum net_l2_flags l2_flags = 0;
4493 
4494 	NET_ASSERT(iface);
4495 
4496 	l2_flags = l2_flags_get(iface);
4497 	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
4498 		return -ENOTSUP;
4499 	}
4500 
4501 #if defined(CONFIG_NET_L2_ETHERNET)
4502 	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
4503 		int ret = net_eth_promisc_mode(iface, enable);
4504 
4505 		if (ret < 0) {
4506 			return ret;
4507 		}
4508 	}
4509 #else
4510 	ARG_UNUSED(enable);
4511 
4512 	return -ENOTSUP;
4513 #endif
4514 
4515 	return 0;
4516 }
4517 
4518 int net_if_set_promisc(struct net_if *iface)
4519 {
4520 	int ret;
4521 
4522 	net_if_lock(iface);
4523 
4524 	ret = promisc_mode_set(iface, true);
4525 	if (ret < 0) {
4526 		goto out;
4527 	}
4528 
4529 	ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
4530 	if (ret) {
4531 		ret = -EALREADY;
4532 		goto out;
4533 	}
4534 
4535 out:
4536 	net_if_unlock(iface);
4537 
4538 	return ret;
4539 }
4540 
4541 void net_if_unset_promisc(struct net_if *iface)
4542 {
4543 	int ret;
4544 
4545 	net_if_lock(iface);
4546 
4547 	ret = promisc_mode_set(iface, false);
4548 	if (ret < 0) {
4549 		goto out;
4550 	}
4551 
4552 	net_if_flag_clear(iface, NET_IF_PROMISC);
4553 
4554 out:
4555 	net_if_unlock(iface);
4556 }
4557 
4558 bool net_if_is_promisc(struct net_if *iface)
4559 {
4560 	NET_ASSERT(iface);
4561 
4562 	return net_if_flag_is_set(iface, NET_IF_PROMISC);
4563 }
4564 #endif /* CONFIG_NET_PROMISCUOUS_MODE */
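
/* Illustrative usage sketch (assumed application code, only available with
 * CONFIG_NET_PROMISCUOUS_MODE): enabling promiscuous mode on an interface.
 * -EALREADY means it was already enabled, -ENOTSUP that the L2 or driver
 * cannot do it.
 *
 *	int ret = net_if_set_promisc(iface);
 *
 *	if (ret < 0 && ret != -EALREADY) {
 *		return ret;
 *	}
 */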
4565 
4566 #ifdef CONFIG_NET_POWER_MANAGEMENT
4567 
4568 int net_if_suspend(struct net_if *iface)
4569 {
4570 	int ret = 0;
4571 
4572 	net_if_lock(iface);
4573 
4574 	if (net_if_are_pending_tx_packets(iface)) {
4575 		ret = -EBUSY;
4576 		goto out;
4577 	}
4578 
4579 	if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
4580 		ret = -EALREADY;
4581 		goto out;
4582 	}
4583 
4584 	net_stats_add_suspend_start_time(iface, k_cycle_get_32());
4585 
4586 out:
4587 	net_if_unlock(iface);
4588 
4589 	return ret;
4590 }
4591 
4592 int net_if_resume(struct net_if *iface)
4593 {
4594 	int ret = 0;
4595 
4596 	net_if_lock(iface);
4597 
4598 	if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
4599 		ret = -EALREADY;
4600 		goto out;
4601 	}
4602 
4603 	net_if_flag_clear(iface, NET_IF_SUSPENDED);
4604 
4605 	net_stats_add_suspend_end_time(iface, k_cycle_get_32());
4606 
4607 out:
4608 	net_if_unlock(iface);
4609 
4610 	return ret;
4611 }
4612 
4613 bool net_if_is_suspended(struct net_if *iface)
4614 {
4615 	return net_if_flag_is_set(iface, NET_IF_SUSPENDED);
4616 }
4617 
4618 #endif /* CONFIG_NET_POWER_MANAGEMENT */
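
/* Illustrative sketch (assumed power-management glue, requires
 * CONFIG_NET_POWER_MANAGEMENT): net_if_suspend() fails with -EBUSY while TX
 * packets are still pending, so callers may need to drain or retry first.
 *
 *	if (net_if_suspend(iface) == -EBUSY) {
 *		(wait for pending TX to finish, then retry)
 *	}
 *
 *	(later, when waking up)
 *	net_if_resume(iface);
 */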
4619 
4620 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
4621 static void net_tx_ts_thread(void)
4622 {
4623 	struct net_pkt *pkt;
4624 
4625 	NET_DBG("Starting TX timestamp callback thread");
4626 
4627 	while (1) {
4628 		pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
4629 		if (pkt) {
4630 			net_if_call_timestamp_cb(pkt);
4631 		}
4632 	}
4633 }
4634 
4635 void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
4636 				  struct net_pkt *pkt,
4637 				  struct net_if *iface,
4638 				  net_if_timestamp_callback_t cb)
4639 {
4640 	k_mutex_lock(&lock, K_FOREVER);
4641 
4642 	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
4643 	sys_slist_prepend(&timestamp_callbacks, &handle->node);
4644 
4645 	handle->iface = iface;
4646 	handle->cb = cb;
4647 	handle->pkt = pkt;
4648 
4649 	k_mutex_unlock(&lock);
4650 }
4651 
4652 void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
4653 {
4654 	k_mutex_lock(&lock, K_FOREVER);
4655 
4656 	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
4657 
4658 	k_mutex_unlock(&lock);
4659 }
4660 
4661 void net_if_call_timestamp_cb(struct net_pkt *pkt)
4662 {
4663 	sys_snode_t *sn, *sns;
4664 
4665 	k_mutex_lock(&lock, K_FOREVER);
4666 
4667 	SYS_SLIST_FOR_EACH_NODE_SAFE(&timestamp_callbacks, sn, sns) {
4668 		struct net_if_timestamp_cb *handle =
4669 			CONTAINER_OF(sn, struct net_if_timestamp_cb, node);
4670 
4671 		if (((handle->iface == NULL) ||
4672 		     (handle->iface == net_pkt_iface(pkt))) &&
4673 		    (handle->pkt == NULL || handle->pkt == pkt)) {
4674 			handle->cb(pkt);
4675 		}
4676 	}
4677 
4678 	k_mutex_unlock(&lock);
4679 }
4680 
4681 void net_if_add_tx_timestamp(struct net_pkt *pkt)
4682 {
4683 	k_fifo_put(&tx_ts_queue, pkt);
4684 }
4685 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
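
/* Illustrative usage sketch (assumed application code, requires
 * CONFIG_NET_PKT_TIMESTAMP_THREAD): waiting for the TX timestamp of a given
 * packet. The handler matches net_if_timestamp_callback_t and is invoked
 * from the tx_tstamp thread above; the names are examples.
 *
 *	static struct net_if_timestamp_cb ts_cb;
 *
 *	static void pkt_timestamped(struct net_pkt *pkt)
 *	{
 *		(read net_pkt_timestamp(pkt) here)
 *		net_if_unregister_timestamp_cb(&ts_cb);
 *	}
 *
 *	net_if_register_timestamp_cb(&ts_cb, pkt, iface, pkt_timestamped);
 */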
4686 
4687 bool net_if_is_wifi(struct net_if *iface)
4688 {
4689 	if (net_if_is_offloaded(iface)) {
4690 		return net_off_is_wifi_offloaded(iface);
4691 	}
4692 #if defined(CONFIG_NET_L2_ETHERNET)
4693 	return net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) &&
4694 		net_eth_type_is_wifi(iface);
4695 #endif
4696 	return false;
4697 }
4698 
4699 struct net_if *net_if_get_first_wifi(void)
4700 {
4701 	STRUCT_SECTION_FOREACH(net_if, iface) {
4702 		if (net_if_is_wifi(iface)) {
4703 			return iface;
4704 		}
4705 	}
4706 	return NULL;
4707 }
4708 
4709 int net_if_get_name(struct net_if *iface, char *buf, int len)
4710 {
4711 #if defined(CONFIG_NET_INTERFACE_NAME)
4712 	int name_len;
4713 
4714 	if (iface == NULL || buf == NULL || len <= 0) {
4715 		return -EINVAL;
4716 	}
4717 
4718 	name_len = strlen(net_if_get_config(iface)->name);
4719 	if (name_len >= len) {
4720 		return -ERANGE;
4721 	}
4722 
4723 	/* Copy string and null terminator */
4724 	memcpy(buf, net_if_get_config(iface)->name, name_len + 1);
4725 
4726 	return name_len;
4727 #else
4728 	return -ENOTSUP;
4729 #endif
4730 }
4731 
4732 int net_if_set_name(struct net_if *iface, const char *buf)
4733 {
4734 #if defined(CONFIG_NET_INTERFACE_NAME)
4735 	int name_len;
4736 
4737 	if (iface == NULL || buf == NULL) {
4738 		return -EINVAL;
4739 	}
4740 
4741 	name_len = strlen(buf);
4742 	if (name_len >= sizeof(iface->config.name)) {
4743 		return -ENAMETOOLONG;
4744 	}
4745 
4746 	/* Copy string and null terminator */
4747 	memcpy(net_if_get_config(iface)->name, buf, name_len + 1);
4748 
4749 	return 0;
4750 #else
4751 	return -ENOTSUP;
4752 #endif
4753 }
4754 
4755 int net_if_get_by_name(const char *name)
4756 {
4757 #if defined(CONFIG_NET_INTERFACE_NAME)
4758 	if (name == NULL) {
4759 		return -EINVAL;
4760 	}
4761 
4762 	STRUCT_SECTION_FOREACH(net_if, iface) {
4763 		if (strncmp(net_if_get_config(iface)->name, name, strlen(name)) == 0) {
4764 			return net_if_get_by_iface(iface);
4765 		}
4766 	}
4767 
4768 	return -ENOENT;
4769 #else
4770 	return -ENOTSUP;
4771 #endif
4772 }
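
/* Illustrative usage sketch (assumed application code, requires
 * CONFIG_NET_INTERFACE_NAME): resolving an interface index by name and
 * reading the name back into a caller buffer. "eth0" is an example name;
 * net_if_get_by_index() is the public helper that maps the 1-based index
 * back to a struct net_if pointer.
 *
 *	char name[CONFIG_NET_INTERFACE_NAME_LEN + 1];
 *	int idx = net_if_get_by_name("eth0");
 *
 *	if (idx > 0) {
 *		net_if_get_name(net_if_get_by_index(idx), name, sizeof(name));
 *	}
 */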
4773 
4774 #if defined(CONFIG_NET_INTERFACE_NAME)
4775 static void set_default_name(struct net_if *iface)
4776 {
4777 	char name[CONFIG_NET_INTERFACE_NAME_LEN + 1] = { 0 };
4778 	int ret;
4779 
4780 	if (net_if_is_wifi(iface)) {
4781 		static int count;
4782 
4783 		snprintk(name, sizeof(name) - 1, "wlan%d", count++);
4784 
4785 	} else if (IS_ENABLED(CONFIG_NET_L2_ETHERNET)) {
4786 #if defined(CONFIG_NET_L2_ETHERNET)
4787 		if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
4788 			static int count;
4789 
4790 			snprintk(name, sizeof(name) - 1, "eth%d", count++);
4791 		}
4792 #endif /* CONFIG_NET_L2_ETHERNET */
4793 	}
4794 
4795 	if (IS_ENABLED(CONFIG_NET_L2_IEEE802154)) {
4796 #if defined(CONFIG_NET_L2_IEEE802154)
4797 		if (net_if_l2(iface) == &NET_L2_GET_NAME(IEEE802154)) {
4798 			static int count;
4799 
4800 			snprintk(name, sizeof(name) - 1, "ieee%d", count++);
4801 		}
4802 #endif /* CONFIG_NET_L2_IEEE802154 */
4803 	}
4804 
4805 	if (IS_ENABLED(CONFIG_NET_L2_DUMMY)) {
4806 #if defined(CONFIG_NET_L2_DUMMY)
4807 		if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
4808 			static int count;
4809 
4810 			snprintk(name, sizeof(name) - 1, "dummy%d", count++);
4811 		}
4812 #endif /* CONFIG_NET_L2_DUMMY */
4813 	}
4814 
4815 	if (IS_ENABLED(CONFIG_NET_L2_CANBUS_RAW)) {
4816 #if defined(CONFIG_NET_L2_CANBUS_RAW)
4817 		if (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW)) {
4818 			static int count;
4819 
4820 			snprintk(name, sizeof(name) - 1, "can%d", count++);
4821 		}
4822 #endif /* CONFIG_NET_L2_CANBUS_RAW */
4823 	}
4824 
4825 	if (IS_ENABLED(CONFIG_NET_L2_PPP)) {
4826 #if defined(CONFIG_NET_L2_PPP)
4827 		if (net_if_l2(iface) == &NET_L2_GET_NAME(PPP)) {
4828 			static int count;
4829 
4830 			snprintk(name, sizeof(name) - 1, "ppp%d", count++);
4831 		}
4832 #endif /* CONFIG_NET_L2_PPP */
4833 	}
4834 
4835 	if (name[0] == '\0') {
4836 		static int count;
4837 
4838 		snprintk(name, sizeof(name) - 1, "net%d", count++);
4839 	}
4840 
4841 	ret = net_if_set_name(iface, name);
4842 	if (ret < 0) {
4843 		NET_WARN("Cannot set default name for interface %d (%p) (%d)",
4844 			 net_if_get_by_iface(iface), iface, ret);
4845 	}
4846 }
4847 #endif /* CONFIG_NET_INTERFACE_NAME */
4848 
4849 void net_if_init(void)
4850 {
4851 	int if_count = 0;
4852 
4853 	NET_DBG("");
4854 
4855 	k_mutex_lock(&lock, K_FOREVER);
4856 
4857 	net_tc_tx_init();
4858 
4859 	STRUCT_SECTION_FOREACH(net_if, iface) {
4860 		init_iface(iface);
4861 		if_count++;
4862 
4863 #if defined(CONFIG_NET_INTERFACE_NAME)
4864 		memset(net_if_get_config(iface)->name, 0,
4865 		       sizeof(iface->config.name));
4866 
4867 		set_default_name(iface);
4868 #endif
4869 	}
4870 
4871 	if (if_count == 0) {
4872 		NET_ERR("There is no network interface to work with!");
4873 		goto out;
4874 	}
4875 
4876 	iface_ipv6_init(if_count);
4877 	iface_ipv4_init(if_count);
4878 	iface_router_init();
4879 
4880 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
4881 	k_thread_create(&tx_thread_ts, tx_ts_stack,
4882 			K_KERNEL_STACK_SIZEOF(tx_ts_stack),
4883 			(k_thread_entry_t)net_tx_ts_thread,
4884 			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
4885 	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
4886 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
4887 
4888 #if defined(CONFIG_NET_VLAN)
4889 	/* Make sure that we do not have too many network interfaces
4890 	 * compared to the number of VLAN interfaces.
4891 	 */
4892 	if_count = 0;
4893 
4894 	STRUCT_SECTION_FOREACH(net_if, iface) {
4895 		if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
4896 			if_count++;
4897 		}
4898 	}
4899 
4900 	if (if_count > CONFIG_NET_VLAN_COUNT) {
4901 		NET_WARN("You have configured only %d VLAN interfaces"
4902 			 " but there are %d Ethernet interfaces.",
4903 			 CONFIG_NET_VLAN_COUNT, if_count);
4904 	}
4905 #endif
4906 
4907 out:
4908 	k_mutex_unlock(&lock);
4909 }
4910 
4911 void net_if_post_init(void)
4912 {
4913 	NET_DBG("");
4914 
4915 	/* After TX is running, attempt to bring the interface up */
4916 	STRUCT_SECTION_FOREACH(net_if, iface) {
4917 		if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
4918 			net_if_up(iface);
4919 		}
4920 	}
4921 }
4922