1 /*
2 * Copyright (c) 2016 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/logging/log.h>
8 LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
9
10 #include <zephyr/init.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/linker/sections.h>
13 #include <zephyr/random/rand32.h>
14 #include <zephyr/syscall_handler.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <zephyr/net/igmp.h>
18 #include <zephyr/net/net_core.h>
19 #include <zephyr/net/net_pkt.h>
20 #include <zephyr/net/net_if.h>
21 #include <zephyr/net/net_mgmt.h>
22 #include <zephyr/net/ethernet.h>
23 #include <zephyr/net/virtual.h>
24 #include <zephyr/sys/iterable_sections.h>
25
26 #include "net_private.h"
27 #include "ipv4.h"
28 #include "ipv6.h"
29 #include "ipv4_autoconf_internal.h"
30
31 #include "net_stats.h"
32
/* Default ND reachable time used with the random factors below (ms) */
#define REACHABLE_TIME (MSEC_PER_SEC * 30) /* in ms */
/*
 * split the min/max random reachable factors into numerator/denominator
 * so that integer-based math works better
 */
#define MIN_RANDOM_NUMER (1)
#define MIN_RANDOM_DENOM (2)
#define MAX_RANDOM_NUMER (3)
#define MAX_RANDOM_DENOM (2)

/* Protects the file-scope state below (router table, timer lists,
 * callback lists and the shared IPv4/IPv6 config pools).
 */
static K_MUTEX_DEFINE(lock);

/* net_if dedicated section limiters */
extern struct net_if _net_if_list_start[];
extern struct net_if _net_if_list_end[];

/* Interface returned by net_if_get_default() when set explicitly
 * via net_if_set_default(); NULL means "use the Kconfig fallback".
 */
static struct net_if *default_iface;

#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
/* Router table shared by IPv4 and IPv6, plus the single delayable work
 * item and list used to expire time-limited entries.
 */
static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
static struct k_work_delayable router_timer;
static sys_slist_t active_router_timers;
#endif

#if defined(CONFIG_NET_NATIVE_IPV6)
/* Timer that triggers network address renewal */
static struct k_work_delayable address_lifetime_timer;

/* Track currently active address lifetime timers */
static sys_slist_t active_address_lifetime_timers;

/* Timer that triggers IPv6 prefix lifetime */
static struct k_work_delayable prefix_lifetime_timer;

/* Track currently active IPv6 prefix lifetime timers */
static sys_slist_t active_prefix_lifetime_timers;

#if defined(CONFIG_NET_IPV6_DAD)
/** Duplicate address detection (DAD) timer */
static struct k_work_delayable dad_timer;
static sys_slist_t active_dad_timers;
#endif

#if defined(CONFIG_NET_IPV6_ND)
/* Router solicitation timer and the entries waiting on it */
static struct k_work_delayable rs_timer;
static sys_slist_t active_rs_timers;
#endif

/* Pool of per-interface IPv6 configs, bound to an interface on demand
 * by net_if_config_ipv6_get() and released by net_if_config_ipv6_put().
 */
static struct {
	struct net_if_ipv6 ipv6;
	struct net_if *iface;
} ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
#endif /* CONFIG_NET_IPV6 */

#if defined(CONFIG_NET_NATIVE_IPV4)
/* Pool of per-interface IPv4 configs, bound to an interface on demand */
static struct {
	struct net_if_ipv4 ipv4;
	struct net_if *iface;
} ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
#endif /* CONFIG_NET_IPV4 */

/* We keep track of the link callbacks in this list.
 */
static sys_slist_t link_callbacks;

#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
/* Multicast join/leave tracking.
 */
static sys_slist_t mcast_monitor_callbacks;
#endif

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
#if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
#define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
#endif

K_KERNEL_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
K_FIFO_DEFINE(tx_ts_queue);

static struct k_thread tx_thread_ts;

/* We keep track of the timestamp callbacks in this list.
 */
static sys_slist_t timestamp_callbacks;
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

#if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
/* Debug-only logging of a packet about to be transmitted; also asserts
 * that the packet carries at least one fragment.
 */
#define debug_check_packet(pkt)						\
	do {								\
		NET_DBG("Processing (pkt %p, prio %d) network packet "	\
			"iface %p/%d",					\
			pkt, net_pkt_priority(pkt),			\
			net_pkt_iface(pkt),				\
			net_if_get_by_iface(net_pkt_iface(pkt)));	\
									\
		NET_ASSERT(pkt->frags);					\
	} while (0)
#else
#define debug_check_packet(...)
#endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
133
z_impl_net_if_get_by_index(int index)134 struct net_if *z_impl_net_if_get_by_index(int index)
135 {
136 if (index <= 0) {
137 return NULL;
138 }
139
140 if (&_net_if_list_start[index - 1] >= _net_if_list_end) {
141 NET_DBG("Index %d is too large", index);
142 return NULL;
143 }
144
145 return &_net_if_list_start[index - 1];
146 }
147
#ifdef CONFIG_USERSPACE
/* Syscall verification handler: resolves the index and validates that
 * the resulting interface is an initialized kernel object of type
 * K_OBJ_NET_IF before handing it to user mode. Returns NULL on any
 * validation failure.
 */
struct net_if *z_vrfy_net_if_get_by_index(int index)
{
	struct net_if *iface;
	struct z_object *zo;
	int ret;

	iface = net_if_get_by_index(index);
	if (!iface) {
		return NULL;
	}

	zo = z_object_find(iface);

	ret = z_object_validate(zo, K_OBJ_NET_IF, _OBJ_INIT_TRUE);
	if (ret != 0) {
		z_dump_object_error(ret, iface, zo, K_OBJ_NET_IF);
		return NULL;
	}

	return iface;
}

#include <syscalls/net_if_get_by_index_mrsh.c>
#endif
173
net_context_send_cb(struct net_context * context,int status)174 static inline void net_context_send_cb(struct net_context *context,
175 int status)
176 {
177 if (!context) {
178 return;
179 }
180
181 if (context->send_cb) {
182 context->send_cb(context, status, context->user_data);
183 }
184
185 if (IS_ENABLED(CONFIG_NET_UDP) &&
186 net_context_get_proto(context) == IPPROTO_UDP) {
187 net_stats_update_udp_sent(net_context_get_iface(context));
188 } else if (IS_ENABLED(CONFIG_NET_TCP) &&
189 net_context_get_proto(context) == IPPROTO_TCP) {
190 net_stats_update_tcp_seg_sent(net_context_get_iface(context));
191 }
192 }
193
update_txtime_stats_detail(struct net_pkt * pkt,uint32_t start_time,uint32_t stop_time)194 static void update_txtime_stats_detail(struct net_pkt *pkt,
195 uint32_t start_time, uint32_t stop_time)
196 {
197 uint32_t val, prev = start_time;
198 int i;
199
200 for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
201 if (!net_pkt_stats_tick(pkt)[i]) {
202 break;
203 }
204
205 val = net_pkt_stats_tick(pkt)[i] - prev;
206 prev = net_pkt_stats_tick(pkt)[i];
207 net_pkt_stats_tick(pkt)[i] = val;
208 }
209 }
210
/* Transmit one packet on @p iface via its L2 send() hook and do the
 * related bookkeeping: TX-time statistics, byte counters, the
 * net_context send callback and any registered link callbacks.
 *
 * Returns false only when @p pkt is NULL; otherwise the packet is
 * consumed (either sent or unreferenced on error) and true is returned.
 */
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_linkaddr ll_dst = {
		.addr = NULL
	};
	struct net_linkaddr_storage ll_dst_storage;
	struct net_context *context;
	uint32_t create_time;
	int status;

	/* We collect send statistics for each socket priority if enabled */
	uint8_t pkt_priority;

	if (!pkt) {
		return false;
	}

	create_time = net_pkt_create_time(pkt);

	debug_check_packet(pkt);

	/* If there're any link callbacks, with such a callback receiving
	 * a destination address, copy that address out of packet, just in
	 * case packet is freed before callback is called.
	 */
	if (!sys_slist_is_empty(&link_callbacks)) {
		if (net_linkaddr_set(&ll_dst_storage,
				     net_pkt_lladdr_dst(pkt)->addr,
				     net_pkt_lladdr_dst(pkt)->len) == 0) {
			ll_dst.addr = ll_dst_storage.addr;
			ll_dst.len = ll_dst_storage.len;
			ll_dst.type = net_pkt_lladdr_dst(pkt)->type;
		}
	}

	context = net_pkt_context(pkt);

	if (net_if_flag_is_set(iface, NET_IF_LOWER_UP)) {
		if (IS_ENABLED(CONFIG_NET_TCP) &&
		    net_pkt_family(pkt) != AF_UNSPEC) {
			net_pkt_set_queued(pkt, false);
		}

		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
			/* Capture the priority before send() may release
			 * the packet.
			 */
			pkt_priority = net_pkt_priority(pkt);

			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
				/* Make sure the statistics information is not
				 * lost by keeping the net_pkt over L2 send.
				 */
				net_pkt_ref(pkt);
			}
		}

		status = net_if_l2(iface)->send(iface, pkt);

		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
			uint32_t end_tick = k_cycle_get_32();

			net_pkt_set_tx_stats_tick(pkt, end_tick);

			net_stats_update_tc_tx_time(iface,
						    pkt_priority,
						    create_time,
						    end_tick);

			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
				update_txtime_stats_detail(
					pkt,
					create_time,
					end_tick);

				net_stats_update_tc_tx_time_detail(
					iface, pkt_priority,
					net_pkt_stats_tick(pkt));

				/* For TCP connections, we might keep the pkt
				 * longer so that we can resend it if needed.
				 * Because of that we need to clear the
				 * statistics here.
				 */
				net_pkt_stats_tick_reset(pkt);

				/* Drop the extra reference taken above */
				net_pkt_unref(pkt);
			}
		}

	} else {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		status = -ENETDOWN;
	}

	/* Negative status means the packet was not sent: release it.
	 * Otherwise status is treated as the number of bytes sent.
	 */
	if (status < 0) {
		net_pkt_unref(pkt);
	} else {
		net_stats_update_bytes_sent(iface, status);
	}

	if (context) {
		NET_DBG("Calling context send cb %p status %d",
			context, status);

		net_context_send_cb(context, status);
	}

	/* Use the copy made above; the packet may already be freed here */
	if (ll_dst.addr) {
		net_if_call_link_cb(iface, &ll_dst, status);
	}

	return true;
}
323
/* TX queue worker entry point: stamp the packet with the queue-exit
 * tick, hand it to the driver via net_if_tx(), and decrement the
 * power-management pending-TX counter incremented in net_if_queue_tx().
 */
void net_process_tx_packet(struct net_pkt *pkt)
{
	struct net_if *iface;

	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

	iface = net_pkt_iface(pkt);

	net_if_tx(iface, pkt);

#if defined(CONFIG_NET_POWER_MANAGEMENT)
	iface->tx_pending--;
#endif
}
338
/* Queue @p pkt for transmission on @p iface: run the TX packet filter,
 * update per-traffic-class statistics, then either push the packet
 * directly to the driver or submit it to the traffic-class TX queue.
 */
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
{
	if (!net_pkt_filter_send_ok(pkt)) {
		/* silently drop the packet */
		net_pkt_unref(pkt);
		return;
	}

	uint8_t prio = net_pkt_priority(pkt);
	uint8_t tc = net_tx_priority2tc(prio);

	net_stats_update_tc_sent_pkt(iface, tc);
	net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
	net_stats_update_tc_sent_priority(iface, tc, prio);

	/* For highest priority packet, skip the TX queue and push directly to
	 * the driver. Also if there are no TX queue/thread, push the packet
	 * directly to the driver.
	 */
	if ((IS_ENABLED(CONFIG_NET_TC_SKIP_FOR_HIGH_PRIO) &&
	     prio == NET_PRIORITY_CA) || NET_TC_TX_COUNT == 0) {
		net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

		net_if_tx(net_pkt_iface(pkt), pkt);
		return;
	}

#if NET_TC_TX_COUNT > 1
	NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
#endif

#if defined(CONFIG_NET_POWER_MANAGEMENT)
	iface->tx_pending++;
#endif

	if (!net_tc_submit_to_tx_queue(tc, pkt)) {
		/* Submission failed: undo the pending-TX accounting.
		 * The lone ';' outside the #if keeps the statement valid
		 * when power management is compiled out.
		 */
#if defined(CONFIG_NET_POWER_MANAGEMENT)
		iface->tx_pending--
#endif
		;
	}
}
381
/* Zero the statistics of @p iface, provided it is one of the interfaces
 * in the net_if section. No-op when per-interface statistics are
 * disabled or the pointer is not a registered interface.
 */
void net_if_stats_reset(struct net_if *iface)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	STRUCT_SECTION_FOREACH(net_if, tmp) {
		if (iface == tmp) {
			net_if_lock(iface);
			memset(&iface->stats, 0, sizeof(iface->stats));
			net_if_unlock(iface);
			return;
		}
	}
#else
	ARG_UNUSED(iface);
#endif
}
397
/* Zero the statistics of every interface in the net_if section.
 * No-op when per-interface statistics are disabled.
 */
void net_if_stats_reset_all(void)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	STRUCT_SECTION_FOREACH(net_if, iface) {
		net_if_lock(iface);
		memset(&iface->stats, 0, sizeof(iface->stats));
		net_if_unlock(iface);
	}
#endif
}
408
/* Bring a freshly registered interface to its initial state (default
 * flags, virtual-L2 setup, kernel-object init, lock init) and call the
 * driver's init hook. Interfaces whose device API lacks an init
 * function are left untouched.
 */
static inline void init_iface(struct net_if *iface)
{
	const struct net_if_api *api = net_if_get_device(iface)->api;

	if (!api || !api->init) {
		NET_ERR("Iface %p driver API init NULL", iface);
		return;
	}

	/* By default IPv4 and IPv6 are enabled for a given network interface.
	 * These can be turned off later if needed.
	 */
#if defined(CONFIG_NET_NATIVE_IPV4)
	net_if_flag_set(iface, NET_IF_IPV4);
#endif
#if defined(CONFIG_NET_NATIVE_IPV6)
	net_if_flag_set(iface, NET_IF_IPV6);
#endif

	/* Assume the lower layer is up until the driver reports otherwise */
	net_if_flag_test_and_set(iface, NET_IF_LOWER_UP);

	net_virtual_init(iface);

	NET_DBG("On iface %p", iface);

#ifdef CONFIG_USERSPACE
	z_object_init(iface);
#endif

	k_mutex_init(&iface->lock);

	api->init(iface);
}
442
/* Validate and dispatch an outgoing packet on @p iface.
 *
 * Returns NET_OK when the packet was queued for transmission, NET_DROP
 * when it was rejected (the context send callback is invoked with
 * -ENETDOWN or the default -EIO), or NET_CONTINUE when sending is
 * delayed (e.g. pending IPv6 neighbor discovery).
 */
enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_context *context = net_pkt_context(pkt);
	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
	enum net_verdict verdict = NET_OK;
	int status = -EIO;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_LOWER_UP) ||
	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		verdict = NET_DROP;
		status = -ENETDOWN;
		goto done;
	}

	if (IS_ENABLED(CONFIG_NET_OFFLOAD) && !net_if_l2(iface)) {
		NET_WARN("no l2 for iface %p, discard pkt", iface);
		verdict = NET_DROP;
		goto done;
	}

	/* If the ll address is not set at all, then we must set
	 * it here.
	 * Workaround Linux bug, see:
	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
	 */
	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
	    !net_pkt_lladdr_src(pkt)->addr) {
		net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
		net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
	}

#if defined(CONFIG_NET_LOOPBACK)
	/* If the packet is destined back to us, then there is no need to do
	 * additional checks, so let the packet through.
	 */
	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
		goto done;
	}
#endif

	/* Bypass the IP stack with SOCK_RAW/IPPROTO_RAW sockets */
	if (IS_ENABLED(CONFIG_NET_SOCKETS_PACKET) &&
	    context && net_context_get_type(context) == SOCK_RAW &&
	    net_context_get_proto(context) == IPPROTO_RAW) {
		goto done;
	}

	/* If the ll dst address is not set check if it is present in the nbr
	 * cache.
	 */
	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		verdict = net_ipv6_prepare_for_send(pkt);
	}

#if defined(CONFIG_NET_IPV4_FRAGMENT)
	if (net_pkt_family(pkt) == AF_INET) {
		verdict = net_ipv4_prepare_for_send(pkt);
	}
#endif

done:
	/* NET_OK in which case packet has checked successfully. In this case
	 * the net_context callback is called after successful delivery in
	 * net_if_tx_thread().
	 *
	 * NET_DROP in which case we call net_context callback that will
	 * give the status to user application.
	 *
	 * NET_CONTINUE in which case the sending of the packet is delayed.
	 * This can happen for example if we need to do IPv6 ND to figure
	 * out link layer address.
	 */
	if (verdict == NET_DROP) {
		if (context) {
			NET_DBG("Calling ctx send cb %p verdict %d",
				context, verdict);
			net_context_send_cb(context, status);
		}

		if (dst->addr) {
			net_if_call_link_cb(iface, dst, status);
		}
	} else if (verdict == NET_OK) {
		/* Packet is ready to be sent by L2, let's queue */
		net_if_queue_tx(iface, pkt);
	}

	net_if_unlock(iface);

	return verdict;
}
538
/* Same as net_if_set_link_addr_unlocked() but takes the interface lock
 * around the update. Returns whatever the unlocked variant returns.
 */
int net_if_set_link_addr_locked(struct net_if *iface,
				uint8_t *addr, uint8_t len,
				enum net_link_type type)
{
	int status;

	net_if_lock(iface);
	status = net_if_set_link_addr_unlocked(iface, addr, len, type);
	net_if_unlock(iface);

	return status;
}
553
/* Find the interface whose link-layer address matches @p ll_addr, or
 * NULL when none matches.
 *
 * NOTE(review): only ll_addr->len bytes are compared and the stored
 * address length is never checked against ll_addr->len — confirm that
 * callers always pass a full-length address for the medium in use.
 */
struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		net_if_lock(iface);
		if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
			    ll_addr->len)) {
			net_if_unlock(iface);
			return iface;
		}
		net_if_unlock(iface);
	}

	return NULL;
}
568
net_if_lookup_by_dev(const struct device * dev)569 struct net_if *net_if_lookup_by_dev(const struct device *dev)
570 {
571 STRUCT_SECTION_FOREACH(net_if, iface) {
572 if (net_if_get_device(iface) == dev) {
573 return iface;
574 }
575 }
576
577 return NULL;
578 }
579
/* Override the interface returned by net_if_get_default(). */
void net_if_set_default(struct net_if *iface)
{
	default_iface = iface;
}
584
/* Return the default network interface.
 *
 * Selection order: an interface set via net_if_set_default(), then the
 * Kconfig-chosen default type — note the #if blocks below execute in
 * sequence, so with several options enabled the last one wins — and
 * finally the first interface in the net_if section. Returns NULL only
 * when no interfaces exist at all.
 */
struct net_if *net_if_get_default(void)
{
	struct net_if *iface = NULL;

	if (&_net_if_list_start[0] == &_net_if_list_end[0]) {
		/* Empty net_if section: no interfaces registered */
		return NULL;
	}

	if (default_iface != NULL) {
		return default_iface;
	}

#if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_BLUETOOTH)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(BLUETOOTH));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
	iface = net_if_get_first_by_type(NULL);
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_PPP)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_UP)
	iface = net_if_get_first_up();
#endif

	/* Fall back to the first registered interface */
	return iface ? iface : _net_if_list_start;
}
624
net_if_get_first_by_type(const struct net_l2 * l2)625 struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
626 {
627 STRUCT_SECTION_FOREACH(net_if, iface) {
628 if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
629 !l2 && net_if_offload(iface)) {
630 return iface;
631 }
632
633 if (net_if_l2(iface) == l2) {
634 return iface;
635 }
636 }
637
638 return NULL;
639 }
640
net_if_get_first_up(void)641 struct net_if *net_if_get_first_up(void)
642 {
643 STRUCT_SECTION_FOREACH(net_if, iface) {
644 if (net_if_flag_is_set(iface, NET_IF_UP)) {
645 return iface;
646 }
647 }
648
649 return NULL;
650 }
651
l2_flags_get(struct net_if * iface)652 static enum net_l2_flags l2_flags_get(struct net_if *iface)
653 {
654 enum net_l2_flags flags = 0;
655
656 if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
657 flags = net_if_l2(iface)->get_flags(iface);
658 }
659
660 return flags;
661 }
662
663 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
664 /* Return how many bits are shared between two IP addresses */
/* Return how many bits are shared between two IP addresses */
static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len)
{
	uint8_t common = 0U;

	for (int i = 0; i < addr_len; i++) {
		uint8_t diff = src[i] ^ dst[i];

		if (diff == 0U) {
			/* Whole byte matches */
			common += 8U;
			continue;
		}

		/* Count matching bits from the most significant end of
		 * the first differing byte, then stop.
		 */
		while ((diff & 0x80) == 0U) {
			common++;
			diff <<= 1;
		}

		break;
	}

	return common;
}
689
/* Find the router table entry matching @p iface, @p family and @p addr
 * (interpreted as in6_addr or in_addr depending on family). Returns
 * NULL when no entry matches. Takes the global lock.
 */
static struct net_if_router *iface_router_lookup(struct net_if *iface,
						 uint8_t family, void *addr)
{
	struct net_if_router *router = NULL;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		if (!routers[i].is_used ||
		    routers[i].address.family != family ||
		    routers[i].iface != iface) {
			continue;
		}

		if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
		     net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
				       (struct in6_addr *)addr)) ||
		    (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
		     net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
				       (struct in_addr *)addr))) {
			router = &routers[i];
			goto out;
		}
	}

out:
	k_mutex_unlock(&lock);

	return router;
}
721
iface_router_notify_deletion(struct net_if_router * router,const char * delete_reason)722 static void iface_router_notify_deletion(struct net_if_router *router,
723 const char *delete_reason)
724 {
725 if (IS_ENABLED(CONFIG_NET_IPV6) &&
726 router->address.family == AF_INET6) {
727 NET_DBG("IPv6 router %s %s",
728 net_sprint_ipv6_addr(net_if_router_ipv6(router)),
729 delete_reason);
730
731 net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
732 router->iface,
733 &router->address.in6_addr,
734 sizeof(struct in6_addr));
735 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
736 router->address.family == AF_INET) {
737 NET_DBG("IPv4 router %s %s",
738 net_sprint_ipv4_addr(net_if_router_ipv4(router)),
739 delete_reason);
740
741 net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
742 router->iface,
743 &router->address.in_addr,
744 sizeof(struct in6_addr));
745 }
746 }
747
iface_router_ends(const struct net_if_router * router,uint32_t now)748 static inline int32_t iface_router_ends(const struct net_if_router *router,
749 uint32_t now)
750 {
751 uint32_t ends = router->life_start;
752
753 ends += MSEC_PER_SEC * router->lifetime;
754
755 /* Signed number of ms until router lifetime ends */
756 return (int32_t)(ends - now);
757 }
758
/* Re-arm (or cancel) router_timer so it fires when the next
 * time-limited router expires. @p now is a k_uptime_get_32() value.
 * Cancels the timer when no active entries remain.
 */
static void iface_router_update_timer(uint32_t now)
{
	struct net_if_router *router, *next;
	uint32_t new_delay = UINT32_MAX;

	k_mutex_lock(&lock, K_FOREVER);

	/* Find the smallest remaining lifetime among the active entries */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  router, next, node) {
		int32_t ends = iface_router_ends(router, now);

		if (ends <= 0) {
			/* Already expired: fire immediately */
			new_delay = 0;
			break;
		}

		new_delay = MIN((uint32_t)ends, new_delay);
	}

	if (new_delay == UINT32_MAX) {
		k_work_cancel_delayable(&router_timer);
	} else {
		k_work_reschedule(&router_timer, K_MSEC(new_delay));
	}

	k_mutex_unlock(&lock);
}
786
/* Work handler for router_timer: release every router whose lifetime
 * has elapsed, notify its deletion, then re-arm the timer for the
 * remaining entries.
 */
static void iface_router_expired(struct k_work *work)
{
	uint32_t current_time = k_uptime_get_32();
	struct net_if_router *router, *next;
	/* Last node kept in the list, so sys_slist_remove() can unlink
	 * the current node in O(1).
	 */
	sys_snode_t *prev_node = NULL;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  router, next, node) {
		int32_t ends = iface_router_ends(router, current_time);

		if (ends > 0) {
			/* We have to loop on all active routers as their
			 * lifetime differ from each other.
			 */
			prev_node = &router->node;
			continue;
		}

		iface_router_notify_deletion(router, "has expired");
		sys_slist_remove(&active_router_timers,
				 prev_node, &router->node);
		router->is_used = false;
	}

	iface_router_update_timer(current_time);

	k_mutex_unlock(&lock);
}
819
/* Allocate a free slot in the router table for the given address and
 * interface, arm the lifetime timer for time-limited entries, and emit
 * the corresponding ROUTER_ADD management event.
 *
 * A non-zero @p lifetime makes the entry time limited; for IPv6 the
 * is_default flag is then derived from the lifetime, while the IPv4
 * branch honours the @p is_default parameter. Returns the new entry or
 * NULL when the table is full.
 */
static struct net_if_router *iface_router_add(struct net_if *iface,
					      uint8_t family, void *addr,
					      bool is_default,
					      uint16_t lifetime)
{
	struct net_if_router *router = NULL;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		if (routers[i].is_used) {
			continue;
		}

		routers[i].is_used = true;
		routers[i].iface = iface;
		routers[i].address.family = family;

		if (lifetime) {
			routers[i].is_default = true;
			routers[i].is_infinite = false;
			routers[i].lifetime = lifetime;
			routers[i].life_start = k_uptime_get_32();

			sys_slist_append(&active_router_timers,
					 &routers[i].node);

			iface_router_update_timer(routers[i].life_start);
		} else {
			routers[i].is_default = false;
			routers[i].is_infinite = true;
			routers[i].lifetime = 0;
		}

		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
			memcpy(net_if_router_ipv6(&routers[i]), addr,
			       sizeof(struct in6_addr));
			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV6_ROUTER_ADD, iface,
					&routers[i].address.in6_addr,
					sizeof(struct in6_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				net_sprint_ipv6_addr((struct in6_addr *)addr),
				lifetime, routers[i].is_default);
		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
			memcpy(net_if_router_ipv4(&routers[i]), addr,
			       sizeof(struct in_addr));
			routers[i].is_default = is_default;

			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV4_ROUTER_ADD, iface,
					&routers[i].address.in_addr,
					sizeof(struct in_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				net_sprint_ipv4_addr((struct in_addr *)addr),
				lifetime, is_default);
		}

		router = &routers[i];
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return router;
}
892
iface_router_rm(struct net_if_router * router)893 static bool iface_router_rm(struct net_if_router *router)
894 {
895 bool ret = false;
896
897 k_mutex_lock(&lock, K_FOREVER);
898
899 if (!router->is_used) {
900 goto out;
901 }
902
903 iface_router_notify_deletion(router, "has been removed");
904
905 /* We recompute the timer if only the router was time limited */
906 if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
907 iface_router_update_timer(k_uptime_get_32());
908 }
909
910 router->is_used = false;
911 ret = true;
912
913 out:
914 k_mutex_unlock(&lock);
915
916 return ret;
917 }
918
net_if_router_rm(struct net_if_router * router)919 void net_if_router_rm(struct net_if_router *router)
920 {
921 k_mutex_lock(&lock, K_FOREVER);
922
923 router->is_used = false;
924
925 /* FIXME - remove timer */
926
927 k_mutex_unlock(&lock);
928 }
929
iface_router_find_default(struct net_if * iface,uint8_t family,void * addr)930 static struct net_if_router *iface_router_find_default(struct net_if *iface,
931 uint8_t family, void *addr)
932 {
933 struct net_if_router *router = NULL;
934 int i;
935
936 /* Todo: addr will need to be handled */
937 ARG_UNUSED(addr);
938
939 k_mutex_lock(&lock, K_FOREVER);
940
941 for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
942 if (!routers[i].is_used ||
943 !routers[i].is_default ||
944 routers[i].address.family != family) {
945 continue;
946 }
947
948 if (iface && iface != routers[i].iface) {
949 continue;
950 }
951
952 router = &routers[i];
953 goto out;
954 }
955
956 out:
957 k_mutex_unlock(&lock);
958
959 return router;
960 }
961
/* One-time setup of the shared router lifetime machinery. */
static void iface_router_init(void)
{
	k_work_init_delayable(&router_timer, iface_router_expired);
	sys_slist_init(&active_router_timers);
}
967 #else
968 #define iface_router_init(...)
969 #endif
970
971 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
/* Register a multicast join/leave monitor callback for @p iface.
 * The find-and-remove before the prepend makes re-registration of the
 * same monitor idempotent.
 */
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
			       struct net_if *iface,
			       net_if_mcast_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
	sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);

	mon->iface = iface;
	mon->cb = cb;

	k_mutex_unlock(&lock);
}
986
/* Remove a previously registered multicast monitor; safe to call even
 * if the monitor was never registered.
 */
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);

	k_mutex_unlock(&lock);
}
995
/* Invoke every monitor registered for @p iface with the multicast
 * address and whether the group was joined or left.
 *
 * Note: callbacks run with the global lock held, so they must not
 * re-enter the monitor registration functions from another context in
 * a way that would deadlock.
 */
void net_if_mcast_monitor(struct net_if *iface,
			  const struct net_addr *addr,
			  bool is_joined)
{
	struct net_if_mcast_monitor *mon, *tmp;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
					  mon, tmp, node) {
		if (iface == mon->iface) {
			mon->cb(iface, addr, is_joined);
		}
	}

	k_mutex_unlock(&lock);
}
1013 #endif
1014
1015 #if defined(CONFIG_NET_NATIVE_IPV6)
/* Bind an IPv6 configuration to @p iface, allocating a slot from the
 * shared ipv6_addresses[] pool on first use. On success, *ipv6 (when
 * non-NULL) is set to point at the interface's IPv6 config.
 *
 * Returns 0 on success, -ENOTSUP when the interface has IPv6 disabled,
 * or -ESRCH when the pool is exhausted.
 */
int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
{
	int ret = 0;
	int i;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
		ret = -ENOTSUP;
		goto out;
	}

	/* Already bound: just hand out the existing config */
	if (iface->config.ip.ipv6) {
		if (ipv6) {
			*ipv6 = iface->config.ip.ipv6;
		}

		goto out;
	}

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
		if (ipv6_addresses[i].iface) {
			continue;
		}

		iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
		ipv6_addresses[i].iface = iface;

		if (ipv6) {
			*ipv6 = &ipv6_addresses[i].ipv6;
		}

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
1062
/* Release the IPv6 configuration slot bound to @p iface back into the
 * shared pool.
 *
 * Returns 0 on success, -ENOTSUP when the interface has IPv6 disabled,
 * -EALREADY when no config was assigned, or -ESRCH when the assigned
 * config was not found in the pool.
 */
int net_if_config_ipv6_put(struct net_if *iface)
{
	int ret = 0;
	int i;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
		ret = -ENOTSUP;
		goto out;
	}

	if (!iface->config.ip.ipv6) {
		ret = -EALREADY;
		goto out;
	}

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
		if (ipv6_addresses[i].iface != iface) {
			continue;
		}

		iface->config.ip.ipv6 = NULL;
		ipv6_addresses[i].iface = NULL;

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
1102
1103 #if defined(CONFIG_NET_IPV6_MLD)
join_mcast_allnodes(struct net_if * iface)1104 static void join_mcast_allnodes(struct net_if *iface)
1105 {
1106 struct in6_addr addr;
1107 int ret;
1108
1109 net_ipv6_addr_create_ll_allnodes_mcast(&addr);
1110
1111 ret = net_ipv6_mld_join(iface, &addr);
1112 if (ret < 0 && ret != -EALREADY) {
1113 NET_ERR("Cannot join all nodes address %s (%d)",
1114 net_sprint_ipv6_addr(&addr), ret);
1115 }
1116 }
1117
join_mcast_solicit_node(struct net_if * iface,struct in6_addr * my_addr)1118 static void join_mcast_solicit_node(struct net_if *iface,
1119 struct in6_addr *my_addr)
1120 {
1121 struct in6_addr addr;
1122 int ret;
1123
1124 /* Join to needed multicast groups, RFC 4291 ch 2.8 */
1125 net_ipv6_addr_create_solicited_node(my_addr, &addr);
1126
1127 ret = net_ipv6_mld_join(iface, &addr);
1128 if (ret < 0 && ret != -EALREADY) {
1129 NET_ERR("Cannot join solicit node address %s (%d)",
1130 net_sprint_ipv6_addr(&addr), ret);
1131 }
1132 }
1133
leave_mcast_all(struct net_if * iface)1134 static void leave_mcast_all(struct net_if *iface)
1135 {
1136 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1137 int i;
1138
1139 if (!ipv6) {
1140 return;
1141 }
1142
1143 for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1144 if (!ipv6->mcast[i].is_used ||
1145 !ipv6->mcast[i].is_joined) {
1146 continue;
1147 }
1148
1149 net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
1150 }
1151 }
1152
join_mcast_nodes(struct net_if * iface,struct in6_addr * addr)1153 static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
1154 {
1155 enum net_l2_flags flags = 0;
1156
1157 flags = l2_flags_get(iface);
1158 if (flags & NET_L2_MULTICAST) {
1159 join_mcast_allnodes(iface);
1160
1161 if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
1162 join_mcast_solicit_node(iface, addr);
1163 }
1164 }
1165 }
1166 #else
1167 #define join_mcast_allnodes(...)
1168 #define join_mcast_solicit_node(...)
1169 #define leave_mcast_all(...)
1170 #define join_mcast_nodes(...)
1171 #endif /* CONFIG_NET_IPV6_MLD */
1172
1173 #if defined(CONFIG_NET_IPV6_DAD)
1174 #define DAD_TIMEOUT 100U /* ms */
1175
/* Work handler for the shared DAD timer.
 *
 * Phase 1 (global lock held): move every address whose DAD window
 * (DAD_TIMEOUT ms from dad_start) has elapsed onto a local list and
 * re-arm the timer for the next still-pending entry.
 *
 * Phase 2 (lock released): mark each collected address as preferred,
 * emit NET_EVENT_IPV6_DAD_SUCCEED and drop the matching neighbor cache
 * entry.
 */
static void dad_timeout(struct k_work *work)
{
	uint32_t current_time = k_uptime_get_32();
	struct net_if_addr *ifaddr, *next;
	int32_t delay = -1;
	sys_slist_t expired_list;

	ARG_UNUSED(work);

	sys_slist_init(&expired_list);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
					  ifaddr, next, dad_node) {
		/* DAD entries are ordered by construction. Stop when
		 * we find one that hasn't expired.
		 */
		delay = (int32_t)(ifaddr->dad_start +
				  DAD_TIMEOUT - current_time);
		if (delay > 0) {
			break;
		}

		/* Removing the ifaddr from active_dad_timers list */
		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);
		sys_slist_append(&expired_list, &ifaddr->dad_node);

		/* NULL marks "everything expired" for the check below */
		ifaddr = NULL;
	}

	if ((ifaddr != NULL) && (delay > 0)) {
		k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
	}

	k_mutex_unlock(&lock);

	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ifaddr, dad_node) {
		struct net_if_addr *tmp;
		struct net_if *iface;

		NET_DBG("DAD succeeded for %s",
			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));

		ifaddr->addr_state = NET_ADDR_PREFERRED;

		/* Because we do not know the interface at this point,
		 * we need to lookup for it.
		 */
		iface = NULL;
		tmp = net_if_ipv6_addr_lookup(&ifaddr->address.in6_addr,
					      &iface);
		if (tmp == ifaddr) {
			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV6_DAD_SUCCEED,
					iface, &ifaddr->address.in6_addr,
					sizeof(struct in6_addr));

			/* The address gets added to neighbor cache which is not
			 * needed in this case as the address is our own one.
			 */
			net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
		}
	}
}
1241
/* Start Duplicate Address Detection for @ifaddr on @iface.
 *
 * The address is marked tentative; if the interface is up and the
 * first DAD solicitation is sent successfully, the address is added
 * to the active_dad_timers list and the shared DAD work item is armed
 * (only if not already pending, so earlier deadlines are not pushed
 * back).  If the interface is down, DAD is deferred until it comes up.
 */
static void net_if_ipv6_start_dad(struct net_if *iface,
				  struct net_if_addr *ifaddr)
{
	ifaddr->addr_state = NET_ADDR_TENTATIVE;

	if (net_if_is_up(iface)) {
		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
			iface,
			net_sprint_ll_addr(
				net_if_get_link_addr(iface)->addr,
				net_if_get_link_addr(iface)->len),
			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));

		ifaddr->dad_count = 1U;

		/* net_ipv6_start_dad() returns 0 on success (the NS
		 * was sent); only then is the timeout tracked.
		 */
		if (!net_ipv6_start_dad(iface, ifaddr)) {
			ifaddr->dad_start = k_uptime_get_32();

			k_mutex_lock(&lock, K_FOREVER);
			sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
			k_mutex_unlock(&lock);

			/* FUTURE: use schedule, not reschedule. */
			if (!k_work_delayable_remaining_get(&dad_timer)) {
				k_work_reschedule(&dad_timer,
						  K_MSEC(DAD_TIMEOUT));
			}
		}
	} else {
		NET_DBG("Interface %p is down, starting DAD for %s later.",
			iface,
			net_sprint_ipv6_addr(&ifaddr->address.in6_addr));
	}
}
1276
/* Kick off DAD for an interface: create the link-local (IID-derived)
 * address and then (re)start DAD for every other unicast address that
 * was configured earlier, e.g. while the interface was still down.
 */
void net_if_start_dad(struct net_if *iface)
{
	struct net_if_addr *ifaddr;
	struct net_if_ipv6 *ipv6;
	struct in6_addr addr = { };
	int ret, i;

	net_if_lock(iface);

	NET_DBG("Starting DAD for iface %p", iface);

	ret = net_if_config_ipv6_get(iface, &ipv6);
	if (ret < 0) {
		/* -ENOTSUP: interface has no IPv6 support; stay quiet. */
		if (ret != -ENOTSUP) {
			NET_WARN("Cannot do DAD IPv6 config is not valid.");
		}

		goto out;
	}

	if (!ipv6) {
		goto out;
	}

	/* Build the link-local address from the interface identifier. */
	net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));

	/* net_if_ipv6_addr_add() itself triggers DAD for this address. */
	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
	if (!ifaddr) {
		NET_ERR("Cannot add %s address to interface %p, DAD fails",
			net_sprint_ipv6_addr(&addr), iface);
	}

	/* Start DAD for all the addresses that were added earlier when
	 * the interface was down.
	 */
	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!ipv6->unicast[i].is_used ||
		    ipv6->unicast[i].address.family != AF_INET6 ||
		    &ipv6->unicast[i] == ifaddr ||
		    net_ipv6_is_addr_loopback(
			    &ipv6->unicast[i].address.in6_addr)) {
			continue;
		}

		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
	}

out:
	net_if_unlock(iface);
}
1327
net_if_ipv6_dad_failed(struct net_if * iface,const struct in6_addr * addr)1328 void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
1329 {
1330 struct net_if_addr *ifaddr;
1331
1332 net_if_lock(iface);
1333
1334 ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
1335 if (!ifaddr) {
1336 NET_ERR("Cannot find %s address in interface %p",
1337 net_sprint_ipv6_addr(addr), iface);
1338 goto out;
1339 }
1340
1341
1342 k_mutex_lock(&lock, K_FOREVER);
1343 sys_slist_find_and_remove(&active_dad_timers, &ifaddr->dad_node);
1344 k_mutex_unlock(&lock);
1345
1346 net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
1347 &ifaddr->address.in6_addr,
1348 sizeof(struct in6_addr));
1349
1350 net_if_ipv6_addr_rm(iface, addr);
1351
1352 out:
1353 net_if_unlock(iface);
1354 }
1355
iface_ipv6_dad_init(void)1356 static inline void iface_ipv6_dad_init(void)
1357 {
1358 k_work_init_delayable(&dad_timer, dad_timeout);
1359 sys_slist_init(&active_dad_timers);
1360 }
1361
1362 #else
/* DAD disabled at build time: addresses are usable (preferred)
 * immediately without any tentative phase.
 */
static inline void net_if_ipv6_start_dad(struct net_if *iface,
					 struct net_if_addr *ifaddr)
{
	ifaddr->addr_state = NET_ADDR_PREFERRED;
}
1368
1369 #define iface_ipv6_dad_init(...)
1370 #endif /* CONFIG_NET_IPV6_DAD */
1371
1372 #if defined(CONFIG_NET_IPV6_ND)
1373 #define RS_TIMEOUT (1U * MSEC_PER_SEC)
1374 #define RS_COUNT 3
1375
rs_timeout(struct k_work * work)1376 static void rs_timeout(struct k_work *work)
1377 {
1378 uint32_t current_time = k_uptime_get_32();
1379 struct net_if_ipv6 *ipv6, *next;
1380 int32_t delay = -1;
1381 sys_slist_t expired_list;
1382
1383 ARG_UNUSED(work);
1384
1385 sys_slist_init(&expired_list);
1386
1387 k_mutex_lock(&lock, K_FOREVER);
1388
1389 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
1390 ipv6, next, rs_node) {
1391 /* RS entries are ordered by construction. Stop when
1392 * we find one that hasn't expired.
1393 */
1394 delay = (int32_t)(ipv6->rs_start + RS_TIMEOUT - current_time);
1395 if (delay > 0) {
1396 break;
1397 }
1398
1399 /* Removing the ipv6 from active_rs_timers list */
1400 sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);
1401 sys_slist_append(&expired_list, &ipv6->rs_node);
1402
1403 ipv6 = NULL;
1404 }
1405
1406 if ((ipv6 != NULL) && (delay > 0)) {
1407 k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
1408 RS_TIMEOUT - current_time));
1409 }
1410
1411 k_mutex_unlock(&lock);
1412
1413 SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, ipv6, rs_node) {
1414 struct net_if *iface = NULL;
1415
1416 /* Did not receive RA yet. */
1417 ipv6->rs_count++;
1418
1419 STRUCT_SECTION_FOREACH(net_if, tmp) {
1420 if (tmp->config.ip.ipv6 == ipv6) {
1421 iface = tmp;
1422 break;
1423 }
1424 }
1425
1426 if (iface) {
1427 NET_DBG("RS no respond iface %p count %d",
1428 iface, ipv6->rs_count);
1429 if (ipv6->rs_count < RS_COUNT) {
1430 net_if_start_rs(iface);
1431 }
1432 } else {
1433 NET_DBG("Interface IPv6 config %p not found", ipv6);
1434 }
1435 }
1436 }
1437
/* Send a Router Solicitation on @iface and arm the retransmit timer.
 *
 * Does nothing when ND is disabled for the interface or IPv6 is not
 * configured.  The timer is only armed if the RS was actually sent,
 * and only rescheduled when not already pending so that earlier
 * deadlines are not pushed back.
 */
void net_if_start_rs(struct net_if *iface)
{
	struct net_if_ipv6 *ipv6;

	net_if_lock(iface);

	if (net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
		goto out;
	}

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	NET_DBG("Starting ND/RS for iface %p", iface);

	/* net_ipv6_start_rs() returns 0 on success. */
	if (!net_ipv6_start_rs(iface)) {
		ipv6->rs_start = k_uptime_get_32();

		k_mutex_lock(&lock, K_FOREVER);
		sys_slist_append(&active_rs_timers, &ipv6->rs_node);
		k_mutex_unlock(&lock);

		/* FUTURE: use schedule, not reschedule. */
		if (!k_work_delayable_remaining_get(&rs_timer)) {
			k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
		}
	}

out:
	net_if_unlock(iface);
}
1471
net_if_stop_rs(struct net_if * iface)1472 void net_if_stop_rs(struct net_if *iface)
1473 {
1474 struct net_if_ipv6 *ipv6;
1475
1476 net_if_lock(iface);
1477
1478 ipv6 = iface->config.ip.ipv6;
1479 if (!ipv6) {
1480 goto out;
1481 }
1482
1483 NET_DBG("Stopping ND/RS for iface %p", iface);
1484
1485 k_mutex_lock(&lock, K_FOREVER);
1486 sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
1487 k_mutex_unlock(&lock);
1488
1489 out:
1490 net_if_unlock(iface);
1491 }
1492
iface_ipv6_nd_init(void)1493 static inline void iface_ipv6_nd_init(void)
1494 {
1495 k_work_init_delayable(&rs_timer, rs_timeout);
1496 sys_slist_init(&active_rs_timers);
1497 }
1498
1499 #else
1500 #define net_if_start_rs(...)
1501 #define net_if_stop_rs(...)
1502 #define iface_ipv6_nd_init(...)
1503 #endif /* CONFIG_NET_IPV6_ND */
1504
net_if_ipv6_addr_lookup(const struct in6_addr * addr,struct net_if ** ret)1505 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
1506 struct net_if **ret)
1507 {
1508 struct net_if_addr *ifaddr = NULL;
1509
1510 STRUCT_SECTION_FOREACH(net_if, iface) {
1511 struct net_if_ipv6 *ipv6;
1512 int i;
1513
1514 net_if_lock(iface);
1515
1516 ipv6 = iface->config.ip.ipv6;
1517 if (!ipv6) {
1518 net_if_unlock(iface);
1519 continue;
1520 }
1521
1522 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1523 if (!ipv6->unicast[i].is_used ||
1524 ipv6->unicast[i].address.family != AF_INET6) {
1525 continue;
1526 }
1527
1528 if (net_ipv6_is_prefix(
1529 addr->s6_addr,
1530 ipv6->unicast[i].address.in6_addr.s6_addr,
1531 128)) {
1532
1533 if (ret) {
1534 *ret = iface;
1535 }
1536
1537 ifaddr = &ipv6->unicast[i];
1538 net_if_unlock(iface);
1539 goto out;
1540 }
1541 }
1542
1543 net_if_unlock(iface);
1544 }
1545
1546 out:
1547 return ifaddr;
1548 }
1549
net_if_ipv6_addr_lookup_by_iface(struct net_if * iface,struct in6_addr * addr)1550 struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
1551 struct in6_addr *addr)
1552 {
1553 struct net_if_addr *ifaddr = NULL;
1554 struct net_if_ipv6 *ipv6;
1555 int i;
1556
1557 net_if_lock(iface);
1558
1559 ipv6 = iface->config.ip.ipv6;
1560 if (!ipv6) {
1561 goto out;
1562 }
1563
1564 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1565 if (!ipv6->unicast[i].is_used ||
1566 ipv6->unicast[i].address.family != AF_INET6) {
1567 continue;
1568 }
1569
1570 if (net_ipv6_is_prefix(
1571 addr->s6_addr,
1572 ipv6->unicast[i].address.in6_addr.s6_addr,
1573 128)) {
1574 ifaddr = &ipv6->unicast[i];
1575 goto out;
1576 }
1577 }
1578
1579 out:
1580 net_if_unlock(iface);
1581
1582 return ifaddr;
1583 }
1584
z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr * addr)1585 int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
1586 {
1587 struct net_if *iface = NULL;
1588 struct net_if_addr *if_addr;
1589
1590 if_addr = net_if_ipv6_addr_lookup(addr, &iface);
1591 if (!if_addr) {
1592 return 0;
1593 }
1594
1595 return net_if_get_by_iface(iface);
1596 }
1597
1598 #ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: copy @addr from user memory into a
 * kernel-side buffer (oops on a bad pointer) before delegating to
 * the implementation.
 */
static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
	const struct in6_addr *addr)
{
	struct in6_addr addr_v6;

	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
}
1608 #include <syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
1609 #endif
1610
/* Handle an address whose lifetime ran out: mark it deprecated and
 * stop tracking its lifetime timer.  Called from
 * address_lifetime_timeout() with the global mutex held.
 */
static void address_expired(struct net_if_addr *ifaddr)
{
	NET_DBG("IPv6 address %s is deprecated",
		net_sprint_ipv6_addr(&ifaddr->address.in6_addr));

	ifaddr->addr_state = NET_ADDR_DEPRECATED;

	sys_slist_find_and_remove(&active_address_lifetime_timers,
				  &ifaddr->lifetime.node);

	/* Clear the timeout so the entry reads as inactive. */
	net_timeout_set(&ifaddr->lifetime, 0, 0);
}
1623
/* Work handler for address valid-lifetime tracking.
 *
 * Evaluates every tracked address against the current time; expired
 * ones are deprecated via address_expired(), and the work item is
 * rescheduled for the nearest remaining deadline (if any).
 */
static void address_lifetime_timeout(struct k_work *work)
{
	uint32_t next_update = UINT32_MAX;
	uint32_t current_time = k_uptime_get_32();
	struct net_if_addr *current, *next;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
					  current, next, lifetime.node) {
		struct net_timeout *timeout = &current->lifetime;
		/* Milliseconds until this entry's next deadline;
		 * 0 means the lifetime has fully elapsed.
		 */
		uint32_t this_update = net_timeout_evaluate(timeout,
							    current_time);

		if (this_update == 0U) {
			address_expired(current);
			continue;
		}

		if (this_update < next_update) {
			next_update = this_update;
		}

		/* Last element reached; stop explicitly. */
		if (current == next) {
			break;
		}
	}

	if (next_update != UINT32_MAX) {
		NET_DBG("Waiting for %d ms", (int32_t)next_update);

		k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
	}

	k_mutex_unlock(&lock);
}
1662
1663 #if defined(CONFIG_NET_TEST)
/* Test hook: lets unit tests force an address lifetime evaluation
 * without waiting for the work queue.
 */
void net_address_lifetime_timeout(void)
{
	address_lifetime_timeout(NULL);
}
1668 #endif
1669
address_start_timer(struct net_if_addr * ifaddr,uint32_t vlifetime)1670 static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
1671 {
1672 sys_slist_append(&active_address_lifetime_timers,
1673 &ifaddr->lifetime.node);
1674
1675 net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
1676 k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
1677 }
1678
/* Refresh the valid lifetime of @ifaddr to @vlifetime seconds and
 * mark the address preferred again.
 */
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
				      uint32_t vlifetime)
{
	k_mutex_lock(&lock, K_FOREVER);

	NET_DBG("Updating expire time of %s by %u secs",
		net_sprint_ipv6_addr(&ifaddr->address.in6_addr),
		vlifetime);

	ifaddr->addr_state = NET_ADDR_PREFERRED;

	address_start_timer(ifaddr, vlifetime);

	k_mutex_unlock(&lock);
}
1694
ipv6_addr_find(struct net_if * iface,struct in6_addr * addr)1695 static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
1696 struct in6_addr *addr)
1697 {
1698 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1699 int i;
1700
1701 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1702 if (!ipv6->unicast[i].is_used) {
1703 continue;
1704 }
1705
1706 if (net_ipv6_addr_cmp(
1707 addr, &ipv6->unicast[i].address.in6_addr)) {
1708
1709 return &ipv6->unicast[i];
1710 }
1711 }
1712
1713 return NULL;
1714 }
1715
/* Populate a freshly claimed unicast slot.
 * @vlifetime of zero means the address never expires; otherwise the
 * lifetime timer is armed via net_if_ipv6_addr_update_lifetime().
 */
static inline void net_if_addr_init(struct net_if_addr *ifaddr,
				    struct in6_addr *addr,
				    enum net_addr_type addr_type,
				    uint32_t vlifetime)
{
	ifaddr->is_used = true;
	ifaddr->address.family = AF_INET6;
	ifaddr->addr_type = addr_type;
	net_ipaddr_copy(&ifaddr->address.in6_addr, addr);

	/* FIXME - set the mcast addr for this node */

	if (vlifetime == 0U) {
		ifaddr->is_infinite = true;
		return;
	}

	ifaddr->is_infinite = false;

	NET_DBG("Expiring %s in %u secs",
		net_sprint_ipv6_addr(addr),
		vlifetime);

	net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
}
1740
/* Add a unicast IPv6 address to @iface.
 *
 * Returns the (possibly pre-existing) address entry, or NULL when the
 * interface has no IPv6 config or no free unicast slot.  For
 * interfaces doing ND, the required multicast groups are joined and
 * DAD is started; otherwise the address is usable immediately.
 * Raises NET_EVENT_IPV6_ADDR_ADD on success.
 */
struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
					 struct in6_addr *addr,
					 enum net_addr_type addr_type,
					 uint32_t vlifetime)
{
	struct net_if_addr *ifaddr = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	net_if_lock(iface);

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		goto out;
	}

	/* Adding an existing address is a no-op; return the entry. */
	ifaddr = ipv6_addr_find(iface, addr);
	if (ifaddr) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (ipv6->unicast[i].is_used) {
			continue;
		}

		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
				 vlifetime);

		NET_DBG("[%d] interface %p address %s type %s added", i,
			iface, net_sprint_ipv6_addr(addr),
			net_addr_type2str(addr_type));

		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
		    !net_ipv6_is_addr_loopback(addr) &&
		    !net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
			/* RFC 4862 5.4.2
			 * Before sending a Neighbor Solicitation, an interface
			 * MUST join the all-nodes multicast address and the
			 * solicited-node multicast address of the tentative
			 * address.
			 */
			/* The allnodes multicast group is only joined once as
			 * net_ipv6_mcast_join() checks if we have already
			 * joined.
			 */
			join_mcast_nodes(iface,
					 &ipv6->unicast[i].address.in6_addr);

			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
		} else {
			/* If DAD is not done for point-to-point links, then
			 * the address is usable immediately.
			 */
			ipv6->unicast[i].addr_state = NET_ADDR_PREFERRED;
		}

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_ADDR_ADD, iface,
			&ipv6->unicast[i].address.in6_addr,
			sizeof(struct in6_addr));

		ifaddr = &ipv6->unicast[i];
		goto out;
	}

out:
	net_if_unlock(iface);

	return ifaddr;
}
1811
/* Remove a unicast IPv6 address from @iface.
 *
 * Cancels any pending lifetime timer, drops the matching
 * solicited-node multicast group and raises NET_EVENT_IPV6_ADDR_DEL.
 * Returns true when the address was found and removed.
 */
bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
{
	bool ret = false;
	struct net_if_ipv6 *ipv6;
	int i;

	NET_ASSERT(addr);

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		struct in6_addr maddr;

		if (!ipv6->unicast[i].is_used) {
			continue;
		}

		if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr,
				       addr)) {
			continue;
		}

		/* Finite-lifetime addresses have an active timer that
		 * must be stopped; cancel the shared work item when no
		 * other address is being tracked.
		 */
		if (!ipv6->unicast[i].is_infinite) {
			k_mutex_lock(&lock, K_FOREVER);

			sys_slist_find_and_remove(
				&active_address_lifetime_timers,
				&ipv6->unicast[i].lifetime.node);

			if (sys_slist_is_empty(
				    &active_address_lifetime_timers)) {
				k_work_cancel_delayable(
					&address_lifetime_timer);
			}

			k_mutex_unlock(&lock);
		}

		ipv6->unicast[i].is_used = false;

		net_ipv6_addr_create_solicited_node(addr, &maddr);

		net_if_ipv6_maddr_rm(iface, &maddr);

		NET_DBG("[%d] interface %p address %s type %s removed",
			i, iface, net_sprint_ipv6_addr(addr),
			net_addr_type2str(ipv6->unicast[i].addr_type));

		/* Using the IPv6 address pointer here can give false
		 * info if someone adds a new IP address into this position
		 * in the address array. This is quite unlikely thou.
		 */
		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_ADDR_DEL,
			iface,
			&ipv6->unicast[i].address.in6_addr,
			sizeof(struct in6_addr));

		ret = true;
		goto out;
	}

out:
	net_if_unlock(iface);

	return ret;
}
1884
z_impl_net_if_ipv6_addr_add_by_index(int index,struct in6_addr * addr,enum net_addr_type addr_type,uint32_t vlifetime)1885 bool z_impl_net_if_ipv6_addr_add_by_index(int index,
1886 struct in6_addr *addr,
1887 enum net_addr_type addr_type,
1888 uint32_t vlifetime)
1889 {
1890 struct net_if *iface;
1891
1892 iface = net_if_get_by_index(index);
1893 if (!iface) {
1894 return false;
1895 }
1896
1897 return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
1898 true : false;
1899 }
1900
1901 #ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate @index and copy @addr from
 * user memory (oops on a bad pointer) before delegating to the
 * implementation.
 */
bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
					  struct in6_addr *addr,
					  enum net_addr_type addr_type,
					  uint32_t vlifetime)
{
	struct in6_addr addr_v6;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_add_by_index(index,
						    &addr_v6,
						    addr_type,
						    vlifetime);
}
1922
1923 #include <syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
1924 #endif /* CONFIG_USERSPACE */
1925
z_impl_net_if_ipv6_addr_rm_by_index(int index,const struct in6_addr * addr)1926 bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
1927 const struct in6_addr *addr)
1928 {
1929 struct net_if *iface;
1930
1931 iface = net_if_get_by_index(index);
1932 if (!iface) {
1933 return false;
1934 }
1935
1936 return net_if_ipv6_addr_rm(iface, addr);
1937 }
1938
1939 #ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate @index and copy @addr from
 * user memory (oops on a bad pointer) before delegating to the
 * implementation.
 */
bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
					 const struct in6_addr *addr)
{
	struct in6_addr addr_v6;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
}
1955
1956 #include <syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
1957 #endif /* CONFIG_USERSPACE */
1958
net_if_ipv6_maddr_add(struct net_if * iface,const struct in6_addr * addr)1959 struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
1960 const struct in6_addr *addr)
1961 {
1962 struct net_if_mcast_addr *ifmaddr = NULL;
1963 struct net_if_ipv6 *ipv6;
1964 int i;
1965
1966 net_if_lock(iface);
1967
1968 if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1969 goto out;
1970 }
1971
1972 if (!net_ipv6_is_addr_mcast(addr)) {
1973 NET_DBG("Address %s is not a multicast address.",
1974 net_sprint_ipv6_addr(addr));
1975 goto out;
1976 }
1977
1978 if (net_if_ipv6_maddr_lookup(addr, &iface)) {
1979 NET_WARN("Multicast address %s is is already registered.",
1980 net_sprint_ipv6_addr(addr));
1981 goto out;
1982 }
1983
1984 for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1985 if (ipv6->mcast[i].is_used) {
1986 continue;
1987 }
1988
1989 ipv6->mcast[i].is_used = true;
1990 ipv6->mcast[i].address.family = AF_INET6;
1991 memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
1992
1993 NET_DBG("[%d] interface %p address %s added", i, iface,
1994 net_sprint_ipv6_addr(addr));
1995
1996 net_mgmt_event_notify_with_info(
1997 NET_EVENT_IPV6_MADDR_ADD, iface,
1998 &ipv6->mcast[i].address.in6_addr,
1999 sizeof(struct in6_addr));
2000
2001 ifmaddr = &ipv6->mcast[i];
2002 goto out;
2003 }
2004
2005 out:
2006 net_if_unlock(iface);
2007
2008 return ifmaddr;
2009 }
2010
net_if_ipv6_maddr_rm(struct net_if * iface,const struct in6_addr * addr)2011 bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
2012 {
2013 bool ret = false;
2014 struct net_if_ipv6 *ipv6;
2015 int i;
2016
2017 net_if_lock(iface);
2018
2019 ipv6 = iface->config.ip.ipv6;
2020 if (!ipv6) {
2021 goto out;
2022 }
2023
2024 for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2025 if (!ipv6->mcast[i].is_used) {
2026 continue;
2027 }
2028
2029 if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
2030 addr)) {
2031 continue;
2032 }
2033
2034 ipv6->mcast[i].is_used = false;
2035
2036 NET_DBG("[%d] interface %p address %s removed",
2037 i, iface, net_sprint_ipv6_addr(addr));
2038
2039 net_mgmt_event_notify_with_info(
2040 NET_EVENT_IPV6_MADDR_DEL, iface,
2041 &ipv6->mcast[i].address.in6_addr,
2042 sizeof(struct in6_addr));
2043
2044 ret = true;
2045 goto out;
2046 }
2047
2048 out:
2049 net_if_unlock(iface);
2050
2051 return ret;
2052 }
2053
net_if_ipv6_maddr_lookup(const struct in6_addr * maddr,struct net_if ** ret)2054 struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
2055 struct net_if **ret)
2056 {
2057 struct net_if_mcast_addr *ifmaddr = NULL;
2058
2059 STRUCT_SECTION_FOREACH(net_if, iface) {
2060 struct net_if_ipv6 *ipv6;
2061 int i;
2062
2063 if (ret && *ret && iface != *ret) {
2064 continue;
2065 }
2066
2067 net_if_lock(iface);
2068
2069 ipv6 = iface->config.ip.ipv6;
2070 if (!ipv6) {
2071 net_if_unlock(iface);
2072 continue;
2073 }
2074
2075 for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
2076 if (!ipv6->mcast[i].is_used ||
2077 ipv6->mcast[i].address.family != AF_INET6) {
2078 continue;
2079 }
2080
2081 if (net_ipv6_is_prefix(
2082 maddr->s6_addr,
2083 ipv6->mcast[i].address.in6_addr.s6_addr,
2084 128)) {
2085 if (ret) {
2086 *ret = iface;
2087 }
2088
2089 ifmaddr = &ipv6->mcast[i];
2090 net_if_unlock(iface);
2091 goto out;
2092 }
2093 }
2094
2095 net_if_unlock(iface);
2096 }
2097
2098 out:
2099 return ifmaddr;
2100 }
2101
/* Clear the joined flag of a multicast address entry.  Only updates
 * the bookkeeping flag under the interface lock; any MLD signalling
 * is done elsewhere by the caller.
 */
void net_if_ipv6_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
{
	NET_ASSERT(iface);
	NET_ASSERT(addr);

	net_if_lock(iface);
	addr->is_joined = false;
	net_if_unlock(iface);
}
2111
/* Set the joined flag of a multicast address entry.  Only updates
 * the bookkeeping flag under the interface lock; any MLD signalling
 * is done elsewhere by the caller.
 */
void net_if_ipv6_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
{
	NET_ASSERT(iface);
	NET_ASSERT(addr);

	net_if_lock(iface);
	addr->is_joined = true;
	net_if_unlock(iface);
}
2121
/* Remove every autoconfigured unicast address that was formed from
 * the given prefix (@addr/@len).
 */
static void remove_prefix_addresses(struct net_if *iface,
				    struct net_if_ipv6 *ipv6,
				    struct in6_addr *addr,
				    uint8_t len)
{
	for (int idx = 0; idx < NET_IF_MAX_IPV6_ADDR; idx++) {
		struct net_if_addr *unicast = &ipv6->unicast[idx];

		if (!unicast->is_used ||
		    unicast->address.family != AF_INET6 ||
		    unicast->addr_type != NET_ADDR_AUTOCONF) {
			continue;
		}

		if (net_ipv6_is_prefix(addr->s6_addr,
				       unicast->address.in6_addr.s6_addr,
				       len)) {
			net_if_ipv6_addr_rm(iface,
					    &unicast->address.in6_addr);
		}
	}
}
2145
prefix_lifetime_expired(struct net_if_ipv6_prefix * ifprefix)2146 static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
2147 {
2148 struct net_if_ipv6 *ipv6;
2149
2150 net_if_lock(ifprefix->iface);
2151
2152 NET_DBG("Prefix %s/%d expired",
2153 net_sprint_ipv6_addr(&ifprefix->prefix),
2154 ifprefix->len);
2155
2156 ifprefix->is_used = false;
2157
2158 if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
2159 return;
2160 }
2161
2162 /* Remove also all auto addresses if the they have the same prefix.
2163 */
2164 remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
2165 ifprefix->len);
2166
2167 net_mgmt_event_notify_with_info(
2168 NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface,
2169 &ifprefix->prefix, sizeof(struct in6_addr));
2170
2171 net_if_unlock(ifprefix->iface);
2172 }
2173
/* Stop lifetime tracking for @ifprefix: unlink it from the active
 * timer list and clear its timeout.
 */
static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
{
	k_mutex_lock(&lock, K_FOREVER);

	NET_DBG("IPv6 prefix %s/%d removed",
		net_sprint_ipv6_addr(&ifprefix->prefix),
		ifprefix->len);

	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
				  &ifprefix->lifetime.node);

	/* Clear the timeout so the entry reads as inactive. */
	net_timeout_set(&ifprefix->lifetime, 0, 0);

	k_mutex_unlock(&lock);
}
2189
/* Work handler for prefix lifetime tracking.
 *
 * Evaluates every tracked prefix against the current time under the
 * global mutex; expired ones are moved to a local list and the work
 * item is rescheduled for the nearest remaining deadline.  Expiry
 * processing happens outside the mutex because
 * prefix_lifetime_expired() takes interface locks of its own.
 */
static void prefix_lifetime_timeout(struct k_work *work)
{
	uint32_t next_update = UINT32_MAX;
	uint32_t current_time = k_uptime_get_32();
	struct net_if_ipv6_prefix *current, *next;
	sys_slist_t expired_list;

	ARG_UNUSED(work);

	sys_slist_init(&expired_list);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
					  current, next, lifetime.node) {
		struct net_timeout *timeout = &current->lifetime;
		/* Milliseconds until this entry's next deadline;
		 * 0 means the lifetime has fully elapsed.
		 */
		uint32_t this_update = net_timeout_evaluate(timeout,
							    current_time);

		if (this_update == 0U) {
			sys_slist_find_and_remove(
				&active_prefix_lifetime_timers,
				&current->lifetime.node);
			sys_slist_append(&expired_list,
					 &current->lifetime.node);
			continue;
		}

		if (this_update < next_update) {
			next_update = this_update;
		}

		/* Last element reached; stop explicitly. */
		if (current == next) {
			break;
		}
	}

	if (next_update != UINT32_MAX) {
		k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
	}

	k_mutex_unlock(&lock);

	SYS_SLIST_FOR_EACH_CONTAINER(&expired_list, current, lifetime.node) {
		prefix_lifetime_expired(current);
	}
}
2237
/* (Re)start the lifetime timer for @ifprefix with @lifetime seconds. */
static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
			       uint32_t lifetime)
{
	k_mutex_lock(&lock, K_FOREVER);

	/* Remove first so that restarting an already-tracked prefix
	 * does not insert its node into the list twice.
	 */
	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
					&ifprefix->lifetime.node);
	sys_slist_append(&active_prefix_lifetime_timers,
			 &ifprefix->lifetime.node);

	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
	k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);

	k_mutex_unlock(&lock);
}
2253
/* Scan the interface's prefix slots for an entry matching both the
 * prefix value and the prefix length.  Returns NULL when not found
 * or when the interface has no IPv6 config.
 */
static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
						   struct in6_addr *prefix,
						   uint8_t prefix_len)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;

	if (ipv6 == NULL) {
		return NULL;
	}

	for (int slot = 0; slot < NET_IF_MAX_IPV6_PREFIX; slot++) {
		struct net_if_ipv6_prefix *candidate = &ipv6->prefix[slot];

		if (candidate->is_used &&
		    candidate->len == prefix_len &&
		    net_ipv6_addr_cmp(prefix, &candidate->prefix)) {
			return candidate;
		}
	}

	return NULL;
}
2278
/* Populate a freshly claimed prefix slot.  A lifetime of
 * NET_IPV6_ND_INFINITE_LIFETIME marks the prefix as never expiring.
 */
static void net_if_ipv6_prefix_init(struct net_if *iface,
				    struct net_if_ipv6_prefix *ifprefix,
				    struct in6_addr *addr, uint8_t len,
				    uint32_t lifetime)
{
	ifprefix->is_used = true;
	ifprefix->len = len;
	ifprefix->iface = iface;
	net_ipaddr_copy(&ifprefix->prefix, addr);

	ifprefix->is_infinite = (lifetime == NET_IPV6_ND_INFINITE_LIFETIME);
}
2295
/* Add an on-link prefix to @iface.
 *
 * Returns the (possibly pre-existing) prefix entry, or NULL when the
 * interface has no IPv6 config or no free prefix slot.  Raises
 * NET_EVENT_IPV6_PREFIX_ADD on success.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
						  struct in6_addr *prefix,
						  uint8_t len,
						  uint32_t lifetime)
{
	struct net_if_ipv6_prefix *ifprefix = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	net_if_lock(iface);

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		goto out;
	}

	/* Adding an existing prefix is a no-op; return the entry. */
	ifprefix = ipv6_prefix_find(iface, prefix, len);
	if (ifprefix) {
		goto out;
	}

	/* NOTE(review): looks redundant — net_if_config_ipv6_get()
	 * succeeded above, so ipv6 should already be non-NULL; confirm
	 * before removing.
	 */
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (ipv6->prefix[i].is_used) {
			continue;
		}

		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
					len, lifetime);

		NET_DBG("[%d] interface %p prefix %s/%d added", i, iface,
			net_sprint_ipv6_addr(prefix), len);

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_PREFIX_ADD, iface,
			&ipv6->prefix[i].prefix, sizeof(struct in6_addr));

		ifprefix = &ipv6->prefix[i];
		goto out;
	}

out:
	net_if_unlock(iface);

	return ifprefix;
}
2344
/* Remove the prefix @addr/@len from @iface.
 *
 * Stops its lifetime timer, removes all autoconfigured addresses
 * derived from it and raises NET_EVENT_IPV6_PREFIX_DEL.
 * Returns true when the prefix was found and removed.
 */
bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
			   uint8_t len)
{
	bool ret = false;
	struct net_if_ipv6 *ipv6;
	int i;

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		/* Both the prefix value and the length must match. */
		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
		    ipv6->prefix[i].len != len) {
			continue;
		}

		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);

		ipv6->prefix[i].is_used = false;

		/* Remove also all auto addresses if the they have the same
		 * prefix.
		 */
		remove_prefix_addresses(iface, ipv6, addr, len);

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_PREFIX_DEL, iface,
			&ipv6->prefix[i].prefix, sizeof(struct in6_addr));

		ret = true;
		goto out;
	}

out:
	net_if_unlock(iface);

	return ret;
}
2391
/* Return the prefix entry covering @addr on the given interface
 * (default interface if @iface is NULL), or NULL if none covers it.
 *
 * NOTE(review): among multiple covering prefixes this keeps the one
 * with the SMALLEST length (`prefix->len > ipv6->prefix[i].len`);
 * longest-prefix match would use `<` — confirm this is intended.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
						  struct in6_addr *addr)
{
	struct net_if_ipv6_prefix *prefix = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	if (!iface) {
		iface = net_if_get_default();
	}

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
				       addr->s6_addr,
				       ipv6->prefix[i].len)) {
			if (!prefix || prefix->len > ipv6->prefix[i].len) {
				prefix = &ipv6->prefix[i];
			}
		}
	}

out:
	net_if_unlock(iface);

	return prefix;
}
2429
/* Look up a prefix entry on the interface that matches @addr for the
 * first @len bits.  Returns the first match, or NULL.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
						     struct in6_addr *addr,
						     uint8_t len)
{
	struct net_if_ipv6_prefix *found = NULL;
	struct net_if_ipv6 *ipv6;

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (ipv6 != NULL) {
		for (int idx = 0; idx < NET_IF_MAX_IPV6_PREFIX; idx++) {
			struct net_if_ipv6_prefix *entry = &ipv6->prefix[idx];

			if (entry->is_used &&
			    net_ipv6_is_prefix(entry->prefix.s6_addr,
					       addr->s6_addr, len)) {
				found = entry;
				break;
			}
		}
	}

	net_if_unlock(iface);

	return found;
}
2462
/* Check whether @addr is on-link, i.e. covered by a configured prefix
 * on some interface.
 *
 * If *iface is non-NULL on entry, only that interface is considered;
 * otherwise all interfaces are scanned and, on success, *iface (when
 * the pointer itself is non-NULL) is set to the matching interface.
 */
bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
{
	bool ret = false;

	STRUCT_SECTION_FOREACH(net_if, tmp) {
		struct net_if_ipv6 *ipv6;
		int i;

		/* Caller pinned a specific interface: skip the others. */
		if (iface && *iface && *iface != tmp) {
			continue;
		}

		net_if_lock(tmp);

		ipv6 = tmp->config.ip.ipv6;
		if (!ipv6) {
			net_if_unlock(tmp);
			continue;
		}

		for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
			if (ipv6->prefix[i].is_used &&
			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
					       addr->s6_addr,
					       ipv6->prefix[i].len)) {
				if (iface) {
					*iface = tmp;
				}

				ret = true;
				net_if_unlock(tmp);
				goto out;
			}
		}

		net_if_unlock(tmp);
	}

out:
	return ret;
}
2504
/* Arm the lifetime timer for a prefix.
 *
 * @param prefix   Prefix entry to arm the timer for.
 * @param lifetime Lifetime in seconds; the ND "infinite" sentinel
 *                 means no timer is needed.
 */
void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
				  uint32_t lifetime)
{
	/* No need to set a timer for infinite timeout.  Use the named
	 * constant instead of the magic 0xffffffff so this stays in
	 * sync with net_if_ipv6_prefix_init().
	 */
	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
		return;
	}

	NET_DBG("Prefix lifetime %u sec", lifetime);

	prefix_start_timer(prefix, lifetime);
}
2517
net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix * prefix)2518 void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
2519 {
2520 if (!prefix->is_used) {
2521 return;
2522 }
2523
2524 prefix_timer_remove(prefix);
2525 }
2526
/* IPv6 wrapper around the family-generic router lookup helper. */
struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
						struct in6_addr *addr)
{
	return iface_router_lookup(iface, AF_INET6, addr);
}
2532
/* IPv6 wrapper: find a default router, optionally matching @addr. */
struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
						      struct in6_addr *addr)
{
	return iface_router_find_default(iface, AF_INET6, addr);
}
2538
/* Restart a router's lifetime: record the current uptime as the new
 * start point and reschedule the shared router expiry timer.
 */
void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
					uint16_t lifetime)
{
	NET_DBG("Updating expire time of %s by %u secs",
		net_sprint_ipv6_addr(&router->address.in6_addr),
		lifetime);

	router->life_start = k_uptime_get_32();
	router->lifetime = lifetime;

	iface_router_update_timer(router->life_start);
}
2551
/* IPv6 wrapper: add a (non-default) router with the given lifetime. */
struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
					     struct in6_addr *addr,
					     uint16_t lifetime)
{
	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
}
2558
/* IPv6 wrapper around the family-generic router removal helper. */
bool net_if_ipv6_router_rm(struct net_if_router *router)
{
	return iface_router_rm(router);
}
2563
/* Return the interface's IPv6 hop limit, or 0 when IPv6 config is
 * absent or native IPv6 support is compiled out.
 */
uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	int ret = 0;

	net_if_lock(iface);

	if (!iface->config.ip.ipv6) {
		goto out;
	}

	ret = iface->config.ip.ipv6->hop_limit;
out:
	net_if_unlock(iface);

	return ret;
#else
	ARG_UNUSED(iface);

	return 0;
#endif
}
2586
/* Set the interface's IPv6 hop limit; silently does nothing when IPv6
 * config is absent or native IPv6 support is compiled out.
 */
void net_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	net_if_lock(iface);

	if (!iface->config.ip.ipv6) {
		goto out;
	}

	iface->config.ip.ipv6->hop_limit = hop_limit;
out:
	net_if_unlock(iface);
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(hop_limit);
#endif
}
2604
net_if_ipv6_get_ll(struct net_if * iface,enum net_addr_state addr_state)2605 struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
2606 enum net_addr_state addr_state)
2607 {
2608 struct in6_addr *addr = NULL;
2609 struct net_if_ipv6 *ipv6;
2610 int i;
2611
2612 net_if_lock(iface);
2613
2614 ipv6 = iface->config.ip.ipv6;
2615 if (!ipv6) {
2616 goto out;
2617 }
2618
2619 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2620 if (!ipv6->unicast[i].is_used ||
2621 (addr_state != NET_ADDR_ANY_STATE &&
2622 ipv6->unicast[i].addr_state != addr_state) ||
2623 ipv6->unicast[i].address.family != AF_INET6) {
2624 continue;
2625 }
2626
2627 if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2628 addr = &ipv6->unicast[i].address.in6_addr;
2629 goto out;
2630 }
2631 }
2632
2633 out:
2634 net_if_unlock(iface);
2635
2636 return addr;
2637 }
2638
net_if_ipv6_get_ll_addr(enum net_addr_state state,struct net_if ** iface)2639 struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
2640 struct net_if **iface)
2641 {
2642 struct in6_addr *addr = NULL;
2643
2644 STRUCT_SECTION_FOREACH(net_if, tmp) {
2645 net_if_lock(tmp);
2646
2647 addr = net_if_ipv6_get_ll(tmp, state);
2648 if (addr) {
2649 if (iface) {
2650 *iface = tmp;
2651 }
2652
2653 net_if_unlock(tmp);
2654 goto out;
2655 }
2656
2657 net_if_unlock(tmp);
2658 }
2659
2660 out:
2661 return addr;
2662 }
2663
check_global_addr(struct net_if * iface,enum net_addr_state state)2664 static inline struct in6_addr *check_global_addr(struct net_if *iface,
2665 enum net_addr_state state)
2666 {
2667 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2668 int i;
2669
2670 if (!ipv6) {
2671 return NULL;
2672 }
2673
2674 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2675 if (!ipv6->unicast[i].is_used ||
2676 (ipv6->unicast[i].addr_state != state) ||
2677 ipv6->unicast[i].address.family != AF_INET6) {
2678 continue;
2679 }
2680
2681 if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2682 return &ipv6->unicast[i].address.in6_addr;
2683 }
2684 }
2685
2686 return NULL;
2687 }
2688
net_if_ipv6_get_global_addr(enum net_addr_state state,struct net_if ** iface)2689 struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
2690 struct net_if **iface)
2691 {
2692 struct in6_addr *addr = NULL;
2693
2694 STRUCT_SECTION_FOREACH(net_if, tmp) {
2695 if (iface && *iface && tmp != *iface) {
2696 continue;
2697 }
2698
2699 net_if_lock(tmp);
2700 addr = check_global_addr(tmp, state);
2701 if (addr) {
2702 if (iface) {
2703 *iface = tmp;
2704 }
2705
2706 net_if_unlock(tmp);
2707 goto out;
2708 }
2709
2710 net_if_unlock(tmp);
2711 }
2712
2713 out:
2714
2715 return addr;
2716 }
2717
/* Number of leading bits two IPv6 addresses (16 bytes) have in common. */
static uint8_t get_diff_ipv6(const struct in6_addr *src,
			     const struct in6_addr *dst)
{
	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 16);
}
2723
is_proper_ipv6_address(struct net_if_addr * addr)2724 static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
2725 {
2726 if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
2727 addr->address.family == AF_INET6 &&
2728 !net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
2729 return true;
2730 }
2731
2732 return false;
2733 }
2734
/* Among the interface's usable unicast addresses, return the one that
 * shares the most leading bits with @dst, if it beats *best_so_far
 * (which is updated in place so callers can chain across interfaces).
 */
static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
						   const struct in6_addr *dst,
						   uint8_t *best_so_far)
{
	struct net_if_ipv6 *ipv6;
	struct in6_addr *src = NULL;
	uint8_t len;
	int i;

	net_if_lock(iface);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
			continue;
		}

		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
		if (len >= *best_so_far) {
			/* Mesh local address can only be selected for the same
			 * subnet.
			 */
			if (ipv6->unicast[i].is_mesh_local && len < 64 &&
			    !net_ipv6_is_addr_mcast_mesh(dst)) {
				continue;
			}

			*best_so_far = len;
			src = &ipv6->unicast[i].address.in6_addr;
		}
	}

out:
	net_if_unlock(iface);

	return src;
}
2776
net_if_ipv6_select_src_addr(struct net_if * dst_iface,const struct in6_addr * dst)2777 const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
2778 const struct in6_addr *dst)
2779 {
2780 const struct in6_addr *src = NULL;
2781 uint8_t best_match = 0U;
2782
2783 if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast_link(dst)) {
2784 /* If caller has supplied interface, then use that */
2785 if (dst_iface) {
2786 src = net_if_ipv6_get_best_match(dst_iface, dst,
2787 &best_match);
2788 } else {
2789 STRUCT_SECTION_FOREACH(net_if, iface) {
2790 struct in6_addr *addr;
2791
2792 addr = net_if_ipv6_get_best_match(iface, dst,
2793 &best_match);
2794 if (addr) {
2795 src = addr;
2796 }
2797 }
2798 }
2799
2800 } else {
2801 if (dst_iface) {
2802 src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
2803 } else {
2804 STRUCT_SECTION_FOREACH(net_if, iface) {
2805 struct in6_addr *addr;
2806
2807 addr = net_if_ipv6_get_ll(iface,
2808 NET_ADDR_PREFERRED);
2809 if (addr) {
2810 src = addr;
2811 break;
2812 }
2813 }
2814 }
2815 }
2816
2817 if (!src) {
2818 src = net_ipv6_unspecified_address();
2819 goto out;
2820 }
2821
2822 out:
2823 return src;
2824 }
2825
net_if_ipv6_select_src_iface(const struct in6_addr * dst)2826 struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
2827 {
2828 struct net_if *iface = NULL;
2829 const struct in6_addr *src;
2830
2831 src = net_if_ipv6_select_src_addr(NULL, dst);
2832 if (src != net_ipv6_unspecified_address()) {
2833 net_if_ipv6_addr_lookup(src, &iface);
2834 }
2835
2836 if (iface == NULL) {
2837 iface = net_if_get_default();
2838 }
2839
2840 return iface;
2841 }
2842
net_if_ipv6_calc_reachable_time(struct net_if_ipv6 * ipv6)2843 uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
2844 {
2845 uint32_t min_reachable, max_reachable;
2846
2847 min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
2848 / MIN_RANDOM_DENOM;
2849 max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
2850 / MAX_RANDOM_DENOM;
2851
2852 NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
2853 max_reachable);
2854
2855 return min_reachable +
2856 sys_rand32_get() % (max_reachable - min_reachable);
2857 }
2858
iface_ipv6_start(struct net_if * iface)2859 static void iface_ipv6_start(struct net_if *iface)
2860 {
2861 if (!net_if_flag_is_set(iface, NET_IF_IPV6) ||
2862 net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
2863 return;
2864 }
2865
2866 if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
2867 net_if_start_dad(iface);
2868 } else {
2869 struct net_if_ipv6 *ipv6 __unused = iface->config.ip.ipv6;
2870
2871 join_mcast_nodes(iface,
2872 &ipv6->mcast[0].address.in6_addr);
2873 }
2874
2875 net_if_start_rs(iface);
2876 }
2877
/* One-time IPv6 subsystem init: set up DAD/ND state, the shared
 * address/prefix lifetime work items, and seed each per-interface IPv6
 * config with default hop limit and randomized reachable time.
 */
static void iface_ipv6_init(int if_count)
{
	int i;

	iface_ipv6_dad_init();
	iface_ipv6_nd_init();

	k_work_init_delayable(&address_lifetime_timer,
			      address_lifetime_timeout);
	k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);

	/* Warn (but continue) if there are more interfaces than IPv6
	 * config slots; the extra interfaces simply get no IPv6.
	 */
	if (if_count > ARRAY_SIZE(ipv6_addresses)) {
		NET_WARN("You have %zu IPv6 net_if addresses but %d "
			 "network interfaces", ARRAY_SIZE(ipv6_addresses),
			 if_count);
		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
			 "value.");
	}

	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
		ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
		ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;

		net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
	}
}
2904
2905 #else
2906 #define join_mcast_allnodes(...)
2907 #define join_mcast_solicit_node(...)
2908 #define leave_mcast_all(...)
2909 #define join_mcast_nodes(...)
2910 #define iface_ipv6_start(...)
2911 #define iface_ipv6_init(...)
2912
/* Stub for builds without native IPv6: lookup always fails. */
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
						   struct net_if **iface)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(iface);

	return NULL;
}
2921
/* Stub for builds without native IPv6: lookup always fails. */
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
					    struct net_if **ret)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(ret);

	return NULL;
}
2930
/* Stub for builds without native IPv6: no global address available. */
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
					     struct net_if **iface)
{
	ARG_UNUSED(state);
	ARG_UNUSED(iface);

	return NULL;
}
2939 #endif /* CONFIG_NET_IPV6 */
2940
2941 #if defined(CONFIG_NET_NATIVE_IPV4)
/* Get (allocating on first use) the interface's IPv4 configuration.
 *
 * Returns 0 and optionally stores the config pointer in *ipv4;
 * -ENOTSUP if the interface has no IPv4 support, -ESRCH if the global
 * ipv4_addresses pool is exhausted.  Takes both the per-interface lock
 * and the file-global pool mutex (the latter only while scanning).
 */
int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
{
	int ret = 0;
	int i;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		ret = -ENOTSUP;
		goto out;
	}

	/* Already allocated: just hand it back. */
	if (iface->config.ip.ipv4) {
		if (ipv4) {
			*ipv4 = iface->config.ip.ipv4;
		}

		goto out;
	}

	k_mutex_lock(&lock, K_FOREVER);

	/* Claim the first free slot in the shared pool. */
	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
		if (ipv4_addresses[i].iface) {
			continue;
		}

		iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
		ipv4_addresses[i].iface = iface;

		if (ipv4) {
			*ipv4 = &ipv4_addresses[i].ipv4;
		}

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
2988
/* Release the interface's IPv4 configuration back to the shared pool.
 *
 * Returns 0 on success, -ENOTSUP if the interface has no IPv4 support,
 * -EALREADY if no config was attached, -ESRCH if the attached config
 * is not found in the pool.
 */
int net_if_config_ipv4_put(struct net_if *iface)
{
	int ret = 0;
	int i;

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		ret = -ENOTSUP;
		goto out;
	}

	if (!iface->config.ip.ipv4) {
		ret = -EALREADY;
		goto out;
	}

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
		if (ipv4_addresses[i].iface != iface) {
			continue;
		}

		iface->config.ip.ipv4 = NULL;
		ipv4_addresses[i].iface = NULL;

		k_mutex_unlock(&lock);
		goto out;
	}

	k_mutex_unlock(&lock);

	ret = -ESRCH;
out:
	net_if_unlock(iface);

	return ret;
}
3028
/* Return the interface's IPv4 TTL, or 0 when IPv4 config is absent or
 * native IPv4 is compiled out.  (The inner #if mirrors the enclosing
 * CONFIG_NET_NATIVE_IPV4 guard.)
 */
uint8_t net_if_ipv4_get_ttl(struct net_if *iface)
{
#if defined(CONFIG_NET_NATIVE_IPV4)
	int ret = 0;

	net_if_lock(iface);

	if (!iface->config.ip.ipv4) {
		goto out;
	}

	ret = iface->config.ip.ipv4->ttl;
out:
	net_if_unlock(iface);

	return ret;
#else
	ARG_UNUSED(iface);

	return 0;
#endif
}
3051
/* Set the interface's IPv4 TTL; silently does nothing without an IPv4
 * config or when native IPv4 is compiled out.
 */
void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl)
{
#if defined(CONFIG_NET_NATIVE_IPV4)
	net_if_lock(iface);

	if (!iface->config.ip.ipv4) {
		goto out;
	}

	iface->config.ip.ipv4->ttl = ttl;
out:
	net_if_unlock(iface);
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(ttl);
#endif
}
3069
/* IPv4 wrapper around the family-generic router lookup helper. */
struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
						struct in_addr *addr)
{
	return iface_router_lookup(iface, AF_INET, addr);
}
3075
/* IPv4 wrapper: find a default router, optionally matching @addr. */
struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
						      struct in_addr *addr)
{
	return iface_router_find_default(iface, AF_INET, addr);
}
3081
/* IPv4 wrapper: add a router, optionally as the default one. */
struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
					     struct in_addr *addr,
					     bool is_default,
					     uint16_t lifetime)
{
	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
}
3089
/* IPv4 wrapper around the family-generic router removal helper. */
bool net_if_ipv4_router_rm(struct net_if_router *router)
{
	return iface_router_rm(router);
}
3094
net_if_ipv4_addr_mask_cmp(struct net_if * iface,const struct in_addr * addr)3095 bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
3096 const struct in_addr *addr)
3097 {
3098 bool ret = false;
3099 struct net_if_ipv4 *ipv4;
3100 uint32_t subnet;
3101 int i;
3102
3103 net_if_lock(iface);
3104
3105 ipv4 = iface->config.ip.ipv4;
3106 if (!ipv4) {
3107 goto out;
3108 }
3109
3110 subnet = UNALIGNED_GET(&addr->s_addr) & ipv4->netmask.s_addr;
3111
3112 for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3113 if (!ipv4->unicast[i].is_used ||
3114 ipv4->unicast[i].address.family != AF_INET) {
3115 continue;
3116 }
3117
3118 if ((ipv4->unicast[i].address.in_addr.s_addr &
3119 ipv4->netmask.s_addr) == subnet) {
3120 ret = true;
3121 goto out;
3122 }
3123 }
3124
3125 out:
3126 net_if_unlock(iface);
3127
3128 return ret;
3129 }
3130
ipv4_is_broadcast_address(struct net_if * iface,const struct in_addr * addr)3131 static bool ipv4_is_broadcast_address(struct net_if *iface,
3132 const struct in_addr *addr)
3133 {
3134 struct net_if_ipv4 *ipv4;
3135 bool ret = false;
3136
3137 net_if_lock(iface);
3138
3139 ipv4 = iface->config.ip.ipv4;
3140 if (!ipv4) {
3141 ret = false;
3142 goto out;
3143 }
3144
3145 if (!net_if_ipv4_addr_mask_cmp(iface, addr)) {
3146 ret = false;
3147 goto out;
3148 }
3149
3150 if ((UNALIGNED_GET(&addr->s_addr) & ~ipv4->netmask.s_addr) ==
3151 ~ipv4->netmask.s_addr) {
3152 ret = true;
3153 goto out;
3154 }
3155
3156 out:
3157 net_if_unlock(iface);
3158 return ret;
3159 }
3160
net_if_ipv4_is_addr_bcast(struct net_if * iface,const struct in_addr * addr)3161 bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
3162 const struct in_addr *addr)
3163 {
3164 bool ret = false;
3165
3166 if (iface) {
3167 ret = ipv4_is_broadcast_address(iface, addr);
3168 goto out;
3169 }
3170
3171 STRUCT_SECTION_FOREACH(net_if, iface) {
3172 ret = ipv4_is_broadcast_address(iface, addr);
3173 if (ret) {
3174 goto out;
3175 }
3176 }
3177
3178 out:
3179 return ret;
3180 }
3181
net_if_ipv4_select_src_iface(const struct in_addr * dst)3182 struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
3183 {
3184 struct net_if *selected = NULL;
3185
3186 STRUCT_SECTION_FOREACH(net_if, iface) {
3187 bool ret;
3188
3189 ret = net_if_ipv4_addr_mask_cmp(iface, dst);
3190 if (ret) {
3191 selected = iface;
3192 goto out;
3193 }
3194 }
3195
3196 if (selected == NULL) {
3197 selected = net_if_get_default();
3198 }
3199
3200 out:
3201 return selected;
3202 }
3203
/* Number of leading bits two IPv4 addresses (4 bytes) have in common. */
static uint8_t get_diff_ipv4(const struct in_addr *src,
			     const struct in_addr *dst)
{
	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 4);
}
3209
is_proper_ipv4_address(struct net_if_addr * addr)3210 static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
3211 {
3212 if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3213 addr->address.family == AF_INET &&
3214 !net_ipv4_is_ll_addr(&addr->address.in_addr)) {
3215 return true;
3216 }
3217
3218 return false;
3219 }
3220
/* Among the interface's usable unicast addresses, return the one that
 * shares the most leading bits with @dst, if it beats *best_so_far
 * (which is updated in place so callers can chain across interfaces).
 */
static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
						  const struct in_addr *dst,
						  uint8_t *best_so_far)
{
	struct net_if_ipv4 *ipv4;
	struct in_addr *src = NULL;
	uint8_t len;
	int i;

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (!is_proper_ipv4_address(&ipv4->unicast[i])) {
			continue;
		}

		len = get_diff_ipv4(dst, &ipv4->unicast[i].address.in_addr);
		if (len >= *best_so_far) {
			*best_so_far = len;
			src = &ipv4->unicast[i].address.in_addr;
		}
	}

out:
	net_if_unlock(iface);

	return src;
}
3254
/* Return the first IPv4 unicast address on the interface that is in
 * @addr_state (NET_ADDR_ANY_STATE matches any) and is link-local iff
 * @ll is true.  Returns NULL when nothing matches.
 */
static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
					enum net_addr_state addr_state, bool ll)
{
	struct in_addr *addr = NULL;
	struct net_if_ipv4 *ipv4;
	int i;

	/* NULL-check BEFORE locking: net_if_lock() dereferences iface,
	 * so the original order (lock first, check after) would fault
	 * on a NULL interface before the check could run.
	 */
	if (!iface) {
		return NULL;
	}

	net_if_lock(iface);

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (!ipv4->unicast[i].is_used ||
		    (addr_state != NET_ADDR_ANY_STATE &&
		     ipv4->unicast[i].addr_state != addr_state) ||
		    ipv4->unicast[i].address.family != AF_INET) {
			continue;
		}

		/* Keep only addresses whose link-local-ness matches @ll. */
		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].address.in_addr) !=
		    ll) {
			continue;
		}

		addr = &ipv4->unicast[i].address.in_addr;
		goto out;
	}

out:
	net_if_unlock(iface);

	return addr;
}
3300
/* Return a link-local IPv4 address in @addr_state, or NULL. */
struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
				   enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, true);
}
3306
/* Return a non-link-local IPv4 address in @addr_state, or NULL. */
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
					    enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, false);
}
3312
/* Choose an IPv4 source address for sending to @dst.
 *
 * Link-local destinations get a preferred link-local source; others
 * get the longest-matching global address.  Fallback chain: global
 * address on @dst_iface, then (with IPv4 autoconf) a link-local
 * address, then the unspecified address 0.0.0.0.
 */
const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
						  const struct in_addr *dst)
{
	const struct in_addr *src = NULL;
	uint8_t best_match = 0U;

	if (!net_ipv4_is_ll_addr(dst)) {

		/* If caller has supplied interface, then use that */
		if (dst_iface) {
			src = net_if_ipv4_get_best_match(dst_iface, dst,
							 &best_match);
		} else {
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in_addr *addr;

				addr = net_if_ipv4_get_best_match(iface, dst,
								  &best_match);
				if (addr) {
					src = addr;
				}
			}
		}

	} else {
		if (dst_iface) {
			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
		} else {
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in_addr *addr;

				addr = net_if_ipv4_get_ll(iface,
							  NET_ADDR_PREFERRED);
				if (addr) {
					src = addr;
					break;
				}
			}
		}
	}

	if (!src) {
		src = net_if_ipv4_get_global_addr(dst_iface,
						  NET_ADDR_PREFERRED);

		if (IS_ENABLED(CONFIG_NET_IPV4_AUTO) && !src) {
			/* Try to use LL address if there's really no other
			 * address available.
			 */
			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
		}

		if (!src) {
			src = net_ipv4_unspecified_address();
		}

		goto out;
	}

out:
	return src;
}
3375
/* Find the interface address entry that exactly matches @addr across
 * all interfaces; optionally reports the owning interface via @ret.
 */
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
					    struct net_if **ret)
{
	struct net_if_addr *ifaddr = NULL;

	STRUCT_SECTION_FOREACH(net_if, iface) {
		struct net_if_ipv4 *ipv4;
		int i;

		net_if_lock(iface);

		ipv4 = iface->config.ip.ipv4;
		if (!ipv4) {
			net_if_unlock(iface);
			continue;
		}

		for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
			if (!ipv4->unicast[i].is_used ||
			    ipv4->unicast[i].address.family != AF_INET) {
				continue;
			}

			/* UNALIGNED_GET: caller's addr may not be
			 * 4-byte aligned.
			 */
			if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
			    ipv4->unicast[i].address.in_addr.s_addr) {

				if (ret) {
					*ret = iface;
				}

				ifaddr = &ipv4->unicast[i];
				net_if_unlock(iface);
				goto out;
			}
		}

		net_if_unlock(iface);
	}

out:
	return ifaddr;
}
3418
z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr * addr)3419 int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
3420 {
3421 struct net_if_addr *if_addr;
3422 struct net_if *iface = NULL;
3423
3424 if_addr = net_if_ipv4_addr_lookup(addr, &iface);
3425 if (!if_addr) {
3426 return 0;
3427 }
3428
3429 return net_if_get_by_iface(iface);
3430 }
3431
3432 #ifdef CONFIG_USERSPACE
z_vrfy_net_if_ipv4_addr_lookup_by_index(const struct in_addr * addr)3433 static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
3434 const struct in_addr *addr)
3435 {
3436 struct in_addr addr_v4;
3437
3438 Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3439
3440 return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
3441 }
3442 #include <syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
3443 #endif
3444
/* Set the interface's IPv4 netmask, allocating the IPv4 config on
 * first use.  Silently does nothing if no config can be obtained.
 */
void net_if_ipv4_set_netmask(struct net_if *iface,
			     const struct in_addr *netmask)
{
	net_if_lock(iface);

	if (net_if_config_ipv4_get(iface, NULL) < 0) {
		goto out;
	}

	if (!iface->config.ip.ipv4) {
		goto out;
	}

	net_ipaddr_copy(&iface->config.ip.ipv4->netmask, netmask);
out:
	net_if_unlock(iface);
}
3462
z_impl_net_if_ipv4_set_netmask_by_index(int index,const struct in_addr * netmask)3463 bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
3464 const struct in_addr *netmask)
3465 {
3466 struct net_if *iface;
3467
3468 iface = net_if_get_by_index(index);
3469 if (!iface) {
3470 return false;
3471 }
3472
3473 net_if_ipv4_set_netmask(iface, netmask);
3474
3475 return true;
3476 }
3477
#ifdef CONFIG_USERSPACE
/* Userspace verification wrapper: validate the interface index and
 * copy the netmask out of user memory before calling the kernel impl.
 */
bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
					     const struct in_addr *netmask)
{
	struct in_addr netmask_addr;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&netmask_addr, (void *)netmask,
				sizeof(netmask_addr)));

	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
}

#include <syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
3498
/* Set the interface's IPv4 default gateway, allocating the IPv4 config
 * on first use.  Silently does nothing if no config can be obtained.
 */
void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
{
	net_if_lock(iface);

	if (net_if_config_ipv4_get(iface, NULL) < 0) {
		goto out;
	}

	if (!iface->config.ip.ipv4) {
		goto out;
	}

	net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
out:
	net_if_unlock(iface);
}
3515
z_impl_net_if_ipv4_set_gw_by_index(int index,const struct in_addr * gw)3516 bool z_impl_net_if_ipv4_set_gw_by_index(int index,
3517 const struct in_addr *gw)
3518 {
3519 struct net_if *iface;
3520
3521 iface = net_if_get_by_index(index);
3522 if (!iface) {
3523 return false;
3524 }
3525
3526 net_if_ipv4_set_gw(iface, gw);
3527
3528 return true;
3529 }
3530
#ifdef CONFIG_USERSPACE
/* Userspace verification wrapper: validate the interface index and
 * copy the gateway out of user memory before calling the kernel impl.
 */
bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
					const struct in_addr *gw)
{
	struct in_addr gw_addr;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));

	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
}

#include <syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
3550
ipv4_addr_find(struct net_if * iface,struct in_addr * addr)3551 static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
3552 struct in_addr *addr)
3553 {
3554 struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
3555 int i;
3556
3557 for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3558 if (!ipv4->unicast[i].is_used) {
3559 continue;
3560 }
3561
3562 if (net_ipv4_addr_cmp(addr,
3563 &ipv4->unicast[i].address.in_addr)) {
3564 return &ipv4->unicast[i];
3565 }
3566 }
3567
3568 return NULL;
3569 }
3570
/* Add an IPv4 unicast address to the interface.
 *
 * If the address already exists its entry is returned unchanged (the
 * addr_type/vlifetime arguments are not applied — see TODO below).
 * A DHCP address may evict an OVERRIDABLE one; otherwise the first
 * free slot is used.  Returns NULL when no slot is available or the
 * IPv4 config cannot be obtained.
 */
struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
					 struct in_addr *addr,
					 enum net_addr_type addr_type,
					 uint32_t vlifetime)
{
	struct net_if_addr *ifaddr = NULL;
	struct net_if_ipv4 *ipv4;
	int i;

	net_if_lock(iface);

	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
		goto out;
	}

	ifaddr = ipv4_addr_find(iface, addr);
	if (ifaddr) {
		/* TODO: should set addr_type/vlifetime */
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		struct net_if_addr *cur = &ipv4->unicast[i];

		/* A DHCP lease may replace an overridable address. */
		if (addr_type == NET_ADDR_DHCP
		    && cur->addr_type == NET_ADDR_OVERRIDABLE) {
			ifaddr = cur;
			break;
		}

		if (!ipv4->unicast[i].is_used) {
			ifaddr = cur;
			break;
		}
	}

	if (ifaddr) {
		ifaddr->is_used = true;
		ifaddr->address.family = AF_INET;
		ifaddr->address.in_addr.s4_addr32[0] =
						addr->s4_addr32[0];
		ifaddr->addr_type = addr_type;

		/* Caller has to take care of timers and their expiry */
		if (vlifetime) {
			ifaddr->is_infinite = false;
		} else {
			ifaddr->is_infinite = true;
		}

		/**
		 *  TODO: Handle properly PREFERRED/DEPRECATED state when
		 *  address in use, expired and renewal state.
		 */
		ifaddr->addr_state = NET_ADDR_PREFERRED;

		NET_DBG("[%d] interface %p address %s type %s added", i, iface,
			net_sprint_ipv4_addr(addr),
			net_addr_type2str(addr_type));

		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
						&ifaddr->address.in_addr,
						sizeof(struct in_addr));
		goto out;
	}

out:
	net_if_unlock(iface);

	return ifaddr;
}
3642
net_if_ipv4_addr_rm(struct net_if * iface,const struct in_addr * addr)3643 bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
3644 {
3645 struct net_if_ipv4 *ipv4;
3646 bool ret = false;
3647 int i;
3648
3649 net_if_lock(iface);
3650
3651 ipv4 = iface->config.ip.ipv4;
3652 if (!ipv4) {
3653 goto out;
3654 }
3655
3656 for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3657 if (!ipv4->unicast[i].is_used) {
3658 continue;
3659 }
3660
3661 if (!net_ipv4_addr_cmp(&ipv4->unicast[i].address.in_addr,
3662 addr)) {
3663 continue;
3664 }
3665
3666 ipv4->unicast[i].is_used = false;
3667
3668 NET_DBG("[%d] interface %p address %s removed",
3669 i, iface, net_sprint_ipv4_addr(addr));
3670
3671 net_mgmt_event_notify_with_info(
3672 NET_EVENT_IPV4_ADDR_DEL, iface,
3673 &ipv4->unicast[i].address.in_addr,
3674 sizeof(struct in_addr));
3675
3676 ret = true;
3677 goto out;
3678 }
3679
3680 out:
3681 net_if_unlock(iface);
3682
3683 return ret;
3684 }
3685
z_impl_net_if_ipv4_addr_add_by_index(int index,struct in_addr * addr,enum net_addr_type addr_type,uint32_t vlifetime)3686 bool z_impl_net_if_ipv4_addr_add_by_index(int index,
3687 struct in_addr *addr,
3688 enum net_addr_type addr_type,
3689 uint32_t vlifetime)
3690 {
3691 struct net_if *iface;
3692 struct net_if_addr *if_addr;
3693
3694 iface = net_if_get_by_index(index);
3695 if (!iface) {
3696 return false;
3697 }
3698
3699 if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
3700 return if_addr ? true : false;
3701 }
3702
3703 #ifdef CONFIG_USERSPACE
z_vrfy_net_if_ipv4_addr_add_by_index(int index,struct in_addr * addr,enum net_addr_type addr_type,uint32_t vlifetime)3704 bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
3705 struct in_addr *addr,
3706 enum net_addr_type addr_type,
3707 uint32_t vlifetime)
3708 {
3709 struct in_addr addr_v4;
3710 struct net_if *iface;
3711
3712 iface = z_vrfy_net_if_get_by_index(index);
3713 if (!iface) {
3714 return false;
3715 }
3716
3717 Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3718
3719 return z_impl_net_if_ipv4_addr_add_by_index(index,
3720 &addr_v4,
3721 addr_type,
3722 vlifetime);
3723 }
3724
3725 #include <syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
3726 #endif /* CONFIG_USERSPACE */
3727
z_impl_net_if_ipv4_addr_rm_by_index(int index,const struct in_addr * addr)3728 bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
3729 const struct in_addr *addr)
3730 {
3731 struct net_if *iface;
3732
3733 iface = net_if_get_by_index(index);
3734 if (!iface) {
3735 return false;
3736 }
3737
3738 return net_if_ipv4_addr_rm(iface, addr);
3739 }
3740
3741 #ifdef CONFIG_USERSPACE
z_vrfy_net_if_ipv4_addr_rm_by_index(int index,const struct in_addr * addr)3742 bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
3743 const struct in_addr *addr)
3744 {
3745 struct in_addr addr_v4;
3746 struct net_if *iface;
3747
3748 iface = z_vrfy_net_if_get_by_index(index);
3749 if (!iface) {
3750 return false;
3751 }
3752
3753 Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3754
3755 return (uint32_t)z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
3756 }
3757
3758 #include <syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
3759 #endif /* CONFIG_USERSPACE */
3760
/* Find an IPv4 multicast slot whose usage state matches @is_used.
 * With is_used == true, look for a configured entry; with false, an
 * empty one. A NULL @addr acts as a wildcard, otherwise the entry must
 * hold exactly that address. Returns NULL if nothing matches or the
 * interface has no IPv4 context.
 */
static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
						 bool is_used,
						 const struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	if (ipv4 == NULL) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
		struct net_if_mcast_addr *cur = &ipv4->mcast[i];

		/* Skip entries in the wrong usage state. */
		if (cur->is_used != is_used) {
			continue;
		}

		if (addr != NULL &&
		    !net_ipv4_addr_cmp(&cur->address.in_addr, addr)) {
			continue;
		}

		return cur;
	}

	return NULL;
}
net_if_ipv4_maddr_add(struct net_if * iface,const struct in_addr * addr)3790 struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
3791 const struct in_addr *addr)
3792 {
3793 struct net_if_mcast_addr *maddr = NULL;
3794
3795 net_if_lock(iface);
3796
3797 if (net_if_config_ipv4_get(iface, NULL) < 0) {
3798 goto out;
3799 }
3800
3801 if (!net_ipv4_is_addr_mcast(addr)) {
3802 NET_DBG("Address %s is not a multicast address.",
3803 net_sprint_ipv4_addr(addr));
3804 goto out;
3805 }
3806
3807 maddr = ipv4_maddr_find(iface, false, NULL);
3808 if (maddr) {
3809 maddr->is_used = true;
3810 maddr->address.family = AF_INET;
3811 maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];
3812
3813 NET_DBG("interface %p address %s added", iface,
3814 net_sprint_ipv4_addr(addr));
3815
3816 net_mgmt_event_notify_with_info(
3817 NET_EVENT_IPV4_MADDR_ADD, iface,
3818 &maddr->address.in_addr,
3819 sizeof(struct in_addr));
3820 }
3821
3822 out:
3823 net_if_unlock(iface);
3824
3825 return maddr;
3826 }
3827
net_if_ipv4_maddr_rm(struct net_if * iface,const struct in_addr * addr)3828 bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
3829 {
3830 struct net_if_mcast_addr *maddr;
3831 bool ret = false;
3832
3833 net_if_lock(iface);
3834
3835 maddr = ipv4_maddr_find(iface, true, addr);
3836 if (maddr) {
3837 maddr->is_used = false;
3838
3839 NET_DBG("interface %p address %s removed",
3840 iface, net_sprint_ipv4_addr(addr));
3841
3842 net_mgmt_event_notify_with_info(
3843 NET_EVENT_IPV4_MADDR_DEL, iface,
3844 &maddr->address.in_addr,
3845 sizeof(struct in_addr));
3846
3847 ret = true;
3848 }
3849
3850 net_if_unlock(iface);
3851
3852 return ret;
3853 }
3854
net_if_ipv4_maddr_lookup(const struct in_addr * maddr,struct net_if ** ret)3855 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
3856 struct net_if **ret)
3857 {
3858 struct net_if_mcast_addr *addr = NULL;
3859
3860 STRUCT_SECTION_FOREACH(net_if, iface) {
3861 if (ret && *ret && iface != *ret) {
3862 continue;
3863 }
3864
3865 net_if_lock(iface);
3866
3867 addr = ipv4_maddr_find(iface, true, maddr);
3868 if (addr) {
3869 if (ret) {
3870 *ret = iface;
3871 }
3872
3873 net_if_unlock(iface);
3874 goto out;
3875 }
3876
3877 net_if_unlock(iface);
3878 }
3879
3880 out:
3881 return addr;
3882 }
3883
/* Mark the multicast address as no longer joined on the interface.
 * This only updates bookkeeping; sending the actual IGMP leave message
 * is handled elsewhere (see net_ipv4_igmp_leave()).
 */
void net_if_ipv4_maddr_leave(struct net_if *iface, struct net_if_mcast_addr *addr)
{
	NET_ASSERT(iface);
	NET_ASSERT(addr);

	net_if_lock(iface);
	addr->is_joined = false;
	net_if_unlock(iface);
}
3893
/* Mark the multicast address as joined on the interface.
 * This only updates bookkeeping; sending the actual IGMP join message
 * is the caller's responsibility.
 */
void net_if_ipv4_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr)
{
	NET_ASSERT(iface);
	NET_ASSERT(addr);

	net_if_lock(iface);
	addr->is_joined = true;
	net_if_unlock(iface);
}
3903
/* One-time IPv4 initialization for all interfaces: warn when there are
 * more network interfaces than preallocated IPv4 contexts, then set the
 * initial TTL on every context.
 */
static void iface_ipv4_init(int if_count)
{
	int i;

	/* NOTE(review): signed/unsigned comparison - ARRAY_SIZE() yields
	 * size_t while if_count is int. Harmless as long as if_count is
	 * never negative, which holds for the caller in net_if_init().
	 */
	if (if_count > ARRAY_SIZE(ipv4_addresses)) {
		NET_WARN("You have %zu IPv4 net_if addresses but %d "
			 "network interfaces", ARRAY_SIZE(ipv4_addresses),
			 if_count);
		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
			 "value.");
	}

	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
		ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
	}
}
3920
leave_ipv4_mcast_all(struct net_if * iface)3921 static void leave_ipv4_mcast_all(struct net_if *iface)
3922 {
3923 struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
3924 int i;
3925
3926 if (!ipv4) {
3927 return;
3928 }
3929
3930 for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
3931 if (!ipv4->mcast[i].is_used ||
3932 !ipv4->mcast[i].is_joined) {
3933 continue;
3934 }
3935
3936 net_ipv4_igmp_leave(iface, &ipv4->mcast[i].address.in_addr);
3937 }
3938 }
3939
3940 #else
3941 #define leave_ipv4_mcast_all(...)
3942 #define iface_ipv4_init(...)
3943
/* Stub used when CONFIG_NET_IPV4 is disabled: no multicast addresses
 * exist, so the lookup always fails.
 */
struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
						   struct net_if **iface)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(iface);

	return NULL;
}
3952
/* Stub used when CONFIG_NET_IPV4 is disabled: no unicast addresses
 * exist, so the lookup always fails.
 */
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
					    struct net_if **ret)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(ret);

	return NULL;
}
3961
/* Stub used when CONFIG_NET_IPV4 is disabled: there is never a global
 * IPv4 address to return.
 */
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
					    enum net_addr_state addr_state)
{
	ARG_UNUSED(addr_state);
	ARG_UNUSED(iface);

	return NULL;
}
3970 #endif /* CONFIG_NET_IPV4 */
3971
net_if_select_src_iface(const struct sockaddr * dst)3972 struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
3973 {
3974 struct net_if *iface = NULL;
3975
3976 if (!dst) {
3977 goto out;
3978 }
3979
3980 if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
3981 iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
3982 goto out;
3983 }
3984
3985 if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
3986 iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
3987 goto out;
3988 }
3989
3990 out:
3991 if (iface == NULL) {
3992 iface = net_if_get_default();
3993 }
3994
3995 return iface;
3996 }
3997
net_if_recv_data(struct net_if * iface,struct net_pkt * pkt)3998 enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
3999 {
4000 if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
4001 net_if_is_promisc(iface)) {
4002 /* If the packet is not for us and the promiscuous
4003 * mode is enabled, then increase the ref count so
4004 * that net_core.c:processing_data() will not free it.
4005 * The promiscuous mode handler must free the packet
4006 * after it has finished working with it.
4007 *
4008 * If packet is for us, then NET_CONTINUE is returned.
4009 * In this case we must clone the packet, as the packet
4010 * could be manipulated by other part of the stack.
4011 */
4012 enum net_verdict verdict;
4013 struct net_pkt *new_pkt;
4014
4015 /* This protects pkt so that it will not be freed by L2 recv()
4016 */
4017 net_pkt_ref(pkt);
4018
4019 verdict = net_if_l2(iface)->recv(iface, pkt);
4020 if (verdict == NET_CONTINUE) {
4021 new_pkt = net_pkt_clone(pkt, K_NO_WAIT);
4022 } else {
4023 new_pkt = net_pkt_ref(pkt);
4024 }
4025
4026 /* L2 has modified the buffer starting point, it is easier
4027 * to re-initialize the cursor rather than updating it.
4028 */
4029 net_pkt_cursor_init(new_pkt);
4030
4031 if (net_promisc_mode_input(new_pkt) == NET_DROP) {
4032 net_pkt_unref(new_pkt);
4033 }
4034
4035 net_pkt_unref(pkt);
4036
4037 return verdict;
4038 }
4039
4040 return net_if_l2(iface)->recv(iface, pkt);
4041 }
4042
/* Register a link state callback. Re-registering an already-linked
 * handle is safe: the find-and-remove makes the subsequent prepend
 * idempotent (the entry just moves to the front of the list).
 */
void net_if_register_link_cb(struct net_if_link_cb *link,
			     net_if_link_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&link_callbacks, &link->node);
	sys_slist_prepend(&link_callbacks, &link->node);

	link->cb = cb;

	k_mutex_unlock(&lock);
}
4055
/* Unregister a link state callback; a no-op if the handle was never
 * registered (find_and_remove tolerates absent nodes).
 */
void net_if_unregister_link_cb(struct net_if_link_cb *link)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&link_callbacks, &link->node);

	k_mutex_unlock(&lock);
}
4064
net_if_call_link_cb(struct net_if * iface,struct net_linkaddr * lladdr,int status)4065 void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
4066 int status)
4067 {
4068 struct net_if_link_cb *link, *tmp;
4069
4070 k_mutex_lock(&lock, K_FOREVER);
4071
4072 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
4073 link->cb(iface, lladdr, status);
4074 }
4075
4076 k_mutex_unlock(&lock);
4077 }
4078
need_calc_checksum(struct net_if * iface,enum ethernet_hw_caps caps)4079 static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps)
4080 {
4081 #if defined(CONFIG_NET_L2_ETHERNET)
4082 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
4083 return true;
4084 }
4085
4086 return !(net_eth_get_hw_capabilities(iface) & caps);
4087 #else
4088 ARG_UNUSED(iface);
4089 ARG_UNUSED(caps);
4090
4091 return true;
4092 #endif
4093 }
4094
/* True when the TX checksum must be computed in software (no hardware
 * TX checksum offload available on this interface).
 */
bool net_if_need_calc_tx_checksum(struct net_if *iface)
{
	return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD);
}
4099
/* True when the RX checksum must be verified in software (no hardware
 * RX checksum offload available on this interface).
 */
bool net_if_need_calc_rx_checksum(struct net_if *iface)
{
	return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD);
}
4104
net_if_get_by_iface(struct net_if * iface)4105 int net_if_get_by_iface(struct net_if *iface)
4106 {
4107 if (!(iface >= _net_if_list_start && iface < _net_if_list_end)) {
4108 return -1;
4109 }
4110
4111 return (iface - _net_if_list_start) + 1;
4112 }
4113
/* Invoke @cb for every network interface, passing @user_data along. */
void net_if_foreach(net_if_cb_t cb, void *user_data)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		cb(iface, user_data);
	}
}
4120
is_iface_offloaded(struct net_if * iface)4121 static inline bool is_iface_offloaded(struct net_if *iface)
4122 {
4123 return (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
4124 net_if_is_ip_offloaded(iface)) ||
4125 (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
4126 net_if_is_socket_offloaded(iface));
4127 }
4128
/* Transition the interface to the operationally-up state: set the
 * RUNNING flag, emit NET_EVENT_IF_UP, enable any virtual interfaces
 * bound on top of it and kick off IPv6 (DAD/RS) and IPv4 autoconf.
 */
static void notify_iface_up(struct net_if *iface)
{
	/* In many places it's assumed that link address was set with
	 * net_if_set_link_addr(). Better check that now.
	 */
#if defined(CONFIG_NET_L2_CANBUS_RAW)
	if (IS_ENABLED(CONFIG_NET_SOCKETS_CAN) &&
	    (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW))) {
		/* CAN does not require link address. */
	} else
#endif	/* CONFIG_NET_L2_CANBUS_RAW */
	{
		/* Offloaded interfaces manage their own link layer, so
		 * the assertion applies to native interfaces only.
		 */
		if (!is_iface_offloaded(iface)) {
			NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);
		}
	}

	net_if_flag_set(iface, NET_IF_RUNNING);
	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);
	net_virtual_enable(iface);

	/* If the interface is only having point-to-point traffic then we do
	 * not need to run DAD etc for it.
	 */
	if (!is_iface_offloaded(iface) &&
	    !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
		iface_ipv6_start(iface);
		net_ipv4_autoconf_start(iface);
	}
}
4159
notify_iface_down(struct net_if * iface)4160 static void notify_iface_down(struct net_if *iface)
4161 {
4162 net_if_flag_clear(iface, NET_IF_RUNNING);
4163 net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
4164 net_virtual_disable(iface);
4165
4166 if (!is_iface_offloaded(iface) &&
4167 !(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
4168 net_ipv4_autoconf_reset(iface);
4169 }
4170 }
4171
/* Map an operational state to its printable name. Only compiled in
 * with debug logging; otherwise it returns an empty string so the
 * NET_DBG call sites stay cheap.
 */
static inline const char *net_if_oper_state2str(enum net_if_oper_state state)
{
#if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
	switch (state) {
	case NET_IF_OPER_UNKNOWN:
		return "UNKNOWN";
	case NET_IF_OPER_NOTPRESENT:
		return "NOTPRESENT";
	case NET_IF_OPER_DOWN:
		return "DOWN";
	case NET_IF_OPER_LOWERLAYERDOWN:
		return "LOWERLAYERDOWN";
	case NET_IF_OPER_TESTING:
		return "TESTING";
	case NET_IF_OPER_DORMANT:
		return "DORMANT";
	case NET_IF_OPER_UP:
		return "UP";
	default:
		break;
	}

	return "<invalid>";
#else
	ARG_UNUSED(state);

	return "";
#endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
}
4201
/* Recompute the interface operational state from its admin, carrier
 * and dormant flags, store it, and fire the iface up/down notification
 * when the state crosses the OPER_UP boundary.
 */
static void update_operational_state(struct net_if *iface)
{
	enum net_if_oper_state prev_state = iface->if_dev->oper_state;
	enum net_if_oper_state new_state = NET_IF_OPER_UNKNOWN;

	/* Admin-down dominates all other conditions. */
	if (!net_if_is_admin_up(iface)) {
		new_state = NET_IF_OPER_DOWN;
		goto exit;
	}

	if (!net_if_is_carrier_ok(iface)) {
#if defined(CONFIG_NET_L2_VIRTUAL)
		/* For a virtual interface, a missing carrier means the
		 * underlying interface is down.
		 */
		if (net_if_l2(iface) == &NET_L2_GET_NAME(VIRTUAL)) {
			new_state = NET_IF_OPER_LOWERLAYERDOWN;
		} else
#endif /* CONFIG_NET_L2_VIRTUAL */
		{
			new_state = NET_IF_OPER_DOWN;
		}

		goto exit;
	}

	if (net_if_is_dormant(iface)) {
		new_state = NET_IF_OPER_DORMANT;
		goto exit;
	}

	new_state = NET_IF_OPER_UP;

exit:
	if (net_if_oper_state_set(iface, new_state) != new_state) {
		NET_ERR("Failed to update oper state to %d", new_state);
		return;
	}

	NET_DBG("iface %p, oper state %s admin %s carrier %s dormant %s",
		iface, net_if_oper_state2str(net_if_oper_state(iface)),
		net_if_is_admin_up(iface) ? "UP" : "DOWN",
		net_if_is_carrier_ok(iface) ? "ON" : "OFF",
		net_if_is_dormant(iface) ? "ON" : "OFF");

	/* Notify only on transitions across OPER_UP, not on every call. */
	if (net_if_oper_state(iface) == NET_IF_OPER_UP) {
		if (prev_state != NET_IF_OPER_UP) {
			notify_iface_up(iface);
		}
	} else {
		if (prev_state == NET_IF_OPER_UP) {
			notify_iface_down(iface);
		}
	}
}
4254
/* Initialize IGMP on the interface; a no-op unless IGMP support is
 * compiled in and the interface has (or can get) an IPv4 context.
 */
static void init_igmp(struct net_if *iface)
{
#if defined(CONFIG_NET_IPV4_IGMP)
	/* IGMP needs a working IPv4 context on this interface. */
	if (net_if_config_ipv4_get(iface, NULL) != 0) {
		return;
	}

	net_ipv4_igmp_init(iface);
#else
	ARG_UNUSED(iface);
#endif
}
4269
/* Administratively bring the interface up.
 *
 * Asks the L2 to enable itself (when it supports that), initializes
 * IGMP, sets the NET_IF_UP flag, emits NET_EVENT_IF_ADMIN_UP and
 * recomputes the operational state.
 *
 * Returns 0 on success, -EALREADY if already up, or the L2 enable()
 * error.
 */
int net_if_up(struct net_if *iface)
{
	int status = 0;

	NET_DBG("iface %p", iface);

	net_if_lock(iface);

	if (net_if_flag_is_set(iface, NET_IF_UP)) {
		status = -EALREADY;
		goto out;
	}

	/* If the L2 does not support enable just set the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	}

	/* Notify L2 to enable the interface */
	status = net_if_l2(iface)->enable(iface, true);
	if (status < 0) {
		goto out;
	}

	init_igmp(iface);

done:
	net_if_flag_set(iface, NET_IF_UP);
	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_UP, iface);
	/* May trigger notify_iface_up() if carrier/dormant allow it. */
	update_operational_state(iface);

out:
	net_if_unlock(iface);

	return status;
}
4306
/* Administratively take the interface down.
 *
 * Leaves all joined IPv6/IPv4 multicast groups first, then asks the
 * L2 to disable itself (when supported), clears NET_IF_UP, emits
 * NET_EVENT_IF_ADMIN_DOWN and recomputes the operational state.
 *
 * Returns 0 on success, -EALREADY if already down, or the L2 enable()
 * error (in which case the NET_IF_UP flag is left set).
 */
int net_if_down(struct net_if *iface)
{
	int status = 0;

	NET_DBG("iface %p", iface);

	net_if_lock(iface);

	if (!net_if_flag_is_set(iface, NET_IF_UP)) {
		status = -EALREADY;
		goto out;
	}

	/* Leave multicast groups while the interface can still send. */
	leave_mcast_all(iface);
	leave_ipv4_mcast_all(iface);

	/* If the L2 does not support enable just clear the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	}

	/* Notify L2 to disable the interface */
	status = net_if_l2(iface)->enable(iface, false);
	if (status < 0) {
		goto out;
	}

done:
	net_if_flag_clear(iface, NET_IF_UP);
	net_mgmt_event_notify(NET_EVENT_IF_ADMIN_DOWN, iface);
	/* May trigger notify_iface_down() if previously operational. */
	update_operational_state(iface);

out:
	net_if_unlock(iface);

	return status;
}
4344
net_if_carrier_on(struct net_if * iface)4345 void net_if_carrier_on(struct net_if *iface)
4346 {
4347 NET_ASSERT(iface);
4348
4349 net_if_lock(iface);
4350
4351 if (!net_if_flag_test_and_set(iface, NET_IF_LOWER_UP)) {
4352 update_operational_state(iface);
4353 }
4354
4355 net_if_unlock(iface);
4356 }
4357
net_if_carrier_off(struct net_if * iface)4358 void net_if_carrier_off(struct net_if *iface)
4359 {
4360 NET_ASSERT(iface);
4361
4362 net_if_lock(iface);
4363
4364 if (net_if_flag_test_and_clear(iface, NET_IF_LOWER_UP)) {
4365 update_operational_state(iface);
4366 }
4367
4368 net_if_unlock(iface);
4369 }
4370
net_if_dormant_on(struct net_if * iface)4371 void net_if_dormant_on(struct net_if *iface)
4372 {
4373 NET_ASSERT(iface);
4374
4375 net_if_lock(iface);
4376
4377 if (!net_if_flag_test_and_set(iface, NET_IF_DORMANT)) {
4378 update_operational_state(iface);
4379 }
4380
4381 net_if_unlock(iface);
4382 }
4383
net_if_dormant_off(struct net_if * iface)4384 void net_if_dormant_off(struct net_if *iface)
4385 {
4386 NET_ASSERT(iface);
4387
4388 net_if_lock(iface);
4389
4390 if (net_if_flag_test_and_clear(iface, NET_IF_DORMANT)) {
4391 update_operational_state(iface);
4392 }
4393
4394 net_if_unlock(iface);
4395 }
4396
4397 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
/* Enable or disable promiscuous mode at the driver level.
 *
 * Returns -ENOTSUP when the L2 does not advertise promiscuous-mode
 * support, or - when Ethernet support is not compiled in - always,
 * since no other L2 implements the driver hook here.
 */
static int promisc_mode_set(struct net_if *iface, bool enable)
{
	enum net_l2_flags l2_flags = 0;

	NET_ASSERT(iface);

	l2_flags = l2_flags_get(iface);
	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_NET_L2_ETHERNET)
	/* Only Ethernet has a driver-level promiscuous-mode control. */
	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
		int ret = net_eth_promisc_mode(iface, enable);

		if (ret < 0) {
			return ret;
		}
	}
#else
	ARG_UNUSED(enable);

	return -ENOTSUP;
#endif

	return 0;
}
4425
net_if_set_promisc(struct net_if * iface)4426 int net_if_set_promisc(struct net_if *iface)
4427 {
4428 int ret;
4429
4430 net_if_lock(iface);
4431
4432 ret = promisc_mode_set(iface, true);
4433 if (ret < 0) {
4434 goto out;
4435 }
4436
4437 ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
4438 if (ret) {
4439 ret = -EALREADY;
4440 goto out;
4441 }
4442
4443 out:
4444 net_if_unlock(iface);
4445
4446 return ret;
4447 }
4448
net_if_unset_promisc(struct net_if * iface)4449 void net_if_unset_promisc(struct net_if *iface)
4450 {
4451 int ret;
4452
4453 net_if_lock(iface);
4454
4455 ret = promisc_mode_set(iface, false);
4456 if (ret < 0) {
4457 goto out;
4458 }
4459
4460 net_if_flag_clear(iface, NET_IF_PROMISC);
4461
4462 out:
4463 net_if_unlock(iface);
4464 }
4465
/* True when promiscuous mode is currently enabled on the interface. */
bool net_if_is_promisc(struct net_if *iface)
{
	NET_ASSERT(iface);

	return net_if_flag_is_set(iface, NET_IF_PROMISC);
}
4472 #endif /* CONFIG_NET_PROMISCUOUS_MODE */
4473
4474 #ifdef CONFIG_NET_POWER_MANAGEMENT
4475
net_if_suspend(struct net_if * iface)4476 int net_if_suspend(struct net_if *iface)
4477 {
4478 int ret = 0;
4479
4480 net_if_lock(iface);
4481
4482 if (net_if_are_pending_tx_packets(iface)) {
4483 ret = -EBUSY;
4484 goto out;
4485 }
4486
4487 if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
4488 ret = -EALREADY;
4489 goto out;
4490 }
4491
4492 net_stats_add_suspend_start_time(iface, k_cycle_get_32());
4493
4494 out:
4495 net_if_unlock(iface);
4496
4497 return ret;
4498 }
4499
net_if_resume(struct net_if * iface)4500 int net_if_resume(struct net_if *iface)
4501 {
4502 int ret = 0;
4503
4504 net_if_lock(iface);
4505
4506 if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
4507 ret = -EALREADY;
4508 goto out;
4509 }
4510
4511 net_if_flag_clear(iface, NET_IF_SUSPENDED);
4512
4513 net_stats_add_suspend_end_time(iface, k_cycle_get_32());
4514
4515 out:
4516 net_if_unlock(iface);
4517
4518 return ret;
4519 }
4520
/* True when the interface is currently suspended for power management. */
bool net_if_is_suspended(struct net_if *iface)
{
	return net_if_flag_is_set(iface, NET_IF_SUSPENDED);
}
4525
4526 #endif /* CONFIG_NET_POWER_MANAGEMENT */
4527
4528 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
net_tx_ts_thread(void)4529 static void net_tx_ts_thread(void)
4530 {
4531 struct net_pkt *pkt;
4532
4533 NET_DBG("Starting TX timestamp callback thread");
4534
4535 while (1) {
4536 pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
4537 if (pkt) {
4538 net_if_call_timestamp_cb(pkt);
4539 }
4540 }
4541 }
4542
/* Register a TX timestamp callback. A NULL @pkt or @iface in the
 * handle acts as a wildcard when the callback is dispatched.
 * Re-registering an existing handle is safe: find-and-remove makes the
 * prepend idempotent (the entry just moves to the list front).
 */
void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
				  struct net_pkt *pkt,
				  struct net_if *iface,
				  net_if_timestamp_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
	sys_slist_prepend(&timestamp_callbacks, &handle->node);

	handle->iface = iface;
	handle->cb = cb;
	handle->pkt = pkt;

	k_mutex_unlock(&lock);
}
4559
/* Unregister a TX timestamp callback; a no-op if the handle was never
 * registered.
 */
void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);

	k_mutex_unlock(&lock);
}
4568
net_if_call_timestamp_cb(struct net_pkt * pkt)4569 void net_if_call_timestamp_cb(struct net_pkt *pkt)
4570 {
4571 sys_snode_t *sn, *sns;
4572
4573 k_mutex_lock(&lock, K_FOREVER);
4574
4575 SYS_SLIST_FOR_EACH_NODE_SAFE(×tamp_callbacks, sn, sns) {
4576 struct net_if_timestamp_cb *handle =
4577 CONTAINER_OF(sn, struct net_if_timestamp_cb, node);
4578
4579 if (((handle->iface == NULL) ||
4580 (handle->iface == net_pkt_iface(pkt))) &&
4581 (handle->pkt == NULL || handle->pkt == pkt)) {
4582 handle->cb(pkt);
4583 }
4584 }
4585
4586 k_mutex_unlock(&lock);
4587 }
4588
/* Queue a timestamped TX packet for dispatch by net_tx_ts_thread(). */
void net_if_add_tx_timestamp(struct net_pkt *pkt)
{
	k_fifo_put(&tx_ts_queue, pkt);
}
4593 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
4594
/* Initialize the network interface subsystem: set up TX traffic
 * classes, initialize every interface in the net_if linker section,
 * then run the IPv6/IPv4/router one-time setup. Optionally starts the
 * TX timestamp thread and sanity-checks the VLAN interface count.
 */
void net_if_init(void)
{
	int if_count = 0;

	NET_DBG("");

	k_mutex_lock(&lock, K_FOREVER);

	net_tc_tx_init();

	STRUCT_SECTION_FOREACH(net_if, iface) {
		init_iface(iface);
		if_count++;
	}

	if (if_count == 0) {
		NET_ERR("There is no network interface to work with!");
		goto out;
	}

	iface_ipv6_init(if_count);
	iface_ipv4_init(if_count);
	iface_router_init();

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
	/* Cooperative priority so timestamp callbacks run promptly. */
	k_thread_create(&tx_thread_ts, tx_ts_stack,
			K_KERNEL_STACK_SIZEOF(tx_ts_stack),
			(k_thread_entry_t)net_tx_ts_thread,
			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

#if defined(CONFIG_NET_VLAN)
	/* Make sure that we do not have too many network interfaces
	 * compared to the number of VLAN interfaces.
	 */
	if_count = 0;

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
			if_count++;
		}
	}

	if (if_count > CONFIG_NET_VLAN_COUNT) {
		NET_WARN("You have configured only %d VLAN interfaces"
			 " but you have %d network interfaces.",
			 CONFIG_NET_VLAN_COUNT, if_count);
	}
#endif

out:
	k_mutex_unlock(&lock);
}
4649
net_if_post_init(void)4650 void net_if_post_init(void)
4651 {
4652 NET_DBG("");
4653
4654 /* After TX is running, attempt to bring the interface up */
4655 STRUCT_SECTION_FOREACH(net_if, iface) {
4656 if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
4657 net_if_up(iface);
4658 }
4659 }
4660 }
4661