1 /*
2 * Copyright (c) 2016 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <logging/log.h>
8 LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
9
10 #include <init.h>
11 #include <kernel.h>
12 #include <linker/sections.h>
13 #include <random/rand32.h>
14 #include <syscall_handler.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <net/net_core.h>
18 #include <net/net_pkt.h>
19 #include <net/net_if.h>
20 #include <net/net_mgmt.h>
21 #include <net/ethernet.h>
22 #include <net/virtual.h>
23
24 #include "net_private.h"
25 #include "ipv6.h"
26 #include "ipv4_autoconf_internal.h"
27
28 #include "net_stats.h"
29
30 #define REACHABLE_TIME (MSEC_PER_SEC * 30) /* in ms */
31 /*
32 * split the min/max random reachable factors into numerator/denominator
33 * so that integer-based math works better
34 */
35 #define MIN_RANDOM_NUMER (1)
36 #define MIN_RANDOM_DENOM (2)
37 #define MAX_RANDOM_NUMER (3)
38 #define MAX_RANDOM_DENOM (2)
39
40 static K_MUTEX_DEFINE(lock);
41
42 /* net_if dedicated section limiters */
43 extern struct net_if _net_if_list_start[];
44 extern struct net_if _net_if_list_end[];
45
46 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
47 static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
48 static struct k_work_delayable router_timer;
49 static sys_slist_t active_router_timers;
50 #endif
51
52 #if defined(CONFIG_NET_NATIVE_IPV6)
53 /* Timer that triggers network address renewal */
54 static struct k_work_delayable address_lifetime_timer;
55
56 /* Track currently active address lifetime timers */
57 static sys_slist_t active_address_lifetime_timers;
58
59 /* Timer that triggers IPv6 prefix lifetime */
60 static struct k_work_delayable prefix_lifetime_timer;
61
62 /* Track currently active IPv6 prefix lifetime timers */
63 static sys_slist_t active_prefix_lifetime_timers;
64
65 #if defined(CONFIG_NET_IPV6_DAD)
66 /** Duplicate address detection (DAD) timer */
67 static struct k_work_delayable dad_timer;
68 static sys_slist_t active_dad_timers;
69 #endif
70
71 #if defined(CONFIG_NET_IPV6_ND)
72 static struct k_work_delayable rs_timer;
73 static sys_slist_t active_rs_timers;
74 #endif
75
76 static struct {
77 struct net_if_ipv6 ipv6;
78 struct net_if *iface;
79 } ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
#endif /* CONFIG_NET_NATIVE_IPV6 */
81
82 #if defined(CONFIG_NET_NATIVE_IPV4)
83 static struct {
84 struct net_if_ipv4 ipv4;
85 struct net_if *iface;
86 } ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
#endif /* CONFIG_NET_NATIVE_IPV4 */
88
89 /* We keep track of the link callbacks in this list.
90 */
91 static sys_slist_t link_callbacks;
92
93 #if defined(CONFIG_NET_NATIVE_IPV6)
94 /* Multicast join/leave tracking.
95 */
96 static sys_slist_t mcast_monitor_callbacks;
97 #endif
98
99 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
100 #if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
101 #define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
102 #endif
103
104 K_KERNEL_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
105 K_FIFO_DEFINE(tx_ts_queue);
106
107 static struct k_thread tx_thread_ts;
108
109 /* We keep track of the timestamp callbacks in this list.
110 */
111 static sys_slist_t timestamp_callbacks;
112 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
113
114 #if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
115 #define debug_check_packet(pkt) \
116 do { \
117 NET_DBG("Processing (pkt %p, prio %d) network packet " \
118 "iface %p/%d", \
119 pkt, net_pkt_priority(pkt), \
120 net_pkt_iface(pkt), \
121 net_if_get_by_iface(net_pkt_iface(pkt))); \
122 \
123 NET_ASSERT(pkt->frags); \
124 } while (0)
125 #else
126 #define debug_check_packet(...)
127 #endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
128
z_impl_net_if_get_by_index(int index)129 struct net_if *z_impl_net_if_get_by_index(int index)
130 {
131 if (index <= 0) {
132 return NULL;
133 }
134
135 if (&_net_if_list_start[index - 1] >= _net_if_list_end) {
136 NET_DBG("Index %d is too large", index);
137 return NULL;
138 }
139
140 return &_net_if_list_start[index - 1];
141 }
142
143 #ifdef CONFIG_USERSPACE
/* Syscall verification wrapper for net_if_get_by_index().
 *
 * Resolves the index via the implementation function, then checks that
 * the result is a valid, initialized K_OBJ_NET_IF kernel object before
 * returning it to the user thread. Returns NULL on any failure.
 */
struct net_if *z_vrfy_net_if_get_by_index(int index)
{
	struct net_if *iface;
	struct z_object *zo;
	int ret;

	iface = net_if_get_by_index(index);
	if (!iface) {
		return NULL;
	}

	/* Look up the kernel object metadata for this interface pointer. */
	zo = z_object_find(iface);

	ret = z_object_validate(zo, K_OBJ_NET_IF, _OBJ_INIT_TRUE);
	if (ret != 0) {
		/* Log why validation failed, then deny access. */
		z_dump_object_error(ret, iface, zo, K_OBJ_NET_IF);
		return NULL;
	}

	return iface;
}
165
166 #include <syscalls/net_if_get_by_index_mrsh.c>
167 #endif
168
net_context_send_cb(struct net_context * context,int status)169 static inline void net_context_send_cb(struct net_context *context,
170 int status)
171 {
172 if (!context) {
173 return;
174 }
175
176 if (context->send_cb) {
177 context->send_cb(context, status, context->user_data);
178 }
179
180 if (IS_ENABLED(CONFIG_NET_UDP) &&
181 net_context_get_ip_proto(context) == IPPROTO_UDP) {
182 net_stats_update_udp_sent(net_context_get_iface(context));
183 } else if (IS_ENABLED(CONFIG_NET_TCP) &&
184 net_context_get_ip_proto(context) == IPPROTO_TCP) {
185 net_stats_update_tcp_seg_sent(net_context_get_iface(context));
186 }
187 }
188
update_txtime_stats_detail(struct net_pkt * pkt,uint32_t start_time,uint32_t stop_time)189 static void update_txtime_stats_detail(struct net_pkt *pkt,
190 uint32_t start_time, uint32_t stop_time)
191 {
192 uint32_t val, prev = start_time;
193 int i;
194
195 for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
196 if (!net_pkt_stats_tick(pkt)[i]) {
197 break;
198 }
199
200 val = net_pkt_stats_tick(pkt)[i] - prev;
201 prev = net_pkt_stats_tick(pkt)[i];
202 net_pkt_stats_tick(pkt)[i] = val;
203 }
204 }
205
/* Hand one packet to the interface driver through the L2 send hook.
 *
 * Responsibilities, in order:
 *  - snapshot the link-layer destination for link callbacks (the pkt
 *    may be freed before the callbacks run),
 *  - drop with -ENETDOWN if the interface is not up,
 *  - collect TX-time statistics when enabled,
 *  - free the packet on send failure, account bytes on success,
 *  - notify the owning net_context and any registered link callbacks.
 *
 * Returns false only when pkt is NULL; true otherwise, even when the
 * packet was dropped.
 */
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_linkaddr ll_dst = {
		.addr = NULL
	};
	struct net_linkaddr_storage ll_dst_storage;
	struct net_context *context;
	uint32_t create_time;
	int status;

	/* We collect send statistics for each socket priority if enabled */
	uint8_t pkt_priority;

	if (!pkt) {
		return false;
	}

	create_time = net_pkt_create_time(pkt);

	debug_check_packet(pkt);

	/* If there're any link callbacks, with such a callback receiving
	 * a destination address, copy that address out of packet, just in
	 * case packet is freed before callback is called.
	 */
	if (!sys_slist_is_empty(&link_callbacks)) {
		if (net_linkaddr_set(&ll_dst_storage,
				     net_pkt_lladdr_dst(pkt)->addr,
				     net_pkt_lladdr_dst(pkt)->len) == 0) {
			ll_dst.addr = ll_dst_storage.addr;
			ll_dst.len = ll_dst_storage.len;
			ll_dst.type = net_pkt_lladdr_dst(pkt)->type;
		}
	}

	context = net_pkt_context(pkt);

	if (net_if_flag_is_set(iface, NET_IF_UP)) {
		if (IS_ENABLED(CONFIG_NET_TCP) &&
		    net_pkt_family(pkt) != AF_UNSPEC) {
			net_pkt_set_queued(pkt, false);
		}

		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
			pkt_priority = net_pkt_priority(pkt);

			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
				/* Make sure the statistics information is not
				 * lost by keeping the net_pkt over L2 send.
				 */
				net_pkt_ref(pkt);
			}
		}

		status = net_if_l2(iface)->send(iface, pkt);

		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
			uint32_t end_tick = k_cycle_get_32();

			net_pkt_set_tx_stats_tick(pkt, end_tick);

			net_stats_update_tc_tx_time(iface,
						    pkt_priority,
						    create_time,
						    end_tick);

			if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
				update_txtime_stats_detail(
					pkt,
					create_time,
					end_tick);

				net_stats_update_tc_tx_time_detail(
					iface, pkt_priority,
					net_pkt_stats_tick(pkt));

				/* For TCP connections, we might keep the pkt
				 * longer so that we can resend it if needed.
				 * Because of that we need to clear the
				 * statistics here.
				 */
				net_pkt_stats_tick_reset(pkt);

				/* Drop the extra reference taken above. */
				net_pkt_unref(pkt);
			}
		}

	} else {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		status = -ENETDOWN;
	}

	/* Negative status (from the driver or the down-check above) means
	 * the packet was not sent: release it. A non-negative status is
	 * accounted as sent bytes (assumes the L2 send hook returns the
	 * byte count on success — driver contract).
	 */
	if (status < 0) {
		net_pkt_unref(pkt);
	} else {
		net_stats_update_bytes_sent(iface, status);
	}

	if (context) {
		NET_DBG("Calling context send cb %p status %d",
			context, status);

		net_context_send_cb(context, status);
	}

	/* Only set when link callbacks existed and the dst copy succeeded. */
	if (ll_dst.addr) {
		net_if_call_link_cb(iface, &ll_dst, status);
	}

	return true;
}
318
/* TX-queue consumer entry point: stamp the packet with the current
 * cycle counter, push it to the driver, and release the pending-TX
 * count used by power management.
 */
void net_process_tx_packet(struct net_pkt *pkt)
{
	struct net_if *iface = net_pkt_iface(pkt);

	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());

	net_if_tx(iface, pkt);

#if defined(CONFIG_NET_POWER_MANAGEMENT)
	iface->tx_pending--;
#endif
}
333
net_if_queue_tx(struct net_if * iface,struct net_pkt * pkt)334 void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
335 {
336 uint8_t prio = net_pkt_priority(pkt);
337 uint8_t tc = net_tx_priority2tc(prio);
338
339 net_stats_update_tc_sent_pkt(iface, tc);
340 net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
341 net_stats_update_tc_sent_priority(iface, tc, prio);
342
343 /* For highest priority packet, skip the TX queue and push directly to
344 * the driver. Also if there are no TX queue/thread, push the packet
345 * directly to the driver.
346 */
347 if ((IS_ENABLED(CONFIG_NET_TC_SKIP_FOR_HIGH_PRIO) &&
348 prio == NET_PRIORITY_CA) || NET_TC_TX_COUNT == 0) {
349 net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
350
351 net_if_tx(net_pkt_iface(pkt), pkt);
352 return;
353 }
354
355 #if NET_TC_TX_COUNT > 1
356 NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
357 #endif
358
359 #if defined(CONFIG_NET_POWER_MANAGEMENT)
360 iface->tx_pending++;
361 #endif
362
363 if (!net_tc_submit_to_tx_queue(tc, pkt)) {
364 #if defined(CONFIG_NET_POWER_MANAGEMENT)
365 iface->tx_pending--
366 #endif
367 ;
368 }
369 }
370
/* Clear the statistics of a single network interface.
 *
 * The iface pointer is validated against the net_if linker section
 * before anything is written. The global lock is taken around the
 * memset for consistency with net_if_stats_reset_all(), so a
 * concurrent reset-all cannot interleave with this reset.
 */
void net_if_stats_reset(struct net_if *iface)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	STRUCT_SECTION_FOREACH(net_if, tmp) {
		if (iface == tmp) {
			k_mutex_lock(&lock, K_FOREVER);
			memset(&iface->stats, 0, sizeof(iface->stats));
			k_mutex_unlock(&lock);
			return;
		}
	}
#else
	ARG_UNUSED(iface);
#endif
}
384
/* Clear the statistics of every network interface while holding the
 * global lock. No-op unless per-interface statistics are enabled.
 */
void net_if_stats_reset_all(void)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	k_mutex_lock(&lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		(void)memset(&iface->stats, 0, sizeof(iface->stats));
	}

	k_mutex_unlock(&lock);
#endif
}
397
/* Bring up one interface at boot: enable IP flags, initialize the
 * virtual-interface support and the kernel object metadata, then call
 * the driver's init hook. Skipped (with an error log) when the device
 * provides no API or no init function.
 */
static inline void init_iface(struct net_if *iface)
{
	const struct net_if_api *api = net_if_get_device(iface)->api;

	if (!api || !api->init) {
		NET_ERR("Iface %p driver API init NULL", iface);
		return;
	}

	/* By default IPv4 and IPv6 are enabled for a given network interface.
	 * These can be turned off later if needed.
	 */
#if defined(CONFIG_NET_NATIVE_IPV4)
	net_if_flag_set(iface, NET_IF_IPV4);
#endif
#if defined(CONFIG_NET_NATIVE_IPV6)
	net_if_flag_set(iface, NET_IF_IPV6);
#endif
	net_virtual_init(iface);

	NET_DBG("On iface %p", iface);

#ifdef CONFIG_USERSPACE
	/* Register the interface as a kernel object so user threads can be
	 * granted access to it.
	 */
	z_object_init(iface);
#endif

	api->init(iface);
}
426
/* Validate and dispatch an outgoing packet on iface.
 *
 * Drops the packet when the interface is down or suspended, or when an
 * offloaded build has no L2. Fills in a missing link-layer source
 * address, lets loopback (DUMMY L2) packets through untouched, and for
 * IPv6 runs neighbor-discovery preparation which decides the final
 * verdict. Returns the verdict (NET_OK / NET_DROP / NET_CONTINUE).
 */
enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_context *context = net_pkt_context(pkt);
	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
	enum net_verdict verdict = NET_OK;
	int status = -EIO;

	k_mutex_lock(&lock, K_FOREVER);

	if (!net_if_flag_is_set(iface, NET_IF_UP) ||
	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		verdict = NET_DROP;
		status = -ENETDOWN;
		goto done;
	}

	if (IS_ENABLED(CONFIG_NET_OFFLOAD) && !net_if_l2(iface)) {
		NET_WARN("no l2 for iface %p, discard pkt", iface);
		verdict = NET_DROP;
		goto done;
	}

	/* If the ll address is not set at all, then we must set
	 * it here.
	 * Workaround Linux bug, see:
	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
	 */
	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
	    !net_pkt_lladdr_src(pkt)->addr) {
		net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
		net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
	}

#if defined(CONFIG_NET_LOOPBACK)
	/* If the packet is destined back to us, then there is no need to do
	 * additional checks, so let the packet through.
	 */
	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
		goto done;
	}
#endif

	/* If the ll dst address is not set check if it is present in the nbr
	 * cache.
	 */
	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		verdict = net_ipv6_prepare_for_send(pkt);
	}

done:
	/* NET_OK in which case packet has checked successfully. In this case
	 * the net_context callback is called after successful delivery in
	 * net_if_tx_thread().
	 *
	 * NET_DROP in which case we call net_context callback that will
	 * give the status to user application.
	 *
	 * NET_CONTINUE in which case the sending of the packet is delayed.
	 * This can happen for example if we need to do IPv6 ND to figure
	 * out link layer address.
	 */
	if (verdict == NET_DROP) {
		if (context) {
			NET_DBG("Calling ctx send cb %p verdict %d",
				context, verdict);
			net_context_send_cb(context, status);
		}

		if (dst->addr) {
			net_if_call_link_cb(iface, dst, status);
		}
	} else if (verdict == NET_OK) {
		/* Packet is ready to be sent by L2, let's queue */
		net_if_queue_tx(iface, pkt);
	}

	k_mutex_unlock(&lock);

	return verdict;
}
509
net_if_set_link_addr_locked(struct net_if * iface,uint8_t * addr,uint8_t len,enum net_link_type type)510 int net_if_set_link_addr_locked(struct net_if *iface,
511 uint8_t *addr, uint8_t len,
512 enum net_link_type type)
513 {
514 int ret;
515
516 k_mutex_lock(&lock, K_FOREVER);
517
518 ret = net_if_set_link_addr_unlocked(iface, addr, len, type);
519
520 k_mutex_unlock(&lock);
521
522 return ret;
523 }
524
net_if_get_by_link_addr(struct net_linkaddr * ll_addr)525 struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
526 {
527 STRUCT_SECTION_FOREACH(net_if, iface) {
528 if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
529 ll_addr->len)) {
530 return iface;
531 }
532 }
533
534 return NULL;
535 }
536
net_if_lookup_by_dev(const struct device * dev)537 struct net_if *net_if_lookup_by_dev(const struct device *dev)
538 {
539 STRUCT_SECTION_FOREACH(net_if, iface) {
540 if (net_if_get_device(iface) == dev) {
541 return iface;
542 }
543 }
544
545 return NULL;
546 }
547
/* Return the default network interface.
 *
 * The preferred L2 type is chosen at build time via the
 * CONFIG_NET_DEFAULT_IF_* options; when several are enabled the last
 * matching assignment below wins. Falls back to the first interface in
 * the net_if section when no preferred type yields a match, and
 * returns NULL only when no interfaces exist at all.
 */
struct net_if *net_if_get_default(void)
{
	struct net_if *iface = NULL;

	if (_net_if_list_start == _net_if_list_end) {
		return NULL;
	}

#if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_BLUETOOTH)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(BLUETOOTH));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
	/* NULL asks for the first offloaded interface. */
	iface = net_if_get_first_by_type(NULL);
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_PPP)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
#endif

	return iface ? iface : _net_if_list_start;
}
583
net_if_get_first_by_type(const struct net_l2 * l2)584 struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
585 {
586 STRUCT_SECTION_FOREACH(net_if, iface) {
587 if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
588 !l2 && net_if_offload(iface)) {
589 return iface;
590 }
591
592 if (net_if_l2(iface) == l2) {
593 return iface;
594 }
595 }
596
597 return NULL;
598 }
599
l2_flags_get(struct net_if * iface)600 static enum net_l2_flags l2_flags_get(struct net_if *iface)
601 {
602 enum net_l2_flags flags = 0;
603
604 if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
605 flags = net_if_l2(iface)->get_flags(iface);
606 }
607
608 return flags;
609 }
610
611 #if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
612 /* Return how many bits are shared between two IP addresses */
/* Return the length (in bits) of the common prefix shared by two IP
 * addresses of addr_len bytes, scanning from the most significant bit.
 */
static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len)
{
	uint8_t prefix = 0U;

	for (int i = 0; i < addr_len; i++) {
		uint8_t diff = src[i] ^ dst[i];

		if (diff == 0U) {
			/* Whole octet matches. */
			prefix += 8U;
			continue;
		}

		/* Count the matching leading bits of the first octet
		 * that differs, then stop.
		 */
		while (!(diff & 0x80)) {
			prefix++;
			diff <<= 1;
		}
		break;
	}

	return prefix;
}
637
/* Find the router entry matching (iface, family, addr) in the global
 * routers[] table. addr points at a struct in6_addr or struct in_addr
 * depending on family. Returns NULL when not found. Takes the global
 * lock while scanning.
 */
static struct net_if_router *iface_router_lookup(struct net_if *iface,
						 uint8_t family, void *addr)
{
	struct net_if_router *router = NULL;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		/* Skip free slots and entries for other families/ifaces. */
		if (!routers[i].is_used ||
		    routers[i].address.family != family ||
		    routers[i].iface != iface) {
			continue;
		}

		if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
		     net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
				       (struct in6_addr *)addr)) ||
		    (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
		     net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
				       (struct in_addr *)addr))) {
			router = &routers[i];
			goto out;
		}
	}

out:
	k_mutex_unlock(&lock);

	return router;
}
669
iface_router_notify_deletion(struct net_if_router * router,const char * delete_reason)670 static void iface_router_notify_deletion(struct net_if_router *router,
671 const char *delete_reason)
672 {
673 if (IS_ENABLED(CONFIG_NET_IPV6) &&
674 router->address.family == AF_INET6) {
675 NET_DBG("IPv6 router %s %s",
676 log_strdup(net_sprint_ipv6_addr(
677 net_if_router_ipv6(router))),
678 delete_reason);
679
680 net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
681 router->iface,
682 &router->address.in6_addr,
683 sizeof(struct in6_addr));
684 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
685 router->address.family == AF_INET) {
686 NET_DBG("IPv4 router %s %s",
687 log_strdup(net_sprint_ipv4_addr(
688 net_if_router_ipv4(router))),
689 delete_reason);
690
691 net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
692 router->iface,
693 &router->address.in_addr,
694 sizeof(struct in6_addr));
695 }
696 }
697
iface_router_ends(const struct net_if_router * router,uint32_t now)698 static inline int32_t iface_router_ends(const struct net_if_router *router,
699 uint32_t now)
700 {
701 uint32_t ends = router->life_start;
702
703 ends += MSEC_PER_SEC * router->lifetime;
704
705 /* Signed number of ms until router lifetime ends */
706 return (int32_t)(ends - now);
707 }
708
/* Reschedule (or cancel) the shared router expiry work item so that it
 * fires when the soonest-expiring active router's lifetime ends.
 * Called with @p now from k_uptime_get_32(); recursive use of the
 * global lock is fine for the thread that already holds it.
 */
static void iface_router_update_timer(uint32_t now)
{
	struct net_if_router *router, *next;
	uint32_t new_delay = UINT32_MAX;

	k_mutex_lock(&lock, K_FOREVER);

	/* Find the shortest remaining lifetime among active routers;
	 * an already-expired one forces an immediate run.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  router, next, node) {
		int32_t ends = iface_router_ends(router, now);

		if (ends <= 0) {
			new_delay = 0;
			break;
		}

		new_delay = MIN((uint32_t)ends, new_delay);
	}

	/* UINT32_MAX means no active timed routers remain. */
	if (new_delay == UINT32_MAX) {
		k_work_cancel_delayable(&router_timer);
	} else {
		k_work_reschedule(&router_timer, K_MSEC(new_delay));
	}

	k_mutex_unlock(&lock);
}
736
/* Work handler for router_timer: retire every active router whose
 * lifetime has elapsed, notify listeners, and re-arm the timer for the
 * next expiry (if any).
 */
static void iface_router_expired(struct k_work *work)
{
	uint32_t current_time = k_uptime_get_32();
	struct net_if_router *router, *next;
	/* Tracks the node preceding the current one so expired entries can
	 * be unlinked from the singly-linked list in O(1).
	 */
	sys_snode_t *prev_node = NULL;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  router, next, node) {
		int32_t ends = iface_router_ends(router, current_time);

		if (ends > 0) {
			/* We have to loop on all active routers as their
			 * lifetime differ from each other.
			 */
			prev_node = &router->node;
			continue;
		}

		iface_router_notify_deletion(router, "has expired");
		sys_slist_remove(&active_router_timers,
				 prev_node, &router->node);
		router->is_used = false;
	}

	/* Re-arm (or cancel) the timer for the remaining routers. */
	iface_router_update_timer(current_time);

	k_mutex_unlock(&lock);
}
769
/* Claim a free slot in routers[] for a new router entry.
 *
 * A non-zero lifetime (seconds) makes the router time limited: it is
 * put on the active timer list and the expiry timer is recomputed. A
 * zero lifetime marks it infinite. The address is copied according to
 * family and the matching ROUTER_ADD management event is emitted.
 * Returns the new entry, or NULL when the table is full.
 */
static struct net_if_router *iface_router_add(struct net_if *iface,
					      uint8_t family, void *addr,
					      bool is_default,
					      uint16_t lifetime)
{
	struct net_if_router *router = NULL;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		if (routers[i].is_used) {
			continue;
		}

		routers[i].is_used = true;
		routers[i].iface = iface;
		routers[i].address.family = family;

		if (lifetime) {
			/* A time-limited router is treated as a default
			 * router (for IPv4 this may be overridden with the
			 * caller's is_default below).
			 */
			routers[i].is_default = true;
			routers[i].is_infinite = false;
			routers[i].lifetime = lifetime;
			routers[i].life_start = k_uptime_get_32();

			sys_slist_append(&active_router_timers,
					 &routers[i].node);

			iface_router_update_timer(routers[i].life_start);
		} else {
			routers[i].is_default = false;
			routers[i].is_infinite = true;
			routers[i].lifetime = 0;
		}

		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
			memcpy(net_if_router_ipv6(&routers[i]), addr,
			       sizeof(struct in6_addr));
			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV6_ROUTER_ADD, iface,
					&routers[i].address.in6_addr,
					sizeof(struct in6_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				log_strdup(net_sprint_ipv6_addr(
						   (struct in6_addr *)addr)),
				lifetime, routers[i].is_default);
		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
			memcpy(net_if_router_ipv4(&routers[i]), addr,
			       sizeof(struct in_addr));
			routers[i].is_default = is_default;

			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV4_ROUTER_ADD, iface,
					&routers[i].address.in_addr,
					sizeof(struct in_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				log_strdup(net_sprint_ipv4_addr(
						   (struct in_addr *)addr)),
				lifetime, is_default);
		}

		router = &routers[i];
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return router;
}
844
iface_router_rm(struct net_if_router * router)845 static bool iface_router_rm(struct net_if_router *router)
846 {
847 bool ret = false;
848
849 k_mutex_lock(&lock, K_FOREVER);
850
851 if (!router->is_used) {
852 goto out;
853 }
854
855 iface_router_notify_deletion(router, "has been removed");
856
857 /* We recompute the timer if only the router was time limited */
858 if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
859 iface_router_update_timer(k_uptime_get_32());
860 }
861
862 router->is_used = false;
863 ret = true;
864
865 out:
866 k_mutex_unlock(&lock);
867
868 return ret;
869 }
870
net_if_router_rm(struct net_if_router * router)871 void net_if_router_rm(struct net_if_router *router)
872 {
873 k_mutex_lock(&lock, K_FOREVER);
874
875 router->is_used = false;
876
877 /* FIXME - remove timer */
878
879 k_mutex_unlock(&lock);
880 }
881
iface_router_find_default(struct net_if * iface,uint8_t family,void * addr)882 static struct net_if_router *iface_router_find_default(struct net_if *iface,
883 uint8_t family, void *addr)
884 {
885 struct net_if_router *router = NULL;
886 int i;
887
888 /* Todo: addr will need to be handled */
889 ARG_UNUSED(addr);
890
891 k_mutex_lock(&lock, K_FOREVER);
892
893 for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
894 if (!routers[i].is_used ||
895 !routers[i].is_default ||
896 routers[i].address.family != family) {
897 continue;
898 }
899
900 if (iface && iface != routers[i].iface) {
901 continue;
902 }
903
904 router = &routers[i];
905 goto out;
906 }
907
908 out:
909 k_mutex_unlock(&lock);
910
911 return router;
912 }
913
iface_router_init(void)914 static void iface_router_init(void)
915 {
916 k_work_init_delayable(&router_timer, iface_router_expired);
917 sys_slist_init(&active_router_timers);
918 }
919 #else
920 #define iface_router_init(...)
921 #endif
922
923 #if defined(CONFIG_NET_NATIVE_IPV6)
net_if_config_ipv6_get(struct net_if * iface,struct net_if_ipv6 ** ipv6)924 int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
925 {
926 int ret = 0;
927 int i;
928
929 k_mutex_lock(&lock, K_FOREVER);
930
931 if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
932 ret = -ENOTSUP;
933 goto out;
934 }
935
936 if (iface->config.ip.ipv6) {
937 if (ipv6) {
938 *ipv6 = iface->config.ip.ipv6;
939 }
940
941 goto out;
942 }
943
944 for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
945 if (ipv6_addresses[i].iface) {
946 continue;
947 }
948
949 iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
950 ipv6_addresses[i].iface = iface;
951
952 if (ipv6) {
953 *ipv6 = &ipv6_addresses[i].ipv6;
954 }
955
956 goto out;
957 }
958
959 ret = -ESRCH;
960 out:
961 k_mutex_unlock(&lock);
962
963 return ret;
964 }
965
net_if_config_ipv6_put(struct net_if * iface)966 int net_if_config_ipv6_put(struct net_if *iface)
967 {
968 int ret = 0;
969 int i;
970
971 k_mutex_lock(&lock, K_FOREVER);
972
973 if (!net_if_flag_is_set(iface, NET_IF_IPV6)) {
974 ret = -ENOTSUP;
975 goto out;
976 }
977
978 if (!iface->config.ip.ipv6) {
979 ret = -EALREADY;
980 goto out;
981 }
982
983 for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
984 if (ipv6_addresses[i].iface != iface) {
985 continue;
986 }
987
988 iface->config.ip.ipv6 = NULL;
989 ipv6_addresses[i].iface = NULL;
990
991 goto out;
992 }
993
994 ret = -ESRCH;
995 out:
996 k_mutex_unlock(&lock);
997
998 return ret;
999 }
1000
1001 #if defined(CONFIG_NET_IPV6_MLD)
join_mcast_allnodes(struct net_if * iface)1002 static void join_mcast_allnodes(struct net_if *iface)
1003 {
1004 struct in6_addr addr;
1005 int ret;
1006
1007 net_ipv6_addr_create_ll_allnodes_mcast(&addr);
1008
1009 ret = net_ipv6_mld_join(iface, &addr);
1010 if (ret < 0 && ret != -EALREADY) {
1011 NET_ERR("Cannot join all nodes address %s (%d)",
1012 log_strdup(net_sprint_ipv6_addr(&addr)), ret);
1013 }
1014 }
1015
join_mcast_solicit_node(struct net_if * iface,struct in6_addr * my_addr)1016 static void join_mcast_solicit_node(struct net_if *iface,
1017 struct in6_addr *my_addr)
1018 {
1019 struct in6_addr addr;
1020 int ret;
1021
1022 /* Join to needed multicast groups, RFC 4291 ch 2.8 */
1023 net_ipv6_addr_create_solicited_node(my_addr, &addr);
1024
1025 ret = net_ipv6_mld_join(iface, &addr);
1026 if (ret < 0 && ret != -EALREADY) {
1027 NET_ERR("Cannot join solicit node address %s (%d)",
1028 log_strdup(net_sprint_ipv6_addr(&addr)), ret);
1029 }
1030 }
1031
leave_mcast_all(struct net_if * iface)1032 static void leave_mcast_all(struct net_if *iface)
1033 {
1034 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1035 int i;
1036
1037 if (!ipv6) {
1038 return;
1039 }
1040
1041 for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1042 if (!ipv6->mcast[i].is_used ||
1043 !ipv6->mcast[i].is_joined) {
1044 continue;
1045 }
1046
1047 net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
1048 }
1049 }
1050
join_mcast_nodes(struct net_if * iface,struct in6_addr * addr)1051 static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
1052 {
1053 enum net_l2_flags flags = 0;
1054
1055 flags = l2_flags_get(iface);
1056 if (flags & NET_L2_MULTICAST) {
1057 join_mcast_allnodes(iface);
1058
1059 if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
1060 join_mcast_solicit_node(iface, addr);
1061 }
1062 }
1063 }
1064 #else
1065 #define join_mcast_allnodes(...)
1066 #define join_mcast_solicit_node(...)
1067 #define leave_mcast_all(...)
1068 #define join_mcast_nodes(...)
1069 #endif /* CONFIG_NET_IPV6_MLD */
1070
1071 #if defined(CONFIG_NET_IPV6_DAD)
1072 #define DAD_TIMEOUT 100U /* ms */
1073
/* Work handler for dad_timer: every tracked address whose DAD window
 * (DAD_TIMEOUT ms from dad_start) elapsed without a conflict is
 * promoted to NET_ADDR_PREFERRED, a DAD_SUCCEED event is emitted, and
 * the timer is re-armed for the next still-pending entry.
 */
static void dad_timeout(struct k_work *work)
{
	uint32_t current_time = k_uptime_get_32();
	struct net_if_addr *ifaddr, *next;
	int32_t delay = -1;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
					  ifaddr, next, dad_node) {
		struct net_if_addr *tmp;
		struct net_if *iface;

		/* DAD entries are ordered by construction. Stop when
		 * we find one that hasn't expired.
		 */
		delay = (int32_t)(ifaddr->dad_start +
				  DAD_TIMEOUT - current_time);
		if (delay > 0) {
			break;
		}

		/* Removing the ifaddr from active_dad_timers list */
		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);

		NET_DBG("DAD succeeded for %s",
			log_strdup(net_sprint_ipv6_addr(
					   &ifaddr->address.in6_addr)));

		ifaddr->addr_state = NET_ADDR_PREFERRED;

		/* Because we do not know the interface at this point,
		 * we need to lookup for it.
		 */
		iface = NULL;
		tmp = net_if_ipv6_addr_lookup(&ifaddr->address.in6_addr,
					      &iface);
		if (tmp == ifaddr) {
			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV6_DAD_SUCCEED,
					iface, &ifaddr->address.in6_addr,
					sizeof(struct in6_addr));

			/* The address gets added to neighbor cache which is not
			 * needed in this case as the address is our own one.
			 */
			net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
		}

		/* NULL marks "all entries consumed" for the re-arm check. */
		ifaddr = NULL;
	}

	/* Only re-arm when the loop broke on a not-yet-expired entry. */
	if ((ifaddr != NULL) && (delay > 0)) {
		k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
	}

	k_mutex_unlock(&lock);
}
1134
/* Begin duplicate address detection for one address.
 *
 * The address is marked tentative; if the interface is up and the
 * initial DAD probe is sent successfully, the address is queued on
 * active_dad_timers and the shared DAD timer is armed (only when not
 * already pending, so earlier deadlines are not pushed back). When the
 * interface is down, DAD is deferred until it comes up.
 */
static void net_if_ipv6_start_dad(struct net_if *iface,
				  struct net_if_addr *ifaddr)
{
	ifaddr->addr_state = NET_ADDR_TENTATIVE;

	if (net_if_is_up(iface)) {
		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
			iface,
			log_strdup(net_sprint_ll_addr(
					   net_if_get_link_addr(iface)->addr,
					   net_if_get_link_addr(iface)->len)),
			log_strdup(net_sprint_ipv6_addr(
					   &ifaddr->address.in6_addr)));

		ifaddr->dad_count = 1U;

		/* net_ipv6_start_dad() returns 0 when the probe was sent. */
		if (!net_ipv6_start_dad(iface, ifaddr)) {
			ifaddr->dad_start = k_uptime_get_32();
			sys_slist_append(&active_dad_timers, &ifaddr->dad_node);

			/* FUTURE: use schedule, not reschedule. */
			if (!k_work_delayable_remaining_get(&dad_timer)) {
				k_work_reschedule(&dad_timer,
						  K_MSEC(DAD_TIMEOUT));
			}
		}
	} else {
		NET_DBG("Interface %p is down, starting DAD for %s later.",
			iface,
			log_strdup(net_sprint_ipv6_addr(
					   &ifaddr->address.in6_addr)));
	}
}
1168
/* Kick off duplicate address detection for an interface: add the
 * link-layer-derived (IID) autoconf address, then (re)start DAD for
 * every other unicast address that was configured while the interface
 * was down. Adding the autoconf address triggers DAD for it as a side
 * effect, which is why it is skipped in the loop below.
 */
void net_if_start_dad(struct net_if *iface)
{
	struct net_if_addr *ifaddr;
	struct net_if_ipv6 *ipv6;
	struct in6_addr addr = { };
	int ret, i;

	k_mutex_lock(&lock, K_FOREVER);

	NET_DBG("Starting DAD for iface %p", iface);

	ret = net_if_config_ipv6_get(iface, &ipv6);
	if (ret < 0) {
		/* -ENOTSUP just means IPv6 is off for this iface; only
		 * other failures are worth a warning.
		 */
		if (ret != -ENOTSUP) {
			NET_WARN("Cannot do DAD IPv6 config is not valid.");
		}

		goto out;
	}

	if (!ipv6) {
		goto out;
	}

	/* Build the EUI-64-style address from the link-layer address. */
	net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));

	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
	if (!ifaddr) {
		NET_ERR("Cannot add %s address to interface %p, DAD fails",
			log_strdup(net_sprint_ipv6_addr(&addr)), iface);
	}

	/* Start DAD for all the addresses that were added earlier when
	 * the interface was down.
	 */
	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!ipv6->unicast[i].is_used ||
		    ipv6->unicast[i].address.family != AF_INET6 ||
		    &ipv6->unicast[i] == ifaddr) {
			continue;
		}

		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
	}

out:
	k_mutex_unlock(&lock);
}
1217
/* Handle a DAD failure for @p addr (a duplicate was detected on the
 * link): stop tracking its DAD timer, notify listeners via
 * NET_EVENT_IPV6_DAD_FAILED and remove the address from the
 * interface. @p iface may be updated by the lookup.
 */
void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
{
	struct net_if_addr *ifaddr;

	k_mutex_lock(&lock, K_FOREVER);

	ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
	if (!ifaddr) {
		NET_ERR("Cannot find %s address in interface %p",
			log_strdup(net_sprint_ipv6_addr(addr)), iface);
		goto out;
	}

	sys_slist_find_and_remove(&active_dad_timers, &ifaddr->dad_node);

	/* Notify before removal so listeners can still read the data. */
	net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
					&ifaddr->address.in6_addr,
					sizeof(struct in6_addr));

	net_if_ipv6_addr_rm(iface, addr);

out:
	k_mutex_unlock(&lock);
}
1242
/* One-time init of the DAD machinery: the shared delayable work item
 * and the list of addresses with DAD in progress.
 */
static inline void iface_ipv6_dad_init(void)
{
	k_work_init_delayable(&dad_timer, dad_timeout);
	sys_slist_init(&active_dad_timers);
}
1248
1249 #else
/* DAD disabled: the address is usable immediately, so mark it
 * preferred right away.
 */
static inline void net_if_ipv6_start_dad(struct net_if *iface,
					 struct net_if_addr *ifaddr)
{
	ifaddr->addr_state = NET_ADDR_PREFERRED;
}
1255
1256 #define iface_ipv6_dad_init(...)
1257 #endif /* CONFIG_NET_IPV6_DAD */
1258
1259 #if defined(CONFIG_NET_IPV6_ND)
1260 #define RS_TIMEOUT (1U * MSEC_PER_SEC)
1261 #define RS_COUNT 3
1262
/* Work handler for rs_timer: walk the active router-solicitation
 * list, resend an RS for every entry whose RS_TIMEOUT elapsed without
 * a router advertisement (up to RS_COUNT attempts), and re-arm the
 * timer for the first entry that has not yet expired.
 */
static void rs_timeout(struct k_work *work)
{
	uint32_t current_time = k_uptime_get_32();
	struct net_if_ipv6 *ipv6, *next;
	int32_t delay = -1;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
					  ipv6, next, rs_node) {
		struct net_if *iface = NULL;

		/* RS entries are ordered by construction. Stop when
		 * we find one that hasn't expired.
		 */
		delay = (int32_t)(ipv6->rs_start + RS_TIMEOUT - current_time);
		if (delay > 0) {
			break;
		}

		/* Removing the ipv6 from active_rs_timers list */
		sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);

		/* Did not receive RA yet. */
		ipv6->rs_count++;

		/* Map the IPv6 config back to its owning interface. */
		STRUCT_SECTION_FOREACH(net_if, tmp) {
			if (tmp->config.ip.ipv6 == ipv6) {
				iface = tmp;
				break;
			}
		}

		if (iface) {
			NET_DBG("RS no respond iface %p count %d",
				iface, ipv6->rs_count);
			if (ipv6->rs_count < RS_COUNT) {
				net_if_start_rs(iface);
			}
		} else {
			NET_DBG("Interface IPv6 config %p not found", ipv6);
		}

		/* Sentinel: only a loop exit via the "not yet expired"
		 * break above leaves ipv6 non-NULL, which is what the
		 * reschedule below keys on.
		 */
		ipv6 = NULL;
	}

	if ((ipv6 != NULL) && (delay > 0)) {
		k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
						    RS_TIMEOUT - current_time));
	}

	k_mutex_unlock(&lock);
}
1318
/* Send a router solicitation on @p iface and, on success, register
 * the interface's IPv6 config on active_rs_timers so rs_timeout() can
 * retransmit if no router advertisement arrives. No-op when the
 * interface has no IPv6 config.
 */
void net_if_start_rs(struct net_if *iface)
{
	struct net_if_ipv6 *ipv6;

	k_mutex_lock(&lock, K_FOREVER);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	NET_DBG("Starting ND/RS for iface %p", iface);

	/* net_ipv6_start_rs() returns 0 on success. */
	if (!net_ipv6_start_rs(iface)) {
		ipv6->rs_start = k_uptime_get_32();
		sys_slist_append(&active_rs_timers, &ipv6->rs_node);

		/* FUTURE: use schedule, not reschedule. */
		if (!k_work_delayable_remaining_get(&rs_timer)) {
			k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
		}
	}

out:
	k_mutex_unlock(&lock);
}
1345
net_if_stop_rs(struct net_if * iface)1346 void net_if_stop_rs(struct net_if *iface)
1347 {
1348 struct net_if_ipv6 *ipv6;
1349
1350 k_mutex_lock(&lock, K_FOREVER);
1351
1352 ipv6 = iface->config.ip.ipv6;
1353 if (!ipv6) {
1354 goto out;
1355 }
1356
1357 NET_DBG("Stopping ND/RS for iface %p", iface);
1358
1359 sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
1360
1361 out:
1362 k_mutex_unlock(&lock);
1363 }
1364
/* One-time init of neighbor-discovery RS handling: the shared
 * delayable work item and the active RS timer list.
 */
static inline void iface_ipv6_nd_init(void)
{
	k_work_init_delayable(&rs_timer, rs_timeout);
	sys_slist_init(&active_rs_timers);
}
1370
1371 #else
1372 #define net_if_start_rs(...)
1373 #define net_if_stop_rs(...)
1374 #define iface_ipv6_nd_init(...)
1375 #endif /* CONFIG_NET_IPV6_ND */
1376
net_if_ipv6_addr_lookup(const struct in6_addr * addr,struct net_if ** ret)1377 struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
1378 struct net_if **ret)
1379 {
1380 struct net_if_addr *ifaddr = NULL;
1381
1382 k_mutex_lock(&lock, K_FOREVER);
1383
1384 STRUCT_SECTION_FOREACH(net_if, iface) {
1385 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1386 int i;
1387
1388 if (!ipv6) {
1389 continue;
1390 }
1391
1392 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1393 if (!ipv6->unicast[i].is_used ||
1394 ipv6->unicast[i].address.family != AF_INET6) {
1395 continue;
1396 }
1397
1398 if (net_ipv6_is_prefix(
1399 addr->s6_addr,
1400 ipv6->unicast[i].address.in6_addr.s6_addr,
1401 128)) {
1402
1403 if (ret) {
1404 *ret = iface;
1405 }
1406
1407 ifaddr = &ipv6->unicast[i];
1408 goto out;
1409 }
1410 }
1411 }
1412
1413 out:
1414 k_mutex_unlock(&lock);
1415
1416 return ifaddr;
1417 }
1418
net_if_ipv6_addr_lookup_by_iface(struct net_if * iface,struct in6_addr * addr)1419 struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
1420 struct in6_addr *addr)
1421 {
1422 struct net_if_addr *ifaddr = NULL;
1423 struct net_if_ipv6 *ipv6;
1424 int i;
1425
1426 k_mutex_lock(&lock, K_FOREVER);
1427
1428 ipv6 = iface->config.ip.ipv6;
1429 if (!ipv6) {
1430 goto out;
1431 }
1432
1433 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1434 if (!ipv6->unicast[i].is_used ||
1435 ipv6->unicast[i].address.family != AF_INET6) {
1436 continue;
1437 }
1438
1439 if (net_ipv6_is_prefix(
1440 addr->s6_addr,
1441 ipv6->unicast[i].address.in6_addr.s6_addr,
1442 128)) {
1443 ifaddr = &ipv6->unicast[i];
1444 goto out;
1445 }
1446 }
1447
1448 out:
1449 k_mutex_unlock(&lock);
1450
1451 return ifaddr;
1452 }
1453
z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr * addr)1454 int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
1455 {
1456 struct net_if *iface = NULL;
1457 struct net_if_addr *if_addr;
1458
1459 if_addr = net_if_ipv6_addr_lookup(addr, &iface);
1460 if (!if_addr) {
1461 return 0;
1462 }
1463
1464 return net_if_get_by_iface(iface);
1465 }
1466
1467 #ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: copy the address out of user memory
 * (oops on a bad pointer) before calling the kernel implementation.
 */
static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
					  const struct in6_addr *addr)
{
	struct in6_addr addr_v6;

	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
}
1477 #include <syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
1478 #endif
1479
/* Called when an address' valid lifetime ran out: mark it deprecated
 * (RFC 4862 terminology), drop it from the lifetime timer list and
 * clear its timeout. Caller holds the lock (invoked from
 * address_lifetime_timeout()).
 */
static void address_expired(struct net_if_addr *ifaddr)
{
	NET_DBG("IPv6 address %s is deprecated",
		log_strdup(net_sprint_ipv6_addr(&ifaddr->address.in6_addr)));

	ifaddr->addr_state = NET_ADDR_DEPRECATED;

	sys_slist_find_and_remove(&active_address_lifetime_timers,
				  &ifaddr->lifetime.node);

	net_timeout_set(&ifaddr->lifetime, 0, 0);
}
1492
address_lifetime_timeout(struct k_work * work)1493 static void address_lifetime_timeout(struct k_work *work)
1494 {
1495 uint32_t next_update = UINT32_MAX;
1496 uint32_t current_time = k_uptime_get_32();
1497 struct net_if_addr *current, *next;
1498
1499 ARG_UNUSED(work);
1500
1501 k_mutex_lock(&lock, K_FOREVER);
1502
1503 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
1504 current, next, lifetime.node) {
1505 struct net_timeout *timeout = ¤t->lifetime;
1506 uint32_t this_update = net_timeout_evaluate(timeout,
1507 current_time);
1508
1509 if (this_update == 0U) {
1510 address_expired(current);
1511 continue;
1512 }
1513
1514 if (this_update < next_update) {
1515 next_update = this_update;
1516 }
1517
1518 if (current == next) {
1519 break;
1520 }
1521 }
1522
1523 if (next_update != UINT32_MAX) {
1524 NET_DBG("Waiting for %d ms", (int32_t)next_update);
1525
1526 k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
1527 }
1528
1529 k_mutex_unlock(&lock);
1530 }
1531
1532 #if defined(CONFIG_NET_TEST)
/* Test hook (CONFIG_NET_TEST): run the lifetime handler directly so
 * tests can force address expiry without waiting for the work queue.
 */
void net_address_lifetime_timeout(void)
{
	address_lifetime_timeout(NULL);
}
1537 #endif
1538
/* (Re)register @p ifaddr on the address lifetime timer list and set
 * its timeout to @p vlifetime seconds from now.
 *
 * Fix: remove any existing registration before appending. The public
 * net_if_ipv6_addr_update_lifetime() can be called repeatedly for the
 * same address; appending a node already on the slist corrupts the
 * list. This mirrors prefix_start_timer().
 */
static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
{
	(void)sys_slist_find_and_remove(&active_address_lifetime_timers,
					&ifaddr->lifetime.node);
	sys_slist_append(&active_address_lifetime_timers,
			 &ifaddr->lifetime.node);

	net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
	k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
}
1547
/* Refresh the valid lifetime of @p ifaddr to @p vlifetime seconds and
 * mark it preferred again (e.g. after a router advertisement renewed
 * the address).
 */
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
				      uint32_t vlifetime)
{
	k_mutex_lock(&lock, K_FOREVER);

	NET_DBG("Updating expire time of %s by %u secs",
		log_strdup(net_sprint_ipv6_addr(&ifaddr->address.in6_addr)),
		vlifetime);

	ifaddr->addr_state = NET_ADDR_PREFERRED;

	address_start_timer(ifaddr, vlifetime);

	k_mutex_unlock(&lock);
}
1563
ipv6_addr_find(struct net_if * iface,struct in6_addr * addr)1564 static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
1565 struct in6_addr *addr)
1566 {
1567 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
1568 int i;
1569
1570 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
1571 if (!ipv6->unicast[i].is_used) {
1572 continue;
1573 }
1574
1575 if (net_ipv6_addr_cmp(
1576 addr, &ipv6->unicast[i].address.in6_addr)) {
1577
1578 return &ipv6->unicast[i];
1579 }
1580 }
1581
1582 return NULL;
1583 }
1584
/* Populate a freshly claimed unicast slot with @p addr. A non-zero
 * @p vlifetime (seconds) arms the lifetime timer; zero means the
 * address never expires.
 */
static inline void net_if_addr_init(struct net_if_addr *ifaddr,
				    struct in6_addr *addr,
				    enum net_addr_type addr_type,
				    uint32_t vlifetime)
{
	ifaddr->is_used = true;
	ifaddr->address.family = AF_INET6;
	ifaddr->addr_type = addr_type;
	net_ipaddr_copy(&ifaddr->address.in6_addr, addr);

	/* FIXME - set the mcast addr for this node */

	if (vlifetime) {
		ifaddr->is_infinite = false;

		NET_DBG("Expiring %s in %u secs",
			log_strdup(net_sprint_ipv6_addr(addr)),
			vlifetime);

		net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
	} else {
		ifaddr->is_infinite = true;
	}
}
1609
/* Add unicast @p addr to @p iface (idempotent: an existing entry is
 * returned as-is). On a new address, join the relevant multicast
 * groups and start DAD (unless the link is point-to-point, where the
 * address is immediately preferred), then emit
 * NET_EVENT_IPV6_ADDR_ADD. Returns the entry, or NULL when IPv6 is
 * not available or the unicast table is full.
 */
struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
					 struct in6_addr *addr,
					 enum net_addr_type addr_type,
					 uint32_t vlifetime)
{
	struct net_if_addr *ifaddr = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		goto out;
	}

	ifaddr = ipv6_addr_find(iface, addr);
	if (ifaddr) {
		/* Already added; return the existing entry. */
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (ipv6->unicast[i].is_used) {
			continue;
		}

		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
				 vlifetime);

		NET_DBG("[%d] interface %p address %s type %s added", i,
			iface, log_strdup(net_sprint_ipv6_addr(addr)),
			net_addr_type2str(addr_type));

		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
			/* RFC 4862 5.4.2
			 * Before sending a Neighbor Solicitation, an interface
			 * MUST join the all-nodes multicast address and the
			 * solicited-node multicast address of the tentative
			 * address.
			 */
			/* The allnodes multicast group is only joined once as
			 * net_ipv6_mcast_join() checks if we have already
			 * joined.
			 */
			join_mcast_nodes(iface,
					 &ipv6->unicast[i].address.in6_addr);

			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
		} else {
			/* If DAD is not done for point-to-point links, then
			 * the address is usable immediately.
			 */
			ipv6->unicast[i].addr_state = NET_ADDR_PREFERRED;
		}

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_ADDR_ADD, iface,
			&ipv6->unicast[i].address.in6_addr,
			sizeof(struct in6_addr));

		ifaddr = &ipv6->unicast[i];
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ifaddr;
}
1678
/* Remove unicast @p addr from @p iface: cancel its lifetime tracking,
 * free the slot, leave the solicited-node multicast group and emit
 * NET_EVENT_IPV6_ADDR_DEL. Returns true when the address was found
 * and removed.
 */
bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
{
	bool ret = false;
	struct net_if_ipv6 *ipv6;
	int i;

	NET_ASSERT(addr);

	k_mutex_lock(&lock, K_FOREVER);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		struct in6_addr maddr;

		if (!ipv6->unicast[i].is_used) {
			continue;
		}

		if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr,
				       addr)) {
			continue;
		}

		/* A finite-lifetime address is on the timer list; drop
		 * it and stop the timer if nothing is left to track.
		 */
		if (!ipv6->unicast[i].is_infinite) {
			sys_slist_find_and_remove(
				&active_address_lifetime_timers,
				&ipv6->unicast[i].lifetime.node);

			if (sys_slist_is_empty(
				    &active_address_lifetime_timers)) {
				k_work_cancel_delayable(
					&address_lifetime_timer);
			}
		}

		ipv6->unicast[i].is_used = false;

		net_ipv6_addr_create_solicited_node(addr, &maddr);

		net_if_ipv6_maddr_rm(iface, &maddr);

		NET_DBG("[%d] interface %p address %s type %s removed",
			i, iface, log_strdup(net_sprint_ipv6_addr(addr)),
			net_addr_type2str(ipv6->unicast[i].addr_type));

		/* Using the IPv6 address pointer here can give false
		 * info if someone adds a new IP address into this position
		 * in the address array. This is quite unlikely thou.
		 */
		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_ADDR_DEL,
			iface,
			&ipv6->unicast[i].address.in6_addr,
			sizeof(struct in6_addr));

		ret = true;
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
1747
z_impl_net_if_ipv6_addr_add_by_index(int index,struct in6_addr * addr,enum net_addr_type addr_type,uint32_t vlifetime)1748 bool z_impl_net_if_ipv6_addr_add_by_index(int index,
1749 struct in6_addr *addr,
1750 enum net_addr_type addr_type,
1751 uint32_t vlifetime)
1752 {
1753 struct net_if *iface;
1754
1755 iface = net_if_get_by_index(index);
1756 if (!iface) {
1757 return false;
1758 }
1759
1760 return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
1761 true : false;
1762 }
1763
1764 #ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the interface index and copy
 * the address from user memory before calling the implementation.
 */
bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
					  struct in6_addr *addr,
					  enum net_addr_type addr_type,
					  uint32_t vlifetime)
{
	struct in6_addr addr_v6;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_add_by_index(index,
						    &addr_v6,
						    addr_type,
						    vlifetime);
}
1785
1786 #include <syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
1787 #endif /* CONFIG_USERSPACE */
1788
z_impl_net_if_ipv6_addr_rm_by_index(int index,const struct in6_addr * addr)1789 bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
1790 const struct in6_addr *addr)
1791 {
1792 struct net_if *iface;
1793
1794 iface = net_if_get_by_index(index);
1795 if (!iface) {
1796 return false;
1797 }
1798
1799 return net_if_ipv6_addr_rm(iface, addr);
1800 }
1801
1802 #ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the interface index and copy
 * the address from user memory before calling the implementation.
 */
bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
					 const struct in6_addr *addr)
{
	struct in6_addr addr_v6;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
}
1818
1819 #include <syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
1820 #endif /* CONFIG_USERSPACE */
1821
net_if_ipv6_maddr_add(struct net_if * iface,const struct in6_addr * addr)1822 struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
1823 const struct in6_addr *addr)
1824 {
1825 struct net_if_mcast_addr *ifmaddr = NULL;
1826 struct net_if_ipv6 *ipv6;
1827 int i;
1828
1829 k_mutex_lock(&lock, K_FOREVER);
1830
1831 if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
1832 goto out;
1833 }
1834
1835 if (!net_ipv6_is_addr_mcast(addr)) {
1836 NET_DBG("Address %s is not a multicast address.",
1837 log_strdup(net_sprint_ipv6_addr(addr)));
1838 goto out;
1839 }
1840
1841 if (net_if_ipv6_maddr_lookup(addr, &iface)) {
1842 NET_WARN("Multicast address %s is is already registered.",
1843 log_strdup(net_sprint_ipv6_addr(addr)));
1844 goto out;
1845 }
1846
1847 for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1848 if (ipv6->mcast[i].is_used) {
1849 continue;
1850 }
1851
1852 ipv6->mcast[i].is_used = true;
1853 ipv6->mcast[i].address.family = AF_INET6;
1854 memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
1855
1856 NET_DBG("[%d] interface %p address %s added", i, iface,
1857 log_strdup(net_sprint_ipv6_addr(addr)));
1858
1859 net_mgmt_event_notify_with_info(
1860 NET_EVENT_IPV6_MADDR_ADD, iface,
1861 &ipv6->mcast[i].address.in6_addr,
1862 sizeof(struct in6_addr));
1863
1864 ifmaddr = &ipv6->mcast[i];
1865 goto out;
1866 }
1867
1868 out:
1869 k_mutex_unlock(&lock);
1870
1871 return ifmaddr;
1872 }
1873
/* Remove multicast @p addr from @p iface and emit
 * NET_EVENT_IPV6_MADDR_DEL. Returns true when the address was found
 * and removed.
 */
bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
{
	bool ret = false;
	struct net_if_ipv6 *ipv6;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
		if (!ipv6->mcast[i].is_used) {
			continue;
		}

		if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
				       addr)) {
			continue;
		}

		ipv6->mcast[i].is_used = false;

		NET_DBG("[%d] interface %p address %s removed",
			i, iface, log_strdup(net_sprint_ipv6_addr(addr)));

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_MADDR_DEL, iface,
			&ipv6->mcast[i].address.in6_addr,
			sizeof(struct in6_addr));

		ret = true;
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
1916
/* Find the multicast entry matching @p maddr. If *ret is a non-NULL
 * interface on entry, only that interface is searched; otherwise all
 * interfaces are scanned and *ret (when provided) is set to the owner
 * on a hit. Returns NULL when not found.
 */
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
						   struct net_if **ret)
{
	struct net_if_mcast_addr *ifmaddr = NULL;

	k_mutex_lock(&lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
		int i;

		/* Caller-provided interface acts as a filter. */
		if (ret && *ret && iface != *ret) {
			continue;
		}

		if (!ipv6) {
			continue;
		}

		for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
			if (!ipv6->mcast[i].is_used ||
			    ipv6->mcast[i].address.family != AF_INET6) {
				continue;
			}

			/* A 128-bit prefix match is full equality. */
			if (net_ipv6_is_prefix(
				    maddr->s6_addr,
				    ipv6->mcast[i].address.in6_addr.s6_addr,
				    128)) {
				if (ret) {
					*ret = iface;
				}

				ifmaddr = &ipv6->mcast[i];
				goto out;
			}
		}
	}

out:
	k_mutex_unlock(&lock);

	return ifmaddr;
}
1961
/* Mark multicast entry @p addr as left (is_joined = false). */
void net_if_ipv6_maddr_leave(struct net_if_mcast_addr *addr)
{
	NET_ASSERT(addr);

	k_mutex_lock(&lock, K_FOREVER);

	addr->is_joined = false;

	k_mutex_unlock(&lock);
}
1972
/* Mark multicast entry @p addr as joined (is_joined = true). */
void net_if_ipv6_maddr_join(struct net_if_mcast_addr *addr)
{
	NET_ASSERT(addr);

	k_mutex_lock(&lock, K_FOREVER);

	addr->is_joined = true;

	k_mutex_unlock(&lock);
}
1983
/* Register @p mon to receive multicast join/leave callbacks for
 * @p iface. Removing the node first makes re-registration safe
 * (re-linking a node already on the slist would corrupt it).
 */
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
			       struct net_if *iface,
			       net_if_mcast_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
	sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);

	mon->iface = iface;
	mon->cb = cb;

	k_mutex_unlock(&lock);
}
1998
/* Unregister monitor @p mon; safe to call when not registered. */
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
{
	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);

	k_mutex_unlock(&lock);
}
2007
/* Invoke every registered monitor callback whose interface matches
 * @p iface, reporting that @p addr was joined or left.
 * NOTE(review): callbacks run with the module lock held — they must
 * not re-enter APIs that take this lock unless recursion is intended.
 */
void net_if_mcast_monitor(struct net_if *iface,
			  const struct in6_addr *addr,
			  bool is_joined)
{
	struct net_if_mcast_monitor *mon, *tmp;

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
					  mon, tmp, node) {
		if (iface == mon->iface) {
			mon->cb(iface, addr, is_joined);
		}
	}

	k_mutex_unlock(&lock);
}
2025
/* Remove every autoconfigured (NET_ADDR_AUTOCONF) unicast address on
 * @p iface that falls under @p addr / @p len. Used when a prefix is
 * deleted or expires.
 */
static void remove_prefix_addresses(struct net_if *iface,
				    struct net_if_ipv6 *ipv6,
				    struct in6_addr *addr,
				    uint8_t len)
{
	for (int i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		struct net_if_addr *unicast = &ipv6->unicast[i];

		if (!unicast->is_used ||
		    unicast->address.family != AF_INET6 ||
		    unicast->addr_type != NET_ADDR_AUTOCONF) {
			continue;
		}

		if (net_ipv6_is_prefix(addr->s6_addr,
				       unicast->address.in6_addr.s6_addr,
				       len)) {
			net_if_ipv6_addr_rm(iface,
					    &unicast->address.in6_addr);
		}
	}
}
2049
/* Called when a prefix's lifetime ran out: invalidate the entry,
 * remove all autoconf addresses derived from it and emit
 * NET_EVENT_IPV6_PREFIX_DEL.
 */
static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
{
	struct net_if_ipv6 *ipv6;

	NET_DBG("Prefix %s/%d expired",
		log_strdup(net_sprint_ipv6_addr(&ifprefix->prefix)),
		ifprefix->len);

	ifprefix->is_used = false;

	if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
		return;
	}

	/* Remove also all auto addresses if they have the same prefix.
	 */
	remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
				ifprefix->len);

	net_mgmt_event_notify_with_info(
		NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface,
		&ifprefix->prefix, sizeof(struct in6_addr));
}
2073
/* Detach @p ifprefix from the prefix lifetime timer list and clear
 * its timeout (the entry itself stays allocated).
 */
static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
{
	k_mutex_lock(&lock, K_FOREVER);

	NET_DBG("IPv6 prefix %s/%d removed",
		log_strdup(net_sprint_ipv6_addr(&ifprefix->prefix)),
		ifprefix->len);

	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
				  &ifprefix->lifetime.node);

	net_timeout_set(&ifprefix->lifetime, 0, 0);

	k_mutex_unlock(&lock);
}
2089
prefix_lifetime_timeout(struct k_work * work)2090 static void prefix_lifetime_timeout(struct k_work *work)
2091 {
2092 uint32_t next_update = UINT32_MAX;
2093 uint32_t current_time = k_uptime_get_32();
2094 struct net_if_ipv6_prefix *current, *next;
2095
2096 ARG_UNUSED(work);
2097
2098 k_mutex_lock(&lock, K_FOREVER);
2099
2100 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
2101 current, next, lifetime.node) {
2102 struct net_timeout *timeout = ¤t->lifetime;
2103 uint32_t this_update = net_timeout_evaluate(timeout,
2104 current_time);
2105
2106 if (this_update == 0U) {
2107 prefix_lifetime_expired(current);
2108 continue;
2109 }
2110
2111 if (this_update < next_update) {
2112 next_update = this_update;
2113 }
2114
2115 if (current == next) {
2116 break;
2117 }
2118 }
2119
2120 if (next_update != UINT32_MAX) {
2121 k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
2122 }
2123
2124 k_mutex_unlock(&lock);
2125 }
2126
/* (Re)register @p ifprefix on the prefix lifetime timer list and set
 * its timeout to @p lifetime seconds from now. The node is removed
 * first so updating an already-tracked prefix cannot double-link it.
 */
static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
			       uint32_t lifetime)
{
	k_mutex_lock(&lock, K_FOREVER);

	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
					&ifprefix->lifetime.node);
	sys_slist_append(&active_prefix_lifetime_timers,
			 &ifprefix->lifetime.node);

	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
	k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);

	k_mutex_unlock(&lock);
}
2142
/* Return the used prefix entry on @p iface that matches @p prefix
 * with exactly @p prefix_len, or NULL when not found or IPv6 is not
 * configured.
 *
 * Fix: the is_used test read ipv6->unicast[i] instead of
 * ipv6->prefix[i] — the wrong array, whose size
 * (NET_IF_MAX_IPV6_ADDR) is unrelated to the loop bound
 * (NET_IF_MAX_IPV6_PREFIX), so the check could both skip valid
 * prefixes and index out of bounds.
 */
static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
						   struct in6_addr *prefix,
						   uint8_t prefix_len)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	int i;

	if (!ipv6) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (net_ipv6_addr_cmp(prefix, &ipv6->prefix[i].prefix) &&
		    prefix_len == ipv6->prefix[i].len) {
			return &ipv6->prefix[i];
		}
	}

	return NULL;
}
2167
/* Populate a freshly claimed prefix slot. A lifetime of
 * NET_IPV6_ND_INFINITE_LIFETIME marks the prefix as never expiring.
 */
static void net_if_ipv6_prefix_init(struct net_if *iface,
				    struct net_if_ipv6_prefix *ifprefix,
				    struct in6_addr *addr, uint8_t len,
				    uint32_t lifetime)
{
	ifprefix->is_used = true;
	ifprefix->len = len;
	ifprefix->iface = iface;
	net_ipaddr_copy(&ifprefix->prefix, addr);

	if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
		ifprefix->is_infinite = true;
	} else {
		ifprefix->is_infinite = false;
	}
}
2184
/* Add prefix @p prefix/@p len to @p iface (idempotent: an existing
 * entry is returned as-is) and emit NET_EVENT_IPV6_PREFIX_ADD.
 * Returns the entry, or NULL when IPv6 is unavailable or the prefix
 * table is full. Note: the timer is not started here; see
 * net_if_ipv6_prefix_set_timer().
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
						  struct in6_addr *prefix,
						  uint8_t len,
						  uint32_t lifetime)
{
	struct net_if_ipv6_prefix *ifprefix = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		goto out;
	}

	ifprefix = ipv6_prefix_find(iface, prefix, len);
	if (ifprefix) {
		/* Already present; return the existing entry. */
		goto out;
	}

	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (ipv6->prefix[i].is_used) {
			continue;
		}

		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
					len, lifetime);

		NET_DBG("[%d] interface %p prefix %s/%d added", i, iface,
			log_strdup(net_sprint_ipv6_addr(prefix)), len);

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_PREFIX_ADD, iface,
			&ipv6->prefix[i].prefix, sizeof(struct in6_addr));

		ifprefix = &ipv6->prefix[i];
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ifprefix;
}
2233
/* Remove prefix @p addr/@p len from @p iface: stop its timer, free
 * the slot, remove the autoconf addresses derived from it and emit
 * NET_EVENT_IPV6_PREFIX_DEL. Returns true when found and removed.
 */
bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
			   uint8_t len)
{
	bool ret = false;
	struct net_if_ipv6 *ipv6;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
		    ipv6->prefix[i].len != len) {
			continue;
		}

		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);

		ipv6->prefix[i].is_used = false;

		/* Remove also all auto addresses if the they have the same
		 * prefix.
		 */
		remove_prefix_addresses(iface, ipv6, addr, len);

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_PREFIX_DEL, iface,
			&ipv6->prefix[i].prefix, sizeof(struct in6_addr));

		ret = true;
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
2280
/* Return the most specific (longest... see note) prefix on @p iface
 * covering @p addr, or NULL. A NULL @p iface means the default
 * interface.
 * NOTE(review): the comparison keeps the entry with the SMALLER len
 * (prefix->len > ipv6->prefix[i].len), i.e. it prefers the shortest
 * matching prefix — confirm this is the intended tie-break.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
						  struct in6_addr *addr)
{
	struct net_if_ipv6_prefix *prefix = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	if (!iface) {
		iface = net_if_get_default();
	}

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
				       addr->s6_addr,
				       ipv6->prefix[i].len)) {
			if (!prefix || prefix->len > ipv6->prefix[i].len) {
				prefix = &ipv6->prefix[i];
			}
		}
	}

out:
	k_mutex_unlock(&lock);

	return prefix;
}
2318
/* Return the first used prefix on @p iface whose first @p len bits
 * match @p addr, or NULL when the interface has no IPv6 config or no
 * entry matches.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
						     struct in6_addr *addr,
						     uint8_t len)
{
	struct net_if_ipv6_prefix *found = NULL;
	struct net_if_ipv6 *ipv6;

	k_mutex_lock(&lock, K_FOREVER);

	ipv6 = iface->config.ip.ipv6;
	if (ipv6) {
		for (int i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
			struct net_if_ipv6_prefix *entry = &ipv6->prefix[i];

			if (!entry->is_used) {
				continue;
			}

			if (net_ipv6_is_prefix(entry->prefix.s6_addr,
					       addr->s6_addr, len)) {
				found = entry;
				break;
			}
		}
	}

	k_mutex_unlock(&lock);

	return found;
}
2351
/* Check whether @p addr is covered by any configured on-link prefix.
 * If *iface is non-NULL on entry it restricts the search to that
 * interface; on success *iface (when provided) is set to the matching
 * interface. Returns true when the address is on-link.
 */
bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
{
	bool ret = false;

	k_mutex_lock(&lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, tmp) {
		struct net_if_ipv6 *ipv6 = tmp->config.ip.ipv6;
		int i;

		/* Caller-provided interface acts as a filter. */
		if (iface && *iface && *iface != tmp) {
			continue;
		}

		if (!ipv6) {
			continue;
		}

		for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
			if (ipv6->prefix[i].is_used &&
			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
					       addr->s6_addr,
					       ipv6->prefix[i].len)) {
				if (iface) {
					*iface = tmp;
				}

				ret = true;
				goto out;
			}
		}
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
2390
/* Arm the lifetime timer for @p prefix.  A lifetime of 0xffffffff
 * means "infinite", in which case no timer is scheduled at all.
 */
void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
				  uint32_t lifetime)
{
	if (lifetime != 0xffffffff) {
		NET_DBG("Prefix lifetime %u sec", lifetime);
		prefix_start_timer(prefix, lifetime);
	}
}
2403
/* Cancel the lifetime timer of @p prefix; a no-op for unused slots. */
void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
{
	if (!prefix->is_used) {
		return;
	}

	prefix_timer_remove(prefix);
}
2412
/* Look up the IPv6 router entry for @p addr on @p iface (thin wrapper
 * over the family-generic helper).
 */
struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
						struct in6_addr *addr)
{
	return iface_router_lookup(iface, AF_INET6, addr);
}
2418
/* Find a default IPv6 router, optionally restricted to @p iface and/or
 * excluding @p addr (semantics live in iface_router_find_default()).
 */
struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
						      struct in6_addr *addr)
{
	return iface_router_find_default(iface, AF_INET6, addr);
}
2424
/* Restart the lifetime of @p router: stamp the current uptime as the
 * new start of life, store the (seconds) lifetime and reschedule the
 * shared router timer accordingly.
 */
void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
					uint16_t lifetime)
{
	NET_DBG("Updating expire time of %s by %u secs",
		log_strdup(net_sprint_ipv6_addr(&router->address.in6_addr)),
		lifetime);

	router->life_start = k_uptime_get_32();
	router->lifetime = lifetime;

	iface_router_update_timer(router->life_start);
}
2437
/* Register @p addr as an IPv6 router on @p iface with the given
 * lifetime in seconds (never added as a default route here; compare
 * net_if_ipv4_router_add() which exposes is_default).
 */
struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
					     struct in6_addr *addr,
					     uint16_t lifetime)
{
	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
}
2444
/* Remove a previously added IPv6 router entry; true on success. */
bool net_if_ipv6_router_rm(struct net_if_router *router)
{
	return iface_router_rm(router);
}
2449
/* Return the per-interface IPv6 hop limit, or 0 when the interface has
 * no IPv6 config (or native IPv6 is compiled out).
 */
uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	int ret = 0;

	k_mutex_lock(&lock, K_FOREVER);

	if (!iface->config.ip.ipv6) {
		goto out;
	}

	ret = iface->config.ip.ipv6->hop_limit;
out:
	k_mutex_unlock(&lock);

	return ret;
#else
	ARG_UNUSED(iface);

	return 0;
#endif
}
2472
/* Set the per-interface IPv6 hop limit; silently ignored when the
 * interface has no IPv6 config.
 *
 * NOTE(review): the name lacks the usual "net_if_" prefix used by the
 * sibling setters (e.g. net_if_ipv4_set_ttl) — presumably kept for API
 * compatibility; confirm before renaming.
 */
void net_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
	k_mutex_lock(&lock, K_FOREVER);

	if (!iface->config.ip.ipv6) {
		goto out;
	}

	iface->config.ip.ipv6->hop_limit = hop_limit;
out:
	k_mutex_unlock(&lock);
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(hop_limit);
#endif
}
2490
/* Return the first link-local IPv6 unicast address on @p iface that is
 * in the given @p addr_state (NET_ADDR_ANY_STATE matches any state),
 * or NULL when none exists.
 */
struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
				    enum net_addr_state addr_state)
{
	struct in6_addr *addr = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		/* Skip unused slots, wrong-state entries and non-IPv6 */
		if (!ipv6->unicast[i].is_used ||
		    (addr_state != NET_ADDR_ANY_STATE &&
		     ipv6->unicast[i].addr_state != addr_state) ||
		    ipv6->unicast[i].address.family != AF_INET6) {
			continue;
		}

		if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
			addr = &ipv6->unicast[i].address.in6_addr;
			goto out;
		}
	}

out:
	k_mutex_unlock(&lock);

	return addr;
}
2524
/* Scan every interface for a link-local IPv6 address in @p state.  On
 * success the owning interface is reported through @p iface (if given).
 *
 * NOTE(review): net_if_ipv6_get_ll() re-locks the same mutex while we
 * hold it — this relies on k_mutex allowing nested locking by the
 * owning thread; confirm against the kernel mutex documentation.
 */
struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
					 struct net_if **iface)
{
	struct in6_addr *addr = NULL;

	k_mutex_lock(&lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, tmp) {
		addr = net_if_ipv6_get_ll(tmp, state);
		if (addr) {
			if (iface) {
				*iface = tmp;
			}

			goto out;
		}
	}

out:
	k_mutex_unlock(&lock);

	return addr;
}
2548
check_global_addr(struct net_if * iface,enum net_addr_state state)2549 static inline struct in6_addr *check_global_addr(struct net_if *iface,
2550 enum net_addr_state state)
2551 {
2552 struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
2553 int i;
2554
2555 if (!ipv6) {
2556 return NULL;
2557 }
2558
2559 for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
2560 if (!ipv6->unicast[i].is_used ||
2561 (ipv6->unicast[i].addr_state != state) ||
2562 ipv6->unicast[i].address.family != AF_INET6) {
2563 continue;
2564 }
2565
2566 if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
2567 return &ipv6->unicast[i].address.in6_addr;
2568 }
2569 }
2570
2571 return NULL;
2572 }
2573
/* Find a global IPv6 address in @p state.  If @p iface points at a
 * non-NULL interface only that one is searched; otherwise all
 * interfaces are scanned and *iface reports the match.
 */
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
					     struct net_if **iface)
{
	struct in6_addr *addr = NULL;

	k_mutex_lock(&lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, tmp) {
		/* Caller pinned a specific interface: skip the rest */
		if (iface && *iface && tmp != *iface) {
			continue;
		}

		addr = check_global_addr(tmp, state);
		if (addr) {
			if (iface) {
				*iface = tmp;
			}

			goto out;
		}
	}

out:
	k_mutex_unlock(&lock);

	return addr;
}
2601
/* Number of leading bits shared by two IPv6 addresses (0..128),
 * delegated to the byte-wise helper over all 16 address bytes.
 */
static uint8_t get_diff_ipv6(const struct in6_addr *src,
			     const struct in6_addr *dst)
{
	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 16);
}
2607
is_proper_ipv6_address(struct net_if_addr * addr)2608 static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
2609 {
2610 if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
2611 addr->address.family == AF_INET6 &&
2612 !net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
2613 return true;
2614 }
2615
2616 return false;
2617 }
2618
/* Return the "proper" (preferred, global) IPv6 address on @p iface
 * sharing the most leading bits with @p dst.  @p best_so_far is both
 * input (best length found on earlier interfaces) and output, so the
 * caller can fold results across several interfaces.
 */
static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
						   const struct in6_addr *dst,
						   uint8_t *best_so_far)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	struct in6_addr *src = NULL;
	uint8_t len;
	int i;

	if (!ipv6) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
			continue;
		}

		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
		if (len >= *best_so_far) {
			/* Mesh local address can only be selected for the same
			 * subnet.
			 */
			if (ipv6->unicast[i].is_mesh_local && len < 64 &&
			    !net_ipv6_is_addr_mcast_mesh(dst)) {
				continue;
			}

			*best_so_far = len;
			src = &ipv6->unicast[i].address.in6_addr;
		}
	}

	return src;
}
2654
/* Pick the source IPv6 address to use when sending to @p dst.
 * Global destinations get the longest-prefix-matching preferred global
 * address; link-local (or link-scope multicast) destinations get a
 * link-local source.  Falls back to the unspecified address (::) when
 * nothing suitable exists — never returns NULL.
 */
const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
						   const struct in6_addr *dst)
{
	const struct in6_addr *src = NULL;
	uint8_t best_match = 0U;

	k_mutex_lock(&lock, K_FOREVER);

	if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast_link(dst)) {
		/* If caller has supplied interface, then use that */
		if (dst_iface) {
			src = net_if_ipv6_get_best_match(dst_iface, dst,
							 &best_match);
		} else {
			/* best_match carries over so the overall best
			 * across all interfaces wins
			 */
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in6_addr *addr;

				addr = net_if_ipv6_get_best_match(iface, dst,
								  &best_match);
				if (addr) {
					src = addr;
				}
			}
		}

	} else {
		if (dst_iface) {
			src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
		} else {
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in6_addr *addr;

				addr = net_if_ipv6_get_ll(iface,
							  NET_ADDR_PREFERRED);
				if (addr) {
					src = addr;
					break;
				}
			}
		}
	}

	if (!src) {
		src = net_ipv6_unspecified_address();
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return src;
}
2707
/* Choose the interface to use for sending to @p dst: the one owning
 * the selected source address, or the default interface when source
 * selection yields the unspecified address / no owner is found.
 */
struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
{
	struct net_if *iface = NULL;
	const struct in6_addr *src;

	k_mutex_lock(&lock, K_FOREVER);

	src = net_if_ipv6_select_src_addr(NULL, dst);
	if (src != net_ipv6_unspecified_address()) {
		/* Map the chosen address back to its owning interface */
		net_if_ipv6_addr_lookup(src, &iface);
	}

	if (iface == NULL) {
		iface = net_if_get_default();
	}

	k_mutex_unlock(&lock);

	return iface;
}
2728
net_if_ipv6_calc_reachable_time(struct net_if_ipv6 * ipv6)2729 uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
2730 {
2731 uint32_t min_reachable, max_reachable;
2732
2733 k_mutex_lock(&lock, K_FOREVER);
2734
2735 min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
2736 / MIN_RANDOM_DENOM;
2737 max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
2738 / MAX_RANDOM_DENOM;
2739
2740 k_mutex_unlock(&lock);
2741
2742 NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
2743 max_reachable);
2744
2745 return min_reachable +
2746 sys_rand32_get() % (max_reachable - min_reachable);
2747 }
2748
iface_ipv6_start(struct net_if * iface)2749 static void iface_ipv6_start(struct net_if *iface)
2750 {
2751 if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
2752 net_if_start_dad(iface);
2753 } else {
2754 struct net_if_ipv6 *ipv6 __unused = iface->config.ip.ipv6;
2755
2756 join_mcast_nodes(iface,
2757 &ipv6->mcast[0].address.in6_addr);
2758 }
2759
2760 net_if_start_rs(iface);
2761 }
2762
/* One-time IPv6 subsystem init: set up DAD/ND machinery, the address
 * and prefix lifetime work items, then give every static IPv6 config
 * slot its default hop limit and (randomized) reachable time.
 *
 * @param if_count Number of network interfaces present; only used to
 *                 warn when it exceeds the configured slot count.
 */
static void iface_ipv6_init(int if_count)
{
	int i;

	iface_ipv6_dad_init();
	iface_ipv6_nd_init();

	k_work_init_delayable(&address_lifetime_timer,
			      address_lifetime_timeout);
	k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);

	if (if_count > ARRAY_SIZE(ipv6_addresses)) {
		/* NOTE(review): ARRAY_SIZE() yields size_t but is printed
		 * with %lu — presumably fine on this libc; %zu would be
		 * the portable choice, confirm printf support first.
		 */
		NET_WARN("You have %lu IPv6 net_if addresses but %d "
			 "network interfaces", ARRAY_SIZE(ipv6_addresses),
			 if_count);
		NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
			 "value.");
	}

	for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
		ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
		ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;

		net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
	}
}
2789
#else
/* CONFIG_NET_NATIVE_IPV6 disabled: compile the IPv6 helpers above away
 * as empty macros so callers need no #ifdefs of their own.
 */
#define join_mcast_allnodes(...)
#define join_mcast_solicit_node(...)
#define leave_mcast_all(...)
#define join_mcast_nodes(...)
#define iface_ipv6_start(...)
#define iface_ipv6_init(...)
2797
/* Stub for builds without native IPv6: no multicast addresses exist. */
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
						   struct net_if **iface)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(iface);

	return NULL;
}
2806
/* Stub for builds without native IPv6: no unicast addresses exist. */
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
					    struct net_if **ret)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(ret);

	return NULL;
}
2815
/* Stub for builds without native IPv6: no global address available. */
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
					     struct net_if **iface)
{
	ARG_UNUSED(state);
	ARG_UNUSED(iface);

	return NULL;
}
#endif /* CONFIG_NET_IPV6 */
2825
#if defined(CONFIG_NET_NATIVE_IPV4)
/* Attach (or return the already attached) IPv4 configuration of
 * @p iface, taken from the static ipv4_addresses[] pool.
 *
 * @param ipv4 Optional out parameter receiving the config pointer.
 * @return 0 on success, -ENOTSUP when the interface does not do IPv4,
 *         -ESRCH when the pool is exhausted.
 */
int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
{
	int ret = 0;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		ret = -ENOTSUP;
		goto out;
	}

	/* Already attached: just hand it back */
	if (iface->config.ip.ipv4) {
		if (ipv4) {
			*ipv4 = iface->config.ip.ipv4;
		}

		goto out;
	}

	/* Claim the first free pool slot for this interface */
	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
		if (ipv4_addresses[i].iface) {
			continue;
		}

		iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
		ipv4_addresses[i].iface = iface;

		if (ipv4) {
			*ipv4 = &ipv4_addresses[i].ipv4;
		}

		goto out;
	}

	ret = -ESRCH;
out:
	k_mutex_unlock(&lock);

	return ret;
}
2868
/* Detach the IPv4 configuration of @p iface, returning its slot to the
 * static pool.
 *
 * @return 0 on success, -ENOTSUP when the interface does not do IPv4,
 *         -EALREADY when nothing is attached, -ESRCH when the attached
 *         config is not found in the pool.
 */
int net_if_config_ipv4_put(struct net_if *iface)
{
	int ret = 0;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	if (!net_if_flag_is_set(iface, NET_IF_IPV4)) {
		ret = -ENOTSUP;
		goto out;
	}

	if (!iface->config.ip.ipv4) {
		ret = -EALREADY;
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
		if (ipv4_addresses[i].iface != iface) {
			continue;
		}

		iface->config.ip.ipv4 = NULL;
		ipv4_addresses[i].iface = NULL;

		goto out;
	}

	ret = -ESRCH;
out:
	k_mutex_unlock(&lock);

	return ret;
}
2903
/* Return the per-interface IPv4 TTL, or 0 when no IPv4 config is
 * attached (or native IPv4 is compiled out).
 */
uint8_t net_if_ipv4_get_ttl(struct net_if *iface)
{
#if defined(CONFIG_NET_NATIVE_IPV4)
	int ret = 0;

	k_mutex_lock(&lock, K_FOREVER);

	if (!iface->config.ip.ipv4) {
		goto out;
	}

	ret = iface->config.ip.ipv4->ttl;
out:
	k_mutex_unlock(&lock);

	return ret;
#else
	ARG_UNUSED(iface);

	return 0;
#endif
}
2926
/* Set the per-interface IPv4 TTL; silently ignored when the interface
 * has no IPv4 config attached.
 */
void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl)
{
#if defined(CONFIG_NET_NATIVE_IPV4)
	k_mutex_lock(&lock, K_FOREVER);

	if (!iface->config.ip.ipv4) {
		goto out;
	}

	iface->config.ip.ipv4->ttl = ttl;
out:
	k_mutex_unlock(&lock);
#else
	ARG_UNUSED(iface);
	ARG_UNUSED(ttl);
#endif
}
2944
/* Look up the IPv4 router entry for @p addr on @p iface (thin wrapper
 * over the family-generic helper).
 */
struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
						struct in_addr *addr)
{
	return iface_router_lookup(iface, AF_INET, addr);
}
2950
/* Find a default IPv4 router, optionally restricted to @p iface
 * (semantics live in iface_router_find_default()).
 */
struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
						      struct in_addr *addr)
{
	return iface_router_find_default(iface, AF_INET, addr);
}
2956
/* Register @p addr as an IPv4 router on @p iface with the given
 * lifetime in seconds, optionally as the default router.
 */
struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
					     struct in_addr *addr,
					     bool is_default,
					     uint16_t lifetime)
{
	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
}
2964
/* Remove a previously added IPv4 router entry; true on success. */
bool net_if_ipv4_router_rm(struct net_if_router *router)
{
	return iface_router_rm(router);
}
2969
/* Check whether @p addr falls in the same subnet (under the interface
 * netmask) as any IPv4 unicast address configured on @p iface.
 * UNALIGNED_GET is used because @p addr may come from a packet buffer.
 */
bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
			       const struct in_addr *addr)
{
	bool ret = false;
	struct net_if_ipv4 *ipv4;
	uint32_t subnet;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		goto out;
	}

	subnet = UNALIGNED_GET(&addr->s_addr) & ipv4->netmask.s_addr;

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (!ipv4->unicast[i].is_used ||
		    ipv4->unicast[i].address.family != AF_INET) {
			continue;
		}

		if ((ipv4->unicast[i].address.in_addr.s_addr &
		     ipv4->netmask.s_addr) == subnet) {
			ret = true;
			goto out;
		}
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
3005
/* True when @p addr is the subnet-directed broadcast address of a
 * subnet configured on @p iface (all host bits set under the netmask).
 *
 * NOTE(review): the limited broadcast 255.255.255.255 fails the
 * mask_cmp() pre-check and is therefore not recognized here — confirm
 * callers handle it separately.
 */
static bool ipv4_is_broadcast_address(struct net_if *iface,
				      const struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;

	if (!ipv4) {
		return false;
	}

	/* Must be in one of our subnets to be *our* broadcast */
	if (!net_if_ipv4_addr_mask_cmp(iface, addr)) {
		return false;
	}

	/* All host bits set -> directed broadcast */
	if ((UNALIGNED_GET(&addr->s_addr) & ~ipv4->netmask.s_addr) ==
	    ~ipv4->netmask.s_addr) {
		return true;
	}

	return false;
}
3026
/* Check whether @p addr is a subnet broadcast address, either on the
 * given @p iface or, when @p iface is NULL, on any interface.
 */
bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
			       const struct in_addr *addr)
{
	bool ret = false;

	k_mutex_lock(&lock, K_FOREVER);

	if (iface) {
		ret = ipv4_is_broadcast_address(iface, addr);
		goto out;
	}

	STRUCT_SECTION_FOREACH(net_if, iface) {
		ret = ipv4_is_broadcast_address(iface, addr);
		if (ret) {
			goto out;
		}
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
3051
/* Choose the interface for sending to @p dst: the first interface
 * whose configured subnet contains @p dst, falling back to the default
 * interface when no subnet matches.
 */
struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
{
	struct net_if *selected = NULL;

	k_mutex_lock(&lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		bool ret;

		ret = net_if_ipv4_addr_mask_cmp(iface, dst);
		if (ret) {
			selected = iface;
			goto out;
		}
	}

	/* Loop fell through without a match: use the default iface */
	if (selected == NULL) {
		selected = net_if_get_default();
	}

out:
	k_mutex_unlock(&lock);

	return selected;
}
3077
/* Number of leading bits shared by two IPv4 addresses (0..32),
 * delegated to the byte-wise helper over the 4 address bytes.
 */
static uint8_t get_diff_ipv4(const struct in_addr *src,
			     const struct in_addr *dst)
{
	return get_ipaddr_diff((const uint8_t *)src, (const uint8_t *)dst, 4);
}
3083
is_proper_ipv4_address(struct net_if_addr * addr)3084 static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
3085 {
3086 if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
3087 addr->address.family == AF_INET &&
3088 !net_ipv4_is_ll_addr(&addr->address.in_addr)) {
3089 return true;
3090 }
3091
3092 return false;
3093 }
3094
net_if_ipv4_get_best_match(struct net_if * iface,const struct in_addr * dst,uint8_t * best_so_far)3095 static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
3096 const struct in_addr *dst,
3097 uint8_t *best_so_far)
3098 {
3099 struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
3100 struct in_addr *src = NULL;
3101 uint8_t len;
3102 int i;
3103
3104 if (!ipv4) {
3105 return NULL;
3106 }
3107
3108 for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
3109 if (!is_proper_ipv4_address(&ipv4->unicast[i])) {
3110 continue;
3111 }
3112
3113 len = get_diff_ipv4(dst, &ipv4->unicast[i].address.in_addr);
3114 if (len >= *best_so_far) {
3115 *best_so_far = len;
3116 src = &ipv4->unicast[i].address.in_addr;
3117 }
3118 }
3119
3120 return src;
3121 }
3122
/* Common lookup behind net_if_ipv4_get_ll()/get_global_addr(): return
 * the first IPv4 unicast address on @p iface matching @p addr_state
 * (NET_ADDR_ANY_STATE matches any) that is link-local when @p ll is
 * true and non-link-local otherwise.  NULL iface yields NULL.
 */
static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
					enum net_addr_state addr_state, bool ll)
{
	struct in_addr *addr = NULL;
	struct net_if_ipv4 *ipv4;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	if (!iface) {
		goto out;
	}

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (!ipv4->unicast[i].is_used ||
		    (addr_state != NET_ADDR_ANY_STATE &&
		     ipv4->unicast[i].addr_state != addr_state) ||
		    ipv4->unicast[i].address.family != AF_INET) {
			continue;
		}

		/* Keep only entries whose link-locality matches @p ll */
		if (net_ipv4_is_ll_addr(&ipv4->unicast[i].address.in_addr)) {
			if (!ll) {
				continue;
			}
		} else {
			if (ll) {
				continue;
			}
		}

		addr = &ipv4->unicast[i].address.in_addr;
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return addr;
}
3168
/* First link-local IPv4 address on @p iface in @p addr_state, or NULL. */
struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
				   enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, true);
}
3174
/* First global (non link-local) IPv4 address on @p iface in
 * @p addr_state, or NULL.
 */
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
					    enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, false);
}
3180
/* Pick the source IPv4 address for sending to @p dst.  Non link-local
 * destinations get the longest-prefix-matching preferred address;
 * link-local destinations get a link-local source.  Falls back to a
 * preferred global address on @p dst_iface and finally to 0.0.0.0 —
 * never returns NULL.
 */
const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
						  const struct in_addr *dst)
{
	const struct in_addr *src = NULL;
	uint8_t best_match = 0U;

	k_mutex_lock(&lock, K_FOREVER);

	if (!net_ipv4_is_ll_addr(dst)) {

		/* If caller has supplied interface, then use that */
		if (dst_iface) {
			src = net_if_ipv4_get_best_match(dst_iface, dst,
							 &best_match);
		} else {
			/* best_match carries over so the overall best
			 * across all interfaces wins
			 */
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in_addr *addr;

				addr = net_if_ipv4_get_best_match(iface, dst,
								  &best_match);
				if (addr) {
					src = addr;
				}
			}
		}

	} else {
		if (dst_iface) {
			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
		} else {
			STRUCT_SECTION_FOREACH(net_if, iface) {
				struct in_addr *addr;

				addr = net_if_ipv4_get_ll(iface,
							  NET_ADDR_PREFERRED);
				if (addr) {
					src = addr;
					break;
				}
			}
		}
	}

	if (!src) {
		src = net_if_ipv4_get_global_addr(dst_iface,
						  NET_ADDR_PREFERRED);
		if (!src) {
			src = net_ipv4_unspecified_address();
		}

		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return src;
}
3239
/* Find the interface address entry matching @p addr across all
 * interfaces; the owning interface is reported via @p ret (if given).
 * Returns NULL when no interface owns @p addr.
 */
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
					    struct net_if **ret)
{
	struct net_if_addr *ifaddr = NULL;

	k_mutex_lock(&lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
		int i;

		if (!ipv4) {
			continue;
		}

		for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
			if (!ipv4->unicast[i].is_used ||
			    ipv4->unicast[i].address.family != AF_INET) {
				continue;
			}

			/* @p addr may be unaligned (packet data) */
			if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
			    ipv4->unicast[i].address.in_addr.s_addr) {

				if (ret) {
					*ret = iface;
				}

				ifaddr = &ipv4->unicast[i];
				goto out;
			}
		}
	}

out:
	k_mutex_unlock(&lock);

	return ifaddr;
}
3279
z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr * addr)3280 int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
3281 {
3282 struct net_if_addr *if_addr;
3283 struct net_if *iface = NULL;
3284
3285 if_addr = net_if_ipv4_addr_lookup(addr, &iface);
3286 if (!if_addr) {
3287 return 0;
3288 }
3289
3290 return net_if_get_by_iface(iface);
3291 }
3292
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: copy @p addr out of user memory before
 * calling the kernel-side implementation.
 */
static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
					  const struct in_addr *addr)
{
	struct in_addr addr_v4;

	Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));

	return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
}
#include <syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
#endif
3305
/* Set the IPv4 netmask of @p iface, attaching an IPv4 config from the
 * pool first if needed.  Failures are silent (void API).
 */
void net_if_ipv4_set_netmask(struct net_if *iface,
			     const struct in_addr *netmask)
{
	k_mutex_lock(&lock, K_FOREVER);

	if (net_if_config_ipv4_get(iface, NULL) < 0) {
		goto out;
	}

	/* Defensive re-check of the attached config */
	if (!iface->config.ip.ipv4) {
		goto out;
	}

	net_ipaddr_copy(&iface->config.ip.ipv4->netmask, netmask);
out:
	k_mutex_unlock(&lock);
}
3323
/* Syscall implementation: set the netmask of the interface with the
 * given 1-based index; false only when the index is invalid.
 */
bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
					     const struct in_addr *netmask)
{
	struct net_if *iface;

	iface = net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	net_if_ipv4_set_netmask(iface, netmask);

	return true;
}
3338
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the index and copy the
 * netmask out of user memory before calling the implementation.
 */
bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
					     const struct in_addr *netmask)
{
	struct in_addr netmask_addr;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&netmask_addr, (void *)netmask,
				sizeof(netmask_addr)));

	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
}

#include <syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
3359
/* Set the IPv4 default gateway of @p iface, attaching an IPv4 config
 * from the pool first if needed.  Failures are silent (void API).
 */
void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
{
	k_mutex_lock(&lock, K_FOREVER);

	if (net_if_config_ipv4_get(iface, NULL) < 0) {
		goto out;
	}

	/* Defensive re-check of the attached config */
	if (!iface->config.ip.ipv4) {
		goto out;
	}

	net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
out:
	k_mutex_unlock(&lock);
}
3376
/* Syscall implementation: set the gateway of the interface with the
 * given 1-based index; false only when the index is invalid.
 */
bool z_impl_net_if_ipv4_set_gw_by_index(int index,
					const struct in_addr *gw)
{
	struct net_if *iface;

	iface = net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	net_if_ipv4_set_gw(iface, gw);

	return true;
}
3391
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the index and copy the
 * gateway address out of user memory before calling the implementation.
 */
bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
					const struct in_addr *gw)
{
	struct in_addr gw_addr;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));

	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
}

#include <syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
3411
/* Find the unicast slot on @p iface holding exactly @p addr, or NULL.
 * Callers must ensure the interface already has IPv4 config attached
 * (ipv4 is dereferenced without a NULL check).
 */
static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
					  struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (!ipv4->unicast[i].is_used) {
			continue;
		}

		if (net_ipv4_addr_cmp(addr,
				      &ipv4->unicast[i].address.in_addr)) {
			return &ipv4->unicast[i];
		}
	}

	return NULL;
}
3431
/* Add (or return the existing entry for) the IPv4 unicast address
 * @p addr on @p iface.
 *
 * @param addr_type  Source of the address (manual/DHCP/autoconf/...).
 * @param vlifetime  Valid lifetime; 0 means infinite.  The caller is
 *                   responsible for the associated timers.
 * @return The address entry, or NULL when no IPv4 config could be
 *         attached or all slots are in use.
 */
struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
					 struct in_addr *addr,
					 enum net_addr_type addr_type,
					 uint32_t vlifetime)
{
	struct net_if_addr *ifaddr = NULL;
	struct net_if_ipv4 *ipv4;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
		goto out;
	}

	ifaddr = ipv4_addr_find(iface, addr);
	if (ifaddr) {
		/* TODO: should set addr_type/vlifetime */
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		struct net_if_addr *cur = &ipv4->unicast[i];

		/* A DHCP address may displace an OVERRIDABLE entry even
		 * when that slot is currently in use.
		 */
		if (addr_type == NET_ADDR_DHCP
		    && cur->addr_type == NET_ADDR_OVERRIDABLE) {
			ifaddr = cur;
			break;
		}

		if (!ipv4->unicast[i].is_used) {
			ifaddr = cur;
			break;
		}
	}

	if (ifaddr) {
		ifaddr->is_used = true;
		ifaddr->address.family = AF_INET;
		ifaddr->address.in_addr.s4_addr32[0] =
						addr->s4_addr32[0];
		ifaddr->addr_type = addr_type;

		/* Caller has to take care of timers and their expiry */
		if (vlifetime) {
			ifaddr->is_infinite = false;
		} else {
			ifaddr->is_infinite = true;
		}

		/**
		 *  TODO: Handle properly PREFERRED/DEPRECATED state when
		 *  address in use, expired and renewal state.
		 */
		ifaddr->addr_state = NET_ADDR_PREFERRED;

		NET_DBG("[%d] interface %p address %s type %s added", i, iface,
			log_strdup(net_sprint_ipv4_addr(addr)),
			net_addr_type2str(addr_type));

		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
						&ifaddr->address.in_addr,
						sizeof(struct in_addr));
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ifaddr;
}
3503
/* Remove the IPv4 unicast address @p addr from @p iface, emitting a
 * NET_EVENT_IPV4_ADDR_DEL management event on success.
 *
 * @return true when the address was found and removed, false otherwise.
 */
bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4;
	bool ret = false;
	int i;

	k_mutex_lock(&lock, K_FOREVER);

	ipv4 = iface->config.ip.ipv4;
	if (!ipv4) {
		goto out;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (!ipv4->unicast[i].is_used) {
			continue;
		}

		if (!net_ipv4_addr_cmp(&ipv4->unicast[i].address.in_addr,
				       addr)) {
			continue;
		}

		ipv4->unicast[i].is_used = false;

		NET_DBG("[%d] interface %p address %s removed",
			i, iface, log_strdup(net_sprint_ipv4_addr(addr)));

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV4_ADDR_DEL, iface,
			&ipv4->unicast[i].address.in_addr,
			sizeof(struct in_addr));

		ret = true;
		goto out;
	}

out:
	k_mutex_unlock(&lock);

	return ret;
}
3546
/* Syscall implementation: add @p addr to the interface with the given
 * 1-based index; false when the index is invalid or adding fails.
 */
bool z_impl_net_if_ipv4_addr_add_by_index(int index,
					  struct in_addr *addr,
					  enum net_addr_type addr_type,
					  uint32_t vlifetime)
{
	struct net_if *iface;
	struct net_if_addr *if_addr;

	iface = net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
	return if_addr ? true : false;
}
3563
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the index and copy @p addr
 * out of user memory before calling the implementation.
 */
bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
					  struct in_addr *addr,
					  enum net_addr_type addr_type,
					  uint32_t vlifetime)
{
	struct in_addr addr_v4;
	struct net_if *iface;

	iface = z_vrfy_net_if_get_by_index(index);
	if (!iface) {
		return false;
	}

	Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));

	return z_impl_net_if_ipv4_addr_add_by_index(index,
						    &addr_v4,
						    addr_type,
						    vlifetime);
}

#include <syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
#endif /* CONFIG_USERSPACE */
3588
z_impl_net_if_ipv4_addr_rm_by_index(int index,const struct in_addr * addr)3589 bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
3590 const struct in_addr *addr)
3591 {
3592 struct net_if *iface;
3593
3594 iface = net_if_get_by_index(index);
3595 if (!iface) {
3596 return false;
3597 }
3598
3599 return net_if_ipv4_addr_rm(iface, addr);
3600 }
3601
3602 #ifdef CONFIG_USERSPACE
z_vrfy_net_if_ipv4_addr_rm_by_index(int index,const struct in_addr * addr)3603 bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
3604 const struct in_addr *addr)
3605 {
3606 struct in_addr addr_v4;
3607 struct net_if *iface;
3608
3609 iface = z_vrfy_net_if_get_by_index(index);
3610 if (!iface) {
3611 return false;
3612 }
3613
3614 Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
3615
3616 return (uint32_t)z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
3617 }
3618
3619 #include <syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
3620 #endif /* CONFIG_USERSPACE */
3621
/* Locate an IPv4 multicast address slot on the interface.
 *
 * When @p is_used is true only in-use slots are considered, otherwise
 * only free slots.  When @p addr is non-NULL the slot must also match
 * that address.  Returns NULL if nothing matches.
 */
static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
						 bool is_used,
						 const struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	if (ipv4 == NULL) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
		struct net_if_mcast_addr *maddr = &ipv4->mcast[i];

		/* Slot usage state must match exactly what was asked for */
		if (maddr->is_used != is_used) {
			continue;
		}

		if (addr != NULL &&
		    !net_ipv4_addr_cmp(&maddr->address.in_addr, addr)) {
			continue;
		}

		return maddr;
	}

	return NULL;
}
net_if_ipv4_maddr_add(struct net_if * iface,const struct in_addr * addr)3651 struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
3652 const struct in_addr *addr)
3653 {
3654 struct net_if_mcast_addr *maddr = NULL;
3655
3656 k_mutex_lock(&lock, K_FOREVER);
3657
3658 if (net_if_config_ipv4_get(iface, NULL) < 0) {
3659 goto out;
3660 }
3661
3662 if (!net_ipv4_is_addr_mcast(addr)) {
3663 NET_DBG("Address %s is not a multicast address.",
3664 log_strdup(net_sprint_ipv4_addr(addr)));
3665 goto out;
3666 }
3667
3668 maddr = ipv4_maddr_find(iface, false, NULL);
3669 if (maddr) {
3670 maddr->is_used = true;
3671 maddr->address.family = AF_INET;
3672 maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];
3673
3674 NET_DBG("interface %p address %s added", iface,
3675 log_strdup(net_sprint_ipv4_addr(addr)));
3676 }
3677
3678 out:
3679 k_mutex_unlock(&lock);
3680
3681 return maddr;
3682 }
3683
net_if_ipv4_maddr_rm(struct net_if * iface,const struct in_addr * addr)3684 bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
3685 {
3686 struct net_if_mcast_addr *maddr;
3687 bool ret = false;
3688
3689 k_mutex_lock(&lock, K_FOREVER);
3690
3691 maddr = ipv4_maddr_find(iface, true, addr);
3692 if (maddr) {
3693 maddr->is_used = false;
3694
3695 NET_DBG("interface %p address %s removed",
3696 iface, log_strdup(net_sprint_ipv4_addr(addr)));
3697
3698 ret = true;
3699 }
3700
3701 k_mutex_unlock(&lock);
3702
3703 return ret;
3704 }
3705
net_if_ipv4_maddr_lookup(const struct in_addr * maddr,struct net_if ** ret)3706 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
3707 struct net_if **ret)
3708 {
3709 struct net_if_mcast_addr *addr = NULL;
3710
3711 k_mutex_lock(&lock, K_FOREVER);
3712
3713 STRUCT_SECTION_FOREACH(net_if, iface) {
3714 if (ret && *ret && iface != *ret) {
3715 continue;
3716 }
3717
3718 addr = ipv4_maddr_find(iface, true, maddr);
3719 if (addr) {
3720 if (ret) {
3721 *ret = iface;
3722 }
3723
3724 goto out;
3725 }
3726 }
3727
3728 out:
3729 k_mutex_unlock(&lock);
3730
3731 return addr;
3732 }
3733
net_if_ipv4_maddr_leave(struct net_if_mcast_addr * addr)3734 void net_if_ipv4_maddr_leave(struct net_if_mcast_addr *addr)
3735 {
3736 NET_ASSERT(addr);
3737
3738 k_mutex_lock(&lock, K_FOREVER);
3739
3740 addr->is_joined = false;
3741
3742 k_mutex_unlock(&lock);
3743 }
3744
net_if_ipv4_maddr_join(struct net_if_mcast_addr * addr)3745 void net_if_ipv4_maddr_join(struct net_if_mcast_addr *addr)
3746 {
3747 NET_ASSERT(addr);
3748
3749 k_mutex_lock(&lock, K_FOREVER);
3750
3751 addr->is_joined = true;
3752
3753 k_mutex_unlock(&lock);
3754 }
3755
iface_ipv4_init(int if_count)3756 static void iface_ipv4_init(int if_count)
3757 {
3758 int i;
3759
3760 if (if_count > ARRAY_SIZE(ipv4_addresses)) {
3761 NET_WARN("You have %lu IPv4 net_if addresses but %d "
3762 "network interfaces", ARRAY_SIZE(ipv4_addresses),
3763 if_count);
3764 NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
3765 "value.");
3766 }
3767
3768 for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
3769 ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
3770 }
3771 }
3772
3773 #else
3774 #define iface_ipv4_init(...)
3775
net_if_ipv4_maddr_lookup(const struct in_addr * addr,struct net_if ** iface)3776 struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
3777 struct net_if **iface)
3778 {
3779 ARG_UNUSED(addr);
3780 ARG_UNUSED(iface);
3781
3782 return NULL;
3783 }
3784
net_if_ipv4_addr_lookup(const struct in_addr * addr,struct net_if ** ret)3785 struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
3786 struct net_if **ret)
3787 {
3788 ARG_UNUSED(addr);
3789 ARG_UNUSED(ret);
3790
3791 return NULL;
3792 }
3793
net_if_ipv4_get_global_addr(struct net_if * iface,enum net_addr_state addr_state)3794 struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
3795 enum net_addr_state addr_state)
3796 {
3797 ARG_UNUSED(addr_state);
3798 ARG_UNUSED(iface);
3799
3800 return NULL;
3801 }
3802 #endif /* CONFIG_NET_IPV4 */
3803
net_if_select_src_iface(const struct sockaddr * dst)3804 struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
3805 {
3806 struct net_if *iface = NULL;
3807
3808 if (!dst) {
3809 goto out;
3810 }
3811
3812 k_mutex_lock(&lock, K_FOREVER);
3813
3814 if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
3815 iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
3816 goto out;
3817 }
3818
3819 if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
3820 iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
3821 goto out;
3822 }
3823
3824 out:
3825 k_mutex_unlock(&lock);
3826
3827 if (iface == NULL) {
3828 iface = net_if_get_default();
3829 }
3830
3831 return iface;
3832 }
3833
net_if_recv_data(struct net_if * iface,struct net_pkt * pkt)3834 enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
3835 {
3836 if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
3837 net_if_is_promisc(iface)) {
3838 /* If the packet is not for us and the promiscuous
3839 * mode is enabled, then increase the ref count so
3840 * that net_core.c:processing_data() will not free it.
3841 * The promiscuous mode handler must free the packet
3842 * after it has finished working with it.
3843 *
3844 * If packet is for us, then NET_CONTINUE is returned.
3845 * In this case we must clone the packet, as the packet
3846 * could be manipulated by other part of the stack.
3847 */
3848 enum net_verdict verdict;
3849 struct net_pkt *new_pkt;
3850
3851 /* This protects pkt so that it will not be freed by L2 recv()
3852 */
3853 net_pkt_ref(pkt);
3854
3855 verdict = net_if_l2(iface)->recv(iface, pkt);
3856 if (verdict == NET_CONTINUE) {
3857 new_pkt = net_pkt_clone(pkt, K_NO_WAIT);
3858 } else {
3859 new_pkt = net_pkt_ref(pkt);
3860 }
3861
3862 /* L2 has modified the buffer starting point, it is easier
3863 * to re-initialize the cursor rather than updating it.
3864 */
3865 net_pkt_cursor_init(new_pkt);
3866
3867 if (net_promisc_mode_input(new_pkt) == NET_DROP) {
3868 net_pkt_unref(new_pkt);
3869 }
3870
3871 net_pkt_unref(pkt);
3872
3873 return verdict;
3874 }
3875
3876 return net_if_l2(iface)->recv(iface, pkt);
3877 }
3878
/* Register (or re-register) a link state callback.
 *
 * An already-registered entry is moved to the front of the list, so
 * registration is idempotent.
 */
void net_if_register_link_cb(struct net_if_link_cb *link,
			     net_if_link_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	/* Drop any previous registration before prepending. */
	sys_slist_find_and_remove(&link_callbacks, &link->node);
	sys_slist_prepend(&link_callbacks, &link->node);
	link->cb = cb;

	k_mutex_unlock(&lock);
}
3891
net_if_unregister_link_cb(struct net_if_link_cb * link)3892 void net_if_unregister_link_cb(struct net_if_link_cb *link)
3893 {
3894 k_mutex_lock(&lock, K_FOREVER);
3895
3896 sys_slist_find_and_remove(&link_callbacks, &link->node);
3897
3898 k_mutex_unlock(&lock);
3899 }
3900
net_if_call_link_cb(struct net_if * iface,struct net_linkaddr * lladdr,int status)3901 void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
3902 int status)
3903 {
3904 struct net_if_link_cb *link, *tmp;
3905
3906 k_mutex_lock(&lock, K_FOREVER);
3907
3908 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
3909 link->cb(iface, lladdr, status);
3910 }
3911
3912 k_mutex_unlock(&lock);
3913 }
3914
need_calc_checksum(struct net_if * iface,enum ethernet_hw_caps caps)3915 static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps)
3916 {
3917 #if defined(CONFIG_NET_L2_ETHERNET)
3918 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
3919 return true;
3920 }
3921
3922 return !(net_eth_get_hw_capabilities(iface) & caps);
3923 #else
3924 ARG_UNUSED(iface);
3925 ARG_UNUSED(caps);
3926
3927 return true;
3928 #endif
3929 }
3930
net_if_need_calc_tx_checksum(struct net_if * iface)3931 bool net_if_need_calc_tx_checksum(struct net_if *iface)
3932 {
3933 return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD);
3934 }
3935
net_if_need_calc_rx_checksum(struct net_if * iface)3936 bool net_if_need_calc_rx_checksum(struct net_if *iface)
3937 {
3938 return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD);
3939 }
3940
net_if_get_by_iface(struct net_if * iface)3941 int net_if_get_by_iface(struct net_if *iface)
3942 {
3943 if (!(iface >= _net_if_list_start && iface < _net_if_list_end)) {
3944 return -1;
3945 }
3946
3947 return (iface - _net_if_list_start) + 1;
3948 }
3949
/* Call @p cb once for every network interface, passing @p user_data
 * through unchanged.
 */
void net_if_foreach(net_if_cb_t cb, void *user_data)
{
	STRUCT_SECTION_FOREACH(net_if, iface) {
		cb(iface, user_data);
	}
}
3956
/* Bring a network interface administratively up.
 *
 * Returns 0 on success, -EALREADY if the interface was already up, or
 * the negative error from the L2 enable() hook.  On success the
 * NET_EVENT_IF_UP management event is emitted (while still holding the
 * global lock).
 */
int net_if_up(struct net_if *iface)
{
	int status = 0;

	NET_DBG("iface %p", iface);

	k_mutex_lock(&lock, K_FOREVER);

	if (net_if_flag_is_set(iface, NET_IF_UP)) {
		status = -EALREADY;
		goto out;
	}

	/* Offloaded interfaces have no local L2 to enable: just set the
	 * flag and notify listeners.
	 */
	if ((IS_ENABLED(CONFIG_NET_OFFLOAD) &&
	     net_if_is_ip_offloaded(iface)) ||
	    (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
	     net_if_is_socket_offloaded(iface))) {
		net_if_flag_set(iface, NET_IF_UP);
		goto notify;
	}

	/* If the L2 does not support enable just set the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	}

	/* Notify L2 to enable the interface */
	status = net_if_l2(iface)->enable(iface, true);
	if (status < 0) {
		goto out;
	}

done:
	/* In many places it's assumed that link address was set with
	 * net_if_set_link_addr(). Better check that now.
	 */
#if defined(CONFIG_NET_L2_CANBUS_RAW)
	if (IS_ENABLED(CONFIG_NET_SOCKETS_CAN) &&
	    (net_if_l2(iface) == &NET_L2_GET_NAME(CANBUS_RAW))) {
		/* CAN does not require link address. */
	} else
#endif /* CONFIG_NET_L2_CANBUS_RAW */
	{
		NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);
	}

	net_if_flag_set(iface, NET_IF_UP);

	/* If the interface is only having point-to-point traffic then we do
	 * not need to run DAD etc for it.
	 */
	if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
		iface_ipv6_start(iface);

		net_ipv4_autoconf_start(iface);
	}

notify:
	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);

out:
	k_mutex_unlock(&lock);

	return status;
}
4022
net_if_carrier_down(struct net_if * iface)4023 void net_if_carrier_down(struct net_if *iface)
4024 {
4025 NET_DBG("iface %p", iface);
4026
4027 k_mutex_lock(&lock, K_FOREVER);
4028
4029 net_if_flag_clear(iface, NET_IF_UP);
4030
4031 net_ipv4_autoconf_reset(iface);
4032
4033 net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
4034
4035 k_mutex_unlock(&lock);
4036 }
4037
/* Take a network interface administratively down.
 *
 * Leaves all multicast groups first.  For IP-offloaded interfaces only
 * the UP flag is cleared.  Returns 0 on success or the negative error
 * from the L2 enable() hook; NET_EVENT_IF_DOWN is emitted unless the
 * L2 disable call failed.
 */
int net_if_down(struct net_if *iface)
{
	int status = 0;

	NET_DBG("iface %p", iface);

	k_mutex_lock(&lock, K_FOREVER);

	/* Drop all multicast memberships before disabling anything */
	leave_mcast_all(iface);

	if (net_if_is_ip_offloaded(iface)) {
		goto done;
	}

	/* If the L2 does not support enable just clear the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	}

	/* Notify L2 to disable the interface */
	status = net_if_l2(iface)->enable(iface, false);
	if (status < 0) {
		goto out;
	}

	/* Detach virtual interfaces stacked on top of this one.  Note
	 * this is only reached when L2 disable succeeded.
	 */
	net_virtual_disable(iface);

done:
	net_if_flag_clear(iface, NET_IF_UP);

	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);

out:
	k_mutex_unlock(&lock);

	return status;
}
4075
/* Enable or disable promiscuous mode at the driver level.
 *
 * Returns 0 on success, -ENOTSUP when the L2 does not advertise
 * NET_L2_PROMISC_MODE (or Ethernet support is not built in), or the
 * error reported by the Ethernet driver.
 */
static int promisc_mode_set(struct net_if *iface, bool enable)
{
	enum net_l2_flags l2_flags = 0;

	NET_ASSERT(iface);

	l2_flags = l2_flags_get(iface);
	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_NET_L2_ETHERNET)
	/* Only Ethernet drivers expose a promiscuous-mode control */
	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
		int ret = net_eth_promisc_mode(iface, enable);

		if (ret < 0) {
			return ret;
		}
	}
#else
	ARG_UNUSED(enable);

	return -ENOTSUP;
#endif

	return 0;
}
4103
net_if_set_promisc(struct net_if * iface)4104 int net_if_set_promisc(struct net_if *iface)
4105 {
4106 int ret;
4107
4108 k_mutex_lock(&lock, K_FOREVER);
4109
4110 ret = promisc_mode_set(iface, true);
4111 if (ret < 0) {
4112 goto out;
4113 }
4114
4115 ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
4116 if (ret) {
4117 ret = -EALREADY;
4118 goto out;
4119 }
4120
4121 out:
4122 k_mutex_unlock(&lock);
4123
4124 return ret;
4125 }
4126
net_if_unset_promisc(struct net_if * iface)4127 void net_if_unset_promisc(struct net_if *iface)
4128 {
4129 int ret;
4130
4131 k_mutex_lock(&lock, K_FOREVER);
4132
4133 ret = promisc_mode_set(iface, false);
4134 if (ret < 0) {
4135 goto out;
4136 }
4137
4138 net_if_flag_clear(iface, NET_IF_PROMISC);
4139
4140 out:
4141 k_mutex_unlock(&lock);
4142 }
4143
net_if_is_promisc(struct net_if * iface)4144 bool net_if_is_promisc(struct net_if *iface)
4145 {
4146 NET_ASSERT(iface);
4147
4148 return net_if_flag_is_set(iface, NET_IF_PROMISC);
4149 }
4150
4151 #ifdef CONFIG_NET_POWER_MANAGEMENT
4152
net_if_suspend(struct net_if * iface)4153 int net_if_suspend(struct net_if *iface)
4154 {
4155 int ret = 0;
4156
4157 k_mutex_lock(&lock, K_FOREVER);
4158
4159 if (net_if_are_pending_tx_packets(iface)) {
4160 ret = -EBUSY;
4161 goto out;
4162 }
4163
4164 if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
4165 ret = -EALREADY;
4166 goto out;
4167 }
4168
4169 net_stats_add_suspend_start_time(iface, k_cycle_get_32());
4170
4171 out:
4172 k_mutex_unlock(&lock);
4173
4174 return ret;
4175 }
4176
net_if_resume(struct net_if * iface)4177 int net_if_resume(struct net_if *iface)
4178 {
4179 int ret = 0;
4180
4181 k_mutex_lock(&lock, K_FOREVER);
4182
4183 if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
4184 ret = -EALREADY;
4185 goto out;
4186 }
4187
4188 net_if_flag_clear(iface, NET_IF_SUSPENDED);
4189
4190 net_stats_add_suspend_end_time(iface, k_cycle_get_32());
4191
4192 out:
4193 k_mutex_unlock(&lock);
4194
4195 return ret;
4196 }
4197
net_if_is_suspended(struct net_if * iface)4198 bool net_if_is_suspended(struct net_if *iface)
4199 {
4200 return net_if_flag_is_set(iface, NET_IF_SUSPENDED);
4201 }
4202
4203 #endif /* CONFIG_NET_POWER_MANAGEMENT */
4204
4205 #if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
net_tx_ts_thread(void)4206 static void net_tx_ts_thread(void)
4207 {
4208 struct net_pkt *pkt;
4209
4210 NET_DBG("Starting TX timestamp callback thread");
4211
4212 while (1) {
4213 pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
4214 if (pkt) {
4215 net_if_call_timestamp_cb(pkt);
4216 }
4217 }
4218 }
4219
/* Register (or re-register) a packet timestamp callback.
 *
 * @p iface and @p pkt may be NULL to act as wildcards when matching in
 * net_if_call_timestamp_cb().  Re-registering moves the entry to the
 * front of the list.
 */
void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
				  struct net_pkt *pkt,
				  struct net_if *iface,
				  net_if_timestamp_callback_t cb)
{
	k_mutex_lock(&lock, K_FOREVER);

	/* Drop any previous registration before prepending. */
	sys_slist_find_and_remove(&timestamp_callbacks, &handle->node);
	sys_slist_prepend(&timestamp_callbacks, &handle->node);

	handle->iface = iface;
	handle->cb = cb;
	handle->pkt = pkt;

	k_mutex_unlock(&lock);
}
4236
net_if_unregister_timestamp_cb(struct net_if_timestamp_cb * handle)4237 void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
4238 {
4239 k_mutex_lock(&lock, K_FOREVER);
4240
4241 sys_slist_find_and_remove(×tamp_callbacks, &handle->node);
4242
4243 k_mutex_unlock(&lock);
4244 }
4245
net_if_call_timestamp_cb(struct net_pkt * pkt)4246 void net_if_call_timestamp_cb(struct net_pkt *pkt)
4247 {
4248 sys_snode_t *sn, *sns;
4249
4250 k_mutex_lock(&lock, K_FOREVER);
4251
4252 SYS_SLIST_FOR_EACH_NODE_SAFE(×tamp_callbacks, sn, sns) {
4253 struct net_if_timestamp_cb *handle =
4254 CONTAINER_OF(sn, struct net_if_timestamp_cb, node);
4255
4256 if (((handle->iface == NULL) ||
4257 (handle->iface == net_pkt_iface(pkt))) &&
4258 (handle->pkt == NULL || handle->pkt == pkt)) {
4259 handle->cb(pkt);
4260 }
4261 }
4262
4263 k_mutex_unlock(&lock);
4264 }
4265
net_if_add_tx_timestamp(struct net_pkt * pkt)4266 void net_if_add_tx_timestamp(struct net_pkt *pkt)
4267 {
4268 k_fifo_put(&tx_ts_queue, pkt);
4269 }
4270 #endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
4271
/* One-time network interface subsystem initialization.
 *
 * Initializes every interface in the net_if linker section, then the
 * per-interface IPv6/IPv4 data and router bookkeeping.  Optionally
 * starts the TX timestamp thread and sanity-checks the VLAN interface
 * count against the number of Ethernet interfaces.
 */
void net_if_init(void)
{
	int if_count = 0;

	NET_DBG("");

	k_mutex_lock(&lock, K_FOREVER);

	net_tc_tx_init();

	STRUCT_SECTION_FOREACH(net_if, iface) {
		init_iface(iface);
		if_count++;
	}

	if (if_count == 0) {
		NET_ERR("There is no network interface to work with!");
		goto out;
	}

	iface_ipv6_init(if_count);
	iface_ipv4_init(if_count);
	iface_router_init();

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
	k_thread_create(&tx_thread_ts, tx_ts_stack,
			K_KERNEL_STACK_SIZEOF(tx_ts_stack),
			(k_thread_entry_t)net_tx_ts_thread,
			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

#if defined(CONFIG_NET_VLAN)
	/* Make sure that we do not have too many network interfaces
	 * compared to the number of VLAN interfaces.
	 */
	if_count = 0;

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
			if_count++;
		}
	}

	if (if_count > CONFIG_NET_VLAN_COUNT) {
		NET_WARN("You have configured only %d VLAN interfaces"
			 " but you have %d network interfaces.",
			 CONFIG_NET_VLAN_COUNT, if_count);
	}
#endif

out:
	k_mutex_unlock(&lock);
}
4326
net_if_post_init(void)4327 void net_if_post_init(void)
4328 {
4329 NET_DBG("");
4330
4331 /* After TX is running, attempt to bring the interface up */
4332 STRUCT_SECTION_FOREACH(net_if, iface) {
4333 if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
4334 net_if_up(iface);
4335 }
4336 }
4337 }
4338