/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include "conn_mgr_private.h"

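/*
 * Per-interface state bitmasks (CONN_MGR_IF_UP, CONN_MGR_IF_IPV4_SET,
 * CONN_MGR_IF_IPV6_SET), defined by the conn_mgr monitor and protected
 * by conn_mgr_mon_lock.
 */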
extern uint16_t iface_states[CONN_MGR_IFACE_MAX];

static struct net_mgmt_event_callback iface_events_cb;
static struct net_mgmt_event_callback ipv6_events_cb;
static struct net_mgmt_event_callback ipv4_events_cb;

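/* Track interface up/down events and update the CONN_MGR_IF_UP bit. */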
static void conn_mgr_iface_events_handler(struct net_mgmt_event_callback *cb,
					  uint32_t mgmt_event,
					  struct net_if *iface)
{
	int idx;

	NET_DBG("%s event 0x%x received on iface %d (%p)", "Iface", mgmt_event,
		net_if_get_by_iface(iface), iface);

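	/* Ignore any event that is not part of the interface event set. */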
	if ((mgmt_event & CONN_MGR_IFACE_EVENTS_MASK) != mgmt_event) {
		return;
	}

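	/* net_if indices are 1-based; convert to a 0-based index into iface_states. */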
	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IF_CMD_DOWN:
		iface_states[idx] &= ~CONN_MGR_IF_UP;
		break;
	case NET_EVENT_IF_CMD_UP:
		iface_states[idx] |= CONN_MGR_IF_UP;
		break;
	default:
		goto done;
	}
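	/* Wake the monitor thread so it can re-evaluate connectivity. */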
	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}

#if defined(CONFIG_NET_IPV6)
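/*
 * Track whether a preferred global IPv6 address is available on the iface:
 * CONN_MGR_IF_IPV6_SET is set once DAD succeeds or an address is added, and
 * cleared when no such address remains.
 */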
static void conn_mgr_ipv6_events_handler(struct net_mgmt_event_callback *cb,
					 uint32_t mgmt_event,
					 struct net_if *iface)
{
	int idx;

	NET_DBG("%s event 0x%x received on iface %d (%p)", "IPv6", mgmt_event,
		net_if_get_by_iface(iface), iface);

	if ((mgmt_event & CONN_MGR_IPV6_EVENTS_MASK) != mgmt_event) {
		return;
	}

	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IPV6_CMD_DAD_SUCCEED:
		__fallthrough;
	case NET_EVENT_IPV6_CMD_ADDR_ADD:
		if (net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			iface_states[idx] |= CONN_MGR_IF_IPV6_SET;
		}
		break;
	case NET_EVENT_IPV6_CMD_DAD_FAILED:
		__fallthrough;
	case NET_EVENT_IPV6_CMD_ADDR_DEL:
		if (!net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			iface_states[idx] &= ~CONN_MGR_IF_IPV6_SET;
		}

		break;
	default:
		goto done;
	}

	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}
#else
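/* Empty stub so conn_mgr_init_events_handler() compiles when IPv6 is disabled. */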
static inline
void conn_mgr_ipv6_events_handler(struct net_mgmt_event_callback *cb,
				  uint32_t mgmt_event,
				  struct net_if *iface)
{
	ARG_UNUSED(cb);
	ARG_UNUSED(mgmt_event);
	ARG_UNUSED(iface);
}
#endif /* CONFIG_NET_IPV6 */

#if defined(CONFIG_NET_IPV4)
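/*
 * Same pattern as the IPv6 handler: CONN_MGR_IF_IPV4_SET mirrors whether a
 * preferred global IPv4 address is available, based on ACD results and
 * address add/delete events.
 */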
static void conn_mgr_ipv4_events_handler(struct net_mgmt_event_callback *cb,
					 uint32_t mgmt_event,
					 struct net_if *iface)
{
	int idx;

	NET_DBG("%s event 0x%x received on iface %d (%p)", "IPv4", mgmt_event,
		net_if_get_by_iface(iface), iface);

	if ((mgmt_event & CONN_MGR_IPV4_EVENTS_MASK) != mgmt_event) {
		return;
	}

	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IPV4_CMD_ACD_SUCCEED:
		__fallthrough;
	case NET_EVENT_IPV4_CMD_ADDR_ADD:
		if (net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			iface_states[idx] |= CONN_MGR_IF_IPV4_SET;
		}

		break;
	case NET_EVENT_IPV4_CMD_ACD_FAILED:
		__fallthrough;
	case NET_EVENT_IPV4_CMD_ADDR_DEL:
		if (!net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			iface_states[idx] &= ~CONN_MGR_IF_IPV4_SET;
		}

		break;
	default:
		goto done;
	}

	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}
#else
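/* Empty stub so conn_mgr_init_events_handler() compiles when IPv4 is disabled. */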
static inline
void conn_mgr_ipv4_events_handler(struct net_mgmt_event_callback *cb,
				  uint32_t mgmt_event,
				  struct net_if *iface)
{
	ARG_UNUSED(cb);
	ARG_UNUSED(mgmt_event);
	ARG_UNUSED(iface);
}
#endif /* CONFIG_NET_IPV4 */

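/*
 * Register the interface, IPv6, and IPv4 event callbacks with net_mgmt.
 * The IP-specific callbacks are only registered when the corresponding
 * IP stack is enabled.
 */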
void conn_mgr_init_events_handler(void)
{
	net_mgmt_init_event_callback(&iface_events_cb,
				     conn_mgr_iface_events_handler,
				     CONN_MGR_IFACE_EVENTS_MASK);
	net_mgmt_add_event_callback(&iface_events_cb);

	if (IS_ENABLED(CONFIG_NET_IPV6)) {
		net_mgmt_init_event_callback(&ipv6_events_cb,
					     conn_mgr_ipv6_events_handler,
					     CONN_MGR_IPV6_EVENTS_MASK);
		net_mgmt_add_event_callback(&ipv6_events_cb);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4)) {
		net_mgmt_init_event_callback(&ipv4_events_cb,
					     conn_mgr_ipv4_events_handler,
					     CONN_MGR_IPV4_EVENTS_MASK);
		net_mgmt_add_event_callback(&ipv4_events_cb);
	}
}