1 /*
2  * Copyright (c) 2019 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/logging/log.h>
8 LOG_MODULE_REGISTER(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_LOG_LEVEL);
9 
10 #include <zephyr/init.h>
11 #include <zephyr/kernel.h>
12 #include <errno.h>
13 #include <zephyr/net/net_core.h>
14 #include <zephyr/net/net_if.h>
15 #include <zephyr/net/net_mgmt.h>
16 #include <zephyr/sys/iterable_sections.h>
17 #include <zephyr/net/conn_mgr_connectivity.h>
18 #include "conn_mgr_private.h"
19 
20 #if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
21 #define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
22 #else
23 #define THREAD_PRIORITY K_PRIO_PREEMPT(7)
24 #endif
25 
26 static K_THREAD_STACK_DEFINE(conn_mgr_mon_stack,
27 			     CONFIG_NET_CONNECTION_MANAGER_MONITOR_STACK_SIZE);
28 static struct k_thread conn_mgr_mon_thread;
29 
30 /* Internal state array tracking readiness, flags, and other state information for all available
31  * ifaces. Note that indexing starts at 0, whereas Zephyr iface indices start at 1.
32  * conn_mgr_mon_get_if_by_index and conn_mgr_get_index_for_if are used to go back and forth between
33  * iface_states indices and Zephyr iface pointers.
34  */
35 uint16_t iface_states[CONN_MGR_IFACE_MAX];
36 
37 /* Tracks the total number of L4-ready ifaces */
38 static uint16_t ready_count;
39 
40 /* Tracks the last ifaces to change state in each respective direction */
41 static struct net_if *last_iface_down;
42 static struct net_if *last_iface_up;
43 
44 /* Used to signal when modifications have been made that need to be responded to */
45 K_SEM_DEFINE(conn_mgr_mon_updated, 1, 1);
46 
47 /* Used to protect conn_mgr_monitor state */
48 K_MUTEX_DEFINE(conn_mgr_mon_lock);
49 
/**
 * @brief Look up the iface whose state lives in a given iface_states slot.
 *
 * iface_states is zero-based while Zephyr iface indices are one-based, so
 * the slot index is shifted up by one before the lookup.
 *
 * @param index - Zero-based slot in iface_states.
 * @return net_if* - The iface tracked by that slot.
 */
static struct net_if *conn_mgr_mon_get_if_by_index(int index)
{
	int zephyr_index = index + 1;

	return net_if_get_by_index(zephyr_index);
}
60 
/**
 * @brief Map an iface to its slot in iface_states.
 *
 * Inverse of conn_mgr_mon_get_if_by_index(): Zephyr iface indices are
 * one-based, iface_states slots are zero-based.
 *
 * @param iface - iface to find the slot of.
 * @return int - The zero-based iface_states slot.
 */
static int conn_mgr_get_index_for_if(struct net_if *iface)
{
	int zephyr_index = net_if_get_by_iface(iface);

	return zephyr_index - 1;
}
71 
72 /**
73  * @brief Marks an iface as ready or unready and updates all associated state tracking.
74  *
75  * @param idx - index (in iface_states) of the iface to mark ready or unready
76  * @param readiness - true if the iface should be considered ready, otherwise false
77  */
conn_mgr_mon_set_ready(int idx,bool readiness)78 static void conn_mgr_mon_set_ready(int idx, bool readiness)
79 {
80 	/* Clear and then update the L4-readiness bit */
81 	iface_states[idx] &= ~CONN_MGR_IF_READY;
82 
83 	if (readiness) {
84 		iface_states[idx] |= CONN_MGR_IF_READY;
85 
86 		ready_count += 1;
87 		last_iface_up = conn_mgr_mon_get_if_by_index(idx);
88 	} else {
89 		ready_count -= 1;
90 		last_iface_down = conn_mgr_mon_get_if_by_index(idx);
91 	}
92 }
93 
conn_mgr_mon_handle_update(void)94 static void conn_mgr_mon_handle_update(void)
95 {
96 	int idx;
97 	int original_ready_count;
98 	bool is_ip_ready;
99 	bool is_ipv6_ready;
100 	bool is_ipv4_ready;
101 	bool is_l4_ready;
102 	bool is_oper_up;
103 	bool was_l4_ready;
104 	bool is_ignored;
105 
106 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
107 
108 	original_ready_count = ready_count;
109 	for (idx = 0; idx < ARRAY_SIZE(iface_states); idx++) {
110 		if (iface_states[idx] == 0) {
111 			/* This interface is not used */
112 			continue;
113 		}
114 
115 		if (!(iface_states[idx] & CONN_MGR_IF_CHANGED)) {
116 			/* No changes on this iface */
117 			continue;
118 		}
119 
120 		/* Clear the state-change flag */
121 		iface_states[idx] &= ~CONN_MGR_IF_CHANGED;
122 
123 		/* Detect whether the iface is currently or was L4 ready */
124 		was_l4_ready	= iface_states[idx] & CONN_MGR_IF_READY;
125 		is_ipv6_ready	= iface_states[idx] & CONN_MGR_IF_IPV6_SET;
126 		is_ipv4_ready	= iface_states[idx] & CONN_MGR_IF_IPV4_SET;
127 		is_oper_up	= iface_states[idx] & CONN_MGR_IF_UP;
128 		is_ignored	= iface_states[idx] & CONN_MGR_IF_IGNORED;
129 		is_ip_ready	= is_ipv6_ready || is_ipv4_ready;
130 		is_l4_ready	= is_oper_up && is_ip_ready && !is_ignored;
131 
132 		/* Respond to changes to iface readiness */
133 		if (was_l4_ready != is_l4_ready) {
134 			/* Track the iface readiness change */
135 			conn_mgr_mon_set_ready(idx, is_l4_ready);
136 		}
137 	}
138 
139 	/* If the total number of ready ifaces changed, possibly send an event */
140 	if (ready_count != original_ready_count) {
141 		if (ready_count == 0) {
142 			/* We just lost connectivity */
143 			net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, last_iface_down);
144 		} else if (original_ready_count == 0) {
145 			/* We just gained connectivity */
146 			net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, last_iface_up);
147 		}
148 	}
149 
150 	k_mutex_unlock(&conn_mgr_mon_lock);
151 }
152 
153 /**
154  * @brief Initialize the internal state flags for the given iface using its current status
155  *
156  * @param iface - iface to initialize from.
157  */
conn_mgr_mon_initial_state(struct net_if * iface)158 static void conn_mgr_mon_initial_state(struct net_if *iface)
159 {
160 	int idx = net_if_get_by_iface(iface) - 1;
161 
162 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
163 
164 	if (net_if_is_up(iface)) {
165 		NET_DBG("Iface %p UP", iface);
166 		iface_states[idx] |= CONN_MGR_IF_UP;
167 	}
168 
169 	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV6)) {
170 		if (net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
171 			NET_DBG("IPv6 addr set");
172 			iface_states[idx] |= CONN_MGR_IF_IPV6_SET;
173 		}
174 	}
175 
176 	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV4)) {
177 		if (net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
178 			NET_DBG("IPv4 addr set");
179 			iface_states[idx] |= CONN_MGR_IF_IPV4_SET;
180 		}
181 
182 	}
183 
184 	iface_states[idx] |= CONN_MGR_IF_CHANGED;
185 
186 	k_mutex_unlock(&conn_mgr_mon_lock);
187 }
188 
/* net_if_foreach() adapter: seed state tracking for one existing iface. */
static void conn_mgr_mon_init_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	conn_mgr_mon_initial_state(iface);
}
195 
conn_mgr_mon_thread_fn(void)196 static void conn_mgr_mon_thread_fn(void)
197 {
198 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
199 
200 	conn_mgr_conn_init();
201 
202 	conn_mgr_init_events_handler();
203 
204 	net_if_foreach(conn_mgr_mon_init_cb, NULL);
205 
206 	k_mutex_unlock(&conn_mgr_mon_lock);
207 
208 	NET_DBG("Connection Manager started");
209 
210 	while (true) {
211 		/* Wait for changes */
212 		k_sem_take(&conn_mgr_mon_updated, K_FOREVER);
213 
214 		/* Respond to changes */
215 		conn_mgr_mon_handle_update();
216 	}
217 }
218 
conn_mgr_mon_resend_status(void)219 void conn_mgr_mon_resend_status(void)
220 {
221 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
222 
223 	if (ready_count == 0) {
224 		net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, last_iface_down);
225 	} else {
226 		net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, last_iface_up);
227 	}
228 
229 	k_mutex_unlock(&conn_mgr_mon_lock);
230 }
231 
conn_mgr_ignore_iface(struct net_if * iface)232 void conn_mgr_ignore_iface(struct net_if *iface)
233 {
234 	int idx = conn_mgr_get_index_for_if(iface);
235 
236 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
237 
238 	if (!(iface_states[idx] & CONN_MGR_IF_IGNORED)) {
239 		/* Set ignored flag and mark state as changed */
240 		iface_states[idx] |= CONN_MGR_IF_IGNORED;
241 		iface_states[idx] |= CONN_MGR_IF_CHANGED;
242 		k_sem_give(&conn_mgr_mon_updated);
243 	}
244 
245 	k_mutex_unlock(&conn_mgr_mon_lock);
246 }
247 
conn_mgr_watch_iface(struct net_if * iface)248 void conn_mgr_watch_iface(struct net_if *iface)
249 {
250 	int idx = conn_mgr_get_index_for_if(iface);
251 
252 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
253 
254 	if (iface_states[idx] & CONN_MGR_IF_IGNORED) {
255 		/* Clear ignored flag and mark state as changed */
256 		iface_states[idx] &= ~CONN_MGR_IF_IGNORED;
257 		iface_states[idx] |= CONN_MGR_IF_CHANGED;
258 		k_sem_give(&conn_mgr_mon_updated);
259 	}
260 
261 	k_mutex_unlock(&conn_mgr_mon_lock);
262 }
263 
conn_mgr_is_iface_ignored(struct net_if * iface)264 bool conn_mgr_is_iface_ignored(struct net_if *iface)
265 {
266 	int idx = conn_mgr_get_index_for_if(iface);
267 
268 	bool ret = false;
269 
270 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
271 
272 	ret = iface_states[idx] & CONN_MGR_IF_IGNORED;
273 
274 	k_mutex_unlock(&conn_mgr_mon_lock);
275 
276 	return ret;
277 }
278 
279 /**
280  * @brief Check whether a provided iface uses the provided L2.
281  *
282  * @param iface - iface to check.
283  * @param l2 - L2 to check. NULL will match offloaded ifaces.
284  * @retval true if the iface uses the provided L2.
285  * @retval false otherwise.
286  */
iface_uses_l2(struct net_if * iface,const struct net_l2 * l2)287 static bool iface_uses_l2(struct net_if *iface, const struct net_l2 *l2)
288 {
289 	return	(!l2 && net_if_offload(iface)) ||
290 		(net_if_l2(iface) == l2);
291 }
292 
conn_mgr_ignore_l2(const struct net_l2 * l2)293 void conn_mgr_ignore_l2(const struct net_l2 *l2)
294 {
295 	/* conn_mgr_ignore_iface already locks the mutex, but we lock it here too
296 	 * so that all matching ifaces are updated simultaneously.
297 	 */
298 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
299 
300 	STRUCT_SECTION_FOREACH(net_if, iface) {
301 		if (iface_uses_l2(iface, l2)) {
302 			conn_mgr_ignore_iface(iface);
303 		}
304 	}
305 
306 	k_mutex_unlock(&conn_mgr_mon_lock);
307 }
308 
conn_mgr_watch_l2(const struct net_l2 * l2)309 void conn_mgr_watch_l2(const struct net_l2 *l2)
310 {
311 	/* conn_mgr_watch_iface already locks the mutex, but we lock it here too
312 	 * so that all matching ifaces are updated simultaneously.
313 	 */
314 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
315 
316 	STRUCT_SECTION_FOREACH(net_if, iface) {
317 		if (iface_uses_l2(iface, l2)) {
318 			conn_mgr_watch_iface(iface);
319 		}
320 	}
321 
322 	k_mutex_unlock(&conn_mgr_mon_lock);
323 }
324 
conn_mgr_mon_init(void)325 static int conn_mgr_mon_init(void)
326 {
327 	int i;
328 
329 	for (i = 0; i < ARRAY_SIZE(iface_states); i++) {
330 		iface_states[i] = 0;
331 	}
332 
333 	k_thread_create(&conn_mgr_mon_thread, conn_mgr_mon_stack,
334 			CONFIG_NET_CONNECTION_MANAGER_MONITOR_STACK_SIZE,
335 			(k_thread_entry_t)conn_mgr_mon_thread_fn,
336 			NULL, NULL, NULL, THREAD_PRIORITY, 0, K_NO_WAIT);
337 
338 	return 0;
339 }
340 
341 SYS_INIT(conn_mgr_mon_init, APPLICATION, CONFIG_NET_CONNECTION_MANAGER_MONITOR_PRIORITY);
342