1 /*
2  * Copyright (c) 2019 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/logging/log.h>
8 LOG_MODULE_REGISTER(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_LOG_LEVEL);
9 
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <errno.h>
#include <string.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/conn_mgr_connectivity.h>
#include "conn_mgr_private.h"
19 
/* Monitor thread priority: lowest cooperative priority when net traffic class
 * threads are cooperative, otherwise preemptive priority 7.
 */
#if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
#else
#define THREAD_PRIORITY K_PRIO_PREEMPT(7)
#endif

/* Stack and thread object for the monitor thread spawned by conn_mgr_mon_init */
static K_THREAD_STACK_DEFINE(conn_mgr_mon_stack,
			     CONFIG_NET_CONNECTION_MANAGER_MONITOR_STACK_SIZE);
static struct k_thread conn_mgr_mon_thread;

/* Internal state array tracking readiness, flags, and other state information for all available
 * ifaces. Note that indexing starts at 0, whereas Zephyr iface indices start at 1.
 * conn_mgr_mon_get_if_by_index and conn_mgr_get_index_for_if are used to go back and forth between
 * iface_states indices and Zephyr iface pointers.
 */
uint16_t iface_states[CONN_MGR_IFACE_MAX];

/* Tracks the most recent total quantity of L4-ready ifaces (any, IPv4, IPv6).
 * Events fire only when one of these counts transitions between zero and nonzero.
 */
static uint16_t last_ready_count;
static uint16_t last_ready_count_ipv4;
static uint16_t last_ready_count_ipv6;

/* Tracks the last ifaces to cause a major state change (any, IPv4, IPv6) */
static struct net_if *last_blame;
static struct net_if *last_blame_ipv4;
static struct net_if *last_blame_ipv6;

/* Used to signal when modifications have been made that need to be responded to.
 * Starts available (count 1) so the monitor thread processes initial state once.
 */
K_SEM_DEFINE(conn_mgr_mon_updated, 1, 1);

/* Used to protect conn_mgr_monitor state */
K_MUTEX_DEFINE(conn_mgr_mon_lock);
52 
/**
 * @brief Resolve an iface_states slot to its iface.
 *
 * iface_states is zero-indexed while Zephyr iface indices start at one, so
 * the slot number is shifted by one before the lookup.
 *
 * @param index - Slot in iface_states to resolve.
 * @return net_if* - The iface corresponding to that slot.
 */
static struct net_if *conn_mgr_mon_get_if_by_index(int index)
{
	int zephyr_index = index + 1;

	return net_if_get_by_index(zephyr_index);
}
63 
/**
 * @brief Resolve an iface to its slot in iface_states.
 *
 * Inverse of conn_mgr_mon_get_if_by_index: Zephyr iface indices start at one,
 * so one is subtracted to obtain the zero-based slot.
 *
 * @param iface - iface to find the slot of.
 * @return int - The iface_states slot for this iface.
 */
static int conn_mgr_get_index_for_if(struct net_if *iface)
{
	int zephyr_index = net_if_get_by_iface(iface);

	return zephyr_index - 1;
}
74 
75 /**
76  * @brief Conveniently update iface readiness state
77  *
78  * @param idx - index (in iface_states) of the iface to mark ready or unready
79  * @param ready - true if the iface should be considered ready, otherwise false
80  * @param ready_ipv4 - true if the iface is ready with IPv4, otherwise false
81  * @param ready_ipv6 - true if the iface is ready with IPv6, otherwise false
82  */
conn_mgr_mon_set_ready(int idx,bool ready,bool ready_ipv4,bool ready_ipv6)83 static void conn_mgr_mon_set_ready(int idx, bool ready, bool ready_ipv4, bool ready_ipv6)
84 {
85 	/* Clear and then update the L4-readiness bit */
86 	iface_states[idx] &= ~CONN_MGR_IF_READY;
87 	iface_states[idx] &= ~CONN_MGR_IF_READY_IPV4;
88 	iface_states[idx] &= ~CONN_MGR_IF_READY_IPV6;
89 
90 	if (ready) {
91 		iface_states[idx] |= CONN_MGR_IF_READY;
92 	}
93 
94 	if (ready_ipv4) {
95 		iface_states[idx] |= CONN_MGR_IF_READY_IPV4;
96 	}
97 
98 	if (ready_ipv6) {
99 		iface_states[idx] |= CONN_MGR_IF_READY_IPV6;
100 	}
101 }
102 
/**
 * @brief Re-evaluate readiness of all tracked ifaces and emit L4 events.
 *
 * Walks iface_states, recomputes each iface's readiness (overall, IPv4, IPv6)
 * from its flag bits, and compares the new ready counts against the counts
 * recorded on the previous pass. Connectivity events are sent only when a
 * count transitions between zero and nonzero. Runs in the monitor thread
 * whenever conn_mgr_mon_updated is signalled, with conn_mgr_mon_lock held
 * for the whole pass.
 */
static void conn_mgr_mon_handle_update(void)
{
	int idx;
	bool has_ip;
	bool has_ipv6;
	bool has_ipv4;
	bool is_l4_ready;
	bool is_ipv6_ready;
	bool is_ipv4_ready;
	bool is_oper_up;
	bool was_l4_ready;
	bool was_ipv6_ready;
	bool was_ipv4_ready;
	bool is_ignored;
	int ready_count = 0;
	int ready_count_ipv4 = 0;
	int ready_count_ipv6 = 0;
	/* "Blame" is the last iface whose readiness flipped this pass; it is
	 * attached to any event emitted below.
	 */
	struct net_if *blame = NULL;
	struct net_if *blame_ipv4 = NULL;
	struct net_if *blame_ipv6 = NULL;

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	for (idx = 0; idx < ARRAY_SIZE(iface_states); idx++) {
		if (iface_states[idx] == 0) {
			/* This interface is not used */
			continue;
		}

		/* Detect whether iface was previously considered ready */
		was_l4_ready	= iface_states[idx] & CONN_MGR_IF_READY;
		was_ipv6_ready	= iface_states[idx] & CONN_MGR_IF_READY_IPV6;
		was_ipv4_ready	= iface_states[idx] & CONN_MGR_IF_READY_IPV4;

		/* Collect iface readiness requirements */
		has_ipv6	= iface_states[idx] & CONN_MGR_IF_IPV6_SET;
		has_ipv4	= iface_states[idx] & CONN_MGR_IF_IPV4_SET;
		has_ip		= has_ipv6 || has_ipv4;
		is_oper_up	= iface_states[idx] & CONN_MGR_IF_UP;
		is_ignored	= iface_states[idx] & CONN_MGR_IF_IGNORED;

		/* Determine whether iface is currently considered ready:
		 * operationally up, holding an address, and not ignored.
		 */
		is_l4_ready	= is_oper_up && has_ip   && !is_ignored;
		is_ipv6_ready	= is_oper_up && has_ipv6 && !is_ignored;
		is_ipv4_ready	= is_oper_up && has_ipv4 && !is_ignored;

		/* Track ready iface count */
		if (is_l4_ready) {
			ready_count += 1;
		}
		if (is_ipv6_ready) {
			ready_count_ipv6 += 1;
		}
		if (is_ipv4_ready) {
			ready_count_ipv4 += 1;
		}

		/* If any states changed, track blame for possibly triggered events */
		if (was_l4_ready != is_l4_ready) {
			blame = conn_mgr_mon_get_if_by_index(idx);
		}
		if (was_ipv6_ready != is_ipv6_ready) {
			blame_ipv6 = conn_mgr_mon_get_if_by_index(idx);
		}
		if (was_ipv4_ready != is_ipv4_ready) {
			blame_ipv4 = conn_mgr_mon_get_if_by_index(idx);
		}

		/* Update readiness state flags with the (possibly) new values */
		conn_mgr_mon_set_ready(idx, is_l4_ready, is_ipv4_ready, is_ipv6_ready);
	}

	/* If the total number of ready ifaces changed, possibly send an event */
	if (ready_count != last_ready_count) {
		if (ready_count == 0) {
			/* We just lost connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, blame);
		} else if (last_ready_count == 0) {
			/* We just gained connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, blame);
		}
		last_ready_count = ready_count;
		last_blame = blame;
	}

	/* Same, but specifically for IPv4 */
	if (ready_count_ipv4 != last_ready_count_ipv4) {
		if (ready_count_ipv4 == 0) {
			/* We just lost IPv4 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV4_DISCONNECTED, blame_ipv4);
		} else if (last_ready_count_ipv4 == 0) {
			/* We just gained IPv4 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV4_CONNECTED, blame_ipv4);
		}
		last_ready_count_ipv4 = ready_count_ipv4;
		last_blame_ipv4 = blame_ipv4;
	}

	/* Same, but specifically for IPv6 */
	if (ready_count_ipv6 != last_ready_count_ipv6) {
		if (ready_count_ipv6 == 0) {
			/* We just lost IPv6 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV6_DISCONNECTED, blame_ipv6);
		} else if (last_ready_count_ipv6 == 0) {
			/* We just gained IPv6 connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_IPV6_CONNECTED, blame_ipv6);
		}
		last_ready_count_ipv6 = ready_count_ipv6;
		last_blame_ipv6 = blame_ipv6;
	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}
216 
/**
 * @brief Initialize the internal state flags for the given iface using its current status
 *
 * Seeds the UP / IPV6_SET / IPV4_SET bits from the iface's live state so the
 * first monitor pass starts from an accurate baseline.
 *
 * @param iface - iface to initialize from.
 */
static void conn_mgr_mon_initial_state(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	if (net_if_is_up(iface)) {
		NET_DBG("Iface %p UP", iface);
		iface_states[idx] |= CONN_MGR_IF_UP;
	}

	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV6)) {
		/* NOTE(review): net_if_ipv6_get_global_addr takes `&iface` as an
		 * in/out parameter — confirm that passing a non-NULL iface limits
		 * the search to this iface and does not clobber the local pointer
		 * before the IPv4 check below.
		 */
		if (net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			NET_DBG("IPv6 addr set");
			iface_states[idx] |= CONN_MGR_IF_IPV6_SET;
		}
	}

	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV4)) {
		if (net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			NET_DBG("IPv4 addr set");
			iface_states[idx] |= CONN_MGR_IF_IPV4_SET;
		}

	}

	k_mutex_unlock(&conn_mgr_mon_lock);
}
250 
/**
 * @brief net_if_foreach callback that seeds state flags for one iface.
 *
 * @param iface - iface to initialize.
 * @param user_data - unused.
 */
static void conn_mgr_mon_init_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	conn_mgr_mon_initial_state(iface);
}
257 
/**
 * @brief Entry point of the connection manager monitor thread.
 *
 * Performs one-time setup (connectivity backends, net_mgmt event handlers,
 * initial per-iface state), then loops forever waiting for the
 * conn_mgr_mon_updated semaphore and re-evaluating readiness on each signal.
 * Never returns.
 */
static void conn_mgr_mon_thread_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* Hold the state lock across setup so no update is handled until the
	 * initial state of every iface has been recorded.
	 */
	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	conn_mgr_conn_init();

	conn_mgr_init_events_handler();

	net_if_foreach(conn_mgr_mon_init_cb, NULL);

	k_mutex_unlock(&conn_mgr_mon_lock);

	NET_DBG("Connection Manager started");

	while (true) {
		/* Wait for changes */
		k_sem_take(&conn_mgr_mon_updated, K_FOREVER);

		/* Respond to changes */
		conn_mgr_mon_handle_update();
	}
}
284 
conn_mgr_mon_resend_status(void)285 void conn_mgr_mon_resend_status(void)
286 {
287 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
288 
289 	if (last_ready_count == 0) {
290 		net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, last_blame);
291 	} else {
292 		net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, last_blame);
293 	}
294 
295 	if (last_ready_count_ipv6 == 0) {
296 		net_mgmt_event_notify(NET_EVENT_L4_IPV6_DISCONNECTED, last_blame_ipv6);
297 	} else {
298 		net_mgmt_event_notify(NET_EVENT_L4_IPV6_CONNECTED, last_blame_ipv6);
299 	}
300 
301 	if (last_ready_count_ipv4 == 0) {
302 		net_mgmt_event_notify(NET_EVENT_L4_IPV4_DISCONNECTED, last_blame_ipv4);
303 	} else {
304 		net_mgmt_event_notify(NET_EVENT_L4_IPV4_CONNECTED, last_blame_ipv4);
305 	}
306 
307 	k_mutex_unlock(&conn_mgr_mon_lock);
308 }
309 
conn_mgr_ignore_iface(struct net_if * iface)310 void conn_mgr_ignore_iface(struct net_if *iface)
311 {
312 	int idx = conn_mgr_get_index_for_if(iface);
313 
314 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
315 
316 	if (!(iface_states[idx] & CONN_MGR_IF_IGNORED)) {
317 		/* Set ignored flag and mark state as changed */
318 		iface_states[idx] |= CONN_MGR_IF_IGNORED;
319 		k_sem_give(&conn_mgr_mon_updated);
320 	}
321 
322 	k_mutex_unlock(&conn_mgr_mon_lock);
323 }
324 
conn_mgr_watch_iface(struct net_if * iface)325 void conn_mgr_watch_iface(struct net_if *iface)
326 {
327 	int idx = conn_mgr_get_index_for_if(iface);
328 
329 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
330 
331 	if (iface_states[idx] & CONN_MGR_IF_IGNORED) {
332 		/* Clear ignored flag and mark state as changed */
333 		iface_states[idx] &= ~CONN_MGR_IF_IGNORED;
334 		k_sem_give(&conn_mgr_mon_updated);
335 	}
336 
337 	k_mutex_unlock(&conn_mgr_mon_lock);
338 }
339 
conn_mgr_is_iface_ignored(struct net_if * iface)340 bool conn_mgr_is_iface_ignored(struct net_if *iface)
341 {
342 	int idx = conn_mgr_get_index_for_if(iface);
343 
344 	bool ret = false;
345 
346 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
347 
348 	ret = iface_states[idx] & CONN_MGR_IF_IGNORED;
349 
350 	k_mutex_unlock(&conn_mgr_mon_lock);
351 
352 	return ret;
353 }
354 
355 /**
356  * @brief Check whether a provided iface uses the provided L2.
357  *
358  * @param iface - iface to check.
359  * @param l2 - L2 to check. NULL will match offloaded ifaces.
360  * @retval true if the iface uses the provided L2.
361  * @retval false otherwise.
362  */
iface_uses_l2(struct net_if * iface,const struct net_l2 * l2)363 static bool iface_uses_l2(struct net_if *iface, const struct net_l2 *l2)
364 {
365 	return	(!l2 && net_if_offload(iface)) ||
366 		(net_if_l2(iface) == l2);
367 }
368 
conn_mgr_ignore_l2(const struct net_l2 * l2)369 void conn_mgr_ignore_l2(const struct net_l2 *l2)
370 {
371 	/* conn_mgr_ignore_iface already locks the mutex, but we lock it here too
372 	 * so that all matching ifaces are updated simultaneously.
373 	 */
374 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
375 
376 	STRUCT_SECTION_FOREACH(net_if, iface) {
377 		if (iface_uses_l2(iface, l2)) {
378 			conn_mgr_ignore_iface(iface);
379 		}
380 	}
381 
382 	k_mutex_unlock(&conn_mgr_mon_lock);
383 }
384 
conn_mgr_watch_l2(const struct net_l2 * l2)385 void conn_mgr_watch_l2(const struct net_l2 *l2)
386 {
387 	/* conn_mgr_watch_iface already locks the mutex, but we lock it here too
388 	 * so that all matching ifaces are updated simultaneously.
389 	 */
390 	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
391 
392 	STRUCT_SECTION_FOREACH(net_if, iface) {
393 		if (iface_uses_l2(iface, l2)) {
394 			conn_mgr_watch_iface(iface);
395 		}
396 	}
397 
398 	k_mutex_unlock(&conn_mgr_mon_lock);
399 }
400 
conn_mgr_mon_init(void)401 static int conn_mgr_mon_init(void)
402 {
403 	int i;
404 
405 	for (i = 0; i < ARRAY_SIZE(iface_states); i++) {
406 		iface_states[i] = 0;
407 	}
408 
409 	k_thread_create(&conn_mgr_mon_thread, conn_mgr_mon_stack,
410 			CONFIG_NET_CONNECTION_MANAGER_MONITOR_STACK_SIZE,
411 			conn_mgr_mon_thread_fn,
412 			NULL, NULL, NULL, THREAD_PRIORITY, 0, K_NO_WAIT);
413 	k_thread_name_set(&conn_mgr_mon_thread, "conn_mgr_monitor");
414 
415 	return 0;
416 }
417 
conn_mgr_if_state(struct net_if * iface)418 uint16_t conn_mgr_if_state(struct net_if *iface)
419 {
420 	int idx = conn_mgr_get_index_for_if(iface);
421 	uint16_t state = CONN_MGR_IF_STATE_INVALID;
422 
423 	if (idx < CONN_MGR_IFACE_MAX) {
424 		k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);
425 		state = iface_states[idx];
426 		k_mutex_unlock(&conn_mgr_mon_lock);
427 	}
428 
429 	return state;
430 }
431 
/* Start the connection manager monitor during the APPLICATION init phase */
SYS_INIT(conn_mgr_mon_init, APPLICATION, CONFIG_NET_CONNECTION_MANAGER_MONITOR_PRIORITY);
433