/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_LOG_LEVEL);

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/net/conn_mgr_connectivity.h>
#include "conn_mgr_private.h"

#if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
#else
#define THREAD_PRIORITY K_PRIO_PREEMPT(7)
#endif

/* Internal state array tracking readiness, flags, and other state information for all available
 * ifaces. Note that indexing starts at 0, whereas Zephyr iface indices start at 1.
 * conn_mgr_get_if_by_index and conn_mgr_get_index_for_if are used to go back and forth between
 * iface_states indices and Zephyr iface pointers.
 */
uint16_t iface_states[CONN_MGR_IFACE_MAX];

/* Tracks the total number of L4-ready ifaces */
static uint16_t ready_count;

/* Tracks the last ifaces to change state in each respective direction */
static struct net_if *last_iface_down;
static struct net_if *last_iface_up;

/* Used to signal when modifications have been made that need to be responded to */
K_SEM_DEFINE(conn_mgr_event_signal, 1, 1);

/* Used to protect conn_mgr state */
K_MUTEX_DEFINE(conn_mgr_lock);
/**
 * @brief Retrieves a pointer to an iface by the index that corresponds to it in iface_states
 *
 * @param index - The index in iface_states to find the corresponding iface for.
 * @return net_if* - The corresponding iface.
 */
static struct net_if *conn_mgr_get_if_by_index(int index)
{
	return net_if_get_by_index(index + 1);
}

/**
 * @brief Gets the index in iface_states for the state corresponding to a provided iface.
 *
 * @param iface - iface to find the index of.
 * @return int - The index found.
 */
static int conn_mgr_get_index_for_if(struct net_if *iface)
{
	return net_if_get_by_iface(iface) - 1;
}

/**
 * @brief Marks an iface as ready or unready and updates all associated state tracking.
 *
 * @param idx - index (in iface_states) of the iface to mark ready or unready
 * @param readiness - true if the iface should be considered ready, otherwise false
 */
static void conn_mgr_set_ready(int idx, bool readiness)
{
	/* Clear and then update the L4-readiness bit */
	iface_states[idx] &= ~CONN_MGR_IF_READY;

	if (readiness) {
		iface_states[idx] |= CONN_MGR_IF_READY;

		ready_count += 1;
		last_iface_up = conn_mgr_get_if_by_index(idx);
	} else {
		ready_count -= 1;
		last_iface_down = conn_mgr_get_if_by_index(idx);
	}
}

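/**
 * @brief Scan for and respond to any iface state changes.
 *
 * Recomputes L4 readiness for every iface flagged CONN_MGR_IF_CHANGED, then raises
 * NET_EVENT_L4_CONNECTED or NET_EVENT_L4_DISCONNECTED if overall connectivity was
 * just gained or lost as a result.
 */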
static void conn_mgr_act_on_changes(void)
{
	int idx;
	int original_ready_count;
	bool is_ip_ready;
	bool is_ipv6_ready;
	bool is_ipv4_ready;
	bool is_l4_ready;
	bool is_oper_up;
	bool was_l4_ready;
	bool is_ignored;

	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	original_ready_count = ready_count;
	for (idx = 0; idx < ARRAY_SIZE(iface_states); idx++) {
		if (iface_states[idx] == 0) {
			/* This interface is not used */
			continue;
		}

		if (!(iface_states[idx] & CONN_MGR_IF_CHANGED)) {
			/* No changes on this iface */
			continue;
		}

		/* Clear the state-change flag */
		iface_states[idx] &= ~CONN_MGR_IF_CHANGED;

		/* Detect whether the iface is currently or was L4 ready */
		was_l4_ready = iface_states[idx] & CONN_MGR_IF_READY;
		is_ipv6_ready = iface_states[idx] & CONN_MGR_IF_IPV6_SET;
		is_ipv4_ready = iface_states[idx] & CONN_MGR_IF_IPV4_SET;
		is_oper_up = iface_states[idx] & CONN_MGR_IF_UP;
		is_ignored = iface_states[idx] & CONN_MGR_IF_IGNORED;
		is_ip_ready = is_ipv6_ready || is_ipv4_ready;
		is_l4_ready = is_oper_up && is_ip_ready && !is_ignored;

		/* Respond to changes to iface readiness */
		if (was_l4_ready != is_l4_ready) {
			/* Track the iface readiness change */
			conn_mgr_set_ready(idx, is_l4_ready);
		}
	}

	/* If the total number of ready ifaces changed, possibly send an event */
	if (ready_count != original_ready_count) {
		if (ready_count == 0) {
			/* We just lost connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, last_iface_down);
		} else if (original_ready_count == 0) {
			/* We just gained connectivity */
			net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, last_iface_up);
		}
	}

	k_mutex_unlock(&conn_mgr_lock);
}

/**
 * @brief Initialize the internal state flags for the given iface using its current status
 *
 * @param iface - iface to initialize from.
 */
static void conn_mgr_initial_state(struct net_if *iface)
{
	int idx = net_if_get_by_iface(iface) - 1;

	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	if (net_if_is_up(iface)) {
		NET_DBG("Iface %p UP", iface);
		iface_states[idx] |= CONN_MGR_IF_UP;
	}

	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV6)) {
		if (net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			NET_DBG("IPv6 addr set");
			iface_states[idx] |= CONN_MGR_IF_IPV6_SET;
		}
	}

	if (IS_ENABLED(CONFIG_NET_NATIVE_IPV4)) {
		if (net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			NET_DBG("IPv4 addr set");
			iface_states[idx] |= CONN_MGR_IF_IPV4_SET;
		}
	}

	iface_states[idx] |= CONN_MGR_IF_CHANGED;

	k_mutex_unlock(&conn_mgr_lock);
}

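/* net_if_foreach() callback used at startup to seed initial state for every iface */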
static void conn_mgr_init_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	conn_mgr_initial_state(iface);
}

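/**
 * @brief Main loop of the conn_mgr thread.
 *
 * Seeds initial state for all ifaces, then sleeps on conn_mgr_event_signal,
 * re-evaluating iface states each time the semaphore is given.
 */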
static void conn_mgr_handler(void)
{
	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	conn_mgr_conn_init();

	conn_mgr_init_events_handler();

	net_if_foreach(conn_mgr_init_cb, NULL);

	k_mutex_unlock(&conn_mgr_lock);

	NET_DBG("Connection Manager started");

	while (true) {
		/* Wait for changes */
		k_sem_take(&conn_mgr_event_signal, K_FOREVER);

		/* Respond to changes */
		conn_mgr_act_on_changes();
	}
}

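/* The monitor thread is created at build time but deliberately not auto-started
 * (SYS_FOREVER_MS delay); conn_mgr_init() starts it via k_thread_start() once the
 * state table has been zeroed.
 */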
K_THREAD_DEFINE(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_STACK_SIZE,
		(k_thread_entry_t)conn_mgr_handler, NULL, NULL, NULL,
		THREAD_PRIORITY, 0, SYS_FOREVER_MS);

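/**
 * @brief Resend the current connectivity status.
 *
 * Re-raises the most recent L4 event (NET_EVENT_L4_CONNECTED or
 * NET_EVENT_L4_DISCONNECTED) so that listeners registered after the
 * original event can learn the current state.
 */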
void conn_mgr_resend_status(void)
{
	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	if (ready_count == 0) {
		net_mgmt_event_notify(NET_EVENT_L4_DISCONNECTED, last_iface_down);
	} else {
		net_mgmt_event_notify(NET_EVENT_L4_CONNECTED, last_iface_up);
	}

	k_mutex_unlock(&conn_mgr_lock);
}

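/**
 * @brief Exclude an iface from L4 readiness tracking.
 *
 * An ignored iface never counts toward connectivity and cannot trigger
 * NET_EVENT_L4_CONNECTED / NET_EVENT_L4_DISCONNECTED events.
 *
 * @param iface - iface to ignore.
 */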
void conn_mgr_ignore_iface(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	if (!(iface_states[idx] & CONN_MGR_IF_IGNORED)) {
		/* Set ignored flag and mark state as changed */
		iface_states[idx] |= CONN_MGR_IF_IGNORED;
		iface_states[idx] |= CONN_MGR_IF_CHANGED;
		k_sem_give(&conn_mgr_event_signal);
	}

	k_mutex_unlock(&conn_mgr_lock);
}

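/**
 * @brief Resume L4 readiness tracking for an iface previously ignored
 *        with conn_mgr_ignore_iface.
 *
 * @param iface - iface to watch again.
 */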
void conn_mgr_watch_iface(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	if (iface_states[idx] & CONN_MGR_IF_IGNORED) {
		/* Clear ignored flag and mark state as changed */
		iface_states[idx] &= ~CONN_MGR_IF_IGNORED;
		iface_states[idx] |= CONN_MGR_IF_CHANGED;
		k_sem_give(&conn_mgr_event_signal);
	}

	k_mutex_unlock(&conn_mgr_lock);
}

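/**
 * @brief Check whether an iface is currently being ignored.
 *
 * @param iface - iface to check.
 * @return true if the iface is ignored, otherwise false.
 */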
bool conn_mgr_is_iface_ignored(struct net_if *iface)
{
	int idx = conn_mgr_get_index_for_if(iface);

	bool ret = false;

	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	ret = iface_states[idx] & CONN_MGR_IF_IGNORED;

	k_mutex_unlock(&conn_mgr_lock);

	return ret;
}

/**
 * @brief Check whether a provided iface uses the provided L2.
 *
 * @param iface - iface to check.
 * @param l2 - L2 to check. NULL will match offloaded ifaces.
 * @retval true if the iface uses the provided L2.
 * @retval false otherwise.
 */
static bool iface_uses_l2(struct net_if *iface, const struct net_l2 *l2)
{
	return (!l2 && net_if_offload(iface)) ||
	       (net_if_l2(iface) == l2);
}

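/**
 * @brief Ignore all ifaces that use the given L2.
 *
 * For example, conn_mgr_ignore_l2(&NET_L2_GET_NAME(ETHERNET)) ignores all
 * Ethernet ifaces.
 *
 * @param l2 - L2 whose ifaces should be ignored. NULL matches offloaded ifaces.
 */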
void conn_mgr_ignore_l2(const struct net_l2 *l2)
{
	/* conn_mgr_ignore_iface already locks the mutex, but we lock it here too
	 * so that all matching ifaces are updated simultaneously.
	 */
	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (iface_uses_l2(iface, l2)) {
			conn_mgr_ignore_iface(iface);
		}
	}

	k_mutex_unlock(&conn_mgr_lock);
}

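/**
 * @brief Watch all ifaces that use the given L2, undoing a previous
 *        conn_mgr_ignore_l2 call.
 *
 * @param l2 - L2 whose ifaces should be watched. NULL matches offloaded ifaces.
 */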
void conn_mgr_watch_l2(const struct net_l2 *l2)
{
	/* conn_mgr_watch_iface already locks the mutex, but we lock it here too
	 * so that all matching ifaces are updated simultaneously.
	 */
	k_mutex_lock(&conn_mgr_lock, K_FOREVER);

	STRUCT_SECTION_FOREACH(net_if, iface) {
		if (iface_uses_l2(iface, l2)) {
			conn_mgr_watch_iface(iface);
		}
	}

	k_mutex_unlock(&conn_mgr_lock);
}

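/* SYS_INIT hook: zero the state table, then start the monitor thread */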
static int conn_mgr_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(iface_states); i++) {
		iface_states[i] = 0;
	}

	k_thread_start(conn_mgr);

	return 0;
}

SYS_INIT(conn_mgr_init, APPLICATION, CONFIG_NET_CONNECTION_MANAGER_PRIORITY);