/*
 * Copyright (c) 2016 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_mgmt, CONFIG_NET_MGMT_EVENT_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>

#include <zephyr/sys/util.h>
#include <zephyr/sys/slist.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/debug/stack.h>

#include "net_private.h"

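/*
 * A single network management event as it is carried to the handlers.
 * When the event queue is enabled, the optional event info is copied
 * into a fixed-size buffer so the entry can be posted to a k_msgq by
 * value; otherwise only a pointer to the caller's data is kept.
 */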
struct mgmt_event_entry {
#if defined(CONFIG_NET_MGMT_EVENT_INFO)
#if defined(CONFIG_NET_MGMT_EVENT_QUEUE)
	uint8_t info[NET_EVENT_INFO_MAX_SIZE];
#else
	const void *info;
#endif /* CONFIG_NET_MGMT_EVENT_QUEUE */
	size_t info_length;
#endif /* CONFIG_NET_MGMT_EVENT_INFO */
	uint32_t event;
	struct net_if *iface;
};

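/* Event entries are posted by value to event_msgq below, which is defined
 * with a sizeof(uint32_t) buffer alignment; keep the entry size a multiple
 * of that.
 */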
BUILD_ASSERT((sizeof(struct mgmt_event_entry) % sizeof(uint32_t)) == 0,
	     "The structure must be a multiple of sizeof(uint32_t)");

struct mgmt_event_wait {
	struct k_sem sync_call;
	struct net_if *iface;
};

static K_MUTEX_DEFINE(net_mgmt_callback_lock);

#if defined(CONFIG_NET_MGMT_EVENT_THREAD)
K_KERNEL_STACK_DEFINE(mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE);

static struct k_work_q mgmt_work_q_obj;
#endif

static uint32_t global_event_mask;
static sys_slist_t event_callbacks = SYS_SLIST_STATIC_INIT(&event_callbacks);

/* Forward declaration of the function that actually dispatches the callbacks */
static void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt_event);

#if defined(CONFIG_NET_MGMT_EVENT_QUEUE)

static K_MUTEX_DEFINE(net_mgmt_event_lock);
/* Statically allocated event entry, used to avoid growing the caller thread's stack */
static struct mgmt_event_entry new_event;
K_MSGQ_DEFINE(event_msgq, sizeof(struct mgmt_event_entry),
	      CONFIG_NET_MGMT_EVENT_QUEUE_SIZE, sizeof(uint32_t));

static struct k_work_q *mgmt_work_q = COND_CODE_1(CONFIG_NET_MGMT_EVENT_SYSTEM_WORKQUEUE,
	(&k_sys_work_q), (&mgmt_work_q_obj));

static void mgmt_event_work_handler(struct k_work *work);
static K_WORK_DEFINE(mgmt_work, mgmt_event_work_handler);

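/*
 * Queue an event for deferred handling: the event (and a copy of the
 * optional info blob) is stored in the shared new_event entry under
 * net_mgmt_event_lock, posted to event_msgq, and the draining work item
 * is submitted to the configured work queue (either the dedicated
 * net_mgmt queue or the system work queue).
 */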
static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
				   const void *info, size_t length)
{
#ifndef CONFIG_NET_MGMT_EVENT_INFO
	ARG_UNUSED(info);
	ARG_UNUSED(length);
#endif /* CONFIG_NET_MGMT_EVENT_INFO */

	(void)k_mutex_lock(&net_mgmt_event_lock, K_FOREVER);

	memset(&new_event, 0, sizeof(struct mgmt_event_entry));

#ifdef CONFIG_NET_MGMT_EVENT_INFO
	if (info && length) {
		if (length <= NET_EVENT_INFO_MAX_SIZE) {
			memcpy(new_event.info, info, length);
			new_event.info_length = length;
		} else {
			NET_ERR("Event %u info length %zu > max size %zu",
				mgmt_event, length, NET_EVENT_INFO_MAX_SIZE);
			(void)k_mutex_unlock(&net_mgmt_event_lock);

			return;
		}
	}
#endif /* CONFIG_NET_MGMT_EVENT_INFO */

	new_event.event = mgmt_event;
	new_event.iface = iface;

	if (k_msgq_put(&event_msgq, &new_event,
		K_MSEC(CONFIG_NET_MGMT_EVENT_QUEUE_TIMEOUT)) != 0) {
		NET_WARN("Failed to push event (%u); "
			 "try increasing the 'CONFIG_NET_MGMT_EVENT_QUEUE_SIZE' "
			 "or 'CONFIG_NET_MGMT_EVENT_QUEUE_TIMEOUT' options.",
			 mgmt_event);
	}

	(void)k_mutex_unlock(&net_mgmt_event_lock);

	k_work_submit_to_queue(mgmt_work_q, &mgmt_work);
}

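/*
 * Work queue handler: drain every pending entry from event_msgq and
 * dispatch it to the registered handlers, yielding after each event so
 * that threads woken by the callbacks get a chance to run.
 */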
static void mgmt_event_work_handler(struct k_work *work)
{
	struct mgmt_event_entry mgmt_event;

	ARG_UNUSED(work);

	while (k_msgq_get(&event_msgq, &mgmt_event, K_NO_WAIT) == 0) {
		NET_DBG("Handling event, forwarding it to the relevant handlers");

		mgmt_run_callbacks(&mgmt_event);

		/* forcefully give up our timeslot, to give time to the callback */
		k_yield();
	}
}

#else

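/*
 * Without the event queue, handlers run synchronously in the context of
 * the thread that raised the event; the info pointer is handed to the
 * handlers as-is, without being copied.
 */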
static inline void mgmt_push_event(uint32_t event, struct net_if *iface,
				   const void *info, size_t length)
{
#ifndef CONFIG_NET_MGMT_EVENT_INFO
	ARG_UNUSED(info);
	ARG_UNUSED(length);
#endif /* CONFIG_NET_MGMT_EVENT_INFO */
	const struct mgmt_event_entry mgmt_event = {
#if defined(CONFIG_NET_MGMT_EVENT_INFO)
		.info = info,
		.info_length = length,
#endif /* CONFIG_NET_MGMT_EVENT_INFO */
		.event = event,
		.iface = iface,
	};

	mgmt_run_callbacks(&mgmt_event);
}

#endif /* CONFIG_NET_MGMT_EVENT_QUEUE */

static inline void mgmt_add_event_mask(uint32_t event_mask)
{
	global_event_mask |= event_mask;
}

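/*
 * Recompute global_event_mask as the OR of the masks of all static
 * handlers and all currently registered dynamic callbacks, so that
 * events nobody listens to can be discarded early.
 */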
static inline void mgmt_rebuild_global_event_mask(void)
{
	struct net_mgmt_event_callback *cb, *tmp;

	global_event_mask = 0U;

	STRUCT_SECTION_FOREACH(net_mgmt_event_static_handler, it) {
		mgmt_add_event_mask(it->event_mask);
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&event_callbacks, cb, tmp, node) {
		mgmt_add_event_mask(cb->event_mask);
	}
}

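/*
 * Return true if the layer, layer code and command bits of the event are
 * all present in global_event_mask, i.e. at least one handler may be
 * interested in it.
 */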
static inline bool mgmt_is_event_handled(uint32_t mgmt_event)
{
	return (((NET_MGMT_GET_LAYER(mgmt_event) &
		  NET_MGMT_GET_LAYER(global_event_mask)) ==
		 NET_MGMT_GET_LAYER(mgmt_event)) &&
		((NET_MGMT_GET_LAYER_CODE(mgmt_event) &
		  NET_MGMT_GET_LAYER_CODE(global_event_mask)) ==
		 NET_MGMT_GET_LAYER_CODE(mgmt_event)) &&
		((NET_MGMT_GET_COMMAND(mgmt_event) &
		  NET_MGMT_GET_COMMAND(global_event_mask)) ==
		 NET_MGMT_GET_COMMAND(mgmt_event)));
}

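/*
 * Run the dynamically registered callbacks that match the event. For a
 * synchronous waiter (NET_MGMT_SYNC_EVENT_BIT set) the raised event and
 * interface are recorded, the callback is removed from the list and its
 * semaphore is given; otherwise the handler is simply invoked.
 */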
static inline void mgmt_run_slist_callbacks(const struct mgmt_event_entry * const mgmt_event)
{
	sys_snode_t *prev = NULL;
	struct net_mgmt_event_callback *cb, *tmp;

	/* Human-readable layer numbering starts from 1, hence the increment */
	NET_DBG("Event layer %u code %u cmd %u",
		NET_MGMT_GET_LAYER(mgmt_event->event) + 1,
		NET_MGMT_GET_LAYER_CODE(mgmt_event->event),
		NET_MGMT_GET_COMMAND(mgmt_event->event));

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&event_callbacks, cb, tmp, node) {
		if (!(NET_MGMT_GET_LAYER(mgmt_event->event) ==
		      NET_MGMT_GET_LAYER(cb->event_mask)) ||
		    !(NET_MGMT_GET_LAYER_CODE(mgmt_event->event) ==
		      NET_MGMT_GET_LAYER_CODE(cb->event_mask)) ||
		    (NET_MGMT_GET_COMMAND(mgmt_event->event) &&
		     NET_MGMT_GET_COMMAND(cb->event_mask) &&
		     !(NET_MGMT_GET_COMMAND(mgmt_event->event) &
		       NET_MGMT_GET_COMMAND(cb->event_mask)))) {
			continue;
		}

#ifdef CONFIG_NET_MGMT_EVENT_INFO
		if (mgmt_event->info_length) {
			cb->info = (void *)mgmt_event->info;
			cb->info_length = mgmt_event->info_length;
		} else {
			cb->info = NULL;
			cb->info_length = 0;
		}
#endif /* CONFIG_NET_MGMT_EVENT_INFO */

		if (NET_MGMT_EVENT_SYNCHRONOUS(cb->event_mask)) {
			struct mgmt_event_wait *sync_data =
				CONTAINER_OF(cb->sync_call,
					     struct mgmt_event_wait, sync_call);

			if (sync_data->iface &&
			    sync_data->iface != mgmt_event->iface) {
				continue;
			}

			NET_DBG("Unlocking %p synchronous call", cb);

			cb->raised_event = mgmt_event->event;
			sync_data->iface = mgmt_event->iface;

			sys_slist_remove(&event_callbacks, prev, &cb->node);

			k_sem_give(cb->sync_call);
		} else {
			NET_DBG("Running callback %p : %p",
				cb, cb->handler);

			cb->handler(cb, mgmt_event->event, mgmt_event->iface);
			prev = &cb->node;
		}
	}

#ifdef CONFIG_NET_DEBUG_MGMT_EVENT_STACK
	log_stack_usage(&mgmt_work_q->thread);
#endif
}

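/*
 * Run the handlers registered at build time (iterable section populated
 * by NET_MGMT_REGISTER_EVENT_HANDLER) whose mask matches the event.
 */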
static inline void mgmt_run_static_callbacks(const struct mgmt_event_entry * const mgmt_event)
{
	STRUCT_SECTION_FOREACH(net_mgmt_event_static_handler, it) {
		if (!(NET_MGMT_GET_LAYER(mgmt_event->event) ==
		      NET_MGMT_GET_LAYER(it->event_mask)) ||
		    !(NET_MGMT_GET_LAYER_CODE(mgmt_event->event) ==
		      NET_MGMT_GET_LAYER_CODE(it->event_mask)) ||
		    (NET_MGMT_GET_COMMAND(mgmt_event->event) &&
		     NET_MGMT_GET_COMMAND(it->event_mask) &&
		     !(NET_MGMT_GET_COMMAND(mgmt_event->event) &
		       NET_MGMT_GET_COMMAND(it->event_mask)))) {
			continue;
		}

		it->handler(mgmt_event->event, mgmt_event->iface,
#ifdef CONFIG_NET_MGMT_EVENT_INFO
			    (void *)mgmt_event->info, mgmt_event->info_length,
#else
			    NULL, 0U,
#endif
			    it->user_data);
	}
}

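/* Dispatch one event to both the static handlers and the dynamic callback list. */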
static void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt_event)
{
	/* take the lock to prevent changes to the callback structure during use */
	(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);

	mgmt_run_static_callbacks(mgmt_event);
	mgmt_run_slist_callbacks(mgmt_event);

	(void)k_mutex_unlock(&net_mgmt_callback_lock);
}

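/*
 * Register a temporary synchronous callback for the given event mask and
 * block on its semaphore until a matching event is raised or the timeout
 * expires. On success the raised event, interface and (optionally) the
 * event info are reported back to the caller.
 */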
static int mgmt_event_wait_call(struct net_if *iface,
				uint32_t mgmt_event_mask,
				uint32_t *raised_event,
				struct net_if **event_iface,
				const void **info,
				size_t *info_length,
				k_timeout_t timeout)
{
	struct mgmt_event_wait sync_data = {
		.sync_call = Z_SEM_INITIALIZER(sync_data.sync_call, 0, 1),
	};
	struct net_mgmt_event_callback sync = {
		.sync_call = &sync_data.sync_call,
		.event_mask = mgmt_event_mask | NET_MGMT_SYNC_EVENT_BIT,
	};
	int ret;

	if (iface) {
		sync_data.iface = iface;
	}

	NET_DBG("Synchronous event 0x%08x wait %p", sync.event_mask, &sync);

	net_mgmt_add_event_callback(&sync);

	ret = k_sem_take(sync.sync_call, timeout);
	if (ret < 0) {
		if (ret == -EAGAIN) {
			ret = -ETIMEDOUT;
		}

		net_mgmt_del_event_callback(&sync);
		return ret;
	}

	if (raised_event) {
		*raised_event = sync.raised_event;
	}

	if (event_iface) {
		*event_iface = sync_data.iface;
	}

#ifdef CONFIG_NET_MGMT_EVENT_INFO
	if (info) {
		*info = sync.info;

		if (info_length) {
			*info_length = sync.info_length;
		}
	}
#endif /* CONFIG_NET_MGMT_EVENT_INFO */

	return ret;
}

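/* Register a dynamic event callback and widen the global event mask accordingly. */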
void net_mgmt_add_event_callback(struct net_mgmt_event_callback *cb)
{
	NET_DBG("Adding event callback %p", cb);

	(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);

	/* Remove the callback if it already exists to avoid creating a loop in the list */
	sys_slist_find_and_remove(&event_callbacks, &cb->node);

	sys_slist_prepend(&event_callbacks, &cb->node);

	mgmt_add_event_mask(cb->event_mask);

	(void)k_mutex_unlock(&net_mgmt_callback_lock);
}

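/* Unregister a dynamic event callback and rebuild the global event mask. */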
void net_mgmt_del_event_callback(struct net_mgmt_event_callback *cb)
{
	NET_DBG("Deleting event callback %p", cb);

	(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);

	sys_slist_find_and_remove(&event_callbacks, &cb->node);

	mgmt_rebuild_global_event_mask();

	(void)k_mutex_unlock(&net_mgmt_callback_lock);
}

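/*
 * Public entry point for raising an event: the event is dropped early if
 * no handler is interested in it, otherwise it is pushed to the handlers
 * (queued or run directly, depending on CONFIG_NET_MGMT_EVENT_QUEUE).
 */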
void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
				     const void *info, size_t length)
{
	if (mgmt_is_event_handled(mgmt_event)) {
		/* Human-readable layer numbering starts from 1, hence the increment */
		NET_DBG("Notifying Event layer %u code %u type %u",
			NET_MGMT_GET_LAYER(mgmt_event) + 1,
			NET_MGMT_GET_LAYER_CODE(mgmt_event),
			NET_MGMT_GET_COMMAND(mgmt_event));

		mgmt_push_event(mgmt_event, iface, info, length);
	}
}

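/*
 * Blocking wait helpers: net_mgmt_event_wait() waits for an event on any
 * interface, while net_mgmt_event_wait_on_iface() restricts the wait to a
 * specific interface.
 */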
int net_mgmt_event_wait(uint32_t mgmt_event_mask,
			uint32_t *raised_event,
			struct net_if **iface,
			const void **info,
			size_t *info_length,
			k_timeout_t timeout)
{
	return mgmt_event_wait_call(NULL, mgmt_event_mask,
				    raised_event, iface, info, info_length,
				    timeout);
}

int net_mgmt_event_wait_on_iface(struct net_if *iface,
				 uint32_t mgmt_event_mask,
				 uint32_t *raised_event,
				 const void **info,
				 size_t *info_length,
				 k_timeout_t timeout)
{
	NET_ASSERT(NET_MGMT_ON_IFACE(mgmt_event_mask));
	NET_ASSERT(iface);

	return mgmt_event_wait_call(iface, mgmt_event_mask,
				    raised_event, NULL, info, info_length,
				    timeout);
}

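/*
 * Initialize the event core: build the initial global event mask and,
 * when a dedicated thread is configured, start the "net_mgmt" work queue
 * that drains the event message queue.
 */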
void net_mgmt_event_init(void)
{
	mgmt_rebuild_global_event_mask();

#if defined(CONFIG_NET_MGMT_EVENT_THREAD)
#if defined(CONFIG_NET_MGMT_THREAD_PRIO_CUSTOM)
#define THREAD_PRIORITY CONFIG_NET_MGMT_THREAD_PRIORITY
#elif defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
/* Lowest priority cooperative thread */
#define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
#else
#define THREAD_PRIORITY K_PRIO_PREEMPT(CONFIG_NUM_PREEMPT_PRIORITIES - 1)
#endif
	struct k_work_queue_config q_cfg = {
		.name = "net_mgmt",
		.no_yield = false,
	};

	k_work_queue_init(&mgmt_work_q_obj);
	k_work_queue_start(&mgmt_work_q_obj, mgmt_stack,
			   K_KERNEL_STACK_SIZEOF(mgmt_stack),
			   THREAD_PRIORITY, &q_cfg);

	NET_DBG("Net MGMT initialized: queue of %u entries, stack size of %u",
		CONFIG_NET_MGMT_EVENT_QUEUE_SIZE,
		CONFIG_NET_MGMT_EVENT_STACK_SIZE);
#endif /* CONFIG_NET_MGMT_EVENT_THREAD */
}