/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file event objects library
 *
 * Event objects are used to signal one or more threads that a custom set of
 * events has occurred. Threads wait on event objects until another thread or
 * ISR posts the desired set of events to the event object. Each time events
 * are posted to an event object, all threads waiting on that event object are
 * processed to determine if there is a match. All threads whose wait
 * conditions match the current set of events belonging to the event object
 * are awakened.
 *
 * Threads waiting on an event object may choose to wake once either any or
 * all of the events they desire have been posted to the event object.
 *
 * @brief Kernel event object
 */
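
/*
 * Example usage (an illustrative sketch, not part of this implementation):
 * a consumer thread waits for either of two hypothetical event bits while a
 * producer posts one of them. The names EVT_RX_DONE, EVT_TX_DONE, my_event,
 * consumer() and producer() are assumptions made for this example only.
 *
 *	#define EVT_RX_DONE BIT(0)
 *	#define EVT_TX_DONE BIT(1)
 *
 *	K_EVENT_DEFINE(my_event);
 *
 *	void consumer(void)
 *	{
 *		// Wake when any of the two events is posted; do not reset
 *		// the event object first, and wait as long as necessary.
 *		uint32_t got = k_event_wait(&my_event,
 *					    EVT_RX_DONE | EVT_TX_DONE,
 *					    false, K_FOREVER);
 *	}
 *
 *	void producer(void)
 *	{
 *		// May also be called from an ISR.
 *		k_event_post(&my_event, EVT_RX_DONE);
 *	}
 */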

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
/* private kernel APIs */
#include <wait_q.h>
#include <ksched.h>

#define K_EVENT_WAIT_ANY      0x00   /* Wait for any events */
#define K_EVENT_WAIT_ALL      0x01   /* Wait for all events */
#define K_EVENT_WAIT_MASK     0x01

#define K_EVENT_OPTION_RESET  0x02   /* Reset events prior to waiting */
#define K_EVENT_OPTION_CLEAR  0x04   /* Clear events that are received */
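
/*
 * These option bits combine by OR. For example (illustrative), a waiter that
 * blocks until all requested events are present and atomically consumes them
 * on wakeup encodes its options as (K_EVENT_WAIT_ALL | K_EVENT_OPTION_CLEAR),
 * as z_impl_k_event_wait_all_safe() does below.
 */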

struct event_walk_data {
	struct k_thread  *head;
	uint32_t events;
	uint32_t clear_events;
};

#ifdef CONFIG_OBJ_CORE_EVENT
static struct k_obj_type obj_type_event;
#endif /* CONFIG_OBJ_CORE_EVENT */

void z_impl_k_event_init(struct k_event *event)
{
	__ASSERT_NO_MSG(!arch_is_in_isr());

	event->events = 0;
	event->lock = (struct k_spinlock) {};

	SYS_PORT_TRACING_OBJ_INIT(k_event, event);

	z_waitq_init(&event->wait_q);

	k_object_init(event);

#ifdef CONFIG_OBJ_CORE_EVENT
	k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
#endif /* CONFIG_OBJ_CORE_EVENT */
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_init(struct k_event *event)
{
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
	z_impl_k_event_init(event);
}
#include <zephyr/syscalls/k_event_init_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 * @brief Determine the set of events that have been satisfied
 *
 * This routine determines whether the current set of events satisfies the
 * desired set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then all
 * of the desired events must be present to satisfy the request. If @a
 * wait_condition is not K_EVENT_WAIT_ALL, it is treated as K_EVENT_WAIT_ANY.
 * In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the
 * desired events are present in the current set of events.
 *
 * @return event bits that satisfy the wait condition, or zero
 */
static uint32_t are_wait_conditions_met(uint32_t desired, uint32_t current,
					unsigned int wait_condition)
{
	uint32_t match = current & desired;

	if ((wait_condition == K_EVENT_WAIT_ALL) && (match != desired)) {
		/* special case for K_EVENT_WAIT_ALL */
		return 0;
	}

	/* return the matched events for any wait condition */
	return match;
}
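
/*
 * Worked example (illustrative): with desired = 0x05 and current = 0x04,
 * match = 0x04. Under K_EVENT_WAIT_ANY the function returns 0x04, since a
 * partial match satisfies the request; under K_EVENT_WAIT_ALL it returns 0,
 * since 0x04 != 0x05.
 */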

static int event_walk_op(struct k_thread *thread, void *data)
{
	uint32_t match;
	unsigned int wait_condition;
	struct event_walk_data *event_data = data;

	wait_condition = thread->event_options & K_EVENT_WAIT_MASK;

	match = are_wait_conditions_met(thread->events, event_data->events,
					wait_condition);
	if (match != 0) {
		/*
		 * The wait conditions have been satisfied. So, set the
		 * received events and then add this thread to the list
		 * of threads to unpend.
		 *
		 * NOTE: thread event options can consume an event
		 */
		thread->events = match;
		if (thread->event_options & K_EVENT_OPTION_CLEAR) {
			event_data->clear_events |= match;
		}
		thread->next_event_link = event_data->head;
		event_data->head = thread;

		/*
		 * Events create a list of threads to wake up. We do
		 * not want z_thread_timeout to wake these threads; they
		 * will be woken up by k_event_post_internal once they
		 * have been processed.
		 */
		thread->no_wake_on_timeout = true;
#ifdef CONFIG_SYS_CLOCK_EXISTS
		z_abort_timeout(&thread->base.timeout);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
	}

	return 0;
}

static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
				      uint32_t events_mask)
{
	k_spinlock_key_t  key;
	struct k_thread  *thread;
	struct event_walk_data data;
	uint32_t previous_events;

	data.head = NULL;
	key = k_spin_lock(&event->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
					events_mask);

	previous_events = event->events & events_mask;
	events = (event->events & ~events_mask) |
		 (events & events_mask);

	/*
	 * Posting an event has the potential to wake multiple pended threads.
	 * It is desirable to unpend all affected threads simultaneously. This
	 * is done in three steps:
	 *
	 * 1. Walk the waitq and create a linked list of threads to unpend.
	 * 2. Unpend each of the threads in the linked list
	 * 3. Ready each of the threads in the linked list
	 */

	data.events = events;
	data.clear_events = 0;
	z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);

	if (data.head != NULL) {
		thread = data.head;
		struct k_thread *next;
		do {
			arch_thread_return_value_set(thread, 0);
			next = thread->next_event_link;
			z_sched_wake_thread(thread, false);
			thread = next;
		} while (thread != NULL);
	}

	/* stash any events not consumed */
	event->events = data.events & ~data.clear_events;

	z_reschedule(&event->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
				       events_mask);

	return previous_events;
}

uint32_t z_impl_k_event_post(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, events);
}
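
/*
 * Illustrative example: if event->events is 0x01, then (assuming no waiting
 * thread consumes the posted events) k_event_post(event, 0x02) leaves
 * event->events at 0x03 and returns 0x00, the prior value of the posted bits.
 */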

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_post(event, events);
}
#include <zephyr/syscalls/k_event_post_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, ~0);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set(event, events);
}
#include <zephyr/syscalls/k_event_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	return k_event_post_internal(event, events, events_mask);
}
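
/*
 * Illustrative example: if event->events is 0x06, then (assuming no waiter
 * consumes the result) k_event_set_masked(event, 0x01, 0x03) yields
 * (0x06 & ~0x03) | (0x01 & 0x03) = 0x05, and returns 0x02, the prior value
 * of the masked bits. k_event_set() is the special case where the mask is
 * ~0, i.e. the posted value replaces the entire event set.
 */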

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set_masked(event, events, events_mask);
}
#include <zephyr/syscalls/k_event_set_masked_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, 0, events);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_clear(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_clear(event, events);
}
#include <zephyr/syscalls/k_event_clear_mrsh.c>
#endif /* CONFIG_USERSPACE */
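
/*
 * Illustrative example: if event->events is 0x07, k_event_clear(event, 0x02)
 * posts a value of 0 under mask 0x02, leaving event->events at 0x05 and
 * returning 0x02, the prior value of the cleared bits.
 */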

static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
				      unsigned int options, k_timeout_t timeout)
{
	uint32_t  rv = 0;
	unsigned int  wait_condition;
	struct k_thread  *thread;

	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
					options, timeout);

	if (events == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
		return 0;
	}

	wait_condition = options & K_EVENT_WAIT_MASK;
	thread = k_sched_current_thread_query();

	k_spinlock_key_t  key = k_spin_lock(&event->lock);

	if (options & K_EVENT_OPTION_RESET) {
		event->events = 0;
	}

	/* Test if the wait conditions have already been met. */
	rv = are_wait_conditions_met(events, event->events, wait_condition);
	if (rv != 0) {
		/* clear the events that are matched */
		if (options & K_EVENT_OPTION_CLEAR) {
			event->events &= ~rv;
		}

		k_spin_unlock(&event->lock, key);
		goto out;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/*
	 * The caller must pend to wait for the match. Save the desired
	 * set of events in the k_thread structure.
	 */

	thread->events = events;
	thread->event_options = options;

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
					   options, timeout);

	if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
		/* Retrieve the set of events that woke the thread */
		rv = thread->events;
	}

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, rv);

	return rv;
}

/**
 * Wait for any of the specified events
 */
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? K_EVENT_OPTION_RESET : 0;

	return k_event_wait_internal(event, events, options, timeout);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 * Wait for all of the specified events
 */
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? (K_EVENT_OPTION_RESET | K_EVENT_WAIT_ALL)
				 : K_EVENT_WAIT_ALL;

	return k_event_wait_internal(event, events, options, timeout);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_all_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_wait_safe(struct k_event *event, uint32_t events,
				  bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? (K_EVENT_OPTION_CLEAR | K_EVENT_OPTION_RESET)
				 : K_EVENT_OPTION_CLEAR;

	return k_event_wait_internal(event, events, options, timeout);
}
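
/*
 * Illustrative example: a thread calls k_event_wait_safe(&evt, 0x01, false,
 * K_FOREVER). Because K_EVENT_OPTION_CLEAR is set, a matching
 * k_event_post(&evt, 0x01) wakes the thread and bit 0 is not stashed in the
 * event object afterwards, whereas after a plain k_event_wait() the bit
 * would remain set until explicitly cleared.
 */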

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_safe(struct k_event *event, uint32_t events,
				  bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_safe(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_safe_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_wait_all_safe(struct k_event *event, uint32_t events,
				      bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? (K_EVENT_OPTION_CLEAR |
				    K_EVENT_OPTION_RESET | K_EVENT_WAIT_ALL)
				 : (K_EVENT_OPTION_CLEAR | K_EVENT_WAIT_ALL);

	return k_event_wait_internal(event, events, options, timeout);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_all_safe(struct k_event *event, uint32_t events,
				      bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_all_safe(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_all_safe_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_OBJ_CORE_EVENT
static int init_event_obj_core_list(void)
{
	/* Initialize event object type */

	z_obj_type_init(&obj_type_event, K_OBJ_TYPE_EVENT_ID,
			offsetof(struct k_event, obj_core));

	/* Initialize and link statically defined events */

	STRUCT_SECTION_FOREACH(k_event, event) {
		k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
	}

	return 0;
}

SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_EVENT */