/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file event objects library
 *
 * Event objects are used to signal one or more threads that a custom set of
 * events has occurred. Threads wait on event objects until another thread or
 * ISR posts the desired set of events to the event object. Each time events
 * are posted to an event object, all threads waiting on that event object are
 * processed to determine if there is a match. All threads whose wait
 * conditions match the current set of events belonging to the event object
 * are awakened.
 *
 * Threads waiting on an event object can choose to wake once either any or
 * all of their desired events have been posted to the event object.
 *
 * @brief Kernel event object
 */
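
/*
 * A minimal usage sketch of the API implemented below (illustrative only;
 * the event object name and the event bit definitions are hypothetical and
 * not part of this file):
 *
 *	#define EVT_RX_DONE BIT(0)
 *	#define EVT_TX_DONE BIT(1)
 *
 *	K_EVENT_DEFINE(my_event);
 *
 *	void producer(void)
 *	{
 *		(void)k_event_post(&my_event, EVT_RX_DONE);
 *	}
 *
 *	void consumer(void)
 *	{
 *		uint32_t evts;
 *
 *		evts = k_event_wait(&my_event, EVT_RX_DONE | EVT_TX_DONE,
 *				    false, K_FOREVER);
 *	}
 */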

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/init.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
/* private kernel APIs */
#include <wait_q.h>
#include <ksched.h>

#define K_EVENT_WAIT_ANY 0x00 /* Wait for any events */
#define K_EVENT_WAIT_ALL 0x01 /* Wait for all events */
#define K_EVENT_WAIT_MASK 0x01

#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */

struct event_walk_data {
	struct k_thread *head;
	uint32_t events;
};

#ifdef CONFIG_OBJ_CORE_EVENT
static struct k_obj_type obj_type_event;
#endif

void z_impl_k_event_init(struct k_event *event)
{
	event->events = 0;
	event->lock = (struct k_spinlock) {};

	SYS_PORT_TRACING_OBJ_INIT(k_event, event);

	z_waitq_init(&event->wait_q);

	z_object_init(event);

#ifdef CONFIG_OBJ_CORE_EVENT
	k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
#endif
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_init(struct k_event *event)
{
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
	z_impl_k_event_init(event);
}
#include <syscalls/k_event_init_mrsh.c>
#endif

/**
 * @brief Determine if the desired set of events has been satisfied
 *
 * This routine determines if the current set of events satisfies the desired
 * set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then at least
 * all the desired events must be present to satisfy the request. If @a
 * wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY.
 * In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the
 * desired events are present in the current set of events.
 */
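/*
 * A worked example with hypothetical values: given desired = 0x05 and
 * current = 0x04, the match is 0x04, so a K_EVENT_WAIT_ANY request is
 * satisfied (0x04 != 0) while a K_EVENT_WAIT_ALL request is not
 * (0x04 != 0x05).
 */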
static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
				    unsigned int wait_condition)
{
	uint32_t match = current & desired;

	if (wait_condition == K_EVENT_WAIT_ALL) {
		return match == desired;
	}

	/* wait_condition assumed to be K_EVENT_WAIT_ANY */

	return match != 0;
}

static int event_walk_op(struct k_thread *thread, void *data)
{
	unsigned int wait_condition;
	struct event_walk_data *event_data = data;

	wait_condition = thread->event_options & K_EVENT_WAIT_MASK;

	if (are_wait_conditions_met(thread->events, event_data->events,
				    wait_condition)) {

		/*
		 * Events create a list of threads to wake up. We do
		 * not want z_thread_timeout to wake these threads; they
		 * will be woken up by k_event_post_internal once they
		 * have been processed.
		 */
		thread->no_wake_on_timeout = true;

		/*
		 * The wait conditions have been satisfied. Add this
		 * thread to the list of threads to unpend.
		 */
		thread->next_event_link = event_data->head;
		event_data->head = thread;
		z_abort_timeout(&thread->base.timeout);
	}

	return 0;
}

static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
				      uint32_t events_mask)
{
	k_spinlock_key_t key;
	struct k_thread *thread;
	struct event_walk_data data;
	uint32_t previous_events;

	data.head = NULL;
	key = k_spin_lock(&event->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
					events_mask);

	previous_events = event->events & events_mask;
	events = (event->events & ~events_mask) |
		 (events & events_mask);
	event->events = events;
	data.events = events;
	/*
	 * Posting an event has the potential to wake multiple pended threads.
	 * It is desirable to unpend all affected threads simultaneously. This
	 * is done in three steps:
	 *
	 * 1. Walk the waitq and create a linked list of threads to unpend.
	 * 2. Unpend each of the threads in the linked list.
	 * 3. Ready each of the threads in the linked list.
	 */

	z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);

	if (data.head != NULL) {
		thread = data.head;
		struct k_thread *next;
		do {
			arch_thread_return_value_set(thread, 0);
			thread->events = events;
			next = thread->next_event_link;
			z_sched_wake_thread(thread, false);
			thread = next;
		} while (thread != NULL);
	}

	z_reschedule(&event->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
				       events_mask);

	return previous_events;
}

uint32_t z_impl_k_event_post(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, events);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_post(event, events);
}
#include <syscalls/k_event_post_mrsh.c>
#endif

uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, ~0);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set(event, events);
}
#include <syscalls/k_event_set_mrsh.c>
#endif

uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	return k_event_post_internal(event, events, events_mask);
}
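
/*
 * A worked example with hypothetical values: if the event object currently
 * holds 0x03, k_event_set_masked(event, 0x04, 0x06) rewrites only bits 1-2,
 * leaving the tracked events at 0x05 and returning the previous masked
 * value, 0x02.
 */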

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set_masked(event, events, events_mask);
}
#include <syscalls/k_event_set_masked_mrsh.c>
#endif

uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, 0, events);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_clear(struct k_event *event, uint32_t events)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_clear(event, events);
}
#include <syscalls/k_event_clear_mrsh.c>
#endif

static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
				      unsigned int options, k_timeout_t timeout)
{
	uint32_t rv = 0;
	unsigned int wait_condition;
	struct k_thread *thread;

	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
					options, timeout);

	if (events == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
		return 0;
	}

	wait_condition = options & K_EVENT_WAIT_MASK;
	thread = k_sched_current_thread_query();

	k_spinlock_key_t key = k_spin_lock(&event->lock);

	if (options & K_EVENT_WAIT_RESET) {
		event->events = 0;
	}

	/* Test if the wait conditions have already been met. */

	if (are_wait_conditions_met(events, event->events, wait_condition)) {
		rv = event->events;

		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/* Match conditions have not been met. */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/*
	 * The caller must pend to wait for the match. Save the desired
	 * set of events in the k_thread structure.
	 */

	thread->events = events;
	thread->event_options = options;

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
					   options, timeout);

	if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
		/* Retrieve the set of events that woke the thread */
		rv = thread->events;
	}

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
				       events, rv & events);

	return rv & events;
}
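
/*
 * A worked example with hypothetical values: waiting for 0x05 with the ANY
 * condition while the event object holds 0x06 succeeds and returns 0x04,
 * i.e. only the desired bits that were actually set.
 */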

/**
 * Wait for any of the specified events
 */
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;

	return k_event_wait_internal(event, events, options, timeout);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_mrsh.c>
#endif

/**
 * Wait for all of the specified events
 */
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
				 : K_EVENT_WAIT_ALL;

	return k_event_wait_internal(event, events, options, timeout);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_all_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_EVENT
static int init_event_obj_core_list(void)
{
	/* Initialize event object type */

	z_obj_type_init(&obj_type_event, K_OBJ_TYPE_EVENT_ID,
			offsetof(struct k_event, obj_core));

	/* Initialize and link statically defined event objects */

	STRUCT_SECTION_FOREACH(k_event, event) {
		k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
	}

	return 0;
}

SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif