/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * whether they are raised directly (as poll signals) or by kernel objects
 * and other kernel constructs.
 */
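
/*
 * Illustrative usage (a sketch, not part of this file; "my_sem" and
 * "my_signal" are assumed application-defined objects):
 *
 *	struct k_poll_event events[2];
 *
 *	k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 *	k_poll_event_init(&events[1], K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 *	if (k_poll(events, 2, K_MSEC(100)) == 0) {
 *		// at least one events[i].state is now non-zero
 *	}
 */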

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock.  Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section).  Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}
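
/*
 * Note: as an alternative to calling k_poll_event_init() at run time,
 * events can also be initialized with the K_POLL_EVENT_INITIALIZER()
 * macro from <zephyr/kernel.h>, which fills in the same fields.
 */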

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		*state = K_POLL_STATE_PIPE_DATA_AVAILABLE;
		return true;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

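/* Recover the thread that owns a k_poll()-style poller; for that mode
 * the z_poller is embedded directly in its struct k_thread.
 */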
static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

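/* Insert an event into a kernel object's poll_events list, keeping the
 * list ordered from highest- to lowest-priority poller thread so that
 * higher-priority waiters are signaled first. The tail check is a fast
 * path for the common append case. Must be called with the lock held.
 */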
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		add_event(&event->pipe->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

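/* Note: the lock is deliberately dropped and re-taken between entries
 * so that other CPUs and ISRs are not locked out for the whole walk;
 * this is the latency-control pattern mentioned in the lock comment at
 * the top of this file.
 */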
/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

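/* Walk the event array once: events whose condition is already met are
 * marked ready immediately; otherwise, unless just_check is set, they
 * are registered on their object's poll_events list. Returns how many
 * registrations were made, so the caller can later clear exactly that
 * many.
 */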
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* The event condition is not met, and either the
			 * caller only asked for a check (just_check) or the
			 * poller has already stopped polling. No action
			 * needed.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

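	/* All pollers pend on this single, shared dummy wait queue; they
	 * are woken individually by signal_poller() (or by timeout), not
	 * by a wakeup on the queue itself.
	 */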
	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (K_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	if (K_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (K_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (K_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			K_OOPS(K_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
		case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	K_OOPS(1);
}
#include <zephyr/syscalls/k_poll_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is in neither poll nor triggered mode.
			 * No action needed.
			 */
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

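/* Called by kernel objects (semaphores, queues, message queues, ...)
 * when they become ready: dequeues and signals the highest-priority
 * registered event, if any. Returns true if an event was signaled.
 */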
bool z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;
	k_spinlock_key_t key = k_spin_lock(&lock);

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}

	k_spin_unlock(&lock, key);

	return (poll_event != NULL);
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* sig->result is left uninitialized */
	k_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <zephyr/syscalls/k_poll_signal_init_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <zephyr/syscalls/k_poll_signal_check_mrsh.c>
#endif /* CONFIG_USERSPACE */

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	z_reschedule(&lock, key);
	return rc;
}
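
/*
 * Illustrative signal flow (a sketch; "sig" is an assumed
 * application-defined object):
 *
 *	struct k_poll_signal sig;
 *
 *	k_poll_signal_init(&sig);
 *	...
 *	k_poll_signal_raise(&sig, 0x42);	// wakes any poller
 *	...
 *	unsigned int signaled;
 *	int result;
 *
 *	k_poll_signal_check(&sig, &signaled, &result);
 *	k_poll_signal_reset(&sig);		// re-arm for the next round
 */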

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <zephyr/syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <zephyr/syscalls/k_poll_signal_reset_mrsh.c>

#endif /* CONFIG_USERSPACE */

static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If the poller mode was never set to MODE_TRIGGERED, then
	 * k_work_poll_submit_to_queue() already cleared the event
	 * registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

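/* Timeout handler; runs from the system clock's timeout processing.
 * Stop polling and hand the work to its queue with -EAGAIN as the
 * poll result.
 */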
static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

extern int z_work_submit_to_queue(struct k_work_q *queue,
				  struct k_work *work);

static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		z_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is still waiting for events. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove the timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if an event arrives while we are
		 * clearing the registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered by
	 * k_work_poll_submit_to_queue(), currently executing, or pending.
	 * Only in the last case would we have a chance to cancel it, but
	 * unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}

void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}
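
/*
 * Illustrative triggered-work flow (a sketch; "my_handler" and the
 * "events" array are assumed application code):
 *
 *	struct k_work_poll twork;
 *
 *	k_work_poll_init(&twork, my_handler);
 *	k_work_poll_submit(&twork, events, ARRAY_SIZE(events), K_FOREVER);
 *	// my_handler() runs on the system workqueue once an event fires
 */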

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
					work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
				work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save the list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear the result. */
	work->poll_result = -EINPROGRESS;

	/* Register the events. */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * The poller is still polling:
		 * no event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Set up a timeout if such action is requested. */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now on, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * A K_NO_WAIT timeout was specified, or at least one event was
	 * ready at registration time or has changed state since
	 * registration. In that case the poller mode was never set to
	 * MODE_TRIGGERED, so no event could have submitted the work to
	 * the workqueue yet.
	 */

	/*
	 * If the poller is still polling, no watched event occurred. This
	 * means we reached here due to the K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}

int k_work_poll_submit(struct k_work_poll *work,
		       struct k_poll_event *events,
		       int num_events,
		       k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
					      events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}