/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * either events triggered directly, or from kernel objects or other kernel
 * constructs.
 */
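
/*
 * Illustrative usage (a sketch; `my_sem` and `my_fifo` are assumed to be a
 * semaphore and a FIFO defined elsewhere by the caller): a thread waits on
 * both objects at once and wakes when either becomes ready.
 *
 *	struct k_poll_event events[2] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_fifo, 0),
 *	};
 *
 *	int rc = k_poll(events, 2, K_MSEC(100));
 */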

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock. Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

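/* A poller is in MODE_POLL while its thread is blocked in k_poll(), in
 * MODE_TRIGGERED while it is owned by a k_work_poll item waiting for an
 * event, and in MODE_NONE when a signaled event must neither wake a
 * thread nor submit work.
 */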
enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}
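
/*
 * Illustrative runtime initialization (a sketch; `my_msgq` is an assumed
 * message queue defined elsewhere), equivalent to using the static event
 * initializer shown above:
 *
 *	struct k_poll_event event;
 *
 *	k_poll_event_init(&event, K_POLL_TYPE_MSGQ_DATA_AVAILABLE,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_msgq);
 */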

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		if (!ring_buf_is_empty(&event->pipe->buf)) {
			*state = K_POLL_STATE_PIPE_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

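/* Each pollable object keeps its poll_events list sorted by the priority
 * of the pollers' threads, highest priority first, so that when the object
 * becomes ready the highest-priority waiter is signaled first.
 */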
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		add_event(&event->pipe->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
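	/* Drop and retake the lock after each event so that clearing a
	 * long event array does not produce one long critical section.
	 */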
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* The event is not ready, and either the caller only
			 * asked for a check, or the poller already stopped
			 * polling because an earlier event was ready. No
			 * action needed.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
				     state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}
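
/*
 * Illustrative caller-side handling of the k_poll() result (a sketch;
 * `events` is the array registered above): 0 means at least one event is
 * ready, -EAGAIN means the timeout expired, and -EINTR means a registered
 * event was cancelled. Ready events must be rearmed by the caller.
 *
 *	int rc = k_poll(events, ARRAY_SIZE(events), K_MSEC(100));
 *
 *	if (rc == 0 && events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *		k_sem_take(events[0].sem, K_NO_WAIT);
 *		events[0].state = K_POLL_STATE_NOT_READY;
 *	}
 */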

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (K_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	if (K_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (K_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (K_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			K_OOPS(K_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
		case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	K_OOPS(1);
}
#include <zephyr/syscalls/k_poll_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is in neither poll nor triggered mode.
			 * No action needed.
			 */
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

bool z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;
	k_spinlock_key_t key = k_spin_lock(&lock);

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}

	k_spin_unlock(&lock, key);

	return (poll_event != NULL);
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* signal->result is left uninitialized */
	k_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <zephyr/syscalls/k_poll_signal_init_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <zephyr/syscalls/k_poll_signal_check_mrsh.c>
#endif /* CONFIG_USERSPACE */

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	z_reschedule(&lock, key);
	return rc;
}
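
/*
 * Illustrative signal lifecycle (a sketch; `my_signal`, `signaled` and
 * `result` are assumed to be defined by the caller): one context raises
 * the signal, and the thread polling on K_POLL_TYPE_SIGNAL consumes and
 * rearms it.
 *
 *	k_poll_signal_init(&my_signal);
 *	...
 *	k_poll_signal_raise(&my_signal, 0x1337);
 *	...
 *	k_poll_signal_check(&my_signal, &signaled, &result);
 *	k_poll_signal_reset(&my_signal);
 */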

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <zephyr/syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <zephyr/syscalls/k_poll_signal_reset_mrsh.c>

#endif /* CONFIG_USERSPACE */

static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
		CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If the poller mode was not set, k_work_poll_submit_to_queue()
	 * has already cleared the event registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

extern int z_work_submit_to_queue(struct k_work_q *queue,
				  struct k_work *work);

static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		z_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is still waiting for events. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove the timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution in case an event arrives while we
		 * are clearing the registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered by
	 * k_work_poll_submit_to_queue(), is executing, or is pending on a
	 * workqueue. Only in the last case would we have a chance to cancel
	 * it, but unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}

void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
							   work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
						   work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * The K_NO_WAIT timeout was specified, or at least one event was
	 * ready at registration time or changed state since registration.
	 * Since the poller mode was never set to MODE_TRIGGERED, the work
	 * was not submitted to the workqueue.
	 */

	/*
	 * If the poller is still polling, no watched event occurred. This
	 * means we reached here due to the K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}

int k_work_poll_submit(struct k_work_poll *work,
		       struct k_poll_event *events,
		       int num_events,
		       k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
					      events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}
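
/*
 * Illustrative triggered-work pattern (a sketch; `my_work`, `my_handler`
 * and `my_events` are assumed to be defined by the caller): the handler
 * runs on the system workqueue once any event is ready or the timeout
 * expires, with the outcome recorded in the work item's poll_result
 * (0 or -EAGAIN, see above).
 *
 *	k_work_poll_init(&my_work, my_handler);
 *	k_work_poll_submit(&my_work, my_events,
 *			   ARRAY_SIZE(my_events), K_MSEC(100));
 */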

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}