/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * either events triggered directly, or from kernel objects or other kernel
 * constructs.
 */
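
/*
 * Illustrative usage sketch (not part of this file; `my_sem` and `my_fifo`
 * are hypothetical objects): a thread can wait on several conditions at
 * once with k_poll().
 *
 *	struct k_poll_event events[2] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_fifo, 0),
 *	};
 *
 *	(void)k_poll(events, 2, K_FOREVER);
 */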

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock. Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}
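
/*
 * Example sketch (hypothetical `my_sem`): runtime initialization of a poll
 * event, equivalent to the K_POLL_EVENT_INITIALIZER() macro in kernel.h.
 *
 *	struct k_poll_event event;
 *
 *	k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 */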

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
#ifdef CONFIG_PIPES
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		if (k_pipe_read_avail(event->pipe)) {
			*state = K_POLL_STATE_PIPE_DATA_AVAILABLE;
			return true;
		}
		break;
#endif
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

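/* Insert 'event' into the object's poll_events list, keeping the list
 * ordered by poller thread priority (highest priority first), so that the
 * head of the list is the most urgent waiter to wake.
 */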
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
#ifdef CONFIG_PIPES
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		add_event(&event->pipe->poll_events, event, poller);
		break;
#endif
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
#ifdef CONFIG_PIPES
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		remove_event = true;
		break;
#endif
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
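	/* Release and reacquire the lock between iterations to bound the
	 * worst-case spinlock hold time; the caller passes in the key of
	 * the critical section it already holds.
	 */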
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

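/* Check each event: mark those whose condition is already met as ready and,
 * unless just_check is true, register the rest on their kernel objects.
 * Returns the number of events actually registered.
 */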
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* Condition not met, and we are either only
			 * checking or no longer polling. No action needed.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

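/* Poller callback used in MODE_POLL: wake the thread pended in k_poll(),
 * setting its swap return value to -EINTR if the event was cancelled.
 */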
static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}
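
/*
 * Illustrative caller-side sketch (using the hypothetical `events` array
 * from the example near the top of this file): on success, inspect and
 * reset each event's state before polling again.
 *
 *	if (k_poll(events, 2, K_MSEC(100)) == 0) {
 *		for (int i = 0; i < 2; i++) {
 *			if (events[i].state == K_POLL_STATE_SEM_AVAILABLE) {
 *				(void)k_sem_take(events[i].sem, K_NO_WAIT);
 *			}
 *			events[i].state = K_POLL_STATE_NOT_READY;
 *		}
 *	}
 */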

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (K_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	if (K_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (K_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (K_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			K_OOPS(K_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
#ifdef CONFIG_PIPES
		case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
			break;
#endif
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	K_OOPS(1);
}
#include <syscalls/k_poll_mrsh.c>
#endif

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is in neither poll nor triggered mode;
			 * no action needed.
			 */
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

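/* Called by kernel objects (e.g. from k_sem_give() or k_queue_append()) with
 * the object's poll_events list: wake the highest-priority registered poller,
 * if any.
 */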
void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;
	k_spinlock_key_t key = k_spin_lock(&lock);

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}

	k_spin_unlock(&lock, key);
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* signal->result is left uninitialized */
	k_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <syscalls/k_poll_signal_init_mrsh.c>
#endif

void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <syscalls/k_poll_signal_check_mrsh.c>
#endif

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	z_reschedule(&lock, key);
	return rc;
}
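
/*
 * Illustrative sketch of the signal flow (hypothetical `my_signal`):
 *
 *	struct k_poll_signal my_signal;
 *	struct k_poll_event event;
 *	unsigned int signaled;
 *	int result;
 *
 *	k_poll_signal_init(&my_signal);
 *	k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 *	// meanwhile, another thread calls k_poll_signal_raise(&my_signal, 42)
 *	(void)k_poll(&event, 1, K_FOREVER);
 *	k_poll_signal_check(&my_signal, &signaled, &result);
 *	k_poll_signal_reset(&my_signal);
 */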

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <syscalls/k_poll_signal_reset_mrsh.c>

#endif

static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If callback is not set, the k_work_poll_submit_to_queue()
	 * already cleared event registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

extern int z_work_submit_to_queue(struct k_work_q *queue,
				  struct k_work *work);

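/* Poller callback used in MODE_TRIGGERED: abort the work item's timeout and
 * submit it to its target workqueue.
 */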
static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		z_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is waiting for an event. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if an event arrives while we are
		 * clearing registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered in
	 * the k_work_poll_submit_to_queue(), executed, or is pending.
	 * Only in the last case do we have a chance to cancel it, but
	 * unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}

void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
							   work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
						   work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * The K_NO_WAIT timeout was specified or at least one event
	 * was ready at registration time or changed state since
	 * registration. Hopefully, the poller mode was not set, so
	 * work was not submitted to workqueue.
	 */

	/*
	 * If poller is still polling, no watched event occurred. This means
	 * we reached here due to K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}

int k_work_poll_submit(struct k_work_poll *work,
		       struct k_poll_event *events,
		       int num_events,
		       k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
					      events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}
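
/*
 * Illustrative sketch (hypothetical `my_sem` and `my_handler`): submit work
 * that runs when the semaphore becomes available (poll_result == 0) or when
 * a 100 ms timeout expires (poll_result == -EAGAIN). A trigger that has not
 * fired yet can be withdrawn with k_work_poll_cancel().
 *
 *	static struct k_work_poll my_work;
 *	static struct k_poll_event my_events[1] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *	};
 *
 *	k_work_poll_init(&my_work, my_handler);
 *	(void)k_work_poll_submit(&my_work, my_events, 1, K_MSEC(100));
 */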

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}