/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * either events triggered directly, or from kernel objects or other kernel
 * constructs.
 */
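
/*
 * Illustrative usage sketch (not part of this file's implementation): a
 * thread waits on a semaphore and a FIFO at the same time. The objects
 * my_sem and my_fifo are hypothetical; see the k_poll() documentation in
 * include/kernel.h for the authoritative API description.
 *
 *	struct k_poll_event events[2] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_fifo, 0),
 *	};
 *
 *	int rc = k_poll(events, 2, K_MSEC(100));
 *
 *	if (rc == 0 && events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *		(void)k_sem_take(&my_sem, K_NO_WAIT);
 *	}
 *
 *	events[0].state = K_POLL_STATE_NOT_READY;
 *	events[1].state = K_POLL_STATE_NOT_READY;
 */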

#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <syscall_handler.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock. Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

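/* Insert the event into the kernel object's poll_events list, keeping the
 * list ordered by the priority of each registered poller's thread (highest
 * priority first), so the most important poller is notified first.
 */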
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

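/* Check each event in the array: events whose condition is already met are
 * marked ready immediately; the others are registered on their object's
 * poll_events list, unless we are only checking (just_check) or an earlier
 * event was already ready. Returns the number of events registered.
 */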
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* Condition not met, and we are either only checking
			 * (just_check) or the poller already stopped polling
			 * because an earlier event was ready; nothing to
			 * register.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

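/* Wake up the thread blocked in k_poll() whose event just became ready,
 * unless its wait has already timed out. Called with the subsystem lock
 * held.
 */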
static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	if (z_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	if (Z_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	Z_OOPS(1);
}
#include <syscalls/k_poll_mrsh.c>
#endif

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is neither in poll nor in triggered mode;
			 * no action needed.
			 */
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

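/* Called by kernel objects (semaphores, queues, message queues, ...) when
 * their state changes: pop the highest-priority poll event registered on
 * the object, if any, and signal it.
 */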
void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* sig->result is left uninitialized */
	z_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <syscalls/k_poll_signal_init_mrsh.c>
#endif

void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <syscalls/k_poll_signal_check_mrsh.c>
#endif

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	z_reschedule(&lock, key);
	return rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <syscalls/k_poll_signal_reset_mrsh.c>

#endif

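/* Work handler installed by k_work_poll_init(): clears any remaining event
 * registrations, releases ownership of the work item, then invokes the
 * user-supplied handler.
 */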
static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
		CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If the poller mode was reset to MODE_NONE, either
	 * k_work_poll_submit_to_queue() or triggered_work_cancel() has
	 * already cleared the event registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

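/* Timeout handler for triggered work: stop polling, record -EAGAIN as the
 * poll result, and submit the work so the user handler still runs.
 */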
static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

extern int z_work_submit_to_queue(struct k_work_q *queue,
				  struct k_work *work);

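/* Poller callback used in MODE_TRIGGERED: abort the timeout and submit the
 * triggered work item to its workqueue. Called with the subsystem lock held.
 */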
static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		z_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

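/* Attempt to cancel a triggered work item that is still waiting on its
 * events. Returns -EINVAL if the work was already submitted (or is being
 * registered or executed) and thus can no longer be cancelled here.
 */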
static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is still waiting for its events. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent the work from being submitted if an event arrives
		 * while we are clearing the registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reach here, the work is either being registered by
	 * k_work_poll_submit_to_queue(), already executing, or pending on
	 * the workqueue. Only in the last case could it be cancelled, but
	 * there is no public API for doing so.
	 */

	return -EINVAL;
}

void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
							   work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
						   work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}


	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * Either the K_NO_WAIT timeout was specified, or at least one event
	 * was ready at registration time or has changed state since
	 * registration. Since the poller mode was never set to
	 * MODE_TRIGGERED, the work has not been submitted to the workqueue.
	 */

	/*
	 * If poller is still polling, no watched event occurred. This means
	 * we reached here due to K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}

int k_work_poll_submit(struct k_work_poll *work,
		       struct k_poll_event *events,
		       int num_events,
		       k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
					      events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}