/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Second generation work queue implementation
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <wait_q.h>
#include <zephyr/spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <zephyr/sys/printk.h>

static inline void flag_clear(uint32_t *flagp,
			      uint32_t bit)
{
	*flagp &= ~BIT(bit);
}

static inline void flag_set(uint32_t *flagp,
			    uint32_t bit)
{
	*flagp |= BIT(bit);
}

static inline bool flag_test(const uint32_t *flagp,
			     uint32_t bit)
{
	return (*flagp & BIT(bit)) != 0U;
}

static inline bool flag_test_and_clear(uint32_t *flagp,
				       int bit)
{
	bool ret = flag_test(flagp, bit);

	flag_clear(flagp, bit);

	return ret;
}

static inline void flags_set(uint32_t *flagp,
			     uint32_t flags)
{
	*flagp = flags;
}

static inline uint32_t flags_get(const uint32_t *flagp)
{
	return *flagp;
}

/* Lock to protect the internal state of all work items, work queues,
 * and pending_cancels.
 */
static struct k_spinlock lock;

/* Invoked by work thread */
static void handle_flush(struct k_work *work) { }

static inline void init_flusher(struct z_work_flusher *flusher)
{
	struct k_work *work = &flusher->work;

	k_sem_init(&flusher->sem, 0, 1);
	k_work_init(&flusher->work, handle_flush);
	flag_set(&work->flags, K_WORK_FLUSHING_BIT);
}

/* List of pending cancellations. */
static sys_slist_t pending_cancels;

/* Initialize a canceler record and add it to the list of pending
 * cancels.
 *
 * Invoked with work lock held.
 *
 * @param canceler the structure used to notify a waiting process.
 * @param work the work structure that is to be canceled
 */
static inline void init_work_cancel(struct z_work_canceller *canceler,
				    struct k_work *work)
{
	k_sem_init(&canceler->sem, 0, 1);
	canceler->work = work;
	sys_slist_append(&pending_cancels, &canceler->node);
}

/* Complete flushing of a work item.
 *
 * Invoked with work lock held.
 *
 * Invoked from a work queue thread.
 *
 * Reschedules.
 *
 * @param work the work structure that has completed flushing.
 */
static void finalize_flush_locked(struct k_work *work)
{
	struct z_work_flusher *flusher
		= CONTAINER_OF(work, struct z_work_flusher, work);

	flag_clear(&work->flags, K_WORK_FLUSHING_BIT);

	k_sem_give(&flusher->sem);
}

/* Complete cancellation of a work item.
 *
 * Invoked with work lock held.
 *
 * Invoked from a work queue thread.
 *
 * Reschedules.
 *
 * @param work the work structure that has completed cancellation
 */
static void finalize_cancel_locked(struct k_work *work)
{
	struct z_work_canceller *wc, *tmp;
	sys_snode_t *prev = NULL;

	/* Clear this first, so released high-priority threads don't
	 * see it when doing things.
	 */
	flag_clear(&work->flags, K_WORK_CANCELING_BIT);

	/* Search for and remove the matching container, and release
	 * what's waiting for the completion.  The same work item can
	 * appear multiple times in the list if multiple threads
	 * attempt to cancel it.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
		if (wc->work == work) {
			sys_slist_remove(&pending_cancels, prev, &wc->node);
			k_sem_give(&wc->sem);
		} else {
			prev = &wc->node;
		}
	}
}

void k_work_init(struct k_work *work,
		 k_work_handler_t handler)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*work = (struct k_work)Z_WORK_INITIALIZER(handler);

	SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}
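
/* Usage sketch (illustrative only, not part of this file's logic): a
 * work item is typically embedded in a user-defined context structure
 * so the handler can recover its context with CONTAINER_OF.  The names
 * my_context and my_handler below are hypothetical.
 *
 *	struct my_context {
 *		struct k_work work;
 *		uint32_t events;
 *	};
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		struct my_context *ctx =
 *			CONTAINER_OF(work, struct my_context, work);
 *
 *		ctx->events++;
 *	}
 *
 *	static struct my_context ctx;
 *
 *	k_work_init(&ctx.work, my_handler);
 */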

static inline int work_busy_get_locked(const struct k_work *work)
{
	return flags_get(&work->flags) & K_WORK_MASK;
}

int k_work_busy_get(const struct k_work *work)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_busy_get_locked(work);

	k_spin_unlock(&lock, key);

	return ret;
}

/* Add a flusher work item to the queue.
 *
 * Invoked with work lock held.
 *
 * Caller must notify queue of pending work.
 *
 * @param queue queue on which a work item may appear.
 * @param work the work item that is either queued or running on @p
 * queue
 * @param flusher an uninitialized/unused flusher object
 */
static void queue_flusher_locked(struct k_work_q *queue,
				 struct k_work *work,
				 struct z_work_flusher *flusher)
{
	bool in_list = false;
	struct k_work *wn;

	/* Determine whether the work item is still queued. */
	SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
		if (wn == work) {
			in_list = true;
			break;
		}
	}

	init_flusher(flusher);
	if (in_list) {
		sys_slist_insert(&queue->pending, &work->node,
				 &flusher->work.node);
	} else {
		sys_slist_prepend(&queue->pending, &flusher->work.node);
	}
}

/* Try to remove a work item from the given queue.
 *
 * Invoked with work lock held.
 *
 * @param queue the queue from which the work should be removed
 * @param work work that may be on the queue
 */
static inline void queue_remove_locked(struct k_work_q *queue,
				       struct k_work *work)
{
	if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
		(void)sys_slist_find_and_remove(&queue->pending, &work->node);
	}
}

/* Potentially notify a queue that it needs to look for pending work.
 *
 * This may make the work queue thread ready, but as the lock is held it
 * will not be a reschedule point.  Callers should yield after the lock is
 * released where appropriate (generally if this returns true).
 *
 * @param queue to be notified.  If this is null no notification is required.
 *
 * @return true if and only if the queue was notified and woken, i.e. a
 * reschedule is pending.
 */
static inline bool notify_queue_locked(struct k_work_q *queue)
{
	bool rv = false;

	if (queue != NULL) {
		rv = z_sched_wake(&queue->notifyq, 0, NULL);
	}

	return rv;
}

/* Submit a work item to a queue if queue state allows new work.
 *
 * Submission is rejected if no queue is provided, or if the queue is
 * draining and the work isn't being submitted from the queue's
 * thread (chained submission).
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param queue the queue to which work should be submitted.  This may
 * be null, in which case the submission will fail.
 *
 * @param work to be submitted
 *
 * @retval 1 if successfully queued
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 * @retval -EBUSY if the submission was rejected (draining, plugged)
 */
static inline int queue_submit_locked(struct k_work_q *queue,
				      struct k_work *work)
{
	if (queue == NULL) {
		return -EINVAL;
	}

	int ret = -EBUSY;
	bool chained = (_current == &queue->thread) && !k_is_in_isr();
	bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
	bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

	/* Test for acceptability, in priority order:
	 *
	 * * -ENODEV if the queue isn't running.
	 * * -EBUSY if draining and not chained
	 * * -EBUSY if plugged and not draining
	 * * otherwise OK
	 */
	if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
		ret = -ENODEV;
	} else if (draining && !chained) {
		ret = -EBUSY;
	} else if (plugged && !draining) {
		ret = -EBUSY;
	} else {
		sys_slist_append(&queue->pending, &work->node);
		ret = 1;
		(void)notify_queue_locked(queue);
	}

	return ret;
}

/* Attempt to submit work to a queue.
 *
 * The submission can fail if:
 * * the work is cancelling,
 * * no candidate queue can be identified,
 * * the candidate queue rejects the submission.
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param work the work structure to be submitted
 *
 * @param queuep pointer to a queue reference.  On input this should
 * dereference to the proposed queue (which may be null); after completion it
 * will be null if the work was not submitted or if submitted will reference
 * the queue it was submitted to.  That may or may not be the queue provided
 * on input.
 *
 * @retval 0 if work was already submitted to a queue
 * @retval 1 if work was not submitted and has been queued to @p queue
 * @retval 2 if work was running and has been queued to the queue that was
 * running it
 * @retval -EBUSY if canceling or submission was rejected by queue
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 */
static int submit_to_queue_locked(struct k_work *work,
				  struct k_work_q **queuep)
{
	int ret = 0;

	if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Disallowed */
		ret = -EBUSY;
	} else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
		/* Not currently queued */
		ret = 1;

		/* If no queue specified resubmit to last queue. */
		if (*queuep == NULL) {
			*queuep = work->queue;
		}

		/* If the work is currently running we have to use the
		 * queue it's running on to prevent handler
		 * re-entrancy.
		 */
		if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
			__ASSERT_NO_MSG(work->queue != NULL);
			*queuep = work->queue;
			ret = 2;
		}

		int rc = queue_submit_locked(*queuep, work);

		if (rc < 0) {
			ret = rc;
		} else {
			flag_set(&work->flags, K_WORK_QUEUED_BIT);
			work->queue = *queuep;
		}
	} else {
		/* Already queued, do nothing. */
	}

	if (ret <= 0) {
		*queuep = NULL;
	}

	return ret;
}

/* Submit work to a queue but do not yield the current thread.
 *
 * Intended for internal use.
 *
 * See also submit_to_queue_locked().
 *
 * @param queue the queue to which the work should be submitted
 * @param work the work structure to be submitted
 *
 * @retval see submit_to_queue_locked()
 */
int z_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(work->handler != NULL);

	k_spinlock_key_t key = k_spin_lock(&lock);

	int ret = submit_to_queue_locked(work, &queue);

	k_spin_unlock(&lock, key);

	return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

	int ret = z_work_submit_to_queue(queue, work);

	/* submit_to_queue_locked() won't reschedule on its own
	 * (really it should, otherwise this process will result in
	 * spurious calls to z_swap() due to the race), so do it here
	 * if the queue state changed.
	 */
	if (ret > 0) {
		z_reschedule_unlocked();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

	return ret;
}

int k_work_submit(struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);

	int ret = k_work_submit_to_queue(&k_sys_work_q, work);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);

	return ret;
}
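
/* Usage sketch (illustrative only; my_work and my_handler are
 * hypothetical names defined elsewhere).  k_work_submit() may be
 * called from threads or ISRs; the handler runs later in the system
 * work queue thread, so a negative return value is the only way to
 * learn that the item was rejected (e.g. while it is being canceled).
 *
 *	K_WORK_DEFINE(my_work, my_handler);
 *
 *	void on_interrupt_event(void)
 *	{
 *		int rc = k_work_submit(&my_work);
 *
 *		__ASSERT(rc >= 0, "work rejected: %d", rc);
 *	}
 */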

/* Flush the work item if necessary.
 *
 * Flushing is necessary only if the work is either queued or running.
 *
 * Invoked with work lock held by key.
 * Sleeps.
 *
 * @param work the work item that is to be flushed
 * @param flusher state used to synchronize the flush
 *
 * @retval true if work is queued or running.  If this happens the
 * caller must take the flusher semaphore after releasing the lock.
 *
 * @retval false otherwise.  No wait required.
 */
static bool work_flush_locked(struct k_work *work,
			      struct z_work_flusher *flusher)
{
	bool need_flush = (flags_get(&work->flags)
			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;

	if (need_flush) {
		struct k_work_q *queue = work->queue;

		__ASSERT_NO_MSG(queue != NULL);

		queue_flusher_locked(queue, work, flusher);
		notify_queue_locked(queue);
	}

	return need_flush;
}

bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
	__ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);

	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);

		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);

	return need_flush;
}
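
/* Usage sketch (illustrative only; my_work is hypothetical).  Flushing
 * must be done from a thread, never from an ISR or from the handler
 * itself, and on CONFIG_KERNEL_COHERENCE platforms the sync object
 * must live in coherent memory rather than on the caller's stack.
 *
 *	struct k_work_sync sync;
 *
 *	(void)k_work_submit(&my_work);
 *	...
 *	if (k_work_flush(&my_work, &sync)) {
 *		// the caller slept until the pending handler returned
 *	}
 */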

/* Execute the non-waiting steps necessary to cancel a work item.
 *
 * Invoked with work lock held.
 *
 * @param work the work item to be canceled.
 *
 * @retval nonzero (busy flags) if we need to wait for the work item to
 * finish canceling
 * @retval 0 if the work item is idle
 *
 * @return k_work_busy_get() flags captured under lock
 */
static int cancel_async_locked(struct k_work *work)
{
	/* If we haven't already started canceling, do it now. */
	if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Remove it from the queue, if it's queued. */
		queue_remove_locked(work->queue, work);
	}

	/* If it's still busy after it's been dequeued, then flag it
	 * as canceling.
	 */
	int ret = work_busy_get_locked(work);

	if (ret != 0) {
		flag_set(&work->flags, K_WORK_CANCELING_BIT);
		ret = work_busy_get_locked(work);
	}

	return ret;
}

/* Record that a thread must wait for a work item to finish canceling.
 *
 * Invoked with work lock held.
 *
 * @param work work that is being canceled
 * @param canceller state used to synchronize the cancellation
 *
 * @retval true if and only if the work was still active on entry.  The caller
 * must wait on the canceller semaphore after releasing the lock.
 *
 * @retval false if work was idle on entry.  The caller need not wait.
 */
static bool cancel_sync_locked(struct k_work *work,
			       struct z_work_canceller *canceller)
{
	bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);

	/* If something's still running then we have to wait for
	 * completion, which is indicated when finalize_cancel_locked()
	 * gets invoked.
	 */
	if (ret) {
		init_work_cancel(canceller, work);
	}

	return ret;
}

int k_work_cancel(struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_async_locked(work);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);

	return ret;
}

bool k_work_cancel_sync(struct k_work *work,
			struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_busy_get_locked(work) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_async_locked(work);
		need_wait = cancel_sync_locked(work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);

		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);
	return pending;
}
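
/* Usage sketch (illustrative only; my_work is hypothetical).  A
 * synchronous cancel is the usual way to make sure a handler is
 * neither queued nor running before its context is torn down:
 *
 *	struct k_work_sync sync;
 *
 *	(void)k_work_cancel_sync(&my_work, &sync);
 *	// at this point the handler is not running and will not be
 *	// re-invoked unless the item is submitted again
 */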

/* Loop executed by a work queue thread.
 *
 * @param workq_ptr pointer to the work queue structure
 */
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	struct k_work_q *queue = (struct k_work_q *)workq_ptr;

	while (true) {
		sys_snode_t *node;
		struct k_work *work = NULL;
		k_work_handler_t handler = NULL;
		k_spinlock_key_t key = k_spin_lock(&lock);
		bool yield;

		/* Check for and prepare any new work. */
		node = sys_slist_get(&queue->pending);
		if (node != NULL) {
			/* Mark that there's some work active that's
			 * not on the pending list.
			 */
			flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			work = CONTAINER_OF(node, struct k_work, node);
			flag_set(&work->flags, K_WORK_RUNNING_BIT);
			flag_clear(&work->flags, K_WORK_QUEUED_BIT);

			/* Static code analysis tools can raise a
			 * false-positive violation here that 'work' is checked
			 * for NULL after being dereferenced.
			 *
			 * The work item is recovered by CONTAINER_OF as the
			 * struct k_work that contains the node.  It could only
			 * be NULL if the node were a member of a struct k_work
			 * placed at address NULL, which cannot happen: node
			 * was just taken from a non-empty list, so if node is
			 * not NULL then work is not NULL either.
			 */
			handler = work->handler;
		} else if (flag_test_and_clear(&queue->flags,
					       K_WORK_QUEUE_DRAIN_BIT)) {
			/* Not busy and draining: move threads waiting for
			 * drain to ready state.  The held spinlock inhibits
			 * immediate reschedule; released threads get their
			 * chance when this invokes z_sched_wait() below.
			 *
			 * We don't touch K_WORK_QUEUE_PLUGGED_BIT, so getting
			 * here doesn't mean that the queue will allow new
			 * submissions.
			 */
			(void)z_sched_wake_all(&queue->drainq, 1, NULL);
		} else {
			/* No work is available and no queue state requires
			 * special handling.
			 */
			;
		}

		if (work == NULL) {
			/* Nothing's had a chance to add work since we took
			 * the lock, and we didn't find work nor got asked to
			 * stop.  Just go to sleep: when something happens the
			 * work thread will be woken and we can check again.
			 */

			(void)z_sched_wait(&lock, key, &queue->notifyq,
					   K_FOREVER, NULL);
			continue;
		}

		k_spin_unlock(&lock, key);

		__ASSERT_NO_MSG(handler != NULL);
		handler(work);

		/* Mark the work item as no longer running and deal
		 * with any cancellation and flushing issued while it
		 * was running.  Clear the BUSY flag and optionally
		 * yield to prevent starving other threads.
		 */
		key = k_spin_lock(&lock);

		flag_clear(&work->flags, K_WORK_RUNNING_BIT);
		if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) {
			finalize_flush_locked(work);
		}
		if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
			finalize_cancel_locked(work);
		}

		flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
		yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
		k_spin_unlock(&lock, key);

		/* Optionally yield to prevent the work queue from
		 * starving other threads.
		 */
		if (yield) {
			k_yield();
		}
	}
}

void k_work_queue_init(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue != NULL);

	*queue = (struct k_work_q) {
		.flags = 0,
	};

	SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
}

void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack,
			size_t stack_size,
			int prio,
			const struct k_work_queue_config *cfg)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(stack);
	__ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));
	uint32_t flags = K_WORK_QUEUE_STARTED;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);

	sys_slist_init(&queue->pending);
	z_waitq_init(&queue->notifyq);
	z_waitq_init(&queue->drainq);

	if ((cfg != NULL) && cfg->no_yield) {
		flags |= K_WORK_QUEUE_NO_YIELD;
	}

	/* It hasn't actually been started yet, but all the state is in place
	 * so we can submit things and once the thread gets control it's ready
	 * to roll.
	 */
	flags_set(&queue->flags, flags);

	(void)k_thread_create(&queue->thread, stack, stack_size,
			      work_queue_main, queue, NULL, NULL,
			      prio, 0, K_FOREVER);

	if ((cfg != NULL) && (cfg->name != NULL)) {
		k_thread_name_set(&queue->thread, cfg->name);
	}

	k_thread_start(&queue->thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}
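
/* Usage sketch for a dedicated work queue (illustrative only; the
 * names my_work_q, my_stack_area, and the numeric values are
 * hypothetical choices):
 *
 *	#define MY_STACK_SIZE 1024
 *	#define MY_PRIORITY 5
 *
 *	K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
 *	static struct k_work_q my_work_q;
 *
 *	struct k_work_queue_config cfg = {
 *		.name = "my_work_q",
 *		.no_yield = false,
 *	};
 *
 *	k_work_queue_init(&my_work_q);
 *	k_work_queue_start(&my_work_q, my_stack_area,
 *			   K_THREAD_STACK_SIZEOF(my_stack_area),
 *			   MY_PRIORITY, &cfg);
 *
 * Items are then submitted with k_work_submit_to_queue(&my_work_q, ...).
 */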

int k_work_queue_drain(struct k_work_q *queue,
		       bool plug)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(!k_is_in_isr());

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (((flags_get(&queue->flags)
	      & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
	    || plug
	    || !sys_slist_is_empty(&queue->pending)) {
		flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
		if (plug) {
			flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
		}

		notify_queue_locked(queue);
		ret = z_sched_wait(&lock, key, &queue->drainq,
				   K_FOREVER, NULL);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);

	return ret;
}

int k_work_queue_unplug(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);

	int ret = -EALREADY;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
		ret = 0;
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);

	return ret;
}
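
/* Usage sketch (illustrative only; my_work_q is hypothetical).  A
 * typical drain-for-quiescence sequence plugs the queue so no new
 * items can be submitted while the caller does its critical step,
 * then unplugs it:
 *
 *	(void)k_work_queue_drain(&my_work_q, true);
 *	// queue is now empty and rejects external submissions
 *	do_critical_maintenance();
 *	(void)k_work_queue_unplug(&my_work_q);
 *
 * do_critical_maintenance() stands in for whatever the application
 * needs to do while the queue is quiet.
 */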

#ifdef CONFIG_SYS_CLOCK_EXISTS

/* Timeout handler for delayable work.
 *
 * Invoked by timeout infrastructure.
 * Takes and releases work lock.
 * Conditionally reschedules.
 */
static void work_timeout(struct _timeout *to)
{
	struct k_work_delayable *dw
		= CONTAINER_OF(to, struct k_work_delayable, timeout);
	struct k_work *wp = &dw->work;
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_work_q *queue = NULL;

	/* If the work is still marked delayed (should be) then clear that
	 * state and submit it to the queue.  If successful the queue will be
	 * notified of new work at the next reschedule point.
	 *
	 * If not successful there is no notification that the work has been
	 * abandoned.  Sorry.
	 */
	if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
		queue = dw->queue;
		(void)submit_to_queue_locked(wp, &queue);
	}

	k_spin_unlock(&lock, key);
}

void k_work_init_delayable(struct k_work_delayable *dwork,
			   k_work_handler_t handler)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*dwork = (struct k_work_delayable){
		.work = {
			.handler = handler,
			.flags = K_WORK_DELAYABLE,
		},
	};
	z_init_timeout(&dwork->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}

static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
	return flags_get(&dwork->work.flags) & K_WORK_MASK;
}

int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_delayable_busy_get_locked(dwork);

	k_spin_unlock(&lock, key);
	return ret;
}

/* Attempt to schedule a work item for future (maybe immediate)
 * submission.
 *
 * Invoked with work lock held.
 *
 * See also submit_to_queue_locked(), which implements this for a no-wait
 * delay.
 *
 * @param queuep pointer to a pointer to a queue.  On input this
 * should dereference to the proposed queue (which may be null); after
 * completion it will be null if the work was not submitted or if
 * submitted will reference the queue it was submitted to.  That may
 * or may not be the queue provided on input.
 *
 * @param dwork the delayed work structure
 *
 * @param delay the delay to use before scheduling.
 *
 * @retval from submit_to_queue_locked() if delay is K_NO_WAIT; otherwise
 * @retval 1 to indicate successfully scheduled.
 */
static int schedule_for_queue_locked(struct k_work_q **queuep,
				     struct k_work_delayable *dwork,
				     k_timeout_t delay)
{
	int ret = 1;
	struct k_work *work = &dwork->work;

	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		return submit_to_queue_locked(work, queuep);
	}

	flag_set(&work->flags, K_WORK_DELAYED_BIT);
	dwork->queue = *queuep;

	/* Add timeout */
	z_add_timeout(&dwork->timeout, work_timeout, delay);

	return ret;
}

/* Unschedule delayable work.
 *
 * If the work is delayed, cancel the timeout and clear the delayed
 * flag.
 *
 * Invoked with work lock held.
 *
 * @param dwork pointer to delayable work structure.
 *
 * @return true if and only if work had been delayed so the timeout
 * was cancelled.
 */
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
	bool ret = false;
	struct k_work *work = &dwork->work;

	/* If scheduled, try to cancel.  If it fails, that means the
	 * callback has been dequeued and will inevitably run (or has
	 * already run), so treat that as "undelayed" and return
	 * false.
	 */
	if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
		ret = z_abort_timeout(&dwork->timeout) == 0;
	}

	return ret;
}

/* Full cancellation of a delayable work item.
 *
 * Unschedules the delayed part then delegates to standard work
 * cancellation.
 *
 * Invoked with work lock held.
 *
 * @param dwork delayable work item
 *
 * @return k_work_busy_get() flags
 */
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
	(void)unschedule_locked(dwork);

	return cancel_async_locked(&dwork->work);
}

int k_work_schedule_for_queue(struct k_work_q *queue,
			      struct k_work_delayable *dwork,
			      k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);

	struct k_work *work = &dwork->work;
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Schedule the work item if it's idle or running. */
	if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
		ret = schedule_for_queue_locked(&queue, dwork, delay);
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_schedule(struct k_work_delayable *dwork,
		    k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);

	int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);

	return ret;
}
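
/* Usage sketch (illustrative only; my_dwork, my_handler, and the delay
 * are hypothetical).  k_work_schedule() starts the countdown only if
 * the item is idle, so calling it repeatedly does not push an already
 * pending deadline further out:
 *
 *	K_WORK_DELAYABLE_DEFINE(my_dwork, my_handler);
 *
 *	// run my_handler on the system work queue ~100 ms from now
 *	(void)k_work_schedule(&my_dwork, K_MSEC(100));
 */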

int k_work_reschedule_for_queue(struct k_work_q *queue,
				struct k_work_delayable *dwork,
				k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Remove any active scheduling. */
	(void)unschedule_locked(dwork);

	/* Schedule the work item with the new parameters. */
	ret = schedule_for_queue_locked(&queue, dwork, delay);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_reschedule(struct k_work_delayable *dwork,
		      k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);

	int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);

	return ret;
}
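
/* Usage sketch (illustrative only; my_handler and do_periodic_step()
 * are hypothetical).  In contrast to k_work_schedule(), a reschedule
 * replaces any previously scheduled deadline, which makes it the
 * natural building block for periodic work driven from the handler
 * itself:
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		struct k_work_delayable *dwork =
 *			k_work_delayable_from_work(work);
 *
 *		do_periodic_step();
 *		(void)k_work_reschedule(dwork, K_SECONDS(1));
 *	}
 */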

int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_delayable_async_locked(dwork);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);

	return ret;
}

bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
				  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_delayable_async_locked(dwork);
		need_wait = cancel_sync_locked(&dwork->work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);
	return pending;
}

bool k_work_flush_delayable(struct k_work_delayable *dwork,
			    struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);

	struct k_work *work = &dwork->work;
	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* If it's idle release the lock and return immediately. */
	if (work_busy_get_locked(work) == 0U) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);

		return false;
	}

	/* If unscheduling did something then submit it.  Ignore a
	 * failed submission (e.g. when cancelling).
	 */
	if (unschedule_locked(dwork)) {
		struct k_work_q *queue = dwork->queue;

		(void)submit_to_queue_locked(work, &queue);
	}

	/* Wait for it to finish */
	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);

	return need_flush;
}
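
/* Usage sketch (illustrative only; my_dwork is hypothetical).  Before
 * releasing resources used by a delayable handler, cancel it
 * synchronously; use k_work_flush_delayable() instead when the work
 * should still run once before the caller proceeds:
 *
 *	struct k_work_sync sync;
 *
 *	(void)k_work_cancel_delayable_sync(&my_dwork, &sync);
 */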

#endif /* CONFIG_SYS_CLOCK_EXISTS */