Lines Matching full:work

10  * Second generation work queue implementation
60 /* Lock to protect the internal state of all work items, work queues,
65 /* Invoked by work thread */
66 static void handle_flush(struct k_work *work) { } in handle_flush() argument
70 struct k_work *work = &flusher->work; in init_flusher() local
72 k_work_init(&flusher->work, handle_flush); in init_flusher()
73 flag_set(&work->flags, K_WORK_FLUSHING_BIT); in init_flusher()
82 * Invoked with work lock held.
85 * @param work the work structure that is to be canceled
88 struct k_work *work) in init_work_cancel() argument
91 canceler->work = work; in init_work_cancel()
95 /* Complete flushing of a work item.
97 * Invoked with work lock held.
99 * Invoked from a work queue thread.
103 * @param work the work structure that has completed flushing.
105 static void finalize_flush_locked(struct k_work *work) in finalize_flush_locked() argument
108 = CONTAINER_OF(work, struct z_work_flusher, work); in finalize_flush_locked()
110 flag_clear(&work->flags, K_WORK_FLUSHING_BIT); in finalize_flush_locked()
115 /* Complete cancellation of a work item and unlock the held lock.
117 * Invoked with work lock held.
119 * Invoked from a work queue thread.
123 * @param work the work structure that has completed cancellation
125 static void finalize_cancel_locked(struct k_work *work) in finalize_cancel_locked() argument
133 flag_clear(&work->flags, K_WORK_CANCELING_BIT); in finalize_cancel_locked()
136 * what's waiting for the completion. The same work item can in finalize_cancel_locked()
141 if (wc->work == work) { in finalize_cancel_locked()
150 void k_work_init(struct k_work *work, in k_work_init() argument
153 __ASSERT_NO_MSG(work != NULL); in k_work_init()
156 *work = (struct k_work)Z_WORK_INITIALIZER(handler); in k_work_init()
158 SYS_PORT_TRACING_OBJ_INIT(k_work, work); in k_work_init()
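
A minimal usage sketch of the initialization path matched above; the handler and item names (sample_handler, sample_item) are hypothetical, and K_WORK_DEFINE() is the equivalent static initializer.

#include <zephyr/kernel.h>

/* Hypothetical handler with the k_work_handler_t signature. */
static void sample_handler(struct k_work *work)
{
        /* deferred processing runs in a work queue thread */
}

static struct k_work sample_item;

static void sample_setup(void)
{
        /* Clears the flags and records the handler, mirroring the
         * Z_WORK_INITIALIZER() assignment in k_work_init() above.
         */
        k_work_init(&sample_item, sample_handler);
}
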
161 static inline int work_busy_get_locked(const struct k_work *work) in work_busy_get_locked() argument
163 return flags_get(&work->flags) & K_WORK_MASK; in work_busy_get_locked()
166 int k_work_busy_get(const struct k_work *work) in k_work_busy_get() argument
169 int ret = work_busy_get_locked(work); in k_work_busy_get()
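
As a sketch of how a caller might interpret the returned bitmask; the snapshot can already be stale when it is used, and the function and variable names below are illustrative only.

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void report_state(const struct k_work *item)
{
        /* Bitmask of the busy flags, e.g. K_WORK_QUEUED, K_WORK_RUNNING,
         * K_WORK_CANCELING; zero means the item is idle.
         */
        int busy = k_work_busy_get(item);

        if (busy == 0) {
                printk("work item is idle\n");
        } else {
                printk("queued=%d running=%d canceling=%d\n",
                       (busy & K_WORK_QUEUED) != 0,
                       (busy & K_WORK_RUNNING) != 0,
                       (busy & K_WORK_CANCELING) != 0);
        }
}
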
176 /* Add a flusher work item to the queue.
178 * Invoked with work lock held.
180 * Caller must notify queue of pending work.
182 * @param queue queue on which a work item may appear.
183 * @param work the work item that is either queued or running on @p
188 struct k_work *work, in queue_flusher_locked() argument
193 if ((flags_get(&work->flags) & K_WORK_QUEUED) != 0U) { in queue_flusher_locked()
194 sys_slist_insert(&queue->pending, &work->node, in queue_flusher_locked()
195 &flusher->work.node); in queue_flusher_locked()
197 sys_slist_prepend(&queue->pending, &flusher->work.node); in queue_flusher_locked()
201 /* Try to remove a work item from the given queue.
203 * Invoked with work lock held.
205 * @param queue the queue from which the work should be removed
206 * @param work work that may be on the queue
209 struct k_work *work) in queue_remove_locked() argument
211 if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) { in queue_remove_locked()
212 (void)sys_slist_find_and_remove(&queue->pending, &work->node); in queue_remove_locked()
216 /* Potentially notify a queue that it needs to look for pending work.
218 * This may make the work queue thread ready, but as the lock is held it
238 /* Submit a work item to a queue if queue state allows new work.
241 * draining and the work isn't being submitted from the queue's
244 * Invoked with work lock held.
247 * @param queue the queue to which work should be submitted. This may
250 * @param work to be submitted
258 struct k_work *work) in queue_submit_locked() argument
283 sys_slist_append(&queue->pending, &work->node); in queue_submit_locked()
291 /* Attempt to submit work to a queue.
294 * * the work is cancelling,
298 * Invoked with work lock held.
301 * @param work the work structure to be submitted
305 * will be null if the work was not submitted or if submitted will reference
309 * @retval 0 if work was already submitted to a queue
310 * @retval 1 if work was not submitted and has been queued to @p queue
311 * @retval 2 if work was running and has been queued to the queue that was
317 static int submit_to_queue_locked(struct k_work *work, in submit_to_queue_locked() argument
322 if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) { in submit_to_queue_locked()
325 } else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) { in submit_to_queue_locked()
332 *queuep = work->queue; in submit_to_queue_locked()
335 /* If the work is currently running we have to use the in submit_to_queue_locked()
339 if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) { in submit_to_queue_locked()
340 __ASSERT_NO_MSG(work->queue != NULL); in submit_to_queue_locked()
341 *queuep = work->queue; in submit_to_queue_locked()
345 int rc = queue_submit_locked(*queuep, work); in submit_to_queue_locked()
350 flag_set(&work->flags, K_WORK_QUEUED_BIT); in submit_to_queue_locked()
351 work->queue = *queuep; in submit_to_queue_locked()
364 /* Submit work to a queue but do not yield the current thread.
371 * @param work the work structure to be submitted
376 struct k_work *work) in z_work_submit_to_queue() argument
378 __ASSERT_NO_MSG(work != NULL); in z_work_submit_to_queue()
379 __ASSERT_NO_MSG(work->handler != NULL); in z_work_submit_to_queue()
383 int ret = submit_to_queue_locked(work, &queue); in z_work_submit_to_queue()
391 struct k_work *work) in k_work_submit_to_queue() argument
393 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work); in k_work_submit_to_queue()
395 int ret = z_work_submit_to_queue(queue, work); in k_work_submit_to_queue()
406 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret); in k_work_submit_to_queue()
411 int k_work_submit(struct k_work *work) in k_work_submit() argument
413 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work); in k_work_submit()
415 int ret = k_work_submit_to_queue(&k_sys_work_q, work); in k_work_submit()
417 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret); in k_work_submit()
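
A sketch of how the return values documented for submit_to_queue_locked() surface through the public k_work_submit(); the identifiers below are hypothetical.

#include <zephyr/kernel.h>

static void sample_handler(struct k_work *work)
{
        /* deferred processing */
}

K_WORK_DEFINE(sample_item, sample_handler);

static void poke(void)
{
        int rc = k_work_submit(&sample_item);

        if (rc == 0) {
                /* already queued; nothing changed */
        } else if (rc == 1) {
                /* was idle and is now queued */
        } else if (rc == 2) {
                /* was running; queued again on the queue running it */
        } else {
                /* negative: e.g. the queue is draining or plugged */
        }
}
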
422 /* Flush the work item if necessary.
424 * Flushing is necessary only if the work is either queued or running.
426 * Invoked with work lock held by key.
429 * @param work the work item that is to be flushed
432 * @retval true if work is queued or running. If this happens the
437 static bool work_flush_locked(struct k_work *work, in work_flush_locked() argument
440 bool need_flush = (flags_get(&work->flags) in work_flush_locked()
444 struct k_work_q *queue = work->queue; in work_flush_locked()
448 queue_flusher_locked(queue, work, flusher); in work_flush_locked()
455 bool k_work_flush(struct k_work *work, in k_work_flush() argument
458 __ASSERT_NO_MSG(work != NULL); in k_work_flush()
459 __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); in k_work_flush()
466 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work); in k_work_flush()
471 bool need_flush = work_flush_locked(work, flusher); in k_work_flush()
477 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER); in k_work_flush()
482 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush); in k_work_flush()
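
A caller-side sketch of k_work_flush(); it must run in a thread other than the queue thread servicing the item, and the names below are illustrative.

#include <zephyr/kernel.h>

static bool wait_for_item(struct k_work *item)
{
        struct k_work_sync sync;

        /* Returns true if the item was queued or running and the call
         * had to wait for the handler to complete; false if it was idle.
         */
        return k_work_flush(item, &sync);
}
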
487 /* Execute the non-waiting steps necessary to cancel a work item.
489 * Invoked with work lock held.
491 * @param work the work item to be canceled.
493 * @retval true if we need to wait for the work item to finish canceling
494 * @retval false if the work item is idle
498 static int cancel_async_locked(struct k_work *work) in cancel_async_locked() argument
501 if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) { in cancel_async_locked()
503 queue_remove_locked(work->queue, work); in cancel_async_locked()
509 int ret = work_busy_get_locked(work); in cancel_async_locked()
512 flag_set(&work->flags, K_WORK_CANCELING_BIT); in cancel_async_locked()
513 ret = work_busy_get_locked(work); in cancel_async_locked()
519 /* Complete the cancellation if necessary, release the work lock, and wait if
522 * Invoked with work lock held by key.
525 * @param work work that is being canceled
527 * @param key used by work lock
529 * @retval true if and only if the work was still active on entry. The caller
532 * @retval false if work was idle on entry. The caller need not wait.
534 static bool cancel_sync_locked(struct k_work *work, in cancel_sync_locked() argument
537 bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT); in cancel_sync_locked()
544 init_work_cancel(canceller, work); in cancel_sync_locked()
550 int k_work_cancel(struct k_work *work) in k_work_cancel() argument
552 __ASSERT_NO_MSG(work != NULL); in k_work_cancel()
553 __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); in k_work_cancel()
555 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work); in k_work_cancel()
558 int ret = cancel_async_locked(work); in k_work_cancel()
562 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret); in k_work_cancel()
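
A non-blocking cancel sketch; k_work_cancel() returns the remaining busy bitmask, so zero means the item is fully idle (identifiers are illustrative).

#include <zephyr/kernel.h>

static bool stop_item_async(struct k_work *item)
{
        /* Pulls the item off its queue if it is still pending, but does
         * not wait for a handler that is already running.
         */
        int busy = k_work_cancel(item);

        return busy == 0; /* true: safe to reuse the item immediately */
}
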
567 bool k_work_cancel_sync(struct k_work *work, in k_work_cancel_sync() argument
570 __ASSERT_NO_MSG(work != NULL); in k_work_cancel_sync()
572 __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); in k_work_cancel_sync()
578 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync); in k_work_cancel_sync()
582 bool pending = (work_busy_get_locked(work) != 0U); in k_work_cancel_sync()
586 (void)cancel_async_locked(work); in k_work_cancel_sync()
587 need_wait = cancel_sync_locked(work, canceller); in k_work_cancel_sync()
593 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync); in k_work_cancel_sync()
598 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending); in k_work_cancel_sync()
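
The blocking counterpart, as a sketch; k_work_cancel_sync() reports whether the item was pending when the call was made (names are illustrative).

#include <zephyr/kernel.h>

static void stop_item_and_wait(struct k_work *item)
{
        struct k_work_sync sync;

        bool was_pending = k_work_cancel_sync(item, &sync);

        if (was_pending) {
                /* the call waited for the handler and/or cancellation
                 * to finish; the item is now idle
                 */
        }
}
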
602 /* Loop executed by a work queue thread.
604 * @param workq_ptr pointer to the work queue structure
615 struct k_work *work = NULL; in work_queue_main() local
620 /* Check for and prepare any new work. */ in work_queue_main()
623 /* Mark that there's some work active that's in work_queue_main()
627 work = CONTAINER_OF(node, struct k_work, node); in work_queue_main()
628 flag_set(&work->flags, K_WORK_RUNNING_BIT); in work_queue_main()
629 flag_clear(&work->flags, K_WORK_QUEUED_BIT); in work_queue_main()
632 * in the line below that 'work' is checked for null after being in work_queue_main()
635 * The work is figured out by CONTAINER_OF, as a container in work_queue_main()
639 * which should never happen, even line 'if (work != NULL)' in work_queue_main()
641 * This means that if node is not NULL, then work will not be NULL. in work_queue_main()
643 handler = work->handler; in work_queue_main()
663 /* No work is available and no queue state requires in work_queue_main()
669 if (work == NULL) { in work_queue_main()
670 /* Nothing's had a chance to add work since we took in work_queue_main()
671 * the lock, and we neither found work nor were asked to in work_queue_main()
673 * work thread will be woken and we can check again. in work_queue_main()
684 handler(work); in work_queue_main()
686 /* Mark the work item as no longer running and deal in work_queue_main()
693 flag_clear(&work->flags, K_WORK_RUNNING_BIT); in work_queue_main()
694 if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) { in work_queue_main()
695 finalize_flush_locked(work); in work_queue_main()
697 if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) { in work_queue_main()
698 finalize_cancel_locked(work); in work_queue_main()
705 /* Optionally yield to prevent the work queue from in work_queue_main()
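
The loop above runs in a dedicated thread created by the queue start API; a minimal sketch of creating such a queue follows (stack size, priority, and names are arbitrary choices, not values from this file).

#include <zephyr/kernel.h>

#define MY_WORKQ_STACK_SIZE 1024
#define MY_WORKQ_PRIORITY   5

K_THREAD_STACK_DEFINE(my_workq_stack, MY_WORKQ_STACK_SIZE);
static struct k_work_q my_workq;

static void start_my_workq(void)
{
        k_work_queue_init(&my_workq);

        /* Spawns the thread that executes the work_queue_main() loop;
         * the final config argument may be NULL for defaults.
         */
        k_work_queue_start(&my_workq, my_workq_stack,
                           K_THREAD_STACK_SIZEOF(my_workq_stack),
                           MY_WORKQ_PRIORITY, NULL);
}
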
858 /* Timeout handler for delayable work.
861 * Takes and releases work lock.
868 struct k_work *wp = &dw->work; in work_timeout()
872 /* If the work is still marked delayed (should be) then clear that in work_timeout()
874 * notified of new work at the next reschedule point. in work_timeout()
876 * If not successful there is no notification that the work has been in work_timeout()
894 .work = { in k_work_init_delayable()
906 return flags_get(&dwork->work.flags) & K_WORK_MASK; in work_delayable_busy_get_locked()
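
A sketch of initializing a delayable item and recovering the container from within its handler; the handler and item names are hypothetical.

#include <zephyr/kernel.h>

static void dwork_handler(struct k_work *work)
{
        /* The handler receives the embedded k_work; the delayable
         * wrapper can be recovered when it is needed.
         */
        struct k_work_delayable *dwork = k_work_delayable_from_work(work);

        ARG_UNUSED(dwork);
}

static struct k_work_delayable sample_dwork;

static void sample_dwork_setup(void)
{
        k_work_init_delayable(&sample_dwork, dwork_handler);
}
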
920 /* Attempt to schedule a work item for future (maybe immediate)
923 * Invoked with work lock held.
928 * Invoked with work lock held.
932 * completion it will be null if the work was not submitted or if
936 * @param dwork the delayed work structure
948 struct k_work *work = &dwork->work; in schedule_for_queue_locked() local
951 return submit_to_queue_locked(work, queuep); in schedule_for_queue_locked()
954 flag_set(&work->flags, K_WORK_DELAYED_BIT); in schedule_for_queue_locked()
963 /* Unschedule delayable work.
965 * If the work is delayed, cancel the timeout and clear the delayed
968 * Invoked with work lock held.
970 * @param dwork pointer to delayable work structure.
972 * @return true if and only if work had been delayed so the timeout
978 struct k_work *work = &dwork->work; in unschedule_locked() local
985 if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) { in unschedule_locked()
992 /* Full cancellation of a delayable work item.
994 * Unschedules the delayed part then delegates to standard work
997 * Invoked with work lock held.
999 * @param dwork delayable work item
1007 return cancel_async_locked(&dwork->work); in cancel_delayable_async_locked()
1018 struct k_work *work = &dwork->work; in k_work_schedule_for_queue() local
1022 /* Schedule the work item if it's idle or running. */ in k_work_schedule_for_queue()
1023 if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) { in k_work_schedule_for_queue()
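
A sketch of the corresponding public call on the system queue; per the check above, an item that is already scheduled or queued keeps its original deadline (the names and the 100 ms delay are illustrative).

#include <zephyr/kernel.h>

static void kick_later(struct k_work_delayable *dwork)
{
        int rc = k_work_schedule(dwork, K_MSEC(100));

        if (rc == 1) {
                /* newly scheduled */
        } else if (rc == 0) {
                /* left untouched: already scheduled or queued */
        }
}
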
1059 /* Schedule the work item with the new parameters. */ in k_work_reschedule_for_queue()
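
By contrast, the reschedule path replaces any existing deadline; a sketch of a watchdog-style use (names and delay are illustrative):

#include <zephyr/kernel.h>

static void push_timeout_out(struct k_work_delayable *dwork)
{
        /* Unlike k_work_schedule(), this cancels any pending deadline
         * and starts a fresh one.
         */
        (void)k_work_reschedule(dwork, K_SECONDS(2));
}
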
1115 need_wait = cancel_sync_locked(&dwork->work, canceller); in k_work_cancel_delayable_sync()
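
A caller-side sketch for the synchronous delayable cancel (identifiers are illustrative):

#include <zephyr/kernel.h>

static bool stop_dwork_and_wait(struct k_work_delayable *dwork)
{
        struct k_work_sync sync;

        /* true if the item was scheduled, queued, or running on entry;
         * on return it is idle either way.
         */
        return k_work_cancel_delayable_sync(dwork, &sync);
}
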
1140 struct k_work *work = &dwork->work; in k_work_flush_delayable() local
1145 if (work_busy_get_locked(work) == 0U) { in k_work_flush_delayable()
1159 (void)submit_to_queue_locked(work, &queue); in k_work_flush_delayable()
1163 bool need_flush = work_flush_locked(work, flusher); in k_work_flush_delayable()
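
Finally, a sketch of the flush-delayable caller; as the code above suggests, a still-delayed item is first submitted immediately and then flushed like a plain work item (names are illustrative).

#include <zephyr/kernel.h>

static void force_and_flush(struct k_work_delayable *dwork)
{
        struct k_work_sync sync;

        /* Returns true if the call had to wait for the handler. */
        bool waited = k_work_flush_delayable(dwork, &sync);

        ARG_UNUSED(waited);
}
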