Lines Matching refs:work
66 static void handle_flush(struct k_work *work) { } in handle_flush() argument
70 struct k_work *work = &flusher->work; in init_flusher() local
72 k_work_init(&flusher->work, handle_flush); in init_flusher()
73 flag_set(&work->flags, K_WORK_FLUSHING_BIT); in init_flusher()
88 struct k_work *work) in init_work_cancel() argument
91 canceler->work = work; in init_work_cancel()
105 static void finalize_flush_locked(struct k_work *work) in finalize_flush_locked() argument
108 = CONTAINER_OF(work, struct z_work_flusher, work); in finalize_flush_locked()
110 flag_clear(&work->flags, K_WORK_FLUSHING_BIT); in finalize_flush_locked()
125 static void finalize_cancel_locked(struct k_work *work) in finalize_cancel_locked() argument
133 flag_clear(&work->flags, K_WORK_CANCELING_BIT); in finalize_cancel_locked()
141 if (wc->work == work) { in finalize_cancel_locked()
150 void k_work_init(struct k_work *work, in k_work_init() argument
153 __ASSERT_NO_MSG(work != NULL); in k_work_init()
156 *work = (struct k_work)Z_WORK_INITIALIZER(handler); in k_work_init()
158 SYS_PORT_TRACING_OBJ_INIT(k_work, work); in k_work_init()
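Usage sketch (editorial note, not part of the matched source): the k_work_init() lines above bind a handler to a work item before its first submission. Names such as sensor_ctx and sample_handler are illustrative, and the include paths assume a current Zephyr tree.

    #include <zephyr/kernel.h>
    #include <zephyr/sys/printk.h>

    struct sensor_ctx {
        struct k_work work;
        int sample;
    };

    static struct sensor_ctx ctx;

    /* Runs in the context of whichever work queue thread picks the item up. */
    static void sample_handler(struct k_work *work)
    {
        struct sensor_ctx *c = CONTAINER_OF(work, struct sensor_ctx, work);

        printk("sample: %d\n", c->sample);
    }

    void sensor_setup(void)
    {
        /* Must run before the item is submitted anywhere; the static
         * K_WORK_DEFINE() macro is the compile-time equivalent.
         */
        k_work_init(&ctx.work, sample_handler);
    }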
161 static inline int work_busy_get_locked(const struct k_work *work) in work_busy_get_locked() argument
163 return flags_get(&work->flags) & K_WORK_MASK; in work_busy_get_locked()
166 int k_work_busy_get(const struct k_work *work) in k_work_busy_get() argument
169 int ret = work_busy_get_locked(work); in k_work_busy_get()
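Sketch of querying the snapshot that k_work_busy_get() returns; the bit names follow the K_WORK_* flags visible in the matches above. The value is only a momentary snapshot and can change as soon as the internal lock is released. my_work is an illustrative, already-initialized item.

    #include <zephyr/kernel.h>
    #include <zephyr/sys/printk.h>

    extern struct k_work my_work;   /* assumed initialized elsewhere */

    void report_work_state(void)
    {
        int busy = k_work_busy_get(&my_work);

        if (busy == 0) {
            printk("idle\n");
            return;
        }
        if (busy & K_WORK_QUEUED) {
            printk("queued\n");
        }
        if (busy & K_WORK_RUNNING) {
            printk("running\n");
        }
        if (busy & K_WORK_CANCELING) {
            printk("cancel in progress\n");
        }
    }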
188 struct k_work *work, in queue_flusher_locked() argument
193 if ((flags_get(&work->flags) & K_WORK_QUEUED) != 0U) { in queue_flusher_locked()
194 sys_slist_insert(&queue->pending, &work->node, in queue_flusher_locked()
195 &flusher->work.node); in queue_flusher_locked()
197 sys_slist_prepend(&queue->pending, &flusher->work.node); in queue_flusher_locked()
209 struct k_work *work) in queue_remove_locked() argument
211 if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) { in queue_remove_locked()
212 (void)sys_slist_find_and_remove(&queue->pending, &work->node); in queue_remove_locked()
258 struct k_work *work) in queue_submit_locked() argument
283 sys_slist_append(&queue->pending, &work->node); in queue_submit_locked()
317 static int submit_to_queue_locked(struct k_work *work, in submit_to_queue_locked() argument
322 if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) { in submit_to_queue_locked()
325 } else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) { in submit_to_queue_locked()
332 *queuep = work->queue; in submit_to_queue_locked()
339 if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) { in submit_to_queue_locked()
340 __ASSERT_NO_MSG(work->queue != NULL); in submit_to_queue_locked()
341 *queuep = work->queue; in submit_to_queue_locked()
345 int rc = queue_submit_locked(*queuep, work); in submit_to_queue_locked()
350 flag_set(&work->flags, K_WORK_QUEUED_BIT); in submit_to_queue_locked()
351 work->queue = *queuep; in submit_to_queue_locked()
376 struct k_work *work) in z_work_submit_to_queue() argument
378 __ASSERT_NO_MSG(work != NULL); in z_work_submit_to_queue()
379 __ASSERT_NO_MSG(work->handler != NULL); in z_work_submit_to_queue()
383 int ret = submit_to_queue_locked(work, &queue); in z_work_submit_to_queue()
391 struct k_work *work) in k_work_submit_to_queue() argument
393 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work); in k_work_submit_to_queue()
395 int ret = z_work_submit_to_queue(queue, work); in k_work_submit_to_queue()
406 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret); in k_work_submit_to_queue()
411 int k_work_submit(struct k_work *work) in k_work_submit() argument
413 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work); in k_work_submit()
415 int ret = k_work_submit_to_queue(&k_sys_work_q, work); in k_work_submit()
417 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret); in k_work_submit()
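Sketch of the two submission paths matched above: k_work_submit() targets the system work queue (k_sys_work_q), while k_work_submit_to_queue() targets a caller-supplied queue; both funnel through submit_to_queue_locked(). A negative return such as -EBUSY during an in-progress cancel is the error case to watch for. The queue and item names are illustrative.

    #include <zephyr/kernel.h>
    #include <zephyr/sys/printk.h>

    extern struct k_work my_work;      /* initialized with k_work_init() */
    extern struct k_work other_work;   /* ditto */

    void kick_work(void)
    {
        /* Common case: hand the item to the system work queue. */
        int rc = k_work_submit(&my_work);

        if (rc < 0) {
            printk("submit failed: %d\n", rc);  /* e.g. -EBUSY while canceling */
        }
    }

    void kick_work_on(struct k_work_q *queue)
    {
        /* Same mechanism, but handled by a dedicated queue thread. */
        (void)k_work_submit_to_queue(queue, &other_work);
    }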
437 static bool work_flush_locked(struct k_work *work, in work_flush_locked() argument
440 bool need_flush = (flags_get(&work->flags) in work_flush_locked()
444 struct k_work_q *queue = work->queue; in work_flush_locked()
448 queue_flusher_locked(queue, work, flusher); in work_flush_locked()
455 bool k_work_flush(struct k_work *work, in k_work_flush() argument
458 __ASSERT_NO_MSG(work != NULL); in k_work_flush()
459 __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); in k_work_flush()
466 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work); in k_work_flush()
471 bool need_flush = work_flush_locked(work, flusher); in k_work_flush()
477 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER); in k_work_flush()
482 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush); in k_work_flush()
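Sketch of flushing: k_work_flush() blocks the caller until the item is idle, using a caller-provided struct k_work_sync much as the internal flusher above does. It is a thread-context API; calling it from the queue thread that would run the handler is the classic way to deadlock.

    #include <zephyr/kernel.h>
    #include <zephyr/sys/printk.h>

    extern struct k_work my_work;   /* assumed initialized elsewhere */

    void wait_for_work(void)
    {
        struct k_work_sync sync;

        /* Returns true if the call actually had to wait. */
        bool waited = k_work_flush(&my_work, &sync);

        printk("flush %s\n", waited ? "waited" : "was a no-op");
    }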
498 static int cancel_async_locked(struct k_work *work) in cancel_async_locked() argument
501 if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) { in cancel_async_locked()
503 queue_remove_locked(work->queue, work); in cancel_async_locked()
509 int ret = work_busy_get_locked(work); in cancel_async_locked()
512 flag_set(&work->flags, K_WORK_CANCELING_BIT); in cancel_async_locked()
513 ret = work_busy_get_locked(work); in cancel_async_locked()
534 static bool cancel_sync_locked(struct k_work *work, in cancel_sync_locked() argument
537 bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT); in cancel_sync_locked()
544 init_work_cancel(canceller, work); in cancel_sync_locked()
550 int k_work_cancel(struct k_work *work) in k_work_cancel() argument
552 __ASSERT_NO_MSG(work != NULL); in k_work_cancel()
553 __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); in k_work_cancel()
555 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work); in k_work_cancel()
558 int ret = cancel_async_locked(work); in k_work_cancel()
562 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret); in k_work_cancel()
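Sketch of the asynchronous cancel matched above: k_work_cancel() removes a pending item and blocks further submissions, but it does not wait for a handler that is already running; the non-zero return value reports what is still in flight.

    #include <zephyr/kernel.h>
    #include <zephyr/sys/printk.h>

    extern struct k_work my_work;   /* assumed initialized elsewhere */

    void stop_work_async(void)
    {
        int remaining = k_work_cancel(&my_work);

        if (remaining & K_WORK_RUNNING) {
            /* The handler is still executing; it merely sees the
             * canceling state and cannot be interrupted here.
             */
            printk("handler still running\n");
        }
    }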
567 bool k_work_cancel_sync(struct k_work *work, in k_work_cancel_sync() argument
570 __ASSERT_NO_MSG(work != NULL); in k_work_cancel_sync()
572 __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); in k_work_cancel_sync()
578 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync); in k_work_cancel_sync()
582 bool pending = (work_busy_get_locked(work) != 0U); in k_work_cancel_sync()
586 (void)cancel_async_locked(work); in k_work_cancel_sync()
587 need_wait = cancel_sync_locked(work, canceller); in k_work_cancel_sync()
593 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync); in k_work_cancel_sync()
598 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending); in k_work_cancel_sync()
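Sketch of the blocking variant: k_work_cancel_sync() additionally waits, via the canceller registered in cancel_sync_locked(), until the item is fully idle. The return value reports whether the item was still pending when the call was made. Thread context only.

    #include <zephyr/kernel.h>
    #include <zephyr/sys/printk.h>

    extern struct k_work my_work;   /* assumed initialized elsewhere */

    void stop_work_and_wait(void)
    {
        struct k_work_sync sync;

        bool was_pending = k_work_cancel_sync(&my_work, &sync);

        /* On return the handler is guaranteed not to be running
         * and the item is no longer queued.
         */
        printk("cancel_sync: was %spending\n", was_pending ? "" : "not ");
    }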
615 struct k_work *work = NULL; in work_queue_main() local
627 work = CONTAINER_OF(node, struct k_work, node); in work_queue_main()
628 flag_set(&work->flags, K_WORK_RUNNING_BIT); in work_queue_main()
629 flag_clear(&work->flags, K_WORK_QUEUED_BIT); in work_queue_main()
643 handler = work->handler; in work_queue_main()
669 if (work == NULL) { in work_queue_main()
684 handler(work); in work_queue_main()
693 flag_clear(&work->flags, K_WORK_RUNNING_BIT); in work_queue_main()
694 if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) { in work_queue_main()
695 finalize_flush_locked(work); in work_queue_main()
697 if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) { in work_queue_main()
698 finalize_cancel_locked(work); in work_queue_main()
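work_queue_main() above is the loop each work queue thread runs: pop a node, set RUNNING, clear QUEUED, invoke the handler, then finalize any flush or cancel that raced with it. An application queue feeding that loop is typically created as in the sketch below; k_work_queue_init()/k_work_queue_start() are not among the matched lines and are assumed here as the usual companion APIs.

    #include <zephyr/kernel.h>

    #define MY_STACK_SIZE 1024
    #define MY_PRIORITY   5

    K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);

    static struct k_work_q my_queue;

    void start_my_queue(void)
    {
        k_work_queue_init(&my_queue);

        /* Spawns the thread that executes the work_queue_main() loop. */
        k_work_queue_start(&my_queue, my_stack_area,
                           K_THREAD_STACK_SIZEOF(my_stack_area),
                           MY_PRIORITY, NULL);
    }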
868 struct k_work *wp = &dw->work; in work_timeout()
894 .work = { in k_work_init_delayable()
906 return flags_get(&dwork->work.flags) & K_WORK_MASK; in work_delayable_busy_get_locked()
948 struct k_work *work = &dwork->work; in schedule_for_queue_locked() local
951 return submit_to_queue_locked(work, queuep); in schedule_for_queue_locked()
954 flag_set(&work->flags, K_WORK_DELAYED_BIT); in schedule_for_queue_locked()
978 struct k_work *work = &dwork->work; in unschedule_locked() local
985 if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) { in unschedule_locked()
1007 return cancel_async_locked(&dwork->work); in cancel_delayable_async_locked()
1018 struct k_work *work = &dwork->work; in k_work_schedule_for_queue() local
1023 if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) { in k_work_schedule_for_queue()
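Sketch of delayable scheduling: the `& ~K_WORK_RUNNING` test above means k_work_schedule() only arms the timeout when the item is otherwise idle, so repeated calls do not push a pending schedule further out (k_work_reschedule() is the variant that replaces the timeout). The handler and item names are illustrative.

    #include <zephyr/kernel.h>

    static void poll_handler(struct k_work *work)
    {
        struct k_work_delayable *dwork = k_work_delayable_from_work(work);

        /* ... do the periodic job, then re-arm ... */
        k_work_schedule(dwork, K_MSEC(500));
    }

    K_WORK_DELAYABLE_DEFINE(poll_work, poll_handler);

    void start_polling(void)
    {
        /* First shot after 500 ms, on the system work queue. */
        k_work_schedule(&poll_work, K_MSEC(500));
    }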
1115 need_wait = cancel_sync_locked(&dwork->work, canceller); in k_work_cancel_delayable_sync()
1140 struct k_work *work = &dwork->work; in k_work_flush_delayable() local
1145 if (work_busy_get_locked(work) == 0U) { in k_work_flush_delayable()
1159 (void)submit_to_queue_locked(work, &queue); in k_work_flush_delayable()
1163 bool need_flush = work_flush_locked(work, flusher); in k_work_flush_delayable()
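Sketch of the delayable counterparts at the end of the listing: k_work_flush_delayable() first converts a still-delayed item into an immediate submission (the submit_to_queue_locked() call above) and then waits for it, while k_work_cancel_delayable_sync() cancels and waits for idle. poll_work refers to the delayable item from the previous sketch.

    #include <zephyr/kernel.h>

    extern struct k_work_delayable poll_work;

    void shutdown_polling(bool run_last_time)
    {
        struct k_work_sync sync;

        if (run_last_time) {
            /* Pull the delayed item in immediately and wait for it. */
            (void)k_work_flush_delayable(&poll_work, &sync);
        } else {
            /* Drop it, waiting out any handler already running. */
            (void)k_work_cancel_delayable_sync(&poll_work, &sync);
        }
    }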