Lines Matching refs:dwork

887 void k_work_init_delayable(struct k_work_delayable *dwork,  in k_work_init_delayable()  argument
890 __ASSERT_NO_MSG(dwork != NULL); in k_work_init_delayable()
893 *dwork = (struct k_work_delayable){ in k_work_init_delayable()
899 z_init_timeout(&dwork->timeout); in k_work_init_delayable()
901 SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork); in k_work_init_delayable()
904 static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork) in work_delayable_busy_get_locked() argument
906 return flags_get(&dwork->work.flags) & K_WORK_MASK; in work_delayable_busy_get_locked()
909 int k_work_delayable_busy_get(const struct k_work_delayable *dwork) in k_work_delayable_busy_get() argument
911 __ASSERT_NO_MSG(dwork != NULL); in k_work_delayable_busy_get()
914 int ret = work_delayable_busy_get_locked(dwork); in k_work_delayable_busy_get()
944 struct k_work_delayable *dwork, in schedule_for_queue_locked() argument
948 struct k_work *work = &dwork->work; in schedule_for_queue_locked()
955 dwork->queue = *queuep; in schedule_for_queue_locked()
958 z_add_timeout(&dwork->timeout, work_timeout, delay); in schedule_for_queue_locked()
975 static inline bool unschedule_locked(struct k_work_delayable *dwork) in unschedule_locked() argument
978 struct k_work *work = &dwork->work; in unschedule_locked()
986 ret = z_abort_timeout(&dwork->timeout) == 0; in unschedule_locked()
1003 static int cancel_delayable_async_locked(struct k_work_delayable *dwork) in cancel_delayable_async_locked() argument
1005 (void)unschedule_locked(dwork); in cancel_delayable_async_locked()
1007 return cancel_async_locked(&dwork->work); in cancel_delayable_async_locked()
1010 int k_work_schedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, in k_work_schedule_for_queue() argument
1014 __ASSERT_NO_MSG(dwork != NULL); in k_work_schedule_for_queue()
1016 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay); in k_work_schedule_for_queue()
1018 struct k_work *work = &dwork->work; in k_work_schedule_for_queue()
1024 ret = schedule_for_queue_locked(&queue, dwork, delay); in k_work_schedule_for_queue()
1029 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret); in k_work_schedule_for_queue()
1034 int k_work_schedule(struct k_work_delayable *dwork, k_timeout_t delay) in k_work_schedule() argument
1036 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay); in k_work_schedule()
1038 int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay); in k_work_schedule()
1040 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret); in k_work_schedule()
1045 int k_work_reschedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, in k_work_reschedule_for_queue() argument
1049 __ASSERT_NO_MSG(dwork != NULL); in k_work_reschedule_for_queue()
1051 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay); in k_work_reschedule_for_queue()
1057 (void)unschedule_locked(dwork); in k_work_reschedule_for_queue()
1060 ret = schedule_for_queue_locked(&queue, dwork, delay); in k_work_reschedule_for_queue()
1064 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret); in k_work_reschedule_for_queue()
1069 int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay) in k_work_reschedule() argument
1071 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay); in k_work_reschedule()
1073 int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay); in k_work_reschedule()
1075 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret); in k_work_reschedule()
1080 int k_work_cancel_delayable(struct k_work_delayable *dwork) in k_work_cancel_delayable() argument
1082 __ASSERT_NO_MSG(dwork != NULL); in k_work_cancel_delayable()
1084 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork); in k_work_cancel_delayable()
1087 int ret = cancel_delayable_async_locked(dwork); in k_work_cancel_delayable()
1091 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret); in k_work_cancel_delayable()
1096 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork, in k_work_cancel_delayable_sync() argument
1099 __ASSERT_NO_MSG(dwork != NULL); in k_work_cancel_delayable_sync()
1106 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync); in k_work_cancel_delayable_sync()
1110 bool pending = (work_delayable_busy_get_locked(dwork) != 0U); in k_work_cancel_delayable_sync()
1114 (void)cancel_delayable_async_locked(dwork); in k_work_cancel_delayable_sync()
1115 need_wait = cancel_sync_locked(&dwork->work, canceller); in k_work_cancel_delayable_sync()
1124 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending); in k_work_cancel_delayable_sync()
1128 bool k_work_flush_delayable(struct k_work_delayable *dwork, in k_work_flush_delayable() argument
1131 __ASSERT_NO_MSG(dwork != NULL); in k_work_flush_delayable()
1138 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync); in k_work_flush_delayable()
1140 struct k_work *work = &dwork->work; in k_work_flush_delayable()
1148 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false); in k_work_flush_delayable()
1156 if (unschedule_locked(dwork)) { in k_work_flush_delayable()
1157 struct k_work_q *queue = dwork->queue; in k_work_flush_delayable()
1172 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush); in k_work_flush_delayable()
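
All of the references above fall inside Zephyr's delayable-work implementation. As a rough usage sketch of the public functions that appear in the listing (k_work_init_delayable, k_work_schedule, k_work_reschedule, k_work_cancel_delayable, k_work_cancel_delayable_sync), the fragment below walks one plausible lifecycle of a delayable work item. The names app_dwork, sample_handler, and APP_DELAY_MS are illustrative only, and the <zephyr/kernel.h> include path assumes a recent Zephyr tree.

    /* Minimal sketch, not taken from the file referenced above:
     * exercise the delayable-work API whose internals the listing points at.
     */
    #include <zephyr/kernel.h>

    #define APP_DELAY_MS 100   /* illustrative delay, not from the source */

    static struct k_work_delayable app_dwork;

    static void sample_handler(struct k_work *work)
    {
        /* Recover the containing delayable item from the plain k_work
         * that the work queue hands to the handler.
         */
        struct k_work_delayable *dwork = k_work_delayable_from_work(work);

        ARG_UNUSED(dwork);
        /* ... deferred processing ... */
    }

    static void app_start(void)
    {
        struct k_work_sync sync;

        k_work_init_delayable(&app_dwork, sample_handler);

        /* Submit to the system work queue after APP_DELAY_MS.
         * If the item is already scheduled or queued this call is a no-op.
         */
        (void)k_work_schedule(&app_dwork, K_MSEC(APP_DELAY_MS));

        /* Replace any pending deadline with a new one. */
        (void)k_work_reschedule(&app_dwork, K_MSEC(APP_DELAY_MS / 2));

        /* Non-blocking cancel: a handler that already started may still
         * be running when this returns.
         */
        (void)k_work_cancel_delayable(&app_dwork);

        /* Blocking cancel: returns only after any in-progress handler
         * has finished. Not usable from an ISR.
         */
        (void)k_work_cancel_delayable_sync(&app_dwork, &sync);
    }

The difference between the two submission paths is visible in the listing itself: k_work_schedule_for_queue() only calls schedule_for_queue_locked(), so an already-scheduled item keeps its original deadline, whereas k_work_reschedule_for_queue() first calls unschedule_locked() and then schedule_for_queue_locked(), replacing the deadline.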