/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 * Copyright (c) 2023 Codecoup
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/slist.h>

#include "mock_kernel.h"

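/*
 * Minimal mock of the kernel work and semaphore APIs for unit tests:
 * delayable work items are kept on a local pending list and executed
 * once k_sleep() has consumed their remaining ticks.
 */
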
/* List of fakes used by this unit tester */
#define FFF_FAKES_LIST(FAKE)			\
	FAKE(z_timeout_remaining)		\
	FAKE(k_work_schedule)			\
	FAKE(k_work_cancel_delayable_sync)	\
	FAKE(k_sem_take)			\
	FAKE(k_sem_give)			\

/* List of k_work items pending execution. */
static sys_slist_t work_pending;

DEFINE_FAKE_VALUE_FUNC(k_ticks_t, z_timeout_remaining, const struct _timeout *);
DEFINE_FAKE_VALUE_FUNC(int, k_work_schedule, struct k_work_delayable *, k_timeout_t);
DEFINE_FAKE_VALUE_FUNC(bool, k_work_cancel_delayable_sync, struct k_work_delayable *,
		       struct k_work_sync *);
DEFINE_FAKE_VALUE_FUNC(int, k_sem_take, struct k_sem *, k_timeout_t);
DEFINE_FAKE_VOID_FUNC(k_sem_give, struct k_sem *);

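/* Initializes the delayable work item's handler; the delay itself is set
 * when the work is scheduled or rescheduled.
 */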
void k_work_init_delayable(struct k_work_delayable *dwork, k_work_handler_t handler)
{
	dwork->work.handler = handler;
}

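/* Queues the work item with the given delay, or only updates the delay if
 * the item is already pending. Always reports success.
 */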
int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
{
	struct k_work *work;

	/* Determine whether the work item is queued already. */
	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, work, node) {
		if (work == &dwork->work) {
			dwork->timeout.dticks = delay.ticks;
			return 0;
		}
	}

	/* Mark the work as delayed so that k_sleep() decrements the
	 * remaining ticks instead of running the handler immediately.
	 */
	dwork->work.flags |= K_WORK_DELAYED;
	dwork->timeout.dticks = delay.ticks;
	sys_slist_append(&work_pending, &dwork->work.node);

	return 0;
}

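/* Removes the delayable work item from the pending list, if queued. */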
int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	(void)sys_slist_find_and_remove(&work_pending, &dwork->work.node);

	return 0;
}

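/* Removes the work item from the pending list, if queued. */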
int k_work_cancel(struct k_work *work)
{
	(void)sys_slist_find_and_remove(&work_pending, &work->node);

	return 0;
}

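/* Initializes a regular (non-delayable) work item's handler. */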
void k_work_init(struct k_work *work, k_work_handler_t handler)
{
	work->handler = handler;
}

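/* Runs the work handler synchronously instead of queueing it. */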
int k_work_submit(struct k_work *work)
{
	work->handler(work);

	return 0;
}

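/* Work items are never reported as busy by this mock. */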
int k_work_busy_get(const struct k_work *work)
{
	return 0;
}

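/* Simulates the passage of time: every pending item is run, unless it is a
 * delayed item whose remaining delay exceeds the slept ticks, in which case
 * the slept ticks are subtracted from its remaining delay instead.
 */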
int32_t k_sleep(k_timeout_t timeout)
{
	struct k_work *work, *tmp;

	/* Use the safe iterator: removing a node inside the plain iterator
	 * would end the loop early, as removal clears the node's next pointer.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&work_pending, work, tmp, node) {
		if (work->flags & K_WORK_DELAYED) {
			struct k_work_delayable *dwork = k_work_delayable_from_work(work);

			if (dwork->timeout.dticks > timeout.ticks) {
				dwork->timeout.dticks -= timeout.ticks;
				continue;
			}
		}

		/* find_and_remove is needed here: the current node is not
		 * necessarily the list head once delayed items are skipped.
		 */
		(void)sys_slist_find_and_remove(&work_pending, &work->node);
		work->handler(work);
	}

	return 0;
}

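/* Resets all fakes and empties the pending work list; intended to run in
 * the test setup phase.
 */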
void mock_kernel_init(void)
{
	FFF_FAKES_LIST(RESET_FAKE);

	sys_slist_init(&work_pending);
}

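/* Flushes the mock: runs every still-pending work item so no handler is
 * left unexecuted when a test ends.
 */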
void mock_kernel_cleanup(void)
{
	struct k_work *work, *tmp;

	/* Run all pending work items; each removal takes the current list
	 * head, so passing NULL as the previous node is safe here.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&work_pending, work, tmp, node) {
		(void)sys_slist_remove(&work_pending, NULL, &work->node);
		work->handler(work);
	}
}
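
/*
 * Example of driving the mock from a test (a minimal sketch; the handler,
 * work item and tick values below are illustrative only):
 *
 *	static void handler(struct k_work *work) { ... }
 *	static struct k_work_delayable dwork;
 *
 *	mock_kernel_init();
 *	k_work_init_delayable(&dwork, handler);
 *	(void)k_work_reschedule(&dwork, K_TICKS(10));
 *	(void)k_sleep(K_TICKS(5));  - 5 ticks remain, handler not called yet
 *	(void)k_sleep(K_TICKS(5));  - delay expired, handler runs here
 *	mock_kernel_cleanup();
 */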