/*
 * Copyright (c) 2022-2025 Nordic Semiconductor ASA
 * Copyright (c) 2023 Codecoup
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <zephyr/fff.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys_clock.h>
#include <zephyr/sys/slist.h>

#include "mock_kernel.h"

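/*
 * Typical usage (illustrative sketch only; the fixture callback names below
 * are hypothetical and not part of this mock):
 *
 *   static void before(void *f) { mock_kernel_init(); }
 *   static void after(void *f)  { mock_kernel_cleanup(); }
 *
 * Delayed work submitted through the mocked k_work_* API below is driven by
 * calling k_sleep() from the test, which advances the simulated ticks and
 * runs any expired handlers synchronously.
 */
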
/* List of fakes used by this unit tester */
#define FFF_FAKES_LIST(FAKE) \
	FAKE(z_timeout_remaining) \
	FAKE(k_work_cancel_delayable_sync) \

/* List of k_work items to be worked. */
static sys_slist_t work_pending;

DEFINE_FAKE_VALUE_FUNC(k_ticks_t, z_timeout_remaining, const struct _timeout *);
DEFINE_FAKE_VALUE_FUNC(bool, k_work_cancel_delayable_sync, struct k_work_delayable *,
		       struct k_work_sync *);
DEFINE_FAKE_VALUE_FUNC(int, k_sem_take, struct k_sem *, k_timeout_t);
DEFINE_FAKE_VOID_FUNC(k_sem_give, struct k_sem *);

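/* Mocked k_work_init_delayable(): only the handler is stored; no kernel work
 * queue objects are involved in this mock.
 */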
void k_work_init_delayable(struct k_work_delayable *dwork, k_work_handler_t handler)
{
	dwork->work.handler = handler;
}

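/* Mocked k_work_reschedule(): a zero delay runs the handler immediately (and
 * drops the item from the pending list); otherwise the remaining ticks are
 * updated and the item is queued if it is not pending already.
 */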
int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
{
	bool on_list = false;
	struct k_work *work;

	dwork->timeout.dticks = delay.ticks;

	/* Determine whether the work item is queued already. */
	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, work, node) {
		on_list = (work == &dwork->work);
		if (on_list) {
			break;
		}
	}

	if (dwork->timeout.dticks == 0) {
		dwork->work.handler(&dwork->work);
		if (on_list) {
			/* The item is not necessarily the list head, so look it up
			 * instead of removing it with a NULL previous node.
			 */
			(void)sys_slist_find_and_remove(&work_pending, &dwork->work.node);
		}
	} else if (!on_list) {
		sys_slist_append(&work_pending, &dwork->work.node);
	}

	return 0;
}

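/* Mocked k_work_schedule(): a no-op if the item is already pending; a zero
 * delay runs the handler immediately, otherwise the item is appended to the
 * pending list.
 */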
int k_work_schedule(struct k_work_delayable *dwork, k_timeout_t delay)
{
	struct k_work *work;

	/* Determine whether the work item is queued already. */
	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, work, node) {
		if (work == &dwork->work) {
			return 0;
		}
	}

	dwork->timeout.dticks = delay.ticks;
	if (dwork->timeout.dticks == 0) {
		dwork->work.handler(&dwork->work);
	} else {
		sys_slist_append(&work_pending, &dwork->work.node);
	}

	return 0;
}

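/* Mocked k_work_cancel_delayable(): drops the item from the pending list, if
 * present, without running its handler.
 */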
int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	(void)sys_slist_find_and_remove(&work_pending, &dwork->work.node);

	return 0;
}

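/* Mocked k_work_cancel(): drops the item from the pending list, if present,
 * without running its handler.
 */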
int k_work_cancel(struct k_work *work)
{
	(void)sys_slist_find_and_remove(&work_pending, &work->node);

	return 0;
}

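/* Mocked k_work_init(): only the handler is stored. */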
void k_work_init(struct k_work *work, k_work_handler_t handler)
{
	work->handler = handler;
}

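/* Mocked k_work_submit(): runs the handler synchronously in the caller's
 * context instead of queuing it.
 */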
int k_work_submit(struct k_work *work)
{
	work->handler(work);

	return 0;
}

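/* Mocked k_work_busy_get(): always reports the item as idle. */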
int k_work_busy_get(const struct k_work *work)
{
	return 0;
}

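/* Mocked k_sleep(): instead of blocking, simulates the passage of `timeout`
 * ticks. Delayed items with more ticks remaining only have their remaining
 * ticks decremented; every other pending item is removed from the list and
 * its handler is run synchronously.
 */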
int32_t k_sleep(k_timeout_t timeout)
{
	struct k_work *work, *tmp;

	/* Use the safe iterator, as expired items are removed while walking
	 * the list.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&work_pending, work, tmp, node) {
		if (work->flags & K_WORK_DELAYED) {
			struct k_work_delayable *dwork = k_work_delayable_from_work(work);

			/* Not expired yet; consume the slept ticks and keep it queued. */
			if (dwork->timeout.dticks > timeout.ticks) {
				dwork->timeout.dticks -= timeout.ticks;
				continue;
			}
		}

		/* Expired (or non-delayed) item: dequeue it and run its handler. */
		(void)sys_slist_find_and_remove(&work_pending, &work->node);
		work->handler(work);
	}

	return 0;
}
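
/* Single shared flag standing in for every mutex; the mutex pointer itself is
 * ignored by these mocks.
 */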
static bool mutex_locked;

int k_mutex_init(struct k_mutex *mutex)
{
	mutex_locked = false;

	return 0;
}

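/* Mocked k_mutex_lock(): as there is no other thread that could release the
 * lock, a contended lock fails immediately: -EBUSY for K_NO_WAIT, -EAGAIN for
 * any other timeout.
 */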
int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
{
	if (mutex_locked) {
		if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			return -EBUSY;
		} else {
			return -EAGAIN;
		}
	}

	mutex_locked = true;

	return 0;
}

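/* Mocked k_mutex_unlock(): returns -EINVAL if the lock flag is not set. */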
int k_mutex_unlock(struct k_mutex *mutex)
{
	if (!mutex_locked) {
		return -EINVAL;
	}

	mutex_locked = false;

	return 0;
}

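/* Resets the registered FFF fakes and (re)initializes the pending-work list
 * (typically called from test setup).
 */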
void mock_kernel_init(void)
{
	FFF_FAKES_LIST(RESET_FAKE);

	sys_slist_init(&work_pending);
}

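/* Flushes the pending-work list, running each remaining handler (typically
 * called from test teardown).
 */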
void mock_kernel_cleanup(void)
{
	struct k_work *work, *tmp;

	/* Run all pending work items. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&work_pending, work, tmp, node) {
		(void)sys_slist_find_and_remove(&work_pending, &work->node);
		work->handler(work);
	}
}