1 /*
2  * Copyright (c) 2022 Nordic Semiconductor ASA
3  * Copyright (c) 2023 Codecoup
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/kernel.h>
9 #include <zephyr/sys_clock.h>
10 #include <zephyr/sys/slist.h>
11 
12 #include "mock_kernel.h"
13 
14 /* List of fakes used by this unit tester */
15 #define FFF_FAKES_LIST(FAKE)                                                                       \
16 	FAKE(z_timeout_remaining)                                                                  \
17 	FAKE(k_work_cancel_delayable_sync)                                                         \
18 
19 /* List of k_work items to be worked. */
20 static sys_slist_t work_pending;
21 
22 DEFINE_FAKE_VALUE_FUNC(k_ticks_t, z_timeout_remaining, const struct _timeout *);
23 DEFINE_FAKE_VALUE_FUNC(bool, k_work_cancel_delayable_sync, struct k_work_delayable *,
24 		       struct k_work_sync *);
25 DEFINE_FAKE_VALUE_FUNC(int, k_sem_take, struct k_sem *, k_timeout_t);
26 DEFINE_FAKE_VOID_FUNC(k_sem_give, struct k_sem *);
27 
/* Mock for k_work_init_delayable(): only records the handler. No queue or
 * timeout bookkeeping is done until the item is (re)scheduled.
 */
void k_work_init_delayable(struct k_work_delayable *dwork, k_work_handler_t handler)
{
	dwork->work.handler = handler;
}
32 
/* Mock for k_work_reschedule().
 *
 * Records the new delay in the item itself. A zero delay executes the
 * handler synchronously (dropping the item from the pending list if it was
 * queued); a non-zero delay queues the item unless it is already pending.
 *
 * @param dwork Delayable work item.
 * @param delay New delay, in ticks.
 * @return Always 0.
 */
int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
{
	struct k_work *entry;
	bool queued = false;

	dwork->timeout.dticks = delay.ticks;

	/* Is this item already on the pending list? */
	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, entry, node) {
		if (entry == &dwork->work) {
			queued = true;
			break;
		}
	}

	if (dwork->timeout.dticks != 0) {
		/* Future deadline: ensure the item is queued exactly once. */
		if (!queued) {
			sys_slist_append(&work_pending, &dwork->work.node);
		}
		return 0;
	}

	/* Zero delay: run now and dequeue if it was pending. */
	dwork->work.handler(&dwork->work);
	if (queued) {
		(void)sys_slist_remove(&work_pending, NULL, &dwork->work.node);
	}

	return 0;
}
59 
/* Mock for k_work_schedule().
 *
 * A no-op when the item is already pending. Otherwise a zero delay runs
 * the handler synchronously, while a non-zero delay queues the item.
 *
 * @param dwork Delayable work item.
 * @param delay Delay, in ticks.
 * @return Always 0.
 */
int k_work_schedule(struct k_work_delayable *dwork, k_timeout_t delay)
{
	struct k_work *entry;

	/* Already queued: scheduling again is a no-op. */
	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, entry, node) {
		if (entry == &dwork->work) {
			return 0;
		}
	}

	dwork->timeout.dticks = delay.ticks;
	if (dwork->timeout.dticks != 0) {
		sys_slist_append(&work_pending, &dwork->work.node);
	} else {
		dwork->work.handler(&dwork->work);
	}

	return 0;
}
80 
/* Mock for k_work_cancel_delayable(): drop the item from the pending list.
 * Silently succeeds when the item was not queued.
 */
int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	(void)sys_slist_find_and_remove(&work_pending, &dwork->work.node);

	return 0;
}
87 
/* Mock for k_work_cancel(): drop the item from the pending list.
 * Silently succeeds when the item was not queued.
 */
int k_work_cancel(struct k_work *work)
{
	(void)sys_slist_find_and_remove(&work_pending, &work->node);

	return 0;
}
94 
/* Mock for k_work_init(): only records the handler. */
void k_work_init(struct k_work *work, k_work_handler_t handler)
{
	work->handler = handler;
}
99 
/* Mock for k_work_submit(): runs the handler synchronously instead of
 * queueing the item on a workqueue thread.
 */
int k_work_submit(struct k_work *work)
{
	work->handler(work);

	return 0;
}
106 
/* Mock for k_work_busy_get(): always reports the item as idle (0). */
int k_work_busy_get(const struct k_work *work)
{
	return 0;
}
111 
k_sleep(k_timeout_t timeout)112 int32_t k_sleep(k_timeout_t timeout)
113 {
114 	struct k_work *work;
115 
116 	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, work, node) {
117 		if (work->flags & K_WORK_DELAYED) {
118 			struct k_work_delayable *dwork = k_work_delayable_from_work(work);
119 
120 			if (dwork->timeout.dticks > timeout.ticks) {
121 				dwork->timeout.dticks -= timeout.ticks;
122 				continue;
123 			}
124 		}
125 
126 		(void)sys_slist_remove(&work_pending, NULL, &work->node);
127 		work->handler(work);
128 	}
129 
130 	return 0;
131 }
132 
mock_kernel_init(void)133 void mock_kernel_init(void)
134 {
135 	FFF_FAKES_LIST(RESET_FAKE);
136 
137 	sys_slist_init(&work_pending);
138 }
139 
mock_kernel_cleanup(void)140 void mock_kernel_cleanup(void)
141 {
142 	struct k_work *work, *tmp;
143 
144 	/* Run all pending works */
145 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&work_pending, work, tmp, node) {
146 		(void)sys_slist_remove(&work_pending, NULL, &work->node);
147 		work->handler(work);
148 	}
149 }
150