/*
 * Copyright (c) 1997-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/init.h>
#include <ksched.h>
#include <zephyr/wait_q.h>
#include <zephyr/syscall_handler.h>
#include <stdbool.h>
#include <zephyr/spinlock.h>

static struct k_spinlock lock;

/**
 * @brief Handle expiration of a kernel timer object.
 *
 * @param t Timeout used by the timer.
 */
void z_timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* In sys_clock_announce(), when a timeout expires, it is first
	 * removed from the timeout list, then its expiration handler
	 * is called (with interrupts unlocked). For kernel timers,
	 * that handler is this function. Usually, the timeout
	 * structure of the timer handled here is no longer linked to
	 * the timeout list at this point. However, before this
	 * function runs and interrupts are locked again, the timer may
	 * be restarted from an interrupt context whose priority is
	 * higher than the system timer interrupt's. In that case its
	 * timeout structure is linked to the timeout list again, and
	 * since the timer was restarted, its expiration handler must
	 * not run now, so the function exits immediately.
	 */
	if (sys_dnode_is_linked(&t->node)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
		k_timeout_t next = timer->period;

		/* see note about z_add_timeout() in z_impl_k_timer_start() */
		next.ticks = MAX(next.ticks - 1, 0);

#ifdef CONFIG_TIMEOUT_64BIT
		/* Exploit the fact that uptime during a kernel
		 * timeout handler reflects the time of the scheduled
		 * event and not real time to get some inexpensive
		 * protection against late interrupts. If we're
		 * delayed for any reason, we still end up calculating
		 * the next expiration as a regular stride from where
		 * we "should" have run. Requires absolute timeouts.
		 * (Note offset by one: we're nominally at the
		 * beginning of a tick, so need to defeat the "round
		 * down" behavior on timeout addition).
		 */
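		/* For example: a 10-tick periodic timer scheduled to
		 * fire at tick 100 reaches this point with next.ticks
		 * already decremented to 9. Even if this handler runs
		 * late, k_uptime_ticks() still reports tick 100, so
		 * the next expiry is pinned to absolute tick
		 * 100 + 1 + 9 = 110, one period after the scheduled
		 * (not the actual) expiration.
		 */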
		next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks);
#endif
		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
			      next);
	}

	/* update timer's status */
	timer->status += 1U;

	/* invoke timer expiry function */
	if (timer->expiry_fn != NULL) {
		/* Unlock for user handler. */
		k_spin_unlock(&lock, key);
		timer->expiry_fn(timer);
		key = k_spin_lock(&lock);
	}

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_spin_unlock(&lock, key);
		return;
	}

	thread = z_waitq_head(&timer->wait_q);

	if (thread == NULL) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_unpend_thread_no_timeout(thread);

	arch_thread_return_value_set(thread, 0);

	k_spin_unlock(&lock, key);

	z_ready_thread(thread);
}

void k_timer_init(struct k_timer *timer,
		  k_timer_expiry_t expiry_fn,
		  k_timer_stop_t stop_fn)
{
	timer->expiry_fn = expiry_fn;
	timer->stop_fn = stop_fn;
	timer->status = 0U;

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		z_waitq_init(&timer->wait_q);
	}

	z_init_timeout(&timer->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_timer, timer);

	timer->user_data = NULL;

	z_object_init(timer);
}
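
/*
 * Illustrative usage sketch, not compiled into this file: a caller
 * typically defines a timer and its expiry function, then initializes
 * the timer once before starting it. The names my_timer and my_expiry
 * are hypothetical.
 *
 *	static void my_expiry(struct k_timer *t)
 *	{
 *		printk("timer fired\n");
 *	}
 *
 *	static struct k_timer my_timer;
 *
 *	k_timer_init(&my_timer, my_expiry, NULL);
 */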

void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			  k_timeout_t period)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, start, timer, duration, period);

	if (K_TIMEOUT_EQ(duration, K_FOREVER)) {
		return;
	}

	/* z_add_timeout() always adds one to the incoming tick count
	 * to round up to the next tick (by convention it waits for
	 * "at least as long as the specified timeout"). The period
	 * interval, however, is always reset from within the timer
	 * ISR, which is already tick-aligned, so no round-up is
	 * desired and 1 is subtracted there.
	 *
	 * Note that the duration (!) value gets the same treatment,
	 * for backwards compatibility. This is unfortunate
	 * (i.e. k_timer_start() doesn't treat its initial sleep
	 * argument the same way k_sleep() does), but historical. The
	 * timer_api test relies on this behavior.
	 */
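	/* E.g. a relative duration of 10 ticks becomes 9 below, and
	 * z_add_timeout() adds the one back, so the first expiry lands
	 * 10 ticks out rather than 11.
	 */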
	if (Z_TICK_ABS(duration.ticks) < 0) {
		duration.ticks = MAX(duration.ticks - 1, 0);
	}

	(void)z_abort_timeout(&timer->timeout);
	timer->period = period;
	timer->status = 0U;

	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
		      duration);
}
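
/*
 * Illustrative usage sketch via the public k_timer_start() wrapper
 * (hypothetical my_timer, assumed already set up with k_timer_init()):
 *
 *	k_timer_start(&my_timer, K_MSEC(50), K_MSEC(100));
 *
 * fires once after roughly 50 ms, then every 100 ms until
 * k_timer_stop() is called.
 */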

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
					k_timeout_t duration,
					k_timeout_t period)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_start(timer, duration, period);
}
#include <syscalls/k_timer_start_mrsh.c>
#endif

void z_impl_k_timer_stop(struct k_timer *timer)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, stop, timer);

	bool inactive = (z_abort_timeout(&timer->timeout) != 0);

	if (inactive) {
		return;
	}

	if (timer->stop_fn != NULL) {
		timer->stop_fn(timer);
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);

		if (pending_thread != NULL) {
			z_ready_thread(pending_thread);
			z_reschedule_unlocked();
		}
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_stop(timer);
}
#include <syscalls/k_timer_stop_mrsh.c>
#endif

uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	return result;
}
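
/*
 * Illustrative polling sketch (hypothetical my_timer): reads and
 * clears the count of expirations since the last check, without
 * blocking.
 *
 *	uint32_t expired = k_timer_status_get(&my_timer);
 *
 *	printk("timer expired %u times since last check\n", expired);
 */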

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_get(timer);
}
#include <syscalls/k_timer_status_get_mrsh.c>
#endif

uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!arch_is_in_isr(), "");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, status_sync, timer);

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		uint32_t result;

		do {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (!z_is_inactive_timeout(&timer->timeout)) {
				result = *(volatile uint32_t *)&timer->status;
				timer->status = 0U;
				k_spin_unlock(&lock, key);
				if (result > 0) {
					break;
				}
			} else {
				result = timer->status;
				k_spin_unlock(&lock, key);
				break;
			}
		} while (true);

		return result;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	if (result == 0U) {
		if (!z_is_inactive_timeout(&timer->timeout)) {
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_timer, status_sync, timer, K_FOREVER);

			/* wait for timer to expire or stop */
			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);

			/* get updated timer status */
			key = k_spin_lock(&lock);
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, status_sync, timer, result);

	return result;
}
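
/*
 * Illustrative blocking sketch (hypothetical my_timer; must not be
 * called from an ISR): starts a one-shot timer, then sleeps until it
 * expires or is stopped, returning the expiry count.
 *
 *	k_timer_start(&my_timer, K_SECONDS(1), K_NO_WAIT);
 *
 *	uint32_t expired = k_timer_status_sync(&my_timer);
 */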

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_sync(timer);
}
#include <syscalls/k_timer_status_sync_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(
						const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_remaining_ticks(timer);
}
#include <syscalls/k_timer_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_expires_ticks(
						const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_expires_ticks(timer);
}
#include <syscalls/k_timer_expires_ticks_mrsh.c>

static inline void *z_vrfy_k_timer_user_data_get(const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_user_data_get(timer);
}
#include <syscalls/k_timer_user_data_get_mrsh.c>

static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_user_data_set(timer, user_data);
}
#include <syscalls/k_timer_user_data_set_mrsh.c>

#endif