/*
 * Copyright (c) 1997-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <stdbool.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <wait_q.h>

static struct k_spinlock lock;

#ifdef CONFIG_OBJ_CORE_TIMER
static struct k_obj_type obj_type_timer;
#endif /* CONFIG_OBJ_CORE_TIMER */

/**
 * @brief Handle expiration of a kernel timer object.
 *
 * @param t Timeout used by the timer.
 */
void z_timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* In sys_clock_announce(), an expired timeout is first removed from
	 * the timeout list and then its expiration handler is called with
	 * interrupts unlocked. For kernel timers, this function is that
	 * handler. Normally the timeout structure handled here is no longer
	 * linked to the timeout list at this point. However, between that
	 * removal and the moment interrupts are locked again here, the
	 * timer may be restarted from an interrupt context with a priority
	 * higher than the system timer interrupt. In that case its timeout
	 * structure is linked to the timeout list once more, and since the
	 * timer was restarted, its expiration handler must not run now, so
	 * this function returns immediately.
	 */
	if (sys_dnode_is_linked(&t->node)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
		k_timeout_t next = timer->period;

		/* see note about z_add_timeout() in z_impl_k_timer_start() */
		next.ticks = MAX(next.ticks - 1, 0);

#ifdef CONFIG_TIMEOUT_64BIT
		/* Exploit the fact that uptime during a kernel
		 * timeout handler reflects the time of the scheduled
		 * event and not real time to get some inexpensive
		 * protection against late interrupts. If we're
		 * delayed for any reason, we still end up calculating
		 * the next expiration as a regular stride from where
		 * we "should" have run. Requires absolute timeouts.
		 * (Note offset by one: we're nominally at the
		 * beginning of a tick, so need to defeat the "round
		 * down" behavior on timeout addition).
		 */
		next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks);
#endif /* CONFIG_TIMEOUT_64BIT */

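		/* Worked example of the drift compensation above, with
		 * illustrative numbers (not from the original source):
		 * for a 10-tick period, next.ticks is first reduced to 9.
		 * If the handler nominally runs at tick 100 but the
		 * interrupt is delayed until tick 103, k_uptime_ticks()
		 * still reports 100 here, so the timeout is rescheduled
		 * for absolute tick 100 + 1 + 9 = 110, not 113, keeping a
		 * fixed stride despite the late interrupt.
		 */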
		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
			      next);
	}

	/* update timer's status */
	timer->status += 1U;

	/* invoke timer expiry function */
	if (timer->expiry_fn != NULL) {
		/* Unlock for user handler. */
		k_spin_unlock(&lock, key);
		timer->expiry_fn(timer);
		key = k_spin_lock(&lock);
	}

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_spin_unlock(&lock, key);
		return;
	}

	thread = z_waitq_head(&timer->wait_q);

	if (thread == NULL) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_unpend_thread_no_timeout(thread);

	arch_thread_return_value_set(thread, 0);

	k_spin_unlock(&lock, key);

	z_ready_thread(thread);
}
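
/* Example expiry callback (a hedged sketch; tick_sem and my_expiry_fn are
 * illustrative names, not part of this file). The handler above invokes
 * expiry_fn from the system clock interrupt context with the timer lock
 * dropped, so the callback must stay ISR-safe, e.g. signal a semaphore
 * rather than block:
 *
 *	K_SEM_DEFINE(tick_sem, 0, 1);
 *
 *	static void my_expiry_fn(struct k_timer *timer)
 *	{
 *		k_sem_give(&tick_sem);	// ISR-safe, never blocks
 *	}
 */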

void k_timer_init(struct k_timer *timer,
		  k_timer_expiry_t expiry_fn,
		  k_timer_stop_t stop_fn)
{
	timer->expiry_fn = expiry_fn;
	timer->stop_fn = stop_fn;
	timer->status = 0U;

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		z_waitq_init(&timer->wait_q);
	}

	z_init_timeout(&timer->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_timer, timer);

	timer->user_data = NULL;

	k_object_init(timer);

#ifdef CONFIG_OBJ_CORE_TIMER
	k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
#endif /* CONFIG_OBJ_CORE_TIMER */
}
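
/* Example initialization (a hedged sketch; my_timer, my_expiry_fn and
 * my_stop_fn are illustrative names, not part of this file). A timer can
 * be set up at runtime with k_timer_init(), or statically with
 * K_TIMER_DEFINE(), which performs the equivalent initialization at boot:
 *
 *	static struct k_timer my_timer;
 *
 *	k_timer_init(&my_timer, my_expiry_fn, my_stop_fn);
 *
 *	// or, equivalently, at file scope:
 *	K_TIMER_DEFINE(my_static_timer, my_expiry_fn, my_stop_fn);
 */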

void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			  k_timeout_t period)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, start, timer, duration, period);

	/* Acquire the spinlock to serialize concurrent calls to
	 * k_timer_start() that schedule or reschedule a timer: a call
	 * can be preempted partway through by another call, possibly
	 * for the same timer instance.
	 */
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (K_TIMEOUT_EQ(duration, K_FOREVER)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* z_add_timeout() always adds one to the incoming tick count
	 * to round up to the next tick (by convention it waits for
	 * "at least as long as the specified timeout"), but the
	 * period interval is always guaranteed to be reset from
	 * within the timer ISR, so no round up is desired and 1 is
	 * subtracted in there.
	 *
	 * Note that the duration (!) value gets the same treatment
	 * for backwards compatibility. This is unfortunate
	 * (i.e. k_timer_start() doesn't treat its initial sleep
	 * argument the same way k_sleep() does), but historical. The
	 * timer_api test relies on this behavior.
	 */
	if (Z_IS_TIMEOUT_RELATIVE(duration)) {
		/* For the duration == K_NO_WAIT case, ensure that behaviour
		 * is consistent for both 32-bit k_ticks_t which are unsigned
		 * and 64-bit k_ticks_t which are signed.
		 */
		duration.ticks = MAX(1, duration.ticks);
		duration.ticks = duration.ticks - 1;
	}
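
	/* Worked example of the adjustment above, with illustrative
	 * numbers (not from the original source): a relative duration of
	 * 5 ticks becomes MAX(1, 5) - 1 = 4 here, and z_add_timeout()
	 * adds its rounding tick back, expiring 5 ticks out; K_NO_WAIT
	 * (0 ticks) becomes MAX(1, 0) - 1 = 0 and expires at the next
	 * tick boundary rather than being rounded up a full tick.
	 */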

	(void)z_abort_timeout(&timer->timeout);
	timer->period = period;
	timer->status = 0U;

	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
		      duration);

	k_spin_unlock(&lock, key);
}
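
/* Example usage (a hedged sketch; my_timer is an illustrative name, not
 * part of this file). The public k_timer_start()/k_timer_stop() wrappers
 * resolve to the z_impl_*() functions in this file:
 *
 *	// one-shot: expire once, 100 ms from now
 *	k_timer_start(&my_timer, K_MSEC(100), K_NO_WAIT);
 *
 *	// periodic: first expiry after 10 ms, then every 50 ms
 *	k_timer_start(&my_timer, K_MSEC(10), K_MSEC(50));
 *
 *	// stop; runs stop_fn only if the timer was still active
 *	k_timer_stop(&my_timer);
 */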

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
					k_timeout_t duration,
					k_timeout_t period)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_start(timer, duration, period);
}
#include <zephyr/syscalls/k_timer_start_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_timer_stop(struct k_timer *timer)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, stop, timer);

	bool inactive = (z_abort_timeout(&timer->timeout) != 0);

	if (inactive) {
		return;
	}

	if (timer->stop_fn != NULL) {
		timer->stop_fn(timer);
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);

		if (pending_thread != NULL) {
			z_ready_thread(pending_thread);
			z_reschedule_unlocked();
		}
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_stop(timer);
}
#include <zephyr/syscalls/k_timer_stop_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	return result;
}
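
/* Example non-blocking poll (a hedged sketch; my_timer is an illustrative
 * name, not part of this file). k_timer_status_get() returns how many
 * times the timer has expired since the status was last read, and resets
 * the count:
 *
 *	uint32_t expired = k_timer_status_get(&my_timer);
 *
 *	if (expired > 0U) {
 *		// the timer fired 'expired' times since the last check
 *	}
 */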

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_get(timer);
}
#include <zephyr/syscalls/k_timer_status_get_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!arch_is_in_isr(), "");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, status_sync, timer);

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		uint32_t result;

		do {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (!z_is_inactive_timeout(&timer->timeout)) {
				result = *(volatile uint32_t *)&timer->status;
				timer->status = 0U;
				k_spin_unlock(&lock, key);
				if (result > 0) {
					break;
				}
			} else {
				result = timer->status;
				k_spin_unlock(&lock, key);
				break;
			}
		} while (true);

		return result;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	if (result == 0U) {
		if (!z_is_inactive_timeout(&timer->timeout)) {
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_timer, status_sync, timer, K_FOREVER);

			/* wait for timer to expire or stop */
			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);

			/* get updated timer status */
			key = k_spin_lock(&lock);
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, status_sync, timer, result);

	return result;
}
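
/* Example blocking wait (a hedged sketch; my_timer and do_periodic_work
 * are illustrative names, not part of this file). k_timer_status_sync()
 * pends the calling thread until the timer next expires or is stopped,
 * which gives a simple periodic loop:
 *
 *	k_timer_start(&my_timer, K_MSEC(100), K_MSEC(100));
 *
 *	while (true) {
 *		k_timer_status_sync(&my_timer);	// returns once per period
 *		do_periodic_work();		// hypothetical helper
 *	}
 */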

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_sync(timer);
}
#include <zephyr/syscalls/k_timer_status_sync_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_remaining_ticks(timer);
}
#include <zephyr/syscalls/k_timer_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_expires_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_expires_ticks(timer);
}
#include <zephyr/syscalls/k_timer_expires_ticks_mrsh.c>

static inline void *z_vrfy_k_timer_user_data_get(const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_user_data_get(timer);
}
#include <zephyr/syscalls/k_timer_user_data_get_mrsh.c>

static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_user_data_set(timer, user_data);
}
#include <zephyr/syscalls/k_timer_user_data_set_mrsh.c>

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_OBJ_CORE_TIMER
static int init_timer_obj_core_list(void)
{
	/* Initialize timer object type */

	z_obj_type_init(&obj_type_timer, K_OBJ_TYPE_TIMER_ID,
			offsetof(struct k_timer, obj_core));

	/* Initialize and link statically defined timers */

	STRUCT_SECTION_FOREACH(k_timer, timer) {
		k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
	}

	return 0;
}
SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_TIMER */