/*
 * Copyright (c) 1997-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <stdbool.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <wait_q.h>

static struct k_spinlock lock;

#ifdef CONFIG_OBJ_CORE_TIMER
static struct k_obj_type obj_type_timer;
#endif /* CONFIG_OBJ_CORE_TIMER */

/**
 * @brief Handle expiration of a kernel timer object.
 *
 * @param t  Timeout used by the timer.
 */
void z_timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* In sys_clock_announce(), when a timeout expires, it is first removed
	 * from the timeout list, then its expiration handler is called with
	 * interrupts unlocked. For kernel timers, the expiration handler is
	 * this function. Usually, the timeout structure of the timer handled
	 * here is no longer linked to the timeout list at this point. However,
	 * before this function runs and interrupts are locked again, the timer
	 * may be restarted from an interrupt context whose priority is higher
	 * than the system timer interrupt. In that case its timeout structure
	 * is linked to the timeout list again, and since the timer was
	 * restarted, its expiration handler must not run now, so the function
	 * exits immediately.
	 */
	if (sys_dnode_is_linked(&t->node)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
		k_timeout_t next = timer->period;

		/* see note about z_add_timeout() in z_impl_k_timer_start() */
		next.ticks = MAX(next.ticks - 1, 0);

#ifdef CONFIG_TIMEOUT_64BIT
		/* Exploit the fact that uptime during a kernel
		 * timeout handler reflects the time of the scheduled
		 * event and not real time to get some inexpensive
		 * protection against late interrupts.  If we're
		 * delayed for any reason, we still end up calculating
		 * the next expiration as a regular stride from where
		 * we "should" have run.  Requires absolute timeouts.
		 * (Note offset by one: we're nominally at the
		 * beginning of a tick, so need to defeat the "round
		 * down" behavior on timeout addition).
		 */
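		/* Example: a 10-tick period nominally expiring at tick
		 * 100 schedules its next expiration at the absolute
		 * tick 110 (100 + 1 + 9) below, even if this handler
		 * actually runs a few ticks late.
		 */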
		next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks);
#endif /* CONFIG_TIMEOUT_64BIT */
		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
			      next);
	}

	/* update timer's status */
	timer->status += 1U;

	/* invoke timer expiry function */
	if (timer->expiry_fn != NULL) {
		/* Unlock for user handler. */
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, expiry, timer);

		timer->expiry_fn(timer);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, expiry, timer);

		key = k_spin_lock(&lock);
	}

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_spin_unlock(&lock, key);
		return;
	}

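	/* If a thread is blocked in k_timer_status_sync(), wake it. */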
	thread = z_waitq_head(&timer->wait_q);

	if (thread == NULL) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_unpend_thread_no_timeout(thread);

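	/* The woken thread is blocked in k_timer_status_sync(); give it
	 * a swap return value of 0 before making it ready.
	 */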
	arch_thread_return_value_set(thread, 0);

	k_spin_unlock(&lock, key);

	z_ready_thread(thread);
}


void k_timer_init(struct k_timer *timer,
			 k_timer_expiry_t expiry_fn,
			 k_timer_stop_t stop_fn)
{
	timer->expiry_fn = expiry_fn;
	timer->stop_fn = stop_fn;
	timer->status = 0U;

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		z_waitq_init(&timer->wait_q);
	}

	z_init_timeout(&timer->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_timer, timer);

	timer->user_data = NULL;

	k_object_init(timer);

#ifdef CONFIG_OBJ_CORE_TIMER
	k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
#endif /* CONFIG_OBJ_CORE_TIMER */
}

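/*
 * Illustrative usage sketch (hypothetical names, not part of this
 * file): define a timer statically and start it so that it fires
 * first after 1 s and then every 100 ms.
 *
 *   static void my_expiry(struct k_timer *t)
 *   {
 *           printk("timer fired\n");
 *   }
 *
 *   K_TIMER_DEFINE(my_timer, my_expiry, NULL);
 *
 *   k_timer_start(&my_timer, K_SECONDS(1), K_MSEC(100));
 */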

void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			  k_timeout_t period)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, start, timer, duration, period);

	/* Acquire spinlock to ensure safety during concurrent calls to
	 * k_timer_start for scheduling or rescheduling. This is necessary
	 * since k_timer_start can be preempted, especially for the same
	 * timer instance.
	 */
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (K_TIMEOUT_EQ(duration, K_FOREVER)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* z_add_timeout() always adds one to the incoming tick count
	 * to round up to the next tick (by convention it waits for
	 * "at least as long as the specified timeout"), but the
	 * period interval is always guaranteed to be reset from
	 * within the timer ISR, so no round up is desired and 1 is
	 * subtracted in there.
	 *
	 * Note that the duration (!) value gets the same treatment
	 * for backwards compatibility.  This is unfortunate
	 * (i.e. k_timer_start() doesn't treat its initial sleep
	 * argument the same way k_sleep() does), but historical.  The
	 * timer_api test relies on this behavior.
	 */
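	/* Worked example: a relative duration of K_TICKS(5) becomes 4
	 * below, and z_add_timeout() rounds it back up to 5, so the
	 * first expiration lands about 5 ticks out instead of 6.
	 */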
	if (Z_IS_TIMEOUT_RELATIVE(duration)) {
		/* For the duration == K_NO_WAIT case, ensure that behaviour
		 * is consistent for both 32-bit k_ticks_t which are unsigned
		 * and 64-bit k_ticks_t which are signed.
		 */
		duration.ticks = MAX(1, duration.ticks);
		duration.ticks = duration.ticks - 1;
	}

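	/* Cancel any previously scheduled expiration before re-arming,
	 * so restarting a running timer simply resets it.
	 */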
	(void)z_abort_timeout(&timer->timeout);
	timer->period = period;
	timer->status = 0U;

	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
		     duration);

	k_spin_unlock(&lock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
					k_timeout_t duration,
					k_timeout_t period)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_start(timer, duration, period);
}
#include <zephyr/syscalls/k_timer_start_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_timer_stop(struct k_timer *timer)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, stop, timer);

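	/* A non-zero return from z_abort_timeout() means the timeout
	 * was not active: the timer already expired or was never
	 * started, so there is nothing to stop.
	 */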
	bool inactive = (z_abort_timeout(&timer->timeout) != 0);

	if (inactive) {
		return;
	}

	if (timer->stop_fn != NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, stop_fn_expiry, timer);

		timer->stop_fn(timer);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, stop_fn_expiry, timer);
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);

		if (pending_thread != NULL) {
			z_ready_thread(pending_thread);
			z_reschedule_unlocked();
		}
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_stop(timer);
}
#include <zephyr/syscalls/k_timer_stop_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
{
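	/* Read the number of expirations since the status was last
	 * read (or the timer was started), then reset the count.
	 */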
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	return result;
}

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_get(timer);
}
#include <zephyr/syscalls/k_timer_status_get_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!arch_is_in_isr(), "");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, status_sync, timer);

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		uint32_t result;

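		/* Single-threaded mode: there is no wait queue to pend
		 * on, so busy-wait, polling the status until the timer
		 * expires or is found to be stopped.
		 */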
		do {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (!z_is_inactive_timeout(&timer->timeout)) {
				result = *(volatile uint32_t *)&timer->status;
				timer->status = 0U;
				k_spin_unlock(&lock, key);
				if (result > 0) {
					break;
				}
			} else {
				result = timer->status;
				k_spin_unlock(&lock, key);
				break;
			}
		} while (true);

		return result;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	if (result == 0U) {
		if (!z_is_inactive_timeout(&timer->timeout)) {
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_timer, status_sync, timer, K_FOREVER);

			/* wait for timer to expire or stop */
			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);

			/* get updated timer status */
			key = k_spin_lock(&lock);
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, status_sync, timer, result);

	return result;
}
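
/*
 * Illustrative usage sketch (hypothetical names, not part of this
 * file): block the calling thread until my_timer next expires, then
 * check for missed expirations:
 *
 *   uint32_t expirations = k_timer_status_sync(&my_timer);
 *
 *   if (expirations > 1U) {
 *           printk("missed %u expirations\n", expirations - 1U);
 *   }
 */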

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_sync(timer);
}
#include <zephyr/syscalls/k_timer_status_sync_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_remaining_ticks(timer);
}
#include <zephyr/syscalls/k_timer_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_expires_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_expires_ticks(timer);
}
#include <zephyr/syscalls/k_timer_expires_ticks_mrsh.c>

static inline void *z_vrfy_k_timer_user_data_get(const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_user_data_get(timer);
}
#include <zephyr/syscalls/k_timer_user_data_get_mrsh.c>

static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_user_data_set(timer, user_data);
}
#include <zephyr/syscalls/k_timer_user_data_set_mrsh.c>

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_OBJ_CORE_TIMER
static int init_timer_obj_core_list(void)
{
	/* Initialize timer object type */

	z_obj_type_init(&obj_type_timer, K_OBJ_TYPE_TIMER_ID,
			offsetof(struct k_timer, obj_core));

	/* Initialize and link statically defined timers */

	STRUCT_SECTION_FOREACH(k_timer, timer) {
		k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
	}

	return 0;
}
SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_TIMER */