/*
 * Copyright (c) 1997-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>

#include <init.h>
#include <ksched.h>
#include <wait_q.h>
#include <syscall_handler.h>
#include <stdbool.h>
#include <spinlock.h>

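/*
 * Single spinlock shared by all timer instances; it serializes updates to a
 * timer's status/period fields and its wait queue against the expiration
 * handler running from the system clock interrupt.
 */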
static struct k_spinlock lock;

/**
 * @brief Handle expiration of a kernel timer object.
 *
 * @param t  Timeout used by the timer.
 *
 * @return N/A
 */
void z_timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
			     timer->period);
	}

	/* update timer's status */
	timer->status += 1U;

	/* invoke timer expiry function */
	if (timer->expiry_fn != NULL) {
		/* Unlock for user handler. */
		k_spin_unlock(&lock, key);
		timer->expiry_fn(timer);
		key = k_spin_lock(&lock);
	}

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_spin_unlock(&lock, key);
		return;
	}

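	/*
	 * In multithreading builds, wake at most one thread blocked on this
	 * timer in k_timer_status_sync().
	 */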
	thread = z_waitq_head(&timer->wait_q);

	if (thread == NULL) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_unpend_thread_no_timeout(thread);

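	/*
	 * Set the swap return value for the thread being woken; its pend in
	 * k_timer_status_sync() completes with this value.
	 */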
	arch_thread_return_value_set(thread, 0);

	k_spin_unlock(&lock, key);

	z_ready_thread(thread);
}


void k_timer_init(struct k_timer *timer,
			 k_timer_expiry_t expiry_fn,
			 k_timer_stop_t stop_fn)
{
	timer->expiry_fn = expiry_fn;
	timer->stop_fn = stop_fn;
	timer->status = 0U;

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		z_waitq_init(&timer->wait_q);
	}

	z_init_timeout(&timer->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_timer, timer);

	timer->user_data = NULL;

	z_object_init(timer);
}


void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			  k_timeout_t period)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, start, timer);

	if (K_TIMEOUT_EQ(duration, K_FOREVER)) {
		return;
	}

	/* z_add_timeout() always adds one to the incoming tick count
	 * to round up to the next tick (by convention it waits for
	 * "at least as long as the specified timeout"), but the
	 * period interval is always guaranteed to be reset from
	 * within the timer ISR, so no round up is desired.  Subtract
	 * one.
	 *
	 * Note that the duration (!) value gets the same treatment
	 * for backwards compatibility.  This is unfortunate
	 * (i.e. k_timer_start() doesn't treat its initial sleep
	 * argument the same way k_sleep() does), but historical.  The
	 * timer_api test relies on this behavior.
	 */
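	/*
	 * For example (illustrative): a 10-tick period is stored below as
	 * 9 ticks, so that z_add_timeout()'s +1 rounding restores the
	 * intended 10-tick interval on each reload from the ISR.
	 */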
	if (!K_TIMEOUT_EQ(period, K_FOREVER) && period.ticks != 0 &&
	    Z_TICK_ABS(period.ticks) < 0) {
		period.ticks = MAX(period.ticks - 1, 1);
	}
	if (Z_TICK_ABS(duration.ticks) < 0) {
		duration.ticks = MAX(duration.ticks - 1, 0);
	}

	(void)z_abort_timeout(&timer->timeout);
	timer->period = period;
	timer->status = 0U;

	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
		     duration);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
					k_timeout_t duration,
					k_timeout_t period)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_start(timer, duration, period);
}
#include <syscalls/k_timer_start_mrsh.c>
#endif
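
/*
 * Illustrative usage sketch (application side, not part of this file's
 * implementation): start a 100 ms periodic timer and block until it has
 * expired at least once.  The names my_timer and expirations are only
 * examples.
 *
 *	K_TIMER_DEFINE(my_timer, NULL, NULL);
 *
 *	k_timer_start(&my_timer, K_MSEC(100), K_MSEC(100));
 *	uint32_t expirations = k_timer_status_sync(&my_timer);
 *	k_timer_stop(&my_timer);
 */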

void z_impl_k_timer_stop(struct k_timer *timer)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, stop, timer);

	bool inactive = (z_abort_timeout(&timer->timeout) != 0);

	if (inactive) {
		return;
	}

	if (timer->stop_fn != NULL) {
		timer->stop_fn(timer);
	}

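	/*
	 * Wake any thread blocked in k_timer_status_sync(); because the
	 * timer was stopped before expiring, it will typically observe a
	 * status of 0.
	 */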
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);

		if (pending_thread != NULL) {
			z_ready_thread(pending_thread);
			z_reschedule_unlocked();
		}
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_stop(timer);
}
#include <syscalls/k_timer_stop_mrsh.c>
#endif

uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	return result;
}

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_get(timer);
}
#include <syscalls/k_timer_status_get_mrsh.c>
#endif

uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!arch_is_in_isr(), "");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, status_sync, timer);

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		uint32_t result;

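		/*
		 * With no threads to pend, busy-wait: poll the status under
		 * the lock until the timer expires or is stopped.
		 */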
		do {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (!z_is_inactive_timeout(&timer->timeout)) {
				result = *(volatile uint32_t *)&timer->status;
				timer->status = 0U;
				k_spin_unlock(&lock, key);
				if (result > 0) {
					break;
				}
			} else {
				result = timer->status;
				k_spin_unlock(&lock, key);
				break;
			}
		} while (true);

		return result;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	if (result == 0U) {
		if (!z_is_inactive_timeout(&timer->timeout)) {
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_timer, status_sync, timer, K_FOREVER);

			/* wait for timer to expire or stop */
			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);

			/* get updated timer status */
			key = k_spin_lock(&lock);
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	/**
	 * @note	New tracing hook
	 */
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, status_sync, timer, result);

	return result;
}

#ifdef CONFIG_USERSPACE
static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_sync(timer);
}
#include <syscalls/k_timer_status_sync_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(
						const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_remaining_ticks(timer);
}
#include <syscalls/k_timer_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_expires_ticks(
						const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_expires_ticks(timer);
}
#include <syscalls/k_timer_expires_ticks_mrsh.c>

static inline void *z_vrfy_k_timer_user_data_get(const struct k_timer *timer)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_user_data_get(timer);
}
#include <syscalls/k_timer_user_data_get_mrsh.c>

static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_user_data_set(timer, user_data);
}
#include <syscalls/k_timer_user_data_set_mrsh.c>

#endif