/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <zephyr/timeout_q.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>

static uint64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
                  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
        return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

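/* Head of the timeout list, i.e. the soonest-expiring timeout, or NULL */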
static struct _timeout *first(void)
{
        sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

        return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

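/* Timeout following 't' in the list, or NULL if 't' is the last one */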
static struct _timeout *next(struct _timeout *t)
{
        sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

        return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

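/* Unlink 't' from the list, folding its remaining delta ticks into its
 * successor so later expirations are unaffected.
 */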
static void remove_timeout(struct _timeout *t)
{
        if (next(t) != NULL) {
                next(t)->dticks += t->dticks;
        }

        sys_dlist_remove(&t->node);
}

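/* Ticks elapsed since the last announcement. Forced to zero while a
 * sys_clock_announce() is in progress, because curr_tick is already
 * being advanced there.
 */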
static int32_t elapsed(void)
{
        return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

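/* Ticks until the first timeout expires (never negative), or MAX_WAIT
 * when nothing is pending or the delta would exceed INT_MAX.
 */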
static int32_t next_timeout(void)
{
        struct _timeout *to = first();
        int32_t ticks_elapsed = elapsed();
        int32_t ret;

        if ((to == NULL) ||
            ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
                ret = MAX_WAIT;
        } else {
                ret = MAX(0, to->dticks - ticks_elapsed);
        }

        return ret;
}

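/* Insert 'to' into the delta list and reprogram the timer if it became
 * the new head. Each node stores ticks relative to its predecessor, so
 * only the head needs adjusting as time is announced.
 */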
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
                   k_timeout_t timeout)
{
        if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                return;
        }

#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(to));
#endif

        __ASSERT(!sys_dnode_is_linked(&to->node), "");
        to->fn = fn;

        LOCKED(&timeout_lock) {
                struct _timeout *t;

                if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
                    Z_TICK_ABS(timeout.ticks) >= 0) {
                        k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

                        to->dticks = MAX(1, ticks);
                } else {
                        to->dticks = timeout.ticks + 1 + elapsed();
                }

                for (t = first(); t != NULL; t = next(t)) {
                        if (t->dticks > to->dticks) {
                                t->dticks -= to->dticks;
                                sys_dlist_insert(&t->node, &to->node);
                                break;
                        }
                        to->dticks -= t->dticks;
                }

                if (t == NULL) {
                        sys_dlist_append(&timeout_list, &to->node);
                }

                if (to == first()) {
                        sys_clock_set_timeout(next_timeout(), false);
                }
        }
}

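/* Cancel a pending timeout. Returns 0 on success, -EINVAL if the
 * timeout was not active.
 */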
int z_abort_timeout(struct _timeout *to)
{
        int ret = -EINVAL;

        LOCKED(&timeout_lock) {
                if (sys_dnode_is_linked(&to->node)) {
                        remove_timeout(to);
                        ret = 0;
                }
        }

        return ret;
}

/* Must be called with timeout_lock held */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
        k_ticks_t ticks = 0;

        if (z_is_inactive_timeout(timeout)) {
                return 0;
        }

        for (struct _timeout *t = first(); t != NULL; t = next(t)) {
                ticks += t->dticks;
                if (timeout == t) {
                        break;
                }
        }

        return ticks - elapsed();
}

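/* Ticks remaining until 'timeout' expires, or 0 if it is inactive */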
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
        k_ticks_t ticks = 0;

        LOCKED(&timeout_lock) {
                ticks = timeout_rem(timeout);
        }

        return ticks;
}

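/* Absolute tick count at which 'timeout' will expire */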
k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
        k_ticks_t ticks = 0;

        LOCKED(&timeout_lock) {
                ticks = curr_tick + timeout_rem(timeout);
        }

        return ticks;
}

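/* Ticks until the soonest pending timeout expires, or MAX_WAIT when
 * nothing is pending.
 */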
int32_t z_get_next_timeout_expiry(void)
{
        int32_t ret = (int32_t) K_TICKS_FOREVER;

        LOCKED(&timeout_lock) {
                ret = next_timeout();
        }
        return ret;
}

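/* Timer driver callback: account for 'ticks' newly elapsed ticks, run
 * the callbacks of all timeouts that have come due (with the lock
 * released around each callback), then program the next timer
 * interrupt.
 */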
void sys_clock_announce(int32_t ticks)
{
        k_spinlock_key_t key = k_spin_lock(&timeout_lock);

        /* We release the lock around the callbacks below, so on SMP
         * systems someone might already be running the loop. Don't
         * race (which would cause parallel execution of "sequential"
         * timeouts and confuse apps), just increment the tick count
         * and return.
         */
        if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
                announce_remaining += ticks;
                k_spin_unlock(&timeout_lock, key);
                return;
        }

        announce_remaining = ticks;

        struct _timeout *t = first();

        for (t = first();
             (t != NULL) && (t->dticks <= announce_remaining);
             t = first()) {
                int dt = t->dticks;

                curr_tick += dt;
                t->dticks = 0;
                remove_timeout(t);

                k_spin_unlock(&timeout_lock, key);
                t->fn(t);
                key = k_spin_lock(&timeout_lock);
                announce_remaining -= dt;
        }

        if (t != NULL) {
                t->dticks -= announce_remaining;
        }

        curr_tick += announce_remaining;
        announce_remaining = 0;

        sys_clock_set_timeout(next_timeout(), false);

        k_spin_unlock(&timeout_lock, key);

#ifdef CONFIG_TIMESLICING
        z_time_slice();
#endif
}

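/* Current 64-bit tick count, including ticks elapsed since the last
 * announcement.
 */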
int64_t sys_clock_tick_get(void)
{
        uint64_t t = 0U;

        LOCKED(&timeout_lock) {
                t = curr_tick + elapsed();
        }
        return t;
}

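/* Truncated 32-bit tick count. In tickless mode this reads the full
 * counter; otherwise it returns the low bits of curr_tick without
 * taking the lock.
 */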
uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
        return (uint32_t)sys_clock_tick_get();
#else
        return (uint32_t)curr_tick;
#endif
}

int64_t z_impl_k_uptime_ticks(void)
{
        return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
        return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif

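/* Spin for 'usec_to_wait' microseconds using the hardware cycle
 * counter, unless the architecture provides its own busy-wait.
 */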
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
        SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
        if (usec_to_wait == 0U) {
                SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
                return;
        }

#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
        uint32_t start_cycles = k_cycle_get_32();

        /* use 64-bit math to prevent overflow when multiplying */
        uint32_t cycles_to_wait = (uint32_t)(
                (uint64_t)usec_to_wait *
                (uint64_t)sys_clock_hw_cycles_per_sec() /
                (uint64_t)USEC_PER_SEC
        );

        for (;;) {
                uint32_t current_cycles = k_cycle_get_32();

                /* this handles the rollover on an unsigned 32-bit value */
                if ((current_cycles - start_cycles) >= cycles_to_wait) {
                        break;
                }
        }
#else
        arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
        SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
        z_impl_k_busy_wait(usec_to_wait);
}
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Returns the uptime expiration (relative to an unlocked "now"!) of a
 * timeout object. When used correctly, this should be called once,
 * synchronously with the user passing a new timeout value. It should
 * not be used iteratively to adjust a timeout.
 */
uint64_t sys_clock_timeout_end_calc(k_timeout_t timeout)
{
        k_ticks_t dt;

        if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                return UINT64_MAX;
        } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                return sys_clock_tick_get();
        } else {

                dt = timeout.ticks;

                if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
                        return Z_TICK_ABS(dt);
                }
                return sys_clock_tick_get() + MAX(1, dt);
        }
}

#ifdef CONFIG_ZTEST
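/* Test-only hook: force the kernel tick count to a known value */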
void z_impl_sys_clock_tick_set(uint64_t tick)
{
        curr_tick = tick;
}

void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
        z_impl_sys_clock_tick_set(tick);
}
#endif /* CONFIG_ZTEST */