/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>

static uint64_t curr_tick;

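/* The timeout list is stored in "delta ticks": each node's dticks
 * field counts ticks relative to the expiry of the node before it,
 * so only the head of the list needs adjusting as time is announced.
 */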
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

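/* List traversal helpers; both return NULL at the end of the list */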
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

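/* Unlink a timeout, crediting its remaining dticks to its successor
 * so that later expiries are unchanged.
 */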
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

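/* Ticks elapsed since the last sys_clock_announce().  Reported as
 * zero while an announce is in progress, since that time is already
 * being consumed by the announce loop.
 */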
static int32_t elapsed(void)
{
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

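/* Ticks from now until the first timeout expires, clamped to
 * [0, MAX_WAIT] and, with CONFIG_TIMESLICING, to the current slice.
 */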
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret;

	if ((to == NULL) ||
	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
		ret = MAX_WAIT;
	} else {
		ret = MAX(0, to->dticks - ticks_elapsed);
	}

#ifdef CONFIG_TIMESLICING
	if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
		ret = _current_cpu->slice_ticks;
	}
#endif
	return ret;
}

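/* Arm a timeout: insert it into the delta list and, if it became the
 * new head, reprogram the timer driver.  K_FOREVER timeouts are never
 * queued.  A hypothetical caller might look like:
 *
 *	static struct _timeout my_to;
 *	static void my_expiry_fn(struct _timeout *t) { ... }
 *	...
 *	z_add_timeout(&my_to, my_expiry_fn, K_MSEC(10));
 */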
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	LOCKED(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    Z_TICK_ABS(timeout.ticks) >= 0) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first()) {
#if CONFIG_TIMESLICING
			/*
			 * This is not ideal, since it does not
			 * account for the time elapsed since the
			 * last announcement, which is what
			 * slice_ticks is measured from.  As a
			 * result, the time remaining until the next
			 * announcement can be less than slice_ticks.
			 */
			int32_t next_time = next_timeout();

			if (next_time == 0 ||
			    _current_cpu->slice_ticks != next_time) {
				sys_clock_set_timeout(next_time, false);
			}
#else
			sys_clock_set_timeout(next_timeout(), false);
#endif /* CONFIG_TIMESLICING */
		}
	}
}

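/* Disarm a timeout.  Returns 0 if it was removed from the queue, or
 * -EINVAL if it was not pending (already expired or never armed).
 */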
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	LOCKED(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

/* Ticks remaining until expiry; timeout_lock must be held */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	if (z_is_inactive_timeout(timeout)) {
		return 0;
	}

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks - elapsed();
}

k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = timeout_rem(timeout);
	}

	return ticks;
}

k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = curr_tick + timeout_rem(timeout);
	}

	return ticks;
}

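/* Ticks until the next queued timeout expires; MAX_WAIT if the queue
 * is empty.
 */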
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	LOCKED(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

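/* Request a tick announcement no later than @p ticks from now; used
 * by timeslicing to get an interrupt at the end of a slice.
 */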
void z_set_timeout_expiry(int32_t ticks, bool is_idle)
{
	LOCKED(&timeout_lock) {
		int next_to = next_timeout();
		bool sooner = (next_to == K_TICKS_FOREVER)
			      || (ticks <= next_to);
		bool imminent = next_to <= 1;

		/* Only set new timeouts when they are sooner than
		 * what we have.  Also don't try to set a timeout when
		 * one is about to expire: drivers have internal logic
		 * that will bump the timeout to the "next" tick if
		 * it's not considered to be settable as directed.
		 * SMP can't use this optimization though: we don't
		 * know when context switches happen until interrupt
		 * exit and so can't get the timeslicing clamp folded
		 * in.
		 */
		if (!imminent && (sooner || IS_ENABLED(CONFIG_SMP))) {
			sys_clock_set_timeout(MIN(ticks, next_to), is_idle);
		}
	}
}

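/* Timer driver entry point: announce that @p ticks have elapsed.
 * Expired callbacks run with timeout_lock released, so they may
 * rearm or abort timeouts themselves.
 */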
void sys_clock_announce(int32_t ticks)
{
#ifdef CONFIG_TIMESLICING
	z_time_slice(ticks);
#endif

	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	/* We release the lock around the callbacks below, so on SMP
	 * systems someone might already be running the loop.  Don't
	 * race (which would cause parallel execution of "sequential"
	 * timeouts and confuse apps); just increment the tick count
	 * and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}

	announce_remaining = ticks;

	while (first() != NULL && first()->dticks <= announce_remaining) {
		struct _timeout *t = first();
		int dt = t->dticks;

		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}

	if (first() != NULL) {
		first()->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);
}

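/* Current kernel time in ticks: the last announced tick count plus
 * whatever the driver reports has elapsed since.
 */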
int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	LOCKED(&timeout_lock) {
		t = curr_tick + elapsed();
	}
	return t;
}

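/* Cheaper 32-bit variant: when the kernel is not tickless, curr_tick
 * is kept current by per-tick announcements and can be read directly.
 */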
uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif

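/* Spin (without sleeping) for @p usec_to_wait microseconds, using the
 * hardware cycle counter unless the arch supplies its own busy-wait.
 */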
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}

#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	uint32_t start_cycles = k_cycle_get_32();

	/* use 64-bit math to prevent overflow when multiplying */
	uint32_t cycles_to_wait = (uint32_t)(
		(uint64_t)usec_to_wait *
		(uint64_t)sys_clock_hw_cycles_per_sec() /
		(uint64_t)USEC_PER_SEC
	);

	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Returns the uptime expiration (relative to an unlocked "now"!) of a
 * timeout object.  When used correctly, this should be called once,
 * synchronously with the user passing a new timeout value.  It should
 * not be used iteratively to adjust a timeout.
 */
uint64_t sys_clock_timeout_end_calc(k_timeout_t timeout)
{
	k_ticks_t dt;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		return sys_clock_tick_get();
	} else {
		dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			return Z_TICK_ABS(dt);
		}
		return sys_clock_tick_get() + MAX(1, dt);
	}
}