/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>

static uint64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

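/* Longest wait handed to the timer driver when nothing sooner is pending:
 * with CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE the driver may sleep indefinitely
 * (K_TICKS_FOREVER), otherwise the programmed timeout is capped at INT_MAX
 * ticks.
 */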
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <zephyr/syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

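/* Timeouts are kept in a delta list: each node's dticks field holds the
 * number of ticks between its expiry and that of the node preceding it,
 * so only the head's delta is measured from the last announced tick
 * (curr_tick).  first()/next() walk that list, returning NULL at the end.
 */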
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return (t == NULL) ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return (n == NULL) ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

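/* Unlink a timeout from the delta list, folding its remaining delta into
 * its successor so that later entries keep their absolute expiry times.
 */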
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

static int32_t elapsed(void)
{
	/* While sys_clock_announce() is executing, new relative timeouts
	 * are scheduled relative to the tick value of the currently firing
	 * timeout (i.e. curr_tick) rather than relative to the current
	 * sys_clock_elapsed().
	 *
	 * This means that timeouts scheduled from within timeout callbacks
	 * land at well-defined offsets from the currently firing timeout.
	 *
	 * As a side effect, the same holds if an ISR with higher priority
	 * preempts a timeout callback and schedules a timeout.
	 *
	 * The distinction is implemented by looking at announce_remaining,
	 * which is non-zero while sys_clock_announce() is executing and
	 * zero otherwise.
	 */
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

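/* Ticks from now until the first queued timeout expires, clamped to the
 * range [0, MAX_WAIT].  This is the value handed to the timer driver.
 */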
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret;

	if ((to == NULL) ||
	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
		ret = MAX_WAIT;
	} else {
		ret = MAX(0, to->dticks - ticks_elapsed);
	}

	return ret;
}

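/* Queue a timeout.  Absolute timeouts (Z_TICK_ABS() >= 0) are converted to
 * a delta from curr_tick; relative ones are measured from "now" (see
 * elapsed()) and padded by one tick so they cannot fire early.  The entry
 * is inserted in expiry order and the timer driver is reprogrammed if it
 * becomes the next timeout to fire.
 */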
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif /* CONFIG_KERNEL_COHERENCE */

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	K_SPINLOCK(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    (Z_TICK_ABS(timeout.ticks) >= 0)) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first() && announce_remaining == 0) {
			sys_clock_set_timeout(next_timeout(), false);
		}
	}
}

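/* Cancel a queued timeout.  Returns 0 if it was pending and has been
 * removed, -EINVAL if it was not queued (already expired or never added).
 */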
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	K_SPINLOCK(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

/* Sum of the deltas up to and including the given timeout, i.e. the ticks
 * remaining from curr_tick until it fires.  timeout_lock must be held.
 */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks;
}

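/* Ticks remaining before the timeout fires, or 0 if it is not queued. */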
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		if (!z_is_inactive_timeout(timeout)) {
			ticks = timeout_rem(timeout) - elapsed();
		}
	}

	return ticks;
}

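/* Absolute uptime (in ticks) at which the timeout will expire; if the
 * timeout is not queued, the current tick count is returned.
 */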
k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		ticks = curr_tick;
		if (!z_is_inactive_timeout(timeout)) {
			ticks += timeout_rem(timeout);
		}
	}

	return ticks;
}

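/* Locked wrapper around next_timeout(): ticks until the next queued
 * timeout expires, or MAX_WAIT when nothing is pending.
 */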
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	K_SPINLOCK(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

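/* Timer driver entry point: announce that "ticks" ticks have elapsed.
 * Expired timeouts are removed and their callbacks invoked with the
 * timeout lock released, then the driver is programmed for the next
 * expiry.
 */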
void sys_clock_announce(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	/* We release the lock around the callbacks below, so on SMP
	 * systems another CPU might already be running the loop.  Don't
	 * race (which would cause parallel execution of "sequential"
	 * timeouts and confuse apps); just add our ticks to the
	 * announcement already in progress and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}

	announce_remaining = ticks;

	struct _timeout *t;

	for (t = first();
	     (t != NULL) && (t->dticks <= announce_remaining);
	     t = first()) {
		int dt = t->dticks;

		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}

	if (t != NULL) {
		t->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);

#ifdef CONFIG_TIMESLICING
	z_time_slice();
#endif /* CONFIG_TIMESLICING */
}

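/* Full 64-bit uptime in ticks: the last announced tick count plus whatever
 * the timer driver reports as having elapsed since.
 */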
int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	K_SPINLOCK(&timeout_lock) {
		t = curr_tick + elapsed();
	}
	return t;
}

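/* Cheaper 32-bit tick count: with a tickless kernel this truncates the
 * full 64-bit value, otherwise it reads curr_tick directly.
 */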
uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif /* CONFIG_TICKLESS_KERNEL */
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <zephyr/syscalls/k_uptime_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */

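/* Convert a timeout into an absolute timepoint: UINT64_MAX for K_FOREVER,
 * 0 for K_NO_WAIT, otherwise the tick at which it expires.
 *
 * A typical (illustrative) usage pattern is to latch a deadline once and
 * re-derive the remaining wait before each blocking call; "sem" and
 * "done" below are placeholders:
 *
 *	k_timepoint_t end = sys_timepoint_calc(K_MSEC(100));
 *
 *	while (!done && !sys_timepoint_expired(end)) {
 *		(void)k_sem_take(&sem, sys_timepoint_timeout(end));
 *	}
 */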
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
{
	k_timepoint_t timepoint;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		timepoint.tick = UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		timepoint.tick = 0;
	} else {
		k_ticks_t dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			timepoint.tick = Z_TICK_ABS(dt);
		} else {
			timepoint.tick = sys_clock_tick_get() + MAX(1, dt);
		}
	}

	return timepoint;
}

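/* Convert an absolute timepoint back into a relative timeout, mapping the
 * sentinel values back to K_FOREVER and K_NO_WAIT and clamping deadlines
 * already in the past to zero remaining ticks.
 */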
k_timeout_t sys_timepoint_timeout(k_timepoint_t timepoint)
{
	uint64_t now, remaining;

	if (timepoint.tick == UINT64_MAX) {
		return K_FOREVER;
	}
	if (timepoint.tick == 0) {
		return K_NO_WAIT;
	}

	now = sys_clock_tick_get();
	remaining = (timepoint.tick > now) ? (timepoint.tick - now) : 0;
	return K_TICKS(remaining);
}

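/* Test-only hook: lets ztest cases set the kernel tick count directly. */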
#ifdef CONFIG_ZTEST
void z_impl_sys_clock_tick_set(uint64_t tick)
{
	curr_tick = tick;
}

void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
	z_impl_sys_clock_tick_set(tick);
}
#endif /* CONFIG_ZTEST */