1 /*
2 * Copyright (c) 2018 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <zephyr/init.h>
7 #include <zephyr/drivers/timer/system_timer.h>
8 #include <zephyr/sys_clock.h>
9 #include <zephyr/spinlock.h>
10 #include <cmsis_core.h>
11 #include <zephyr/irq.h>
12 #include <zephyr/sys/util.h>
13 #include <zephyr/drivers/counter.h>
14
/* SysTick's counter (VAL/LOAD) is 24 bits wide. */
#define COUNTER_MAX 0x00ffffff
/* Sentinel stored in last_load when the counter has been shut off in
 * tickless idle; deliberately outside the valid 24-bit LOAD range.
 */
#define TIMER_STOPPED 0xff000000

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
/* Longest timeout (in ticks / cycles) that fits in one 24-bit LOAD
 * programming, minus one tick of margin.
 */
#define MAX_TICKS ((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)

/* Minimum cycles in the future to try to program. Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit. So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked. Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024U, ((uint32_t)CYC_PER_TICK/16U))

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL))

/* Protects all of the mutable driver state below. */
static struct k_spinlock lock;

/* Value most recently programmed into SysTick.LOAD (the counter wrap
 * period), or TIMER_STOPPED when the timer is disabled in idle.
 */
static uint32_t last_load;

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
#define cycle_t uint64_t
#else
#define cycle_t uint32_t
#endif

/*
 * This local variable holds the amount of SysTick HW cycles elapsed
 * and it is updated in sys_clock_isr() and sys_clock_set_timeout().
 *
 * Note:
 * At an arbitrary point in time the "current" value of the SysTick
 * HW timer is calculated as:
 *
 * t = cycle_counter + elapsed();
 */
static cycle_t cycle_count;

/*
 * This local variable holds the amount of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 *
 * Note:
 * Additions/subtractions/comparisons of 64-bit values on 32-bit systems
 * are very cheap. Divisions are not. Make sure the difference between
 * cycle_count and announced_cycles is stored in a 32-bit variable before
 * dividing it by CYC_PER_TICK.
 */
static cycle_t announced_cycles;

/*
 * This local variable holds the amount of elapsed HW cycles due to
 * SysTick timer wraps ('overflows') and is used in the calculation
 * in elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * the overflow_cyc must be reset to zero.
 */
static volatile uint32_t overflow_cyc;

#ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
/* This local variable indicates that the timeout was set right before
 * entering idle state.
 *
 * It is used for chips that have to use a separate idle timer in such
 * case because the Cortex-M SysTick is not clocked in the low power
 * mode state.
 */
static bool timeout_idle;

/* Cycle counter before entering the idle state. */
static cycle_t cycle_pre_idle;

/* Idle timer value before entering the idle state. */
static uint32_t idle_timer_pre_idle;

/* Idle timer used for timing while in the idle state */
static const struct device *idle_timer = DEVICE_DT_GET(DT_CHOSEN(zephyr_cortex_m_idle_timer));
#endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */
100
/* This internal function calculates the amount of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter has been
 * updated. 'cycle_count' may be updated either by the ISR, or when we
 * re-program the SysTick.LOAD register, in sys_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cyc' counter, that
 * holds the amount of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of SysTick.LOAD must be clearing the SysTick.COUNTER
 *   register and the 'overflow_cyc' counter.
 * - ISR must be clearing the 'overflow_cyc' counter.
 * - no more than one counter-wrap has occurred between
 *     - the timer reset or the last time the function was called
 *     - and until the current call of the function is completed.
 * - the function is invoked with interrupts disabled.
 */
static uint32_t elapsed(void)
{
	/* The three reads below MUST stay in this exact order (VAL,
	 * CTRL, VAL); the wrap-detection logic depends on it.
	 */
	uint32_t val1 = SysTick->VAL;	/* A */
	uint32_t ctrl = SysTick->CTRL;	/* B */
	uint32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps after zero automatically.
	 * The COUNTFLAG field of the CTRL register is set when it
	 * decrements from 1 to 0. Reading the control register
	 * automatically clears that field. When a timer is started,
	 * count begins at zero then wraps after the first cycle.
	 * Reference:
	 *  Armv6-m (B3.3.1) https://developer.arm.com/documentation/ddi0419
	 *  Armv7-m (B3.3.1) https://developer.arm.com/documentation/ddi0403
	 *  Armv8-m (B11.1)  https://developer.arm.com/documentation/ddi0553
	 *
	 * First, manually wrap/realign val1 and val2 from [0:last_load-1]
	 * to [1:last_load]. This allows subsequent code to assume that
	 * COUNTFLAG and wrapping occur on the same cycle.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
	if (val1 == 0) {
		val1 = last_load;
	}
	if (val2 == 0) {
		val2 = last_load;
	}

	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it.
		 */
		(void)SysTick->CTRL;
	}

	/* SysTick counts DOWN, so (last_load - val2) is the progress
	 * within the current wrap period.
	 */
	return (last_load - val2) + overflow_cyc;
}
166
/* SysTick interrupt handler: announces elapsed ticks to the kernel.
 * Called out of platform assembly, not hooked via IRQ_CONNECT...
 */
void sys_clock_isr(void *arg)
{
	ARG_UNUSED(arg);
	uint32_t dcycles;
	uint32_t dticks;

	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
	elapsed();

	/* Increment the amount of HW cycles elapsed (complete counter
	 * cycles) and announce the progress to the kernel.
	 */
	cycle_count += overflow_cyc;
	overflow_cyc = 0;

#ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
	/* Rare case, when the interrupt was triggered, with previously programmed
	 * LOAD value, just before entering the idle mode (SysTick is clocked) or right
	 * after exiting the idle mode, before executing the procedure in the
	 * sys_clock_idle_exit function. Announcing is deferred to
	 * sys_clock_idle_exit() in that case.
	 */
	if (timeout_idle) {
		z_arm_int_exit();

		return;
	}
#endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */

	if (TICKLESS) {
		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
		 * in sys_clock_set_timeout(), followed by resetting of
		 * the counter (VAL = 0).
		 *
		 * If a timer wrap occurs right when we re-program LOAD,
		 * the ISR is triggered immediately after sys_clock_set_timeout()
		 * returns; in that case we shall not increment the cycle_count
		 * because the value has been updated before LOAD re-program.
		 *
		 * We can assess if this is the case by inspecting COUNTFLAG.
		 */

		/* Keep the 64-bit subtraction in a 32-bit variable before
		 * the division (see note on announced_cycles).
		 */
		dcycles = cycle_count - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);
	} else {
		/* Ticking mode: exactly one tick per interrupt. */
		sys_clock_announce(1);
	}
	z_arm_int_exit();
}
218
sys_clock_set_timeout(int32_t ticks,bool idle)219 void sys_clock_set_timeout(int32_t ticks, bool idle)
220 {
221 /* Fast CPUs and a 24 bit counter mean that even idle systems
222 * need to wake up multiple times per second. If the kernel
223 * allows us to miss tick announcements in idle, then shut off
224 * the counter. (Note: we can assume if idle==true that
225 * interrupts are already disabled)
226 */
227 if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle && ticks == K_TICKS_FOREVER) {
228 SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
229 last_load = TIMER_STOPPED;
230 return;
231 }
232
233 #ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
234 if (idle) {
235 uint64_t timeout_us =
236 ((uint64_t)ticks * USEC_PER_SEC) / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
237 struct counter_alarm_cfg cfg = {
238 .callback = NULL,
239 .ticks = counter_us_to_ticks(idle_timer, timeout_us),
240 .user_data = NULL,
241 .flags = 0,
242 };
243
244 timeout_idle = true;
245
246 /* Set the alarm using timer that runs the idle.
247 * Needed rump-up/setting time, lower accurency etc. should be
248 * included in the exit-latency in the power state definition.
249 */
250 counter_cancel_channel_alarm(idle_timer, 0);
251 counter_set_channel_alarm(idle_timer, 0, &cfg);
252
253 /* Store current values to calculate a difference in
254 * measurements after exiting the idle state.
255 */
256 counter_get_value(idle_timer, &idle_timer_pre_idle);
257 cycle_pre_idle = cycle_count + elapsed();
258
259 return;
260 }
261 #endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */
262
263 #if defined(CONFIG_TICKLESS_KERNEL)
264 uint32_t delay;
265 uint32_t val1, val2;
266 uint32_t last_load_ = last_load;
267
268 ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
269 ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);
270
271 k_spinlock_key_t key = k_spin_lock(&lock);
272
273 uint32_t pending = elapsed();
274
275 val1 = SysTick->VAL;
276
277 cycle_count += pending;
278 overflow_cyc = 0U;
279
280 uint32_t unannounced = cycle_count - announced_cycles;
281
282 if ((int32_t)unannounced < 0) {
283 /* We haven't announced for more than half the 32-bit
284 * wrap duration, because new timeouts keep being set
285 * before the existing one fires. Force an announce
286 * to avoid loss of a wrap event, making sure the
287 * delay is at least the minimum delay possible.
288 */
289 last_load = MIN_DELAY;
290 } else {
291 /* Desired delay in the future */
292 delay = ticks * CYC_PER_TICK;
293
294 /* Round delay up to next tick boundary */
295 delay += unannounced;
296 delay = DIV_ROUND_UP(delay, CYC_PER_TICK) * CYC_PER_TICK;
297 delay -= unannounced;
298 delay = MAX(delay, MIN_DELAY);
299 if (delay > MAX_CYCLES) {
300 last_load = MAX_CYCLES;
301 } else {
302 last_load = delay;
303 }
304 }
305
306 val2 = SysTick->VAL;
307
308 SysTick->LOAD = last_load - 1;
309 SysTick->VAL = 0; /* resets timer to last_load */
310
311 /*
312 * Add elapsed cycles while computing the new load to cycle_count.
313 *
314 * Note that comparing val1 and val2 is normaly not good enough to
315 * guess if the counter wrapped during this interval. Indeed if val1 is
316 * close to LOAD, then there are little chances to catch val2 between
317 * val1 and LOAD after a wrap. COUNTFLAG should be checked in addition.
318 * But since the load computation is faster than MIN_DELAY, then we
319 * don't need to worry about this case.
320 */
321 if (val1 < val2) {
322 cycle_count += (val1 + (last_load_ - val2));
323 } else {
324 cycle_count += (val1 - val2);
325 }
326 k_spin_unlock(&lock, key);
327 #endif
328 }
329
sys_clock_elapsed(void)330 uint32_t sys_clock_elapsed(void)
331 {
332 if (!TICKLESS) {
333 return 0;
334 }
335
336 k_spinlock_key_t key = k_spin_lock(&lock);
337 uint32_t unannounced = cycle_count - announced_cycles;
338 uint32_t cyc = elapsed() + unannounced;
339
340 k_spin_unlock(&lock, key);
341 return cyc / CYC_PER_TICK;
342 }
343
sys_clock_cycle_get_32(void)344 uint32_t sys_clock_cycle_get_32(void)
345 {
346 k_spinlock_key_t key = k_spin_lock(&lock);
347 uint32_t ret = cycle_count;
348
349 ret += elapsed();
350 k_spin_unlock(&lock, key);
351 return ret;
352 }
353
#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
/* Return the current 64-bit HW cycle count (only available when the
 * 64-bit cycle counter option is enabled and cycle_t is uint64_t).
 */
uint64_t sys_clock_cycle_get_64(void)
{
	uint64_t cycles;
	k_spinlock_key_t key = k_spin_lock(&lock);

	cycles = cycle_count + elapsed();
	k_spin_unlock(&lock, key);

	return cycles;
}
#endif
364
sys_clock_idle_exit(void)365 void sys_clock_idle_exit(void)
366 {
367 #ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
368 if (timeout_idle) {
369 cycle_t systick_diff, missed_cycles;
370 uint32_t idle_timer_diff, idle_timer_post, dcycles, dticks;
371 uint64_t systick_us, idle_timer_us;
372
373 /* Get current values for both timers */
374 counter_get_value(idle_timer, &idle_timer_post);
375 systick_diff = cycle_count + elapsed() - cycle_pre_idle;
376
377 /* Calculate has much time has pasted since last measurement for both timers */
378 /* Check IDLE timer overflow */
379 if (idle_timer_pre_idle > idle_timer_post) {
380 idle_timer_diff =
381 (counter_get_top_value(idle_timer) - idle_timer_pre_idle) +
382 idle_timer_post + 1;
383
384 } else {
385 idle_timer_diff = idle_timer_post - idle_timer_pre_idle;
386 }
387 idle_timer_us = counter_ticks_to_us(idle_timer, idle_timer_diff);
388 systick_us =
389 ((uint64_t)systick_diff * USEC_PER_SEC) / sys_clock_hw_cycles_per_sec();
390
391 /* Calculate difference in measurements to get how much time
392 * the SysTick missed in idle state.
393 */
394 if (idle_timer_us < systick_us) {
395 /* This case is possible, when the time in low power mode is
396 * very short or 0. SysTick usually has higher measurement
397 * resolution of than the IDLE timer, thus the measurement of
398 * passed time since the sys_clock_set_timeout call can be higher.
399 */
400 missed_cycles = 0;
401 } else {
402 uint64_t measurement_diff_us;
403
404 measurement_diff_us = idle_timer_us - systick_us;
405 missed_cycles = (sys_clock_hw_cycles_per_sec() * measurement_diff_us) /
406 USEC_PER_SEC;
407 }
408
409 /* Update the cycle counter to include the cycles missed in idle */
410 cycle_count += missed_cycles;
411
412 /* Announce the passed ticks to the kernel */
413 dcycles = cycle_count + elapsed() - announced_cycles;
414 dticks = dcycles / CYC_PER_TICK;
415 announced_cycles += dticks * CYC_PER_TICK;
416 sys_clock_announce(dticks);
417
418 /* We've alredy performed all needed operations */
419 timeout_idle = false;
420 }
421 #endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */
422
423 if (last_load == TIMER_STOPPED) {
424 SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
425 }
426 }
427
sys_clock_disable(void)428 void sys_clock_disable(void)
429 {
430 SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
431 }
432
sys_clock_driver_init(void)433 static int sys_clock_driver_init(void)
434 {
435
436 NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
437 last_load = CYC_PER_TICK;
438 overflow_cyc = 0U;
439 SysTick->LOAD = last_load - 1;
440 SysTick->VAL = 0; /* resets timer to last_load */
441 SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
442 SysTick_CTRL_TICKINT_Msk |
443 SysTick_CTRL_CLKSOURCE_Msk);
444 return 0;
445 }
446
447 SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
448 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
449