/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/init.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <cmsis_core.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/counter.h>

#include "cortex_m_systick.h"

#define COUNTER_MAX 0x00ffffff
#define TIMER_STOPPED 0xff000000

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
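
/* Worked example (illustrative values, not tied to any particular SoC):
 * with a 48 MHz core clock and CONFIG_SYS_CLOCK_TICKS_PER_SEC = 1000,
 * CYC_PER_TICK = 48000, MAX_TICKS = (0x00ffffff / 48000) - 1 = 348 and
 * MAX_CYCLES = 348 * 48000 = 16704000, comfortably below COUNTER_MAX.
 */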

/* Minimum cycles in the future to try to program. Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit. So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked. Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024U, ((uint32_t)CYC_PER_TICK/16U))
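
/* Continuing the illustrative example above: CYC_PER_TICK / 16 = 3000
 * cycles (62.5 us at 48 MHz), which exceeds the 1024-cycle floor, so
 * MIN_DELAY = 3000. On slower clocks the 1024-cycle floor wins instead.
 */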

static struct k_spinlock lock;

static uint32_t last_load;

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
typedef uint64_t cycle_t;
#else
typedef uint32_t cycle_t;
#endif

/*
 * This local variable holds the number of SysTick HW cycles elapsed
 * and is updated in sys_clock_isr() and sys_clock_set_timeout().
 *
 * Note:
 * At an arbitrary point in time the "current" value of the SysTick
 * HW timer is calculated as:
 *
 * t = cycle_count + elapsed();
 */
static cycle_t cycle_count;

/*
 * This local variable holds the number of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 *
 * Note:
 * Additions/subtractions/comparisons of 64-bit values on 32-bit systems
 * are very cheap. Divisions are not. Make sure the difference between
 * cycle_count and announced_cycles is stored in a 32-bit variable before
 * dividing it by CYC_PER_TICK.
 */
static cycle_t announced_cycles;

/*
 * This local variable holds the number of elapsed HW cycles due to
 * SysTick timer wraps ('overflows') and is used in the calculation
 * in the elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * overflow_cyc must be reset to zero.
 */
static volatile uint32_t overflow_cyc;

#if !defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE)
/* This local variable indicates that the timeout was set right before
 * entering the idle state.
 *
 * It is used for chips that have to use a separate idle timer in such
 * a case because the Cortex-M SysTick is not clocked in the low power
 * mode state.
 */
static bool timeout_idle;

#if !defined(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)
/* Cycle counter before entering the idle state. */
static cycle_t cycle_pre_idle;
#endif /* !CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */

#if defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER)
/* Idle timer value before entering the idle state. */
static uint32_t idle_timer_pre_idle;

/* Idle timer used for timekeeping while in the idle state. */
static const struct device *idle_timer = DEVICE_DT_GET(DT_CHOSEN(zephyr_cortex_m_idle_timer));
#endif /* CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER */
#endif /* !CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE */

#if defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER)
/**
 * To simplify the driver, implement the callout to the Counter API
 * as hooks that would be provided by platform drivers if
 * CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_HOOKS was selected instead.
 */
void z_cms_lptim_hook_on_lpm_entry(uint64_t max_lpm_time_us)
{
	struct counter_alarm_cfg cfg = {
		.callback = NULL,
		.ticks = counter_us_to_ticks(idle_timer, max_lpm_time_us),
		.user_data = NULL,
		.flags = 0,
	};

	/**
	 * Disable the counter alarm in case it was already running.
	 */
	counter_cancel_channel_alarm(idle_timer, 0);

	/* Set the alarm using the timer that keeps running during idle.
	 * Any needed ramp-up/setup time, lower accuracy, etc. should be
	 * included in the exit-latency of the power state definition.
	 */
	counter_set_channel_alarm(idle_timer, 0, &cfg);

	/* Store the current value of the selected timer to calculate a
	 * difference in measurements after exiting the idle state.
	 */
	counter_get_value(idle_timer, &idle_timer_pre_idle);
}

uint64_t z_cms_lptim_hook_on_lpm_exit(void)
{
	/**
	 * Calculate how much time elapsed according to the counter.
	 */
	uint32_t idle_timer_post, idle_timer_diff;

	counter_get_value(idle_timer, &idle_timer_post);

	/**
	 * Check for counter timer overflow
	 * (TODO: this doesn't work for downcounting timers!)
	 */
	if (idle_timer_pre_idle > idle_timer_post) {
		idle_timer_diff =
			(counter_get_top_value(idle_timer) - idle_timer_pre_idle) +
			idle_timer_post + 1;
	} else {
		idle_timer_diff = idle_timer_post - idle_timer_pre_idle;
	}
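
	/* Sanity check of the wrap math above (illustrative numbers): with
	 * a top value of 0xFFFF, pre = 0xFFFE and post = 5, the counter
	 * wrapped once and diff = (0xFFFF - 0xFFFE) + 5 + 1 = 7 ticks,
	 * matching the seven increments 0xFFFF, 0, 1, 2, 3, 4, 5.
	 */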

	return (uint64_t)counter_ticks_to_us(idle_timer, idle_timer_diff);
}
#endif /* CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER */

/* This internal function calculates the number of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter was
 * updated. 'cycle_count' may be updated either by the ISR, or when we
 * re-program the SysTick.LOAD register, in sys_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cyc' counter, which
 * holds the number of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of SysTick.LOAD must clear the SysTick.COUNTER
 *   register and the 'overflow_cyc' counter.
 * - the ISR must clear the 'overflow_cyc' counter.
 * - no more than one counter-wrap has occurred between
 *   - the timer reset or the last time the function was called
 *   - and until the current call of the function is completed.
 * - the function is invoked with interrupts disabled.
 */
static uint32_t elapsed(void)
{
	uint32_t val1 = SysTick->VAL;	/* A */
	uint32_t ctrl = SysTick->CTRL;	/* B */
	uint32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps after zero automatically.
	 * The COUNTFLAG field of the CTRL register is set when it
	 * decrements from 1 to 0. Reading the control register
	 * automatically clears that field. When a timer is started,
	 * count begins at zero then wraps after the first cycle.
	 * Reference:
	 * Armv6-m (B3.3.1) https://developer.arm.com/documentation/ddi0419
	 * Armv7-m (B3.3.1) https://developer.arm.com/documentation/ddi0403
	 * Armv8-m (B11.1) https://developer.arm.com/documentation/ddi0553
	 *
	 * First, manually wrap/realign val1 and val2 from [0:last_load-1]
	 * to [1:last_load]. This allows subsequent code to assume that
	 * COUNTFLAG and wrapping occur on the same cycle.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
	if (val1 == 0) {
		val1 = last_load;
	}
	if (val2 == 0) {
		val2 = last_load;
	}

	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it.
		 */
		(void)SysTick->CTRL;
	}

	return (last_load - val2) + overflow_cyc;
}

/* sys_clock_isr() is called directly from the platform's vector table.
 * However, using ISR_DIRECT_DECLARE() is not well suited here due to
 * possible tracing overflow, so this is a stripped-down version of it.
 */
ARCH_ISR_DIAG_OFF
__attribute__((interrupt("IRQ"))) void sys_clock_isr(void)
{
#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_enter();
#endif /* CONFIG_TRACING_ISR */

	uint32_t dcycles;
	uint32_t dticks;

	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
	elapsed();

	/* Increment the amount of HW cycles elapsed (complete counter
	 * cycles) and announce the progress to the kernel.
	 */
	cycle_count += overflow_cyc;
	overflow_cyc = 0;

#if defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER)
	/* Rare case: the interrupt was triggered, with the previously
	 * programmed LOAD value, just before entering idle mode (while
	 * SysTick is still clocked) or right after exiting idle mode,
	 * before the procedure in sys_clock_idle_exit() has executed.
	 */
	if (timeout_idle) {
		ISR_DIRECT_PM();
		z_arm_int_exit();

		return;
	}
#endif /* CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER */

	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
		 * in sys_clock_set_timeout(), followed by resetting of
		 * the counter (VAL = 0).
		 *
		 * If a timer wrap occurs right when we re-program LOAD,
		 * the ISR is triggered immediately after sys_clock_set_timeout()
		 * returns; in that case we shall not increment cycle_count
		 * because the value was already updated before the LOAD
		 * re-program.
		 *
		 * We can assess if this is the case by inspecting COUNTFLAG.
		 */

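		/* Note: dcycles is deliberately 32-bit; see the note on
		 * announced_cycles above about the cost of 64-bit divisions
		 * on 32-bit systems.
		 */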
		dcycles = cycle_count - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);
	} else {
		sys_clock_announce(1);
	}

	ISR_DIRECT_PM();

#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_exit();
#endif /* CONFIG_TRACING_ISR */

	z_arm_int_exit();
}
ARCH_ISR_DIAG_ON

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second. If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle && ticks == K_TICKS_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		last_load = TIMER_STOPPED;
		return;
	}

#if !defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE)
	if (idle) {
		uint64_t timeout_us =
			((uint64_t)ticks * USEC_PER_SEC) / CONFIG_SYS_CLOCK_TICKS_PER_SEC;

		timeout_idle = true;

		/**
		 * Invoke the platform-specific layer to configure the LPTIM
		 * such that the system wakes up after the timeout elapses.
		 */
		z_cms_lptim_hook_on_lpm_entry(timeout_us);

#if !defined(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)
		/* Store the current value of the SysTick counter to be able
		 * to calculate a difference in measurements after exiting
		 * the low-power state.
		 */
		cycle_pre_idle = cycle_count + elapsed();
#else /* CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */
		/**
		 * SysTick will be placed under reset once we enter
		 * low-power mode. Turn it off right now and update
		 * the cycle counter, since we won't be able to do it
		 * after waking up.
		 */
		sys_clock_disable();

		cycle_count += elapsed();
		overflow_cyc = 0;
#endif /* !CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */
		return;
	}
#endif /* !CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE */

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t val1, val2;
	uint32_t last_load_ = last_load;

	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);

	uint32_t pending = elapsed();

	val1 = SysTick->VAL;

	cycle_count += pending;
	overflow_cyc = 0U;

	uint32_t unannounced = cycle_count - announced_cycles;

	if ((int32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires. Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to the next tick boundary */
		delay += unannounced;
		delay = DIV_ROUND_UP(delay, CYC_PER_TICK) * CYC_PER_TICK;
		delay -= unannounced;
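		/* Worked example with illustrative numbers: if
		 * CYC_PER_TICK = 10000, unannounced = 3000 and ticks = 2,
		 * delay goes 20000 -> 23000 -> 30000 -> 27000, so the
		 * timeout fires 27000 cycles from now, i.e. exactly on the
		 * third tick boundary after the last announcement.
		 */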
		delay = MAX(delay, MIN_DELAY);
		if (delay > MAX_CYCLES) {
			last_load = MAX_CYCLES;
		} else {
			last_load = delay;
		}
	}

	val2 = SysTick->VAL;

	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */

	/*
	 * Add the cycles that elapsed while computing the new load to
	 * cycle_count.
	 *
	 * Note that comparing val1 and val2 is normally not good enough to
	 * tell whether the counter wrapped during this interval. Indeed, if
	 * val1 is close to LOAD, there is little chance of catching val2
	 * between val1 and LOAD after a wrap; COUNTFLAG would also have to
	 * be checked. But since the load computation is faster than
	 * MIN_DELAY, we don't need to worry about this case here.
	 */
	if (val1 < val2) {
		cycle_count += (val1 + (last_load_ - val2));
	} else {
		cycle_count += (val1 - val2);
	}
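	/* Quick check of the wrap branch (illustrative numbers): with
	 * last_load_ = 1000, val1 = 3 and val2 = 998, the down-counter
	 * wrapped between the two reads and 3 + (1000 - 998) = 5 cycles
	 * elapsed: 3 -> 2 -> 1 -> 0 -> 999 -> 998.
	 */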
	k_spin_unlock(&lock, key);
#endif
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t unannounced = cycle_count - announced_cycles;
	uint32_t cyc = elapsed() + unannounced;

	k_spin_unlock(&lock, key);
	return cyc / CYC_PER_TICK;
}

uint32_t sys_clock_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = cycle_count;

	ret += elapsed();
	k_spin_unlock(&lock, key);
	return ret;
}

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
uint64_t sys_clock_cycle_get_64(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t ret = cycle_count + elapsed();

	k_spin_unlock(&lock, key);
	return ret;
}
#endif

void sys_clock_idle_exit(void)
{
#if !defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE)
	if (timeout_idle) {
		cycle_t systick_diff, missed_cycles;
		uint32_t dcycles, dticks;
		uint64_t systick_us, idle_timer_us;

#if !defined(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)
		/**
		 * Get the current value of SysTick and calculate how
		 * much time has passed since the last measurement.
		 */
		systick_diff = cycle_count + elapsed() - cycle_pre_idle;
		systick_us =
			((uint64_t)systick_diff * USEC_PER_SEC) / sys_clock_hw_cycles_per_sec();
#else /* CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */
		/* SysTick was placed under reset, so it didn't tick. */
		systick_diff = systick_us = 0;
#endif /* !CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */

		/**
		 * Query platform-specific code for the elapsed time
		 * according to the LPTIM.
		 */
		idle_timer_us = z_cms_lptim_hook_on_lpm_exit();

		/* Calculate the difference in measurements to get how much
		 * time the SysTick missed in the idle state.
		 */
		if (idle_timer_us < systick_us) {
			/* This case is possible when the time spent in low
			 * power mode is very short or 0. SysTick usually has
			 * a higher measurement resolution than the IDLE
			 * timer, so the time measured since the
			 * sys_clock_set_timeout() call can be higher.
			 */
			missed_cycles = 0;
		} else {
			uint64_t measurement_diff_us;

			measurement_diff_us = idle_timer_us - systick_us;
			missed_cycles = (sys_clock_hw_cycles_per_sec() * measurement_diff_us) /
					USEC_PER_SEC;
		}
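
		/* E.g. (illustrative numbers): if the LPTIM measured 1000 us
		 * more than SysTick at 48 MHz, missed_cycles =
		 * 48000000 * 1000 / 1000000 = 48000 cycles, i.e. exactly
		 * one tick at 1000 ticks/s.
		 */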

		/* Update the cycle counter to include the cycles missed in idle */
		cycle_count += missed_cycles;

		/* Announce the passed ticks to the kernel */
		dcycles = cycle_count + elapsed() - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);

		/* We've already performed all needed operations */
		timeout_idle = false;
	}
#endif /* !CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE */

	if (last_load == TIMER_STOPPED || IS_ENABLED(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)) {
		/* SysTick was stopped or placed under reset.
		 * Restart the timer from scratch.
		 */
		K_SPINLOCK(&lock) {
			last_load = CYC_PER_TICK;
			SysTick->LOAD = last_load - 1;
			SysTick->VAL = 0; /* resets timer to last_load */
			SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
		}
	}
}

void sys_clock_disable(void)
{
	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
}

static int sys_clock_driver_init(void)
{
	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
	last_load = CYC_PER_TICK;
	overflow_cyc = 0U;
	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */
	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
			  SysTick_CTRL_TICKINT_Msk |
			  SysTick_CTRL_CLKSOURCE_Msk);
	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);