/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/init.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <cmsis_core.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/counter.h>

#define COUNTER_MAX 0x00ffffff
#define TIMER_STOPPED 0xff000000
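
/* Note: TIMER_STOPPED is a software sentinel for last_load, not a hardware
 * value; it cannot collide with a real LOAD value because SysTick's LOAD
 * register is only 24 bits wide (COUNTER_MAX = 0x00ffffff).
 */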

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() \
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)

/* Minimum cycles in the future to try to program. Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit. So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked. Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024U, ((uint32_t)CYC_PER_TICK/16U))
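
/* Illustrative sketch (hypothetical numbers, not from any particular SoC):
 * with a 48 MHz core clock and CONFIG_SYS_CLOCK_TICKS_PER_SEC=100,
 * CYC_PER_TICK is 480000, so CYC_PER_TICK/16 = 30000 > 1024 and
 * MIN_DELAY = 30000 cycles (~625 us). Only with very slow clocks or very
 * fast tick rates does the 1024-cycle floor take effect.
 */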

static struct k_spinlock lock;

static uint32_t last_load;

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
typedef uint64_t cycle_t;
#else
typedef uint32_t cycle_t;
#endif

/*
 * This local variable holds the number of SysTick HW cycles elapsed,
 * and it is updated in sys_clock_isr() and sys_clock_set_timeout().
 *
 * Note:
 * At an arbitrary point in time the "current" value of the SysTick
 * HW timer is calculated as:
 *
 * t = cycle_count + elapsed();
 */
static cycle_t cycle_count;

/*
 * This local variable holds the number of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 *
 * Note:
 * Additions/subtractions/comparisons of 64-bit values on 32-bit systems
 * are very cheap. Divisions are not. Make sure the difference between
 * cycle_count and announced_cycles is stored in a 32-bit variable before
 * dividing it by CYC_PER_TICK.
 */
static cycle_t announced_cycles;
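
/* Sketch of the pattern the note above asks for (the ISR below follows it):
 *
 *     uint32_t dcycles = cycle_count - announced_cycles;  // 32-bit diff
 *     uint32_t dticks  = dcycles / CYC_PER_TICK;          // 32-bit division
 *
 * Truncating the 64-bit difference to 32 bits first keeps the division
 * cheap; this is safe as long as announcements happen well within one
 * 32-bit wrap of each other.
 */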

/*
 * This local variable holds the number of elapsed HW cycles due to
 * SysTick timer wraps ('overflows') and is used in the calculation
 * in the elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * overflow_cyc must be reset to zero.
 */
static volatile uint32_t overflow_cyc;

#ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
/* This local variable indicates that the timeout was set right before
 * entering the idle state.
 *
 * It is used for chips that have to use a separate idle timer in such
 * a case, because the Cortex-M SysTick is not clocked in the low power
 * mode state.
 */
static bool timeout_idle;

/* Cycle counter value before entering the idle state. */
static cycle_t cycle_pre_idle;

/* Idle timer value before entering the idle state. */
static uint32_t idle_timer_pre_idle;

/* Idle timer used to wake the system while it is in the idle state */
static const struct device *idle_timer = DEVICE_DT_GET(DT_CHOSEN(zephyr_cortex_m_idle_timer));
#endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */

/* This internal function calculates the number of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter was
 * updated. 'cycle_count' may be updated either by the ISR, or when we
 * re-program the SysTick.LOAD register, in sys_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cyc' counter, which
 * holds the number of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of SysTick.LOAD must clear the SysTick.VAL
 *   register and the 'overflow_cyc' counter.
 * - the ISR must clear the 'overflow_cyc' counter.
 * - no more than one counter wrap has occurred between
 *    - the timer reset or the last time the function was called
 *    - and the moment the current call of the function completes.
 * - the function is invoked with interrupts disabled.
 */
static uint32_t elapsed(void)
{
	uint32_t val1 = SysTick->VAL;	/* A */
	uint32_t ctrl = SysTick->CTRL;	/* B */
	uint32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps after zero automatically.
	 * The COUNTFLAG field of the CTRL register is set when it
	 * decrements from 1 to 0. Reading the control register
	 * automatically clears that field. When a timer is started,
	 * count begins at zero then wraps after the first cycle.
	 * Reference:
	 *  Armv6-m  (B3.3.1) https://developer.arm.com/documentation/ddi0419
	 *  Armv7-m  (B3.3.1) https://developer.arm.com/documentation/ddi0403
	 *  Armv8-m  (B11.1)  https://developer.arm.com/documentation/ddi0553
	 *
	 * First, manually wrap/realign val1 and val2 from [0:last_load-1]
	 * to [1:last_load]. This allows subsequent code to assume that
	 * COUNTFLAG and wrapping occur on the same cycle.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
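	/* Illustrative example (hypothetical numbers): suppose
	 * last_load = 1000, overflow_cyc is zero, and the counter wraps
	 * between A and B. We might read val1 = 2 (pre-wrap), COUNTFLAG
	 * set, and val2 = 998 (post-wrap). The check below adds last_load
	 * to overflow_cyc, and the function returns
	 * (1000 - 998) + 1000 = 1002 cycles since the reload that preceded
	 * the wrap, as expected.
	 */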
	if (val1 == 0) {
		val1 = last_load;
	}
	if (val2 == 0) {
		val2 = last_load;
	}

	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it.
		 */
		(void)SysTick->CTRL;
	}

	return (last_load - val2) + overflow_cyc;
}

/* sys_clock_isr() is called directly from the platform's vector table.
 * However, using ISR_DIRECT_DECLARE() is not suitable here due to
 * possible tracing overflow, so this is a stripped-down version of it.
 */
ARCH_ISR_DIAG_OFF
__attribute__((interrupt("IRQ"))) void sys_clock_isr(void)
{
	uint32_t dcycles;
	uint32_t dticks;

	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
	elapsed();

	/* Increment the number of HW cycles elapsed (complete counter
	 * cycles) and announce the progress to the kernel.
	 */
	cycle_count += overflow_cyc;
	overflow_cyc = 0;

#ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
	/* Rare case: the interrupt fired, with the previously programmed
	 * LOAD value, either just before entering idle (while SysTick was
	 * still clocked) or right after exiting idle, before the procedure
	 * in sys_clock_idle_exit() has run.
	 */
	if (timeout_idle) {
		ISR_DIRECT_PM();
		z_arm_int_exit();

		return;
	}
#endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */

	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
		 * in sys_clock_set_timeout(), followed by resetting of
		 * the counter (VAL = 0).
		 *
		 * If a timer wrap occurs right when we re-program LOAD,
		 * the ISR is triggered immediately after sys_clock_set_timeout()
		 * returns; in that case we shall not increment cycle_count,
		 * because the value was already updated before the LOAD
		 * re-program.
		 *
		 * We can assess whether this is the case by inspecting COUNTFLAG.
		 */

		dcycles = cycle_count - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);
	} else {
		sys_clock_announce(1);
	}

	ISR_DIRECT_PM();
	z_arm_int_exit();
}
ARCH_ISR_DIAG_ON

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second. If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle && ticks == K_TICKS_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		last_load = TIMER_STOPPED;
		return;
	}

#ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
	if (idle) {
		uint64_t timeout_us =
			((uint64_t)ticks * USEC_PER_SEC) / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
		struct counter_alarm_cfg cfg = {
			.callback = NULL,
			.ticks = counter_us_to_ticks(idle_timer, timeout_us),
			.user_data = NULL,
			.flags = 0,
		};

		timeout_idle = true;

		/* Set the alarm using the timer that keeps running in idle.
		 * Any needed ramp-up/setup time, lower accuracy, etc. should
		 * be included in the exit latency of the power state definition.
		 */
		counter_cancel_channel_alarm(idle_timer, 0);
		counter_set_channel_alarm(idle_timer, 0, &cfg);

		/* Store the current values to calculate a difference in
		 * measurements after exiting the idle state.
		 */
		counter_get_value(idle_timer, &idle_timer_pre_idle);
		cycle_pre_idle = cycle_count + elapsed();

		return;
	}
#endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t val1, val2;
	uint32_t last_load_ = last_load;

	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);

	uint32_t pending = elapsed();

	val1 = SysTick->VAL;

	cycle_count += pending;
	overflow_cyc = 0U;

	uint32_t unannounced = cycle_count - announced_cycles;

	if ((int32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires. Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to the next tick boundary */
		delay += unannounced;
		delay = DIV_ROUND_UP(delay, CYC_PER_TICK) * CYC_PER_TICK;
		delay -= unannounced;
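		/* Worked example (hypothetical numbers): with
		 * CYC_PER_TICK = 1000, unannounced = 250 and ticks = 3,
		 * delay goes 3000 -> 3250 -> rounded up to 4000 -> 3750.
		 * The timer then fires 4000 cycles (a whole number of
		 * ticks) after the last announced tick boundary.
		 */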
		delay = MAX(delay, MIN_DELAY);
		if (delay > MAX_CYCLES) {
			last_load = MAX_CYCLES;
		} else {
			last_load = delay;
		}
	}

	val2 = SysTick->VAL;

	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */

	/*
	 * Add the cycles that elapsed while computing the new load to
	 * cycle_count.
	 *
	 * Note that comparing val1 and val2 is normally not good enough to
	 * tell whether the counter wrapped during this interval. Indeed, if
	 * val1 is close to LOAD, there is little chance of catching val2
	 * between val1 and LOAD after a wrap; COUNTFLAG should be checked
	 * in addition. But since the load computation is faster than
	 * MIN_DELAY, we don't need to worry about this case.
	 */
	if (val1 < val2) {
		cycle_count += (val1 + (last_load_ - val2));
	} else {
		cycle_count += (val1 - val2);
	}
	k_spin_unlock(&lock, key);
#endif
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t unannounced = cycle_count - announced_cycles;
	uint32_t cyc = elapsed() + unannounced;

	k_spin_unlock(&lock, key);
	return cyc / CYC_PER_TICK;
}

uint32_t sys_clock_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = cycle_count;

	ret += elapsed();
	k_spin_unlock(&lock, key);
	return ret;
}

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
uint64_t sys_clock_cycle_get_64(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t ret = cycle_count + elapsed();

	k_spin_unlock(&lock, key);
	return ret;
}
#endif

void sys_clock_idle_exit(void)
{
#ifdef CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER
	if (timeout_idle) {
		cycle_t systick_diff, missed_cycles;
		uint32_t idle_timer_diff, idle_timer_post, dcycles, dticks;
		uint64_t systick_us, idle_timer_us;

		/* Get current values for both timers */
		counter_get_value(idle_timer, &idle_timer_post);
		systick_diff = cycle_count + elapsed() - cycle_pre_idle;

		/* Calculate how much time has passed since the last
		 * measurement, for both timers.
		 */
		/* Check for IDLE timer overflow */
		if (idle_timer_pre_idle > idle_timer_post) {
			idle_timer_diff =
				(counter_get_top_value(idle_timer) - idle_timer_pre_idle) +
				idle_timer_post + 1;
		} else {
			idle_timer_diff = idle_timer_post - idle_timer_pre_idle;
		}
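		/* Illustrative example (hypothetical numbers): with a 16-bit
		 * idle timer (top value 0xFFFF), pre-idle = 0xFFF0 and
		 * post-idle = 0x0010, the counter wrapped once, so
		 * idle_timer_diff = (0xFFFF - 0xFFF0) + 0x0010 + 1 = 0x20
		 * (32 idle-timer ticks). Note this arithmetic assumes at
		 * most one wrap while in idle.
		 */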
		idle_timer_us = counter_ticks_to_us(idle_timer, idle_timer_diff);
		systick_us =
			((uint64_t)systick_diff * USEC_PER_SEC) / sys_clock_hw_cycles_per_sec();

		/* Calculate the difference in measurements to get how much
		 * time the SysTick missed in the idle state.
		 */
		if (idle_timer_us < systick_us) {
			/* This case is possible when the time spent in the low
			 * power mode is very short or zero. SysTick usually has
			 * a higher measurement resolution than the IDLE timer,
			 * so its measurement of the time passed since the
			 * sys_clock_set_timeout() call can be larger.
			 */
			missed_cycles = 0;
		} else {
			uint64_t measurement_diff_us;

			measurement_diff_us = idle_timer_us - systick_us;
			missed_cycles = (sys_clock_hw_cycles_per_sec() * measurement_diff_us) /
					USEC_PER_SEC;
		}

		/* Update the cycle counter to include the cycles missed in idle */
		cycle_count += missed_cycles;

		/* Announce the passed ticks to the kernel */
		dcycles = cycle_count + elapsed() - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);

		/* We've already performed all the needed operations */
		timeout_idle = false;
	}
#endif /* CONFIG_CORTEX_M_SYSTICK_IDLE_TIMER */

	if (last_load == TIMER_STOPPED) {
		/* We really don't know here how much time has passed,
		 * so let's restart the timer from scratch.
		 */
		K_SPINLOCK(&lock) {
			last_load = CYC_PER_TICK;
			SysTick->LOAD = last_load - 1;
			SysTick->VAL = 0; /* resets timer to last_load */
			SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
		}
	}
}

void sys_clock_disable(void)
{
	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
}

static int sys_clock_driver_init(void)
{
	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
	last_load = CYC_PER_TICK;
	overflow_cyc = 0U;
	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */
	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
			  SysTick_CTRL_TICKINT_Msk |
			  SysTick_CTRL_CLKSOURCE_Msk);
	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);