/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6 #include <zephyr/init.h>
7 #include <zephyr/drivers/timer/system_timer.h>
8 #include <zephyr/sys_clock.h>
9 #include <zephyr/spinlock.h>
10 #include <cmsis_core.h>
11 #include <zephyr/irq.h>
12 #include <zephyr/sys/util.h>
13 
/* SysTick is a 24-bit down-counter; this is its maximum reload value. */
#define COUNTER_MAX 0x00ffffff
/* Sentinel stored in 'last_load' when the counter has been shut off in
 * idle (value is outside the 24-bit range, so it can never be a real load).
 */
#define TIMER_STOPPED 0xff000000

/* Hardware cycles per kernel tick. */
#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
/* Longest timeout (in ticks / cycles) programmable without overflowing
 * the 24-bit counter; one tick of headroom is kept for rounding.
 */
#define MAX_TICKS ((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)

/* Minimum cycles in the future to try to program.  Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit.  So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked.  Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024U, ((uint32_t)CYC_PER_TICK/16U))

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL))

/* Protects all driver state below against concurrent access. */
static struct k_spinlock lock;

/* Value most recently programmed into SysTick.LOAD (or TIMER_STOPPED). */
static uint32_t last_load;

/* Width of the absolute cycle counters, selectable via Kconfig. */
#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
#define cycle_t uint64_t
#else
#define cycle_t uint32_t
#endif

/*
 * This local variable holds the amount of SysTick HW cycles elapsed
 * and it is updated in sys_clock_isr() and sys_clock_set_timeout().
 *
 * Note:
 *  At an arbitrary point in time the "current" value of the SysTick
 *  HW timer is calculated as:
 *
 * t = cycle_counter + elapsed();
 */
static cycle_t cycle_count;

/*
 * This local variable holds the amount of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 *
 * Note:
 * Additions/subtractions/comparisons of 64-bits values on 32-bits systems
 * are very cheap. Divisions are not. Make sure the difference between
 * cycle_count and announced_cycles is stored in a 32-bit variable before
 * dividing it by CYC_PER_TICK.
 */
static cycle_t announced_cycles;

/*
 * This local variable holds the amount of elapsed HW cycles due to
 * SysTick timer wraps ('overflows') and is used in the calculation
 * in elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * the overflow_cyc must be reset to zero.
 */
static volatile uint32_t overflow_cyc;
79 
80 /* This internal function calculates the amount of HW cycles that have
81  * elapsed since the last time the absolute HW cycles counter has been
82  * updated. 'cycle_count' may be updated either by the ISR, or when we
83  * re-program the SysTick.LOAD register, in sys_clock_set_timeout().
84  *
85  * Additionally, the function updates the 'overflow_cyc' counter, that
86  * holds the amount of elapsed HW cycles due to (possibly) multiple
87  * timer wraps (overflows).
88  *
89  * Prerequisites:
90  * - reprogramming of SysTick.LOAD must be clearing the SysTick.COUNTER
91  *   register and the 'overflow_cyc' counter.
92  * - ISR must be clearing the 'overflow_cyc' counter.
93  * - no more than one counter-wrap has occurred between
94  *     - the timer reset or the last time the function was called
95  *     - and until the current call of the function is completed.
96  * - the function is invoked with interrupts disabled.
97  */
/* Returns HW cycles elapsed since 'cycle_count' was last brought up to
 * date; also accumulates any detected counter wrap into 'overflow_cyc'
 * (see prerequisites in the comment block above).
 */
static uint32_t elapsed(void)
{
	/* NOTE(review): the A/B/C read order below is load-bearing for the
	 * wrap-detection logic that follows; do not reorder these reads.
	 */
	uint32_t val1 = SysTick->VAL;	/* A */
	uint32_t ctrl = SysTick->CTRL;	/* B */
	uint32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps after zero automatically.
	 * The COUNTFLAG field of the CTRL register is set when it
	 * decrements from 1 to 0. Reading the control register
	 * automatically clears that field. When a timer is started,
	 * count begins at zero then wraps after the first cycle.
	 * Reference:
	 *  Armv6-m (B3.3.1) https://developer.arm.com/documentation/ddi0419
	 *  Armv7-m (B3.3.1) https://developer.arm.com/documentation/ddi0403
	 *  Armv8-m (B11.1)  https://developer.arm.com/documentation/ddi0553
	 *
	 * First, manually wrap/realign val1 and val2 from [0:last_load-1]
	 * to [1:last_load]. This allows subsequent code to assume that
	 * COUNTFLAG and wrapping occur on the same cycle.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
	if (val1 == 0) {
		val1 = last_load;
	}
	if (val2 == 0) {
		val2 = last_load;
	}

	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it. */
		(void)SysTick->CTRL;
	}

	/* Down-counter: cycles since reload plus accumulated wraps. */
	return (last_load - val2) + overflow_cyc;
}
145 
146 /* Callout out of platform assembly, not hooked via IRQ_CONNECT... */
sys_clock_isr(void * arg)147 void sys_clock_isr(void *arg)
148 {
149 	ARG_UNUSED(arg);
150 	uint32_t dcycles;
151 	uint32_t dticks;
152 
153 	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
154 	elapsed();
155 
156 	/* Increment the amount of HW cycles elapsed (complete counter
157 	 * cycles) and announce the progress to the kernel.
158 	 */
159 	cycle_count += overflow_cyc;
160 	overflow_cyc = 0;
161 
162 	if (TICKLESS) {
163 		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
164 		 * in sys_clock_set_timeout(), followed by resetting of
165 		 * the counter (VAL = 0).
166 		 *
167 		 * If a timer wrap occurs right when we re-program LOAD,
168 		 * the ISR is triggered immediately after sys_clock_set_timeout()
169 		 * returns; in that case we shall not increment the cycle_count
170 		 * because the value has been updated before LOAD re-program.
171 		 *
172 		 * We can assess if this is the case by inspecting COUNTFLAG.
173 		 */
174 
175 		dcycles = cycle_count - announced_cycles;
176 		dticks = dcycles / CYC_PER_TICK;
177 		announced_cycles += dticks * CYC_PER_TICK;
178 		sys_clock_announce(dticks);
179 	} else {
180 		sys_clock_announce(1);
181 	}
182 	z_arm_int_exit();
183 }
184 
/* Programs the next timer interrupt 'ticks' ticks in the future (tickless
 * builds only; in ticked builds the body compiles to the idle shutoff plus
 * a no-op).  Updates cycle_count with cycles consumed so far so the ISR
 * can announce the correct delta later.
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second.  If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle && ticks == K_TICKS_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		/* Sentinel lets sys_clock_idle_exit() know to re-enable. */
		last_load = TIMER_STOPPED;
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t val1, val2;
	/* Snapshot of last_load before it is overwritten below; needed to
	 * interpret val1/val2 taken against the OLD reload value.
	 */
	uint32_t last_load_ = last_load;

	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);

	uint32_t pending = elapsed();

	val1 = SysTick->VAL;

	/* Bring cycle_count up to date; elapsed() already folded any wrap
	 * into its return value, so overflow_cyc must be reset here.
	 */
	cycle_count += pending;
	overflow_cyc = 0U;

	uint32_t unannounced = cycle_count - announced_cycles;

	if ((int32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires.  Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to next tick boundary */
		delay += unannounced;
		delay = DIV_ROUND_UP(delay, CYC_PER_TICK) * CYC_PER_TICK;
		delay -= unannounced;
		delay = MAX(delay, MIN_DELAY);
		if (delay > MAX_CYCLES) {
			last_load = MAX_CYCLES;
		} else {
			last_load = delay;
		}
	}

	val2 = SysTick->VAL;

	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */

	/*
	 * Add elapsed cycles while computing the new load to cycle_count.
	 *
	 * Note that comparing val1 and val2 is normaly not good enough to
	 * guess if the counter wrapped during this interval. Indeed if val1 is
	 * close to LOAD, then there are little chances to catch val2 between
	 * val1 and LOAD after a wrap. COUNTFLAG should be checked in addition.
	 * But since the load computation is faster than MIN_DELAY, then we
	 * don't need to worry about this case.
	 */
	if (val1 < val2) {
		/* Down-counter wrapped between the two reads. */
		cycle_count += (val1 + (last_load_ - val2));
	} else {
		cycle_count += (val1 - val2);
	}
	k_spin_unlock(&lock, key);
#endif
}
265 
sys_clock_elapsed(void)266 uint32_t sys_clock_elapsed(void)
267 {
268 	if (!TICKLESS) {
269 		return 0;
270 	}
271 
272 	k_spinlock_key_t key = k_spin_lock(&lock);
273 	uint32_t unannounced = cycle_count - announced_cycles;
274 	uint32_t cyc = elapsed() + unannounced;
275 
276 	k_spin_unlock(&lock, key);
277 	return cyc / CYC_PER_TICK;
278 }
279 
sys_clock_cycle_get_32(void)280 uint32_t sys_clock_cycle_get_32(void)
281 {
282 	k_spinlock_key_t key = k_spin_lock(&lock);
283 	uint32_t ret = cycle_count;
284 
285 	ret += elapsed();
286 	k_spin_unlock(&lock, key);
287 	return ret;
288 }
289 
290 #ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
sys_clock_cycle_get_64(void)291 uint64_t sys_clock_cycle_get_64(void)
292 {
293 	k_spinlock_key_t key = k_spin_lock(&lock);
294 	uint64_t ret = cycle_count + elapsed();
295 
296 	k_spin_unlock(&lock, key);
297 	return ret;
298 }
299 #endif
300 
sys_clock_idle_exit(void)301 void sys_clock_idle_exit(void)
302 {
303 	if (last_load == TIMER_STOPPED) {
304 		SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
305 	}
306 }
307 
sys_clock_disable(void)308 void sys_clock_disable(void)
309 {
310 	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
311 }
312 
sys_clock_driver_init(void)313 static int sys_clock_driver_init(void)
314 {
315 
316 	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
317 	last_load = CYC_PER_TICK;
318 	overflow_cyc = 0U;
319 	SysTick->LOAD = last_load - 1;
320 	SysTick->VAL = 0; /* resets timer to last_load */
321 	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
322 			  SysTick_CTRL_TICKINT_Msk |
323 			  SysTick_CTRL_CLKSOURCE_Msk);
324 	return 0;
325 }
326 
327 SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
328 	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
329