/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <arch/arm/aarch32/cortex_m/cmsis.h>

#define COUNTER_MAX 0x00ffffff
#define TIMER_STOPPED 0xff000000

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)

/* Minimum cycles in the future to try to program.  Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit.  So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked.  Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024, (CYC_PER_TICK/16))
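
/* Illustrative numbers (hypothetical configuration): with a 48 MHz
 * core clock and CONFIG_SYS_CLOCK_TICKS_PER_SEC=100:
 *
 *   CYC_PER_TICK = 48000000 / 100            = 480000
 *   MAX_TICKS    = (0x00ffffff / 480000) - 1 = 33
 *   MAX_CYCLES   = 33 * 480000               = 15840000
 *   MIN_DELAY    = MAX(1024, 480000 / 16)    = 30000 cycles (625 us)
 */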

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL))

static struct k_spinlock lock;

static uint32_t last_load;

/*
 * This local variable holds the number of SysTick HW cycles elapsed,
 * and it is updated in sys_clock_isr() and sys_clock_set_timeout().
 *
 * Note:
 *  At an arbitrary point in time the "current" value of the SysTick
 *  HW timer is calculated as:
 *
 * t = cycle_count + elapsed();
 */
static uint32_t cycle_count;
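
/* For example (hypothetical values): if cycle_count was last updated
 * at 1000000 cycles and the SysTick counter has since counted down
 * 300 cycles with no wrap, elapsed() returns 300 and the current
 * time t is 1000300 cycles.
 */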

/*
 * This local variable holds the number of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 */
static uint32_t announced_cycles;

/*
 * This local variable holds the number of elapsed HW cycles due to
 * SysTick timer wraps ('overflows') and is used in the calculation
 * in the elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * overflow_cyc must be reset to zero.
 */
static volatile uint32_t overflow_cyc;

/* This internal function calculates the number of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter was
 * updated. 'cycle_count' may be updated either by the ISR, or when we
 * re-program the SysTick.LOAD register, in sys_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cyc' counter, which
 * holds the number of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of SysTick.LOAD must clear the SysTick.COUNTER
 *   register and the 'overflow_cyc' counter.
 * - the ISR must clear the 'overflow_cyc' counter.
 * - no more than one counter wrap has occurred between
 *     - the timer reset or the last time the function was called
 *     - and the completion of the current call to the function.
 * - the function is invoked with interrupts disabled.
 */
static uint32_t elapsed(void)
{
	uint32_t val1 = SysTick->VAL;	/* A */
	uint32_t ctrl = SysTick->CTRL;	/* B */
	uint32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps at zero automatically,
	 * setting the COUNTFLAG field of the CTRL register when it
	 * does.  Reading the control register automatically clears
	 * that field.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it.
		 */
		(void)SysTick->CTRL;
	}

	return (last_load - val2) + overflow_cyc;
}
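
/* Worked example for the wrap detection above (hypothetical values):
 * assume last_load = 480000, overflow_cyc = 0, and the counter wraps
 * between reads A and B (case 2): val1 = 5, COUNTFLAG is set in ctrl,
 * val2 = 479990.  Because COUNTFLAG is set (and val1 < val2),
 * last_load is added to overflow_cyc, and the function returns
 * (480000 - 479990) + 480000 = 480010 elapsed cycles.
 */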

/* Called out of platform assembly, not hooked via IRQ_CONNECT... */
void sys_clock_isr(void *arg)
{
	ARG_UNUSED(arg);
	uint32_t dticks;

	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
	elapsed();

	/* Increment the number of HW cycles elapsed (complete counter
	 * cycles) and announce the progress to the kernel.
	 */
	cycle_count += overflow_cyc;
	overflow_cyc = 0;

	if (TICKLESS) {
		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
		 * in sys_clock_set_timeout(), followed by resetting of
		 * the counter (VAL = 0).
		 *
		 * If a timer wrap occurs right when we re-program LOAD,
		 * the ISR is triggered immediately after
		 * sys_clock_set_timeout() returns; in that case we must
		 * not increment cycle_count again, because the value was
		 * already updated before LOAD was re-programmed.
		 *
		 * We can assess whether this is the case by inspecting
		 * COUNTFLAG.
		 */

		dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);
	} else {
		sys_clock_announce(1);
	}
	z_arm_int_exit();
}
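
/* Worked example for the TICKLESS announcement above (hypothetical
 * values): with CYC_PER_TICK = 480000 and cycle_count -
 * announced_cycles = 1200000 unannounced cycles, dticks = 1200000 /
 * 480000 = 2; announced_cycles advances by 2 * 480000 = 960000, and
 * the remaining 240000 cycles carry over to a later announcement.
 */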

int sys_clock_driver_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
	last_load = CYC_PER_TICK - 1;
	overflow_cyc = 0U;
	SysTick->LOAD = last_load;
	SysTick->VAL = 0; /* resets timer to last_load */
	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
			  SysTick_CTRL_TICKINT_Msk |
			  SysTick_CTRL_CLKSOURCE_Msk);
	return 0;
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second.  If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle &&
	    ticks == K_TICKS_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		last_load = TIMER_STOPPED;
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t val1, val2;
	uint32_t last_load_ = last_load;

	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);

	uint32_t pending = elapsed();

	val1 = SysTick->VAL;

	cycle_count += pending;
	overflow_cyc = 0U;

	uint32_t unannounced = cycle_count - announced_cycles;

	if ((int32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires.  Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to the next tick boundary */
		delay += unannounced;
		delay = ((delay + CYC_PER_TICK - 1) / CYC_PER_TICK) *
			CYC_PER_TICK;
		delay -= unannounced;
		delay = MAX(delay, MIN_DELAY);
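
		/* Worked example (hypothetical values): with
		 * CYC_PER_TICK = 480000 and unannounced = 100000, a
		 * request of ticks = 2 starts with delay = 960000,
		 * becomes 1060000 after adding unannounced, rounds up
		 * to 1440000 (the next tick boundary), and drops back
		 * to 1340000 once unannounced is subtracted again, so
		 * the counter expires exactly on the third tick
		 * boundary after the last announcement.
		 */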
		if (delay > MAX_CYCLES) {
			last_load = MAX_CYCLES;
		} else {
			last_load = delay;
		}
	}

	val2 = SysTick->VAL;

	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */

	/*
	 * Add the cycles that elapsed while computing the new load to
	 * cycle_count.
	 *
	 * Note that comparing val1 and val2 is normally not good enough
	 * to tell whether the counter wrapped during this interval.
	 * Indeed, if val1 is close to LOAD, there is little chance of
	 * catching val2 between val1 and LOAD after a wrap; COUNTFLAG
	 * would have to be checked in addition.  But since the load
	 * computation is faster than MIN_DELAY, we don't need to worry
	 * about this case.
	 */
	if (val1 < val2) {
		cycle_count += (val1 + (last_load_ - val2));
	} else {
		cycle_count += (val1 - val2);
	}
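
	/* Worked example for the adjustment above (hypothetical
	 * values): with last_load_ = 480000, val1 = 200 and
	 * val2 = 479900, the counter wrapped between the two reads,
	 * so the elapsed time is val1 plus the cycles counted after
	 * the reload: 200 + (480000 - 479900) = 300 cycles.
	 */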
	k_spin_unlock(&lock, key);
#endif
}

uint32_t sys_clock_elapsed(void)
{
	if (!TICKLESS) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t cyc = elapsed() + cycle_count - announced_cycles;

	k_spin_unlock(&lock, key);
	return cyc / CYC_PER_TICK;
}

uint32_t sys_clock_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = elapsed() + cycle_count;

	k_spin_unlock(&lock, key);
	return ret;
}

void sys_clock_idle_exit(void)
{
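	/* last_load was set to TIMER_STOPPED (a value above COUNTER_MAX,
	 * so it can never be an actual load value) when the counter was
	 * shut off in sys_clock_set_timeout(); only in that case does
	 * the counter need to be re-enabled here.
	 */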
	if (last_load == TIMER_STOPPED) {
		SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
	}
}

void sys_clock_disable(void)
{
	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
}