/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/init.h>
#include <zephyr/drivers/timer/arm_arch_timer.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/irq.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <zephyr/arch/cpu.h>

#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
/* precompute CYC_PER_TICK at driver init to avoid repeated runtime divisions */
static uint32_t cyc_per_tick;
#define CYC_PER_TICK cyc_per_tick
#else
#define CYC_PER_TICK (uint32_t)(sys_clock_hw_cycles_per_sec() \
				/ CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#endif

#if defined(CONFIG_GDBSTUB)
/* When interactively debugging, the cycle diff can overflow a 32-bit variable */
#define cycle_diff_t uint64_t
#else
/* the unsigned long cast limits divisions to native CPU register width */
#define cycle_diff_t unsigned long
#endif
#define CYCLE_DIFF_MAX (~(cycle_diff_t)0)

/*
 * We have two constraints on the maximum number of cycles we can wait for.
 *
 * 1) sys_clock_announce() accepts at most INT32_MAX ticks.
 *
 * 2) The number of cycles between two reports must fit in a cycle_diff_t
 *    variable before converting it to ticks.
 *
 * Then:
 *
 * 3) Pick the smaller of (1) and (2).
 *
 * 4) Leave some room for the unavoidable IRQ servicing latency:
 *    use 3/4 of the max range.
 *
 * Finally, add the LSB value to the result so as to clear out the run of
 * consecutive set bits coming from the original max values, producing a
 * nicer literal for assembly generation.
 */
#define CYCLES_MAX_1	((uint64_t)INT32_MAX * (uint64_t)CYC_PER_TICK)
#define CYCLES_MAX_2	((uint64_t)CYCLE_DIFF_MAX)
#define CYCLES_MAX_3	MIN(CYCLES_MAX_1, CYCLES_MAX_2)
#define CYCLES_MAX_4	(CYCLES_MAX_3 / 2 + CYCLES_MAX_3 / 4)
#define CYCLES_MAX_5	(CYCLES_MAX_4 + LSB_GET(CYCLES_MAX_4))
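
/*
 * Worked example (illustrative only; assumes a hypothetical 1 GHz counter
 * and CONFIG_SYS_CLOCK_TICKS_PER_SEC=100, i.e. CYC_PER_TICK = 10000000):
 *
 *   CYCLES_MAX_1 = INT32_MAX * 10000000     ~= 2.1e16 cycles
 *   CYCLES_MAX_2 = 2^64 - 1 (64-bit build)  ~= 1.8e19 cycles
 *   CYCLES_MAX_3 = min of the two           ~= 2.1e16 cycles
 *   CYCLES_MAX_4 = 3/4 of that              ~= 1.6e16 cycles (~186 days)
 *
 * On a 32-bit build without CONFIG_GDBSTUB, cycle_diff_t is only 32 bits
 * wide, so CYCLES_MAX_2 = 2^32 - 1 becomes the limiting constraint instead
 * (~4.3 seconds' worth of cycles at 1 GHz).
 */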

#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
/* precompute CYCLES_MAX at driver init to avoid repeated runtime divisions */
static uint64_t cycles_max;
#define CYCLES_MAX cycles_max
#else
#define CYCLES_MAX CYCLES_MAX_5
#endif

static struct k_spinlock lock;
static uint64_t last_cycle;
static uint64_t last_tick;
static uint32_t last_elapsed;

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = ARM_ARCH_TIMER_IRQ;
#endif

static void arm_arch_timer_compare_isr(const void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);

#ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657
	/*
	 * Workaround required for Cortex-A9 MPCore erratum 740657,
	 * cf. ARM Cortex-A9 processors Software Developers Errata Notice,
	 * ARM document ID032315.
	 */

	if (!arm_arch_timer_get_int_status()) {
		/*
		 * If the event flag is not set, this is a spurious interrupt.
		 * DO NOT modify the compare register's value, DO NOT announce
		 * elapsed ticks!
		 */
		k_spin_unlock(&lock, key);
		return;
	}
#endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */

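	/*
	 * Convert the raw cycle delta since the last announcement into whole
	 * ticks. last_cycle is advanced only by the whole-tick portion, so
	 * any sub-tick remainder carries over into the next announcement
	 * instead of being lost.
	 */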
	uint64_t curr_cycle = arm_arch_timer_count();
	uint64_t delta_cycles = curr_cycle - last_cycle;
	uint32_t delta_ticks = (cycle_diff_t)delta_cycles / CYC_PER_TICK;

	last_cycle += (cycle_diff_t)delta_ticks * CYC_PER_TICK;
	last_tick += delta_ticks;
	last_elapsed = 0;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		uint64_t next_cycle = last_cycle + CYC_PER_TICK;

		arm_arch_timer_set_compare(next_cycle);
		arm_arch_timer_set_irq_mask(false);
	} else {
		arm_arch_timer_set_irq_mask(true);
#ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657
		/*
		 * In tickless mode, the compare register is normally not
		 * updated from within the ISR. Yet, to work around the timer's
		 * erratum, a new value *must* be written while the interrupt
		 * is being processed before the interrupt is acknowledged
		 * by the handling interrupt controller.
		 */
		arm_arch_timer_set_compare(~0ULL);
	}

	/*
	 * Clear the event flag so that in case the erratum strikes (the timer's
	 * vector will still be indicated as pending by the GIC's pending register
	 * after this ISR has been executed) the error will be detected by the
	 * check performed upon entry of the ISR -> the event flag is not set,
	 * therefore, no actual hardware interrupt has occurred.
	 */
	arm_arch_timer_clear_int_status();
#else
	}
#endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */

	k_spin_unlock(&lock, key);

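	/*
	 * Announce with the lock released: sys_clock_announce() may invoke
	 * timeout callbacks that call back into this driver (e.g. via
	 * sys_clock_set_timeout()), which would deadlock on the spinlock.
	 */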
	sys_clock_announce(delta_ticks);
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	if (idle && ticks == K_TICKS_FOREVER) {
		return;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t next_cycle;

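	/*
	 * Program the compare register for the requested timeout. The target
	 * is computed as an absolute tick count scaled by CYC_PER_TICK, then
	 * clamped to CYCLES_MAX so the next cycle delta still fits both
	 * sys_clock_announce() and cycle_diff_t (see the derivation above).
	 */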
	if (ticks == K_TICKS_FOREVER) {
		next_cycle = last_cycle + CYCLES_MAX;
	} else {
		next_cycle = (last_tick + last_elapsed + ticks) * CYC_PER_TICK;
		if ((next_cycle - last_cycle) > CYCLES_MAX) {
			next_cycle = last_cycle + CYCLES_MAX;
		}
	}

	arm_arch_timer_set_compare(next_cycle);
	arm_arch_timer_set_irq_mask(false);
	k_spin_unlock(&lock, key);
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t curr_cycle = arm_arch_timer_count();
	uint64_t delta_cycles = curr_cycle - last_cycle;
	uint32_t delta_ticks = (cycle_diff_t)delta_cycles / CYC_PER_TICK;

	last_elapsed = delta_ticks;
	k_spin_unlock(&lock, key);
	return delta_ticks;
}

uint32_t sys_clock_cycle_get_32(void)
{
	return (uint32_t)arm_arch_timer_count();
}

uint64_t sys_clock_cycle_get_64(void)
{
	return arm_arch_timer_count();
}

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
void arch_busy_wait(uint32_t usec_to_wait)
{
	if (usec_to_wait == 0) {
		return;
	}

	uint64_t start_cycles = arm_arch_timer_count();

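	/*
	 * Dividing before multiplying keeps the intermediate value from
	 * overflowing 64 bits for large usec_to_wait values, at the cost of
	 * truncating the cycles-per-microsecond ratio to an integer.
	 */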
	uint64_t cycles_to_wait = sys_clock_hw_cycles_per_sec() / USEC_PER_SEC * usec_to_wait;

	for (;;) {
		uint64_t current_cycles = arm_arch_timer_count();

		/* unsigned subtraction handles a rollover of the counter value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
}
#endif

#ifdef CONFIG_SMP
void smp_timer_init(void)
{
	/*
	 * Set the initial compare value and unmask the timer interrupt on
	 * each secondary core.
	 */
	arm_arch_timer_set_compare(last_cycle + CYC_PER_TICK);
	arm_arch_timer_enable(true);
	irq_enable(ARM_ARCH_TIMER_IRQ);
	arm_arch_timer_set_irq_mask(false);
}
#endif

static int sys_clock_driver_init(void)
{
	IRQ_CONNECT(ARM_ARCH_TIMER_IRQ, ARM_ARCH_TIMER_PRIO,
		    arm_arch_timer_compare_isr, NULL, ARM_ARCH_TIMER_FLAGS);
	arm_arch_timer_init();
#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
	cyc_per_tick = sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
	cycles_max = CYCLES_MAX_5;
#endif
	arm_arch_timer_enable(true);
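	/*
	 * Align the driver's bookkeeping to the most recent tick boundary:
	 * last_cycle is rounded down to a whole multiple of CYC_PER_TICK so
	 * the first announced delta is measured from a tick edge.
	 */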
	last_tick = arm_arch_timer_count() / CYC_PER_TICK;
	last_cycle = last_tick * CYC_PER_TICK;
	arm_arch_timer_set_compare(last_cycle + CYC_PER_TICK);
	irq_enable(ARM_ARCH_TIMER_IRQ);
	arm_arch_timer_set_irq_mask(false);

	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);