/*
 * Copyright (c) 2019 Intel Corporation
 * Copyright (c) 2019 Microchip Technology Incorporated
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT microchip_xec_rtos_timer

#include <zephyr/init.h>
#include <zephyr/devicetree.h>
#include <soc.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <cmsis_core.h>
#include <zephyr/irq.h>

BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP), "XEC RTOS timer doesn't support SMP");
BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 32768,
	     "XEC RTOS timer HW frequency is fixed at 32768");

#define DEBUG_RTOS_TIMER 0

#if DEBUG_RTOS_TIMER != 0
/* Enable feature to halt timer on JTAG/SWD CPU halt */
#define TIMER_START_VAL (MCHP_RTMR_CTRL_BLK_EN | MCHP_RTMR_CTRL_START \
			 | MCHP_RTMR_CTRL_HW_HALT_EN)
#else
#define TIMER_START_VAL (MCHP_RTMR_CTRL_BLK_EN | MCHP_RTMR_CTRL_START)
#endif

/*
 * Overview:
 *
 * This driver enables the Microchip XEC 32KHz based RTOS timer as the Zephyr
 * system timer. It supports both legacy ("tickful") mode and
 * TICKLESS_KERNEL. The XEC RTOS timer is a down counter with a fixed
 * frequency of 32768 Hz. The driver is based upon the Intel local APIC
 * timer driver.
 *
 * Configuration:
 *
 * CONFIG_MCHP_XEC_RTOS_TIMER=y
 *
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=<hz> must be set to 32768.
 *
 * To keep truncation errors from accumulating when converting between
 * time, ticks, and HW cycles, set the tick rate equal to the hardware
 * frequency. With the tickless kernel enabled, the kernel will not
 * program a periodic timer at this fast rate.
 * CONFIG_SYS_CLOCK_TICKS_PER_SEC=32768
 */

#define CYCLES_PER_TICK \
	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

#define TIMER_REGS	\
	((struct rtmr_regs *)DT_INST_REG_ADDR(0))

#define ECIA_XEC_REGS	\
	((struct ecia_regs *)DT_REG_ADDR(DT_NODELABEL(ecia)))

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
#define PCR_XEC_REGS	\
	((struct pcr_regs *)DT_REG_ADDR(DT_NODELABEL(pcr)))

/*
 * The pcrs property cell at index 0 is the register index into the array of
 * 32-bit PCR SLP_EN, CLK_REQ, or RST_EN registers. The cell at index 1 is the
 * bit position.
 */
#define BTMR32_0_PCR_REG_IDX	(DT_PROP_BY_IDX(DT_NODELABEL(timer4), pcrs, 0))
#define BTMR32_0_PCR_BITPOS	(DT_PROP_BY_IDX(DT_NODELABEL(timer4), pcrs, 1))

#define BTMR32_0_REGS	\
	((struct btmr_regs *)(DT_REG_ADDR(DT_NODELABEL(timer4))))
#endif

/* Mask off bits[31:28] of 32-bit count */
#define TIMER_MAX		0x0fffffffu
#define TIMER_COUNT_MASK	0x0fffffffu
#define TIMER_STOPPED		0xf0000000u

/* Adjust cycle count programmed into timer for HW restart latency */
#define TIMER_ADJUST_LIMIT	2
#define TIMER_ADJUST_CYCLES	1

/* Maximum number of ticks we can load into the timer in one shot */
#define MAX_TICKS (TIMER_MAX / CYCLES_PER_TICK)

#define TIMER_GIRQ		DT_INST_PROP_BY_IDX(0, girqs, 0)
#define TIMER_GIRQ_POS		DT_INST_PROP_BY_IDX(0, girqs, 1)
#define TIMER_NVIC_NO		DT_INST_IRQN(0)
#define TIMER_NVIC_PRIO		DT_INST_IRQ(0, priority)
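
/*
 * The GIRQ and NVIC values above come from this instance's devicetree node.
 * A minimal sketch of such a node is shown below; the register address,
 * interrupt number, and girqs cells are illustrative (hypothetical) values
 * only -- the authoritative node is defined in the SoC .dtsi:
 *
 *   rtimer: timer@40007400 {
 *           compatible = "microchip,xec-rtos-timer";
 *           reg = <0x40007400 0x10>;
 *           interrupts = <111 0>;
 *           girqs = <23 10>;
 *   };
 *
 * With that node, TIMER_GIRQ would be 23 and TIMER_GIRQ_POS would be 10.
 */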

/*
 * The spinlock protects all access to the RTMR registers, as well as
 * 'total_cycles', 'last_announcement', and 'cached_icr'.
 *
 * One important invariant that must be observed: `total_cycles` + `cached_icr`
 * is always an integral multiple of CYCLES_PER_TICK; that is, timer interrupts
 * are only ever scheduled to occur at tick boundaries.
 */

static struct k_spinlock lock;
static uint32_t total_cycles;
static uint32_t cached_icr = CYCLES_PER_TICK;

/*
 * NOTE: using inline for speed instead of call to external SoC function.
 * MEC GIRQ numbers are documented as 8 to 26; check and convert to a
 * zero based index.
 */
static inline void girq_src_clr(int girq, int bitpos)
{
	if ((girq < 8) || (girq > 26)) {
		return;
	}

	ECIA_XEC_REGS->GIRQ[girq - 8].SRC = BIT(bitpos);
}

static inline void girq_src_en(int girq, int bitpos)
{
	if ((girq < 8) || (girq > 26)) {
		return;
	}

	ECIA_XEC_REGS->GIRQ[girq - 8].EN_SET = BIT(bitpos);
}

static inline void girq_src_dis(int girq, int bitpos)
{
	if ((girq < 8) || (girq > 26)) {
		return;
	}

	ECIA_XEC_REGS->GIRQ[girq - 8].EN_CLR = BIT(bitpos);
}

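/*
 * Disable the timer, re-enable its block, write the preload value, then set
 * START. As described in the comments below, the 0->1 transition of the
 * START bit is what loads the preload register into the read-only count
 * register.
 */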
static void timer_restart(uint32_t countdown)
{
	TIMER_REGS->CTRL = 0U;
	TIMER_REGS->CTRL = MCHP_RTMR_CTRL_BLK_EN;
	TIMER_REGS->PRLD = countdown;
	TIMER_REGS->CTRL = TIMER_START_VAL;
}

/*
 * Read the RTOS timer counter, handling the case where the timer has been
 * reloaded within one 32KHz clock of reading its count register.
 * The RTOS timer hardware must synchronize the write to its control register
 * on the AHB clock domain with the 32KHz clock domain of its internal logic.
 * This synchronization can take from nearly 0 time up to one 32KHz clock,
 * depending upon where within the 32KHz period the AHB register write lands.
 * We detect that the timer is in the load state by checking the read-only
 * count register and the START bit in the control register. If the count
 * register is 0 and the START bit is set, then the timer has been started
 * and is in the process of moving the preload register value into the count
 * register.
 */
static inline uint32_t timer_count(void)
{
	uint32_t ccr = TIMER_REGS->CNT;

	if ((ccr == 0) && (TIMER_REGS->CTRL & MCHP_RTMR_CTRL_START)) {
		ccr = cached_icr;
	}

	return ccr;
}

#ifdef CONFIG_TICKLESS_KERNEL

static uint32_t last_announcement;	/* last time we called sys_clock_announce() */

/*
 * Request a timeout n Zephyr ticks in the future from now.
 * A requested value of n <= 1 means the kernel wants the tick announced
 * as soon as possible, ideally no more than one tick in the future.
 *
 * Per the comment below we don't clear the RTMR pending interrupt.
 * The RTMR counter register is read-only and is loaded from the preload
 * register by a 0->1 transition of the control register start bit.
 * Writing a new value to preload only takes effect once the count
 * register reaches 0.
 */
void sys_clock_set_timeout(int32_t n, bool idle)
{
	uint32_t ccr, temp;
	int   full_ticks;	/* number of complete ticks we'll wait */
	uint32_t full_cycles;	/* full_ticks represented as cycles */
	uint32_t partial_cycles;	/* number of cycles to first tick boundary */

	if (idle && (n == K_TICKS_FOREVER)) {
		/*
		 * We are not in a locked section. Are writes to two
		 * global objects safe from pre-emption?
		 */
		TIMER_REGS->CTRL = 0U; /* stop timer */
		cached_icr = TIMER_STOPPED;
		return;
	}

	if (n < 1) {
		full_ticks = 0;
	} else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) {
		full_ticks = MAX_TICKS - 1;
	} else {
		full_ticks = n - 1;
	}

	full_cycles = full_ticks * CYCLES_PER_TICK;

	k_spinlock_key_t key = k_spin_lock(&lock);

	ccr = timer_count();

	/* turn off to clear any pending interrupt status */
	TIMER_REGS->CTRL = 0u;
	girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);
	NVIC_ClearPendingIRQ(TIMER_NVIC_NO);

	temp = total_cycles;
	temp += (cached_icr - ccr);
	temp &= TIMER_COUNT_MASK;
	total_cycles = temp;

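	/*
	 * Align the next interrupt to a tick boundary: partial_cycles is the
	 * distance from total_cycles to the next multiple of CYCLES_PER_TICK,
	 * preserving the invariant that total_cycles + cached_icr is an
	 * integral multiple of CYCLES_PER_TICK.
	 */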
	partial_cycles = CYCLES_PER_TICK - (total_cycles % CYCLES_PER_TICK);
	cached_icr = full_cycles + partial_cycles;
	/* adjust for up to one 32KHz cycle startup time */
	temp = cached_icr;
	if (temp > TIMER_ADJUST_LIMIT) {
		temp -= TIMER_ADJUST_CYCLES;
	}

	timer_restart(temp);

	k_spin_unlock(&lock, key);
}

/*
 * Return the number of Zephyr ticks elapsed since the last call to
 * sys_clock_announce() in the ISR. The caller casts uint32_t to int32_t,
 * so we must make sure bit[31] is 0 in the return value.
 */
uint32_t sys_clock_elapsed(void)
{
	uint32_t ccr;
	uint32_t ticks;
	int32_t elapsed;

	k_spinlock_key_t key = k_spin_lock(&lock);

	ccr = timer_count();

	/* It may not look efficient but the compiler does a good job */
	elapsed = (int32_t)total_cycles - (int32_t)last_announcement;
	if (elapsed < 0) {
		elapsed = -1 * elapsed;
	}
	ticks = (uint32_t)elapsed;
	ticks += cached_icr - ccr;
	ticks /= CYCLES_PER_TICK;
	ticks &= TIMER_COUNT_MASK;

	k_spin_unlock(&lock, key);

	return ticks;
}

static void xec_rtos_timer_isr(const void *arg)
{
	ARG_UNUSED(arg);

	uint32_t cycles;
	int32_t ticks;

	k_spinlock_key_t key = k_spin_lock(&lock);

	girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);

	/* Restart the timer as early as possible to minimize drift... */
	timer_restart(MAX_TICKS * CYCLES_PER_TICK);
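	/*
	 * We re-arm for the maximum interval here; if a shorter timeout is
	 * pending, the kernel is expected to reprogram the timer via
	 * sys_clock_set_timeout() while processing sys_clock_announce().
	 */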

	cycles = cached_icr;
	cached_icr = MAX_TICKS * CYCLES_PER_TICK;

	total_cycles += cycles;
	total_cycles &= TIMER_COUNT_MASK;

	/* handle wrap by using (power of 2) - 1 mask */
	ticks = total_cycles - last_announcement;
	ticks &= TIMER_COUNT_MASK;
	ticks /= CYCLES_PER_TICK;

	last_announcement = total_cycles;

	k_spin_unlock(&lock, key);
	sys_clock_announce(ticks);
}

#else

/* Non-tickless kernel build. */

static void xec_rtos_timer_isr(const void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);

	girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);

	/* Restart the timer as early as possible to minimize drift... */
	timer_restart(cached_icr);

	uint32_t temp = total_cycles + CYCLES_PER_TICK;

	total_cycles = temp & TIMER_COUNT_MASK;
	k_spin_unlock(&lock, key);

	sys_clock_announce(1);
}

uint32_t sys_clock_elapsed(void)
{
	return 0U;
}

#endif /* CONFIG_TICKLESS_KERNEL */

/*
 * Warning: RTOS timer resolution is 30.5 us (1/32768 s).
 * This is called by two code paths:
 * 1. Kernel call to k_cycle_get_32() -> arch_k_cycle_get_32() -> here.
 *    The kernel casts the return value to (int) and uses it uncast in math
 *    expressions with int types. The expression result is stored in an int.
 * 2. If CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is not defined then
 *    z_impl_k_busy_wait() calls here. This code path uses the value as
 *    uint32_t.
 */
uint32_t sys_clock_cycle_get_32(void)
{
	uint32_t ret;
	uint32_t ccr;

	k_spinlock_key_t key = k_spin_lock(&lock);

	ccr = timer_count();
	ret = (total_cycles + (cached_icr - ccr)) & TIMER_COUNT_MASK;

	k_spin_unlock(&lock, key);

	return ret;
}

void sys_clock_idle_exit(void)
{
	if (cached_icr == TIMER_STOPPED) {
		cached_icr = CYCLES_PER_TICK;
		timer_restart(cached_icr);
	}
}

void sys_clock_disable(void)
{
	TIMER_REGS->CTRL = 0U;
}

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT

/*
 * We implement custom busy wait using a MEC1501 basic timer running on
 * the 48MHz clock domain. This code is here for future power management
 * save/restore of the timer context.
 */

/*
 * 32-bit basic timer 0 configured for 1MHz count up, auto-reload,
 * and no interrupt generation.
 */
void arch_busy_wait(uint32_t usec_to_wait)
{
	if (usec_to_wait == 0) {
		return;
	}

	uint32_t start = BTMR32_0_REGS->CNT;

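	/*
	 * The basic timer counts up over the full 32-bit range (PRLD is set
	 * to UINT32_MAX at init), so the unsigned difference (curr - start)
	 * stays correct across counter wrap and no explicit wrap handling is
	 * needed in this loop.
	 */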
	for (;;) {
		uint32_t curr = BTMR32_0_REGS->CNT;

		if ((curr - start) >= usec_to_wait) {
			break;
		}
	}
}
#endif

static int sys_clock_driver_init(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	cached_icr = MAX_TICKS;
#endif

	TIMER_REGS->CTRL = 0u;
	girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);
	girq_src_dis(TIMER_GIRQ, TIMER_GIRQ_POS);
	NVIC_ClearPendingIRQ(TIMER_NVIC_NO);

	IRQ_CONNECT(TIMER_NVIC_NO, TIMER_NVIC_PRIO, xec_rtos_timer_isr, 0, 0);
	irq_enable(TIMER_NVIC_NO);
	girq_src_en(TIMER_GIRQ, TIMER_GIRQ_POS);

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
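	/*
	 * Program basic timer 0 to count up at 1MHz: the prescale field is
	 * assumed to divide the 48MHz input clock by (N + 1), so 47 gives
	 * 48MHz / 48 = 1MHz.
	 */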
	uint32_t btmr_ctrl = (MCHP_BTMR_CTRL_ENABLE
			      | MCHP_BTMR_CTRL_AUTO_RESTART
			      | MCHP_BTMR_CTRL_COUNT_UP
			      | (47UL << MCHP_BTMR_CTRL_PRESCALE_POS));

#if CONFIG_SOC_SERIES_MEC1501X
	mchp_pcr_periph_slp_ctrl(PCR_B32TMR0, 0);
#else
	PCR_XEC_REGS->SLP_EN[BTMR32_0_PCR_REG_IDX] &= ~BIT(BTMR32_0_PCR_BITPOS);
#endif
	BTMR32_0_REGS->CTRL = MCHP_BTMR_CTRL_SOFT_RESET;
	BTMR32_0_REGS->CTRL = btmr_ctrl;
	BTMR32_0_REGS->PRLD = UINT32_MAX;
	btmr_ctrl |= MCHP_BTMR_CTRL_START;

	timer_restart(cached_icr);
	/* wait for RTOS timer to load count register from preload */
	while (TIMER_REGS->CNT == 0) {
		;
	}

	BTMR32_0_REGS->CTRL = btmr_ctrl;
#else
	timer_restart(cached_icr);
#endif

	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);