/*
 * Copyright (c) 2017 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/irq.h>
#include <zephyr/arch/cpu.h>

#include <zephyr/tracing/tracing.h>

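/*
 * Emit the idle tracing hook, unlock interrupts as indicated by 'key',
 * then execute WFI to wait for an interrupt.
 */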
static ALWAYS_INLINE void riscv_idle(unsigned int key)
{
	sys_trace_idle();
	/* unlock interrupts */
	irq_unlock(key);

	/* Wait for interrupt */
	__asm__ volatile("wfi");
}

/**
 * @brief Power save idle routine
 *
 * This function will be called by the kernel idle loop or possibly within
 * an implementation of _pm_save_idle in the kernel when the
 * '_pm_save_flag' variable is non-zero.
 */
void arch_cpu_idle(void)
{
	riscv_idle(MSTATUS_IEN);
}
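/*
 * Illustrative sketch (not part of this build) of how an idle loop might
 * drive this routine: the caller may enter with interrupts locked, and
 * arch_cpu_idle() unconditionally re-enables machine interrupts
 * (MSTATUS_IEN) before waiting with WFI, e.g.
 *
 *	for (;;) {
 *		(void)irq_lock();
 *		arch_cpu_idle();	// re-enables interrupts, then WFI
 *	}
 */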

/**
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * INTERNAL
 * The requirements for arch_cpu_atomic_idle() are as follows:
 * 1) Enabling interrupts and entering a low-power mode must be atomic,
 *    i.e. there should be no period of time where interrupts are enabled
 *    before the processor enters a low-power mode.  See the comments in
 *    k_lifo_get(), for example, for the race condition that occurs if this
 *    requirement is not met.
 *
 * 2) After waking up from the low-power mode, the interrupt lockout state
 *    must be restored as indicated by the 'key' input parameter.
 */
void arch_cpu_atomic_idle(unsigned int key)
{
	riscv_idle(key);
}

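/*
 * Illustrative sketch of the wait pattern this routine exists for
 * (hypothetical 'data_ready' flag and wait_for_data() helper, not part of
 * this file). Interrupts stay locked across the check, so an interrupt
 * that sets the flag cannot slip in between the test and the wait:
 *
 *	static volatile bool data_ready;
 *
 *	static void wait_for_data(void)
 *	{
 *		unsigned int key = irq_lock();
 *
 *		while (!data_ready) {
 *			// Re-enable interrupts, wait (WFI), then restore
 *			// the lockout state encoded in 'key'.
 *			arch_cpu_atomic_idle(key);
 *			key = irq_lock();
 *		}
 *		irq_unlock(key);
 *	}
 */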