/*
 * Copyright (c) 2011-2015 Wind River Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/arch/cpu.h>

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
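/*
 * Power-save idle routine: re-enable interrupts ("sti") and halt the CPU
 * ("hlt") until the next interrupt wakes it.
 */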
__pinned_func
void arch_cpu_idle(void)
{
	sys_trace_idle();
	__asm__ volatile (
	    "sti\n\t"
	    "hlt\n\t");
}
#endif

#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
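/*
 * Variant of arch_cpu_idle() that atomically re-enables interrupts before
 * halting. The 'key' argument is the interrupt lockout state captured by
 * the caller: on x86 this is the saved EFLAGS value, in which bit 9
 * (0x200) is the IF (interrupt enable) flag.
 */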
__pinned_func
void arch_cpu_atomic_idle(unsigned int key)
{
	sys_trace_idle();

	__asm__ volatile (
	    "sti\n\t"
	    /*
	     * The following statement appears in "Intel 64 and IA-32
	     * Architectures Software Developer's Manual", regarding the 'sti'
	     * instruction:
	     *
	     * "After the IF flag is set, the processor begins responding to
	     *  external, maskable interrupts after the next instruction is
	     *  executed."
	     *
	     * Thus the IA-32 implementation of arch_cpu_atomic_idle() will
	     * atomically re-enable interrupts and enter a low-power mode.
	     */
	    "hlt\n\t");

	/* restore interrupt lockout state before returning to caller */
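	/* IF (EFLAGS bit 9) clear in 'key' means interrupts were locked on
	 * entry, so disable them again with "cli"
	 */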
	if ((key & 0x200U) == 0U) {
		__asm__ volatile("cli");
	}
}
#endif