/*
 * Copyright (c) 2011-2015 Wind River Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/arch/cpu.h>
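/*
 * Note: both routines are tagged __pinned_func so that the idle path stays
 * resident (never paged out) when demand paging is enabled.
 */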
__pinned_func
void arch_cpu_idle(void)
{
	sys_trace_idle();
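	/*
	 * 'sti' takes effect only after the following instruction executes
	 * (see the Intel SDM note in arch_cpu_atomic_idle() below), so
	 * interrupts are re-enabled atomically with 'hlt': a wake-up
	 * interrupt cannot be serviced between the two instructions and
	 * leave the CPU halted after its event has already been handled.
	 */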
	__asm__ volatile (
	    "sti\n\t"
	    "hlt\n\t");
}

__pinned_func
void arch_cpu_atomic_idle(unsigned int key)
{
	sys_trace_idle();

	__asm__ volatile (
	    "sti\n\t"
	    /*
	     * The following statement appears in "Intel 64 and IA-32
	     * Architectures Software Developer's Manual", regarding the 'sti'
	     * instruction:
	     *
	     * "After the IF flag is set, the processor begins responding to
	     *    external, maskable interrupts after the next instruction is
	     *    executed."
	     *
	     * Thus the IA-32 implementation of arch_cpu_atomic_idle() will
	     * atomically re-enable interrupts and enter a low-power mode.
	     */
	    "hlt\n\t");

	/* restore interrupt lockout state before returning to caller */
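	/*
	 * 'key' is the EFLAGS value saved by arch_irq_lock(); bit 9 (0x200)
	 * is the IF flag, so interrupts were locked iff that bit is clear.
	 */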
	if ((key & 0x200U) == 0U) {
		__asm__ volatile("cli");
	}
}