/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>
/* private kernel APIs */
#include <ksched.h>
#include <kswap.h>
#include <wait_q.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

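/* Notify the PM subsystem (pm_system_resume()) and the system clock
 * driver (sys_clock_idle_exit()) that the CPU is leaving idle.
 * Typically invoked from ISR-level idle-exit paths, as noted below.
 */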
void z_pm_save_idle_exit(void)
{
#ifdef CONFIG_PM
	/* Some CPU low power states require notification at the ISR
	 * level to allow any operations that need to be done before
	 * the kernel switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
#endif /* CONFIG_PM */
#ifdef CONFIG_SYS_CLOCK_EXISTS
	sys_clock_idle_exit();
#endif
}

void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

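	/* The idle thread must run at a preemptible (non-negative)
	 * priority so that any ready thread can displace it.
	 */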
	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run). They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver. This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked. It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

#ifdef CONFIG_PM
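		/* Record how many ticks remain until the next scheduled
		 * timeout so the PM subsystem can choose an appropriately
		 * deep low power state for that interval.
		 */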
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the SoC interface
		 * to allow entry into a low power state. The function
		 * returns false if a low power state was not entered,
		 * in which case the kernel does normal idle processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases, i.e. it skips k_cpu_idle().
		 * The kernel's idle processing re-enables interrupts,
		 * which is essential for the kernel's scheduling
		 * logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context. So in that setup we need to
		 * explicitly yield in the idle thread, otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif
#endif
	}
}

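/* Default (weak) implementation of the architecture spin-relax hook
 * used by kernel busy-wait loops (e.g. contended spinlocks); an
 * architecture may override it with a platform-specific relaxation.
 */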
void __weak arch_spin_relax(void)
{
	__ASSERT(!arch_irq_unlocked(arch_irq_lock()),
		 "this is meant to be called with IRQs disabled");

	arch_nop();
}