/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>
/* private kernel APIs */
#include <ksched.h>
#include <kswap.h>
#include <wait_q.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

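	/* The idle thread must be preemptible: Zephyr's negative
	 * priorities are cooperative, so a priority below zero here
	 * would let idle starve every other thread.
	 */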
	__ASSERT_NO_MSG(arch_current_thread()->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run). They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver. This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked. It does not take a spinlock or other
		 * higher level construct.
		 */
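		/* The returned IRQ key is deliberately discarded:
		 * interrupts are re-enabled unconditionally on the
		 * paths below, never restored from a saved key.
		 */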
		(void) arch_irq_lock();

#ifdef CONFIG_PM
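		/* Record the ticks remaining until the next timeout
		 * so pm_system_suspend() below can pick a low power
		 * state that fits the expected sleep duration.
		 */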
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the SoC interface
		 * to allow entry into a low power state. The function
		 * returns false if a low power state was not entered,
		 * in which case the kernel does normal idle processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases, i.e. it skips k_cpu_idle().
		 * The kernel's idle processing re-enables interrupts,
		 * which is essential for the kernel's scheduling
		 * logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif /* CONFIG_PM */

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context. So in that setup we need to
		 * explicitly yield in the idle thread, otherwise
		 * nothing else will run once it starts.
		 */
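		/* ready_q.cache is the scheduler's pick for the next
		 * thread to run; if that is anyone but this idle
		 * thread, hand the CPU over.
		 */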
		if (_kernel.ready_q.cache != arch_current_thread()) {
			z_swap_unlocked();
		}
# endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */
#endif /* !defined(CONFIG_PREEMPT_ENABLED) */
	}
}

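/* Weak default for the relax hook used in busy-wait loops such as
 * spinlock contention paths: a bare nop. Architectures can override
 * it with a lower-power or bus-friendlier relaxation hint.
 */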
void __weak arch_spin_relax(void)
{
	__ASSERT(!arch_irq_unlocked(arch_irq_lock()),
		 "this is meant to be called with IRQs disabled");

	arch_nop();
}