/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/llext/symbol.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>
/* private kernel APIs */
#include <ksched.h>
#include <kswap.h>
#include <wait_q.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

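/* Entry point of the idle thread. One idle thread runs per CPU and is
 * scheduled only when no other thread is ready on that CPU.
 */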
void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run). They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver. This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
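			/* Drop back into the scheduler so any thread
			 * that became ready while we were spinning
			 * gets a chance to run.
			 */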
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked. It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

#ifdef CONFIG_PM
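		/* Record how long (in ticks) the kernel expects to
		 * stay idle, so the PM subsystem can choose a
		 * suitable low power state.
		 */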
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the SoC interface
		 * to allow entry into a low power state. The function
		 * returns false if a low power state was not entered,
		 * in which case the kernel does normal idle processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases, i.e. it skips k_cpu_idle().
		 * The kernel's idle processing re-enables interrupts,
		 * which is essential for the kernel's scheduling
		 * logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif /* CONFIG_PM */

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context. So in that setup we need to
		 * explicitly yield in the idle thread, otherwise
		 * nothing else will run once it starts.
		 */
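		/* _kernel.ready_q.cache holds the thread the scheduler
		 * has selected to run next; only swap out if that is
		 * no longer this idle thread.
		 */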
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */
#endif /* !defined(CONFIG_PREEMPT_ENABLED) */
	}
}

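/* Default spin-loop relax hook, invoked from busy-wait paths such as
 * spinlock contention. Architectures or SoCs may override this weak
 * implementation with a lower-power or lower-contention wait.
 */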
void __weak arch_spin_relax(void)
{
	__ASSERT(!arch_irq_unlocked(arch_irq_lock()),
		 "this is meant to be called with IRQs disabled");

	arch_nop();
}
EXPORT_SYMBOL(arch_spin_relax);