/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <drivers/timer/system_timer.h>
#include <wait_q.h>
#include <pm/pm.h>
#include <stdbool.h>
#include <logging/log.h>
#include <ksched.h>
#include <kswap.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/**
 * @brief Indicate that kernel is idling in tickless mode
 *
 * Sets the kernel data structure idle field to either a positive value or
 * K_FOREVER.
 */
static void pm_save_idle(void)
{
#ifdef CONFIG_PM
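	/* Record how long the CPU is expected to stay idle and hand that
	 * figure to the PM subsystem, which uses the available time to
	 * decide whether entering a low power state is worthwhile.
	 */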
	int32_t ticks = z_get_next_timeout_expiry();
	_kernel.idle = ticks;

	/*
	 * Call the suspend hook function of the soc interface to allow
	 * entry into a low power state. The function returns
	 * PM_STATE_ACTIVE if low power state was not entered, in which
	 * case, kernel does normal idle processing.
	 *
	 * This function is entered with interrupts disabled. If a low power
	 * state was entered, then the hook function should enable interrupts
	 * before exiting. This is because the kernel does not do its own idle
	 * processing in those cases i.e. skips k_cpu_idle(). The kernel's
	 * idle processing re-enables interrupts which is essential for
	 * the kernel's scheduling logic.
	 */
	if (pm_system_suspend(ticks) == PM_STATE_ACTIVE) {
		k_cpu_idle();
	}
#endif
}

void z_pm_save_idle_exit(int32_t ticks)
{
#ifdef CONFIG_PM
	/* Some CPU low power states require notification at the ISR
	 * level so that any operations that need to be done before the
	 * kernel switches tasks or processes nested interrupts can be
	 * performed. This can simply be ignored if not required.
	 */
	pm_system_resume();
#endif	/* CONFIG_PM */
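	/* Let the system clock driver know the kernel is leaving idle so
	 * it can resume its normal announcing of ticks.
	 */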
	sys_clock_idle_exit();
}

void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

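	/* The idle thread must run at a preemptible (non-negative)
	 * priority so that any thread that becomes ready can take the
	 * CPU away from it.
	 */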
	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't
		 * actually enter an idle state, because they
		 * can't be notified of scheduler changes
		 * (i.e. threads they should run).  They just
		 * spin in a yield loop.  This is intended as
		 * a fallback configuration for new platform
		 * bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) &&
		    !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			k_busy_wait(100);
			k_yield();
			continue;
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked.  It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

		if (IS_ENABLED(CONFIG_PM)) {
			pm_save_idle();
		} else {
			k_cpu_idle();
		}

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context.  So in that setup we need to
		 * explicitly yield in the idle thread, otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif
#endif
	}
}