/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/init.h>
#include <string.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/state.h>
#include <zephyr/pm/policy.h>
#include <zephyr/tracing/tracing.h>

#include "pm_stats.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pm, CONFIG_PM_LOG_LEVEL);

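/*
 * Resolve the index of the executing CPU. On SMP builds the
 * architecture layer is queried directly; otherwise the cached
 * _current_cpu pointer is enough.
 */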
#define CURRENT_CPU \
	(COND_CODE_1(CONFIG_SMP, (arch_curr_cpu()->id), (_current_cpu->id)))

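/*
 * Per-CPU bitmask: a set bit means the CPU entered a low-power state
 * and still owes the exit "post ops" (and the exit notification) on
 * its way back to PM_STATE_ACTIVE.
 */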
static ATOMIC_DEFINE(z_post_ops_required, CONFIG_MP_MAX_NUM_CPUS);
static sys_slist_t pm_notifiers = SYS_SLIST_STATIC_INIT(&pm_notifiers);

/*
 * Properly initialize cpu power states. Do not make assumptions that
 * ACTIVE_STATE is 0
 */
#define CPU_PM_STATE_INIT(_, __) \
	{ .state = PM_STATE_ACTIVE }
static struct pm_state_info z_cpus_pm_state[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,))
};

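/*
 * State forced via pm_state_force(); it is consumed (and reset to
 * PM_STATE_ACTIVE) on the CPU's next pass through pm_system_suspend().
 */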
static struct pm_state_info z_cpus_pm_forced_state[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,))
};

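/* Serialize access to the forced-state array and the notifier list. */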
static struct k_spinlock pm_forced_state_lock;
static struct k_spinlock pm_notifier_lock;

#ifdef CONFIG_PM_DEVICE
TYPE_SECTION_START_EXTERN(const struct device *, pm_device_slots);

#if !defined(CONFIG_PM_DEVICE_RUNTIME_EXCLUSIVE)
/* Number of devices successfully suspended. */
static size_t num_susp;

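/*
 * Suspend all eligible statically allocated devices, walking the device
 * list in reverse so that devices are suspended in the opposite order
 * of their initialization.
 */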
static int pm_suspend_devices(void)
{
	const struct device *devs;
	size_t devc;

	devc = z_device_get_all_static(&devs);

	num_susp = 0;

	for (const struct device *dev = devs + devc - 1; dev >= devs; dev--) {
		int ret;

		/*
		 * Skip devices that are not ready, busy, state-locked,
		 * enabled as wake-up sources, or managed by runtime PM.
		 */
		if (!device_is_ready(dev) || pm_device_is_busy(dev) ||
		    pm_device_state_is_locked(dev) ||
		    pm_device_wakeup_is_enabled(dev) ||
		    pm_device_runtime_is_enabled(dev)) {
			continue;
		}

		ret = pm_device_action_run(dev, PM_DEVICE_ACTION_SUSPEND);
		/* ignore devices not supporting or already at the given state */
		if ((ret == -ENOSYS) || (ret == -ENOTSUP) || (ret == -EALREADY)) {
			continue;
		} else if (ret < 0) {
			LOG_ERR("Device %s did not enter %s state (%d)",
				dev->name,
				pm_device_state_str(PM_DEVICE_STATE_SUSPENDED),
				ret);
			return ret;
		}

		TYPE_SECTION_START(pm_device_slots)[num_susp] = dev;
		num_susp++;
	}

	return 0;
}

static void pm_resume_devices(void)
{
	for (int i = (num_susp - 1); i >= 0; i--) {
		pm_device_action_run(TYPE_SECTION_START(pm_device_slots)[i],
				     PM_DEVICE_ACTION_RESUME);
	}

	num_susp = 0;
}
#endif /* !CONFIG_PM_DEVICE_RUNTIME_EXCLUSIVE */
#endif /* CONFIG_PM_DEVICE */

/*
 * Function called to notify when the system is entering / exiting a
 * power state
 */
static inline void pm_state_notify(bool entering_state)
{
	struct pm_notifier *notifier;
	k_spinlock_key_t pm_notifier_key;
	void (*callback)(enum pm_state state);

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	SYS_SLIST_FOR_EACH_CONTAINER(&pm_notifiers, notifier, _node) {
		if (entering_state) {
			callback = notifier->state_entry;
		} else {
			callback = notifier->state_exit;
		}

		if (callback) {
			callback(z_cpus_pm_state[_current_cpu->id].state);
		}
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

void pm_system_resume(void)
{
	uint8_t id = CURRENT_CPU;

	/*
	 * This notification is called from the ISR of the event
	 * that caused exit from kernel idling after PM operations.
	 *
	 * Some CPU low power states require enabling of interrupts
	 * atomically when entering those states. The wake up from
	 * such a state first executes code in the ISR of the interrupt
	 * that caused the wake. This hook will be called from the ISR.
	 * For such CPU LPS states, do post operations and restores here.
	 * The kernel scheduler will get control after the ISR finishes
	 * and it may schedule another thread.
	 */
	if (atomic_test_and_clear_bit(z_post_ops_required, id)) {
		pm_state_exit_post_ops(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id);
		pm_state_notify(false);
		z_cpus_pm_state[id] = (struct pm_state_info){PM_STATE_ACTIVE,
			0, 0};
	}
}

bool pm_state_force(uint8_t cpu, const struct pm_state_info *info)
{
	k_spinlock_key_t key;

	__ASSERT(info->state < PM_STATE_COUNT,
		 "Invalid power state %d!", info->state);

	key = k_spin_lock(&pm_forced_state_lock);
	z_cpus_pm_forced_state[cpu] = *info;
	k_spin_unlock(&pm_forced_state_lock, key);

	return true;
}

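/*
 * Usage sketch (illustrative only; not part of this file): force CPU 0
 * into suspend-to-idle on its next idle entry.
 *
 *	static const struct pm_state_info forced = {
 *		.state = PM_STATE_SUSPEND_TO_IDLE,
 *	};
 *
 *	(void)pm_state_force(0U, &forced);
 */

/*
 * Called by the kernel idle loop with interrupts locked. Returns true
 * if a low-power state was entered, false if the system stayed active.
 */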
bool pm_system_suspend(int32_t ticks)
{
	uint8_t id = CURRENT_CPU;
	k_spinlock_key_t key;

	SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, ticks);

	key = k_spin_lock(&pm_forced_state_lock);
	if (z_cpus_pm_forced_state[id].state != PM_STATE_ACTIVE) {
		z_cpus_pm_state[id] = z_cpus_pm_forced_state[id];
		z_cpus_pm_forced_state[id].state = PM_STATE_ACTIVE;
	} else {
		const struct pm_state_info *info;

		info = pm_policy_next_state(id, ticks);
		if (info != NULL) {
			z_cpus_pm_state[id] = *info;
		}
	}
	k_spin_unlock(&pm_forced_state_lock, key);

	if (z_cpus_pm_state[id].state == PM_STATE_ACTIVE) {
		LOG_DBG("No PM operations done.");
		SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
					   z_cpus_pm_state[id].state);
		return false;
	}

	if (ticks != K_TICKS_FOREVER) {
		/*
		 * We need to set the timer to interrupt a little bit early to
		 * accommodate the time required by the CPU to fully wake up.
		 */
		sys_clock_set_timeout(ticks -
				      k_us_to_ticks_ceil32(
					      z_cpus_pm_state[id].exit_latency_us),
				      true);
	}

#if defined(CONFIG_PM_DEVICE) && !defined(CONFIG_PM_DEVICE_RUNTIME_EXCLUSIVE)
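	/*
	 * atomic_sub() returns the value prior to subtraction: 1 means
	 * this is the last active CPU, so system devices may now be
	 * suspended (runtime-idle states keep devices active).
	 */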
	if (atomic_sub(&_cpus_active, 1) == 1) {
		if (z_cpus_pm_state[id].state != PM_STATE_RUNTIME_IDLE) {
			if (pm_suspend_devices()) {
				pm_resume_devices();
				z_cpus_pm_state[id].state = PM_STATE_ACTIVE;
				(void)atomic_add(&_cpus_active, 1);
				SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
							   z_cpus_pm_state[id].state);
				return false;
			}
		}
	}
#endif
	/*
	 * This function runs with interrupts locked, but the SoC is
	 * expected to unlock them in pm_state_exit_post_ops() when
	 * returning to the active state. We don't want to be scheduled
	 * out yet: first we need to send a notification about leaving
	 * the idle state. So we lock the scheduler here and unlock it
	 * just after we have sent the notification in pm_system_resume().
	 */
	k_sched_lock();
	pm_stats_start();
	/* Enter power state */
	pm_state_notify(true);
	atomic_set_bit(z_post_ops_required, id);
	pm_state_set(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id);
	pm_stats_stop();

	/* Wake up sequence starts here */
#if defined(CONFIG_PM_DEVICE) && !defined(CONFIG_PM_DEVICE_RUNTIME_EXCLUSIVE)
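	/*
	 * atomic_add() returns the value prior to addition: 0 means this
	 * is the first CPU to become active again, so resume the devices
	 * suspended on the way down.
	 */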
	if (atomic_add(&_cpus_active, 1) == 0) {
		pm_resume_devices();
	}
#endif
	pm_stats_update(z_cpus_pm_state[id].state);
	pm_system_resume();
	k_sched_unlock();
	SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
				   z_cpus_pm_state[id].state);

	return true;
}

void pm_notifier_register(struct pm_notifier *notifier)
{
	k_spinlock_key_t pm_notifier_key = k_spin_lock(&pm_notifier_lock);

	sys_slist_append(&pm_notifiers, &notifier->_node);
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}
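
/*
 * Usage sketch (illustrative only; the callback names are placeholders):
 *
 *	static void on_pm_entry(enum pm_state state) { ... }
 *	static void on_pm_exit(enum pm_state state) { ... }
 *
 *	static struct pm_notifier notifier = {
 *		.state_entry = on_pm_entry,
 *		.state_exit = on_pm_exit,
 *	};
 *
 *	pm_notifier_register(&notifier);
 */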

int pm_notifier_unregister(struct pm_notifier *notifier)
{
	int ret = -EINVAL;
	k_spinlock_key_t pm_notifier_key;

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	if (sys_slist_find_and_remove(&pm_notifiers, &(notifier->_node))) {
		ret = 0;
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);

	return ret;
}

const struct pm_state_info *pm_state_next_get(uint8_t cpu)
{
	return &z_cpus_pm_state[cpu];
}