/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/init.h>
#include <string.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/state.h>
#include <zephyr/pm/policy.h>
#include <zephyr/tracing/tracing.h>

#include "pm_stats.h"
#include "device_system_managed.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pm, CONFIG_PM_LOG_LEVEL);

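/*
 * Bitmask of CPUs that still need PM exit post-ops to run in
 * pm_system_resume() after waking from a low-power state.
 */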
static ATOMIC_DEFINE(z_post_ops_required, CONFIG_MP_MAX_NUM_CPUS);
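/* List of registered pm_notifier callbacks, protected by pm_notifier_lock. */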
static sys_slist_t pm_notifiers = SYS_SLIST_STATIC_INIT(&pm_notifiers);

/*
 * Properly initialize CPU power states. Do not assume that
 * PM_STATE_ACTIVE is 0.
 */
#define CPU_PM_STATE_INIT(_, __)	\
	{ .state = PM_STATE_ACTIVE }
static struct pm_state_info z_cpus_pm_state[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,))
};

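/*
 * Per-CPU state forced via pm_state_force(); it overrides the policy
 * decision on the next pm_system_suspend() call and is then cleared.
 */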
static struct pm_state_info z_cpus_pm_forced_state[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,))
};

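/*
 * Spinlocks protecting the forced-state table and the notifier list,
 * respectively.
 */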
static struct k_spinlock pm_forced_state_lock;
static struct k_spinlock pm_notifier_lock;

/*
 * Function called to notify when the system is entering / exiting a
 * power state
 */
static inline void pm_state_notify(bool entering_state)
{
	struct pm_notifier *notifier;
	k_spinlock_key_t pm_notifier_key;
	void (*callback)(enum pm_state state);

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	SYS_SLIST_FOR_EACH_CONTAINER(&pm_notifiers, notifier, _node) {
		if (entering_state) {
			callback = notifier->state_entry;
		} else {
			callback = notifier->state_exit;
		}

		if (callback) {
			callback(z_cpus_pm_state[_current_cpu->id].state);
		}
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

static inline int32_t ticks_expiring_sooner(int32_t ticks1, int32_t ticks2)
{
	/*
	 * Ticks are relative values that give the number of ticks
	 * until the next event.
	 * K_TICKS_FOREVER is -1 when cast to int32_t, so it must be
	 * treated as "no event" rather than compared directly.
	 * We need to find out which of the two expires sooner.
	 */

	__ASSERT(ticks1 >= -1, "ticks1 has unexpected negative value");
	__ASSERT(ticks2 >= -1, "ticks2 has unexpected negative value");

	if (ticks1 == K_TICKS_FOREVER) {
		return ticks2;
	}
	if (ticks2 == K_TICKS_FOREVER) {
		return ticks1;
	}
	/* At this point ticks1 and ticks2 are both non-negative */
	return MIN(ticks1, ticks2);
}

void pm_system_resume(void)
{
	uint8_t id = _current_cpu->id;

	/*
	 * This notification is called from the ISR of the event
	 * that caused exit from kernel idling after PM operations.
	 *
	 * Some CPU low power states require enabling of interrupts
	 * atomically when entering those states. The wake up from
	 * such a state first executes code in the ISR of the interrupt
	 * that caused the wake. This hook will be called from the ISR.
	 * For such CPU LPS states, do post operations and restores here.
	 * The kernel scheduler will get control after the ISR finishes
	 * and it may schedule another thread.
	 */
	if (atomic_test_and_clear_bit(z_post_ops_required, id)) {
#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
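		/*
		 * atomic_add() returns the previous value: 0 means this is
		 * the first CPU to become active again, so resume the
		 * devices that were suspended by pm_suspend_devices().
		 */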
		if (atomic_add(&_cpus_active, 1) == 0) {
			if ((z_cpus_pm_state[id].state != PM_STATE_RUNTIME_IDLE) &&
			    !z_cpus_pm_state[id].pm_device_disabled) {
				pm_resume_devices();
			}
		}
#endif
		pm_state_exit_post_ops(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id);
		pm_state_notify(false);
#ifdef CONFIG_SYS_CLOCK_EXISTS
		sys_clock_idle_exit();
#endif /* CONFIG_SYS_CLOCK_EXISTS */
		z_cpus_pm_state[id] = (struct pm_state_info){PM_STATE_ACTIVE, 0, false,
							     0, 0};
	}
}

bool pm_state_force(uint8_t cpu, const struct pm_state_info *info)
{
	k_spinlock_key_t key;

	__ASSERT(info->state < PM_STATE_COUNT,
		 "Invalid power state %d!", info->state);

	key = k_spin_lock(&pm_forced_state_lock);
	z_cpus_pm_forced_state[cpu] = *info;
	k_spin_unlock(&pm_forced_state_lock, key);

	return true;
}

bool pm_system_suspend(int32_t kernel_ticks)
{
	uint8_t id = _current_cpu->id;
	k_spinlock_key_t key;
	int32_t ticks, events_ticks;

	SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, kernel_ticks);

	/*
	 * The CPU needs to be fully woken up before the next event is
	 * triggered, so first find out how many ticks remain until that
	 * event.
	 */
	events_ticks = pm_policy_next_event_ticks();
	ticks = ticks_expiring_sooner(kernel_ticks, events_ticks);

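	/*
	 * A state forced via pm_state_force() takes precedence over the
	 * policy decision; it is consumed here and reset to PM_STATE_ACTIVE.
	 */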
	key = k_spin_lock(&pm_forced_state_lock);
	if (z_cpus_pm_forced_state[id].state != PM_STATE_ACTIVE) {
		z_cpus_pm_state[id] = z_cpus_pm_forced_state[id];
		z_cpus_pm_forced_state[id].state = PM_STATE_ACTIVE;
	} else {
		const struct pm_state_info *info;

		info = pm_policy_next_state(id, ticks);
		if (info != NULL) {
			z_cpus_pm_state[id] = *info;
		} else {
			z_cpus_pm_state[id].state = PM_STATE_ACTIVE;
		}
	}
	k_spin_unlock(&pm_forced_state_lock, key);

	if (z_cpus_pm_state[id].state == PM_STATE_ACTIVE) {
		LOG_DBG("No PM operations done.");
		SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
					   z_cpus_pm_state[id].state);
		return false;
	}

#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
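	/*
	 * atomic_sub() returns the previous value: 1 means this is the last
	 * active CPU, so suspend the system-managed devices. If any device
	 * refuses to suspend, roll back and stay in PM_STATE_ACTIVE.
	 */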
	if (atomic_sub(&_cpus_active, 1) == 1) {
		if ((z_cpus_pm_state[id].state != PM_STATE_RUNTIME_IDLE) &&
		    !z_cpus_pm_state[id].pm_device_disabled) {
			if (!pm_suspend_devices()) {
				pm_resume_devices();
				z_cpus_pm_state[id].state = PM_STATE_ACTIVE;
				(void)atomic_add(&_cpus_active, 1);
				SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
							   z_cpus_pm_state[id].state);
				return false;
			}
		}
	}
#endif

	if ((z_cpus_pm_state[id].exit_latency_us != 0) &&
	    (ticks != K_TICKS_FOREVER)) {
		/*
		 * We need to set the timer to interrupt a little bit early to
		 * accommodate the time required by the CPU to fully wake up.
		 */
		sys_clock_set_timeout(ticks -
				      k_us_to_ticks_ceil32(
					      z_cpus_pm_state[id].exit_latency_us),
				      true);
	}

	/*
	 * This function runs with interrupts locked, but the SoC is
	 * expected to unlock them in pm_state_exit_post_ops() when
	 * returning to the active state. We don't want to be scheduled
	 * out yet: first we need to send a notification about leaving
	 * the idle state. So we lock the scheduler here and unlock it
	 * just after the notification has been sent in pm_system_resume().
	 */
	k_sched_lock();
	pm_stats_start();
	/* Enter power state */
	pm_state_notify(true);
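	/*
	 * Mark that pm_system_resume() must run the exit post-ops for this
	 * CPU once it wakes up from the low-power state.
	 */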
	atomic_set_bit(z_post_ops_required, id);
	pm_state_set(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id);
	pm_stats_stop();

	/* Wake up sequence starts here */
	pm_stats_update(z_cpus_pm_state[id].state);
	pm_system_resume();
	k_sched_unlock();
	SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
				   z_cpus_pm_state[id].state);

	return true;
}

void pm_notifier_register(struct pm_notifier *notifier)
{
	k_spinlock_key_t pm_notifier_key = k_spin_lock(&pm_notifier_lock);

	sys_slist_append(&pm_notifiers, &notifier->_node);
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

int pm_notifier_unregister(struct pm_notifier *notifier)
{
	int ret = -EINVAL;
	k_spinlock_key_t pm_notifier_key;

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	if (sys_slist_find_and_remove(&pm_notifiers, &(notifier->_node))) {
		ret = 0;
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);

	return ret;
}

const struct pm_state_info *pm_state_next_get(uint8_t cpu)
{
	return &z_cpus_pm_state[cpu];
}

void z_pm_save_idle_exit(void)
{
	/* Some CPU low power states require notification at the ISR
	 * level to allow any operations that need to be done before
	 * the kernel switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
}