/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/init.h>
#include <string.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/state.h>
#include <zephyr/pm/policy.h>
#include <zephyr/tracing/tracing.h>

#include "pm_stats.h"
#include "device_system_managed.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pm, CONFIG_PM_LOG_LEVEL);

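/* Per-CPU bitmap: a set bit means pm_state_exit_post_ops() and the
 * exit notifications still have to run on that CPU after wake-up.
 */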
static ATOMIC_DEFINE(z_post_ops_required, CONFIG_MP_MAX_NUM_CPUS);
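/* List of notifiers added with pm_notifier_register(), protected by
 * pm_notifier_lock.
 */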
static sys_slist_t pm_notifiers = SYS_SLIST_STATIC_INIT(&pm_notifiers);

/*
 * Properly initialize cpu power states. Do not assume that
 * PM_STATE_ACTIVE is 0.
 */
#define CPU_PM_STATE_INIT(_, __)		\
	{ .state = PM_STATE_ACTIVE }
static struct pm_state_info z_cpus_pm_state[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,))
};
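/* For example, with CONFIG_MP_MAX_NUM_CPUS=2 the LISTIFY initializer
 * above expands to:
 *	{ .state = PM_STATE_ACTIVE }, { .state = PM_STATE_ACTIVE }
 */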
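
/* Next state forced via pm_state_force(); pm_system_suspend() consumes
 * it and resets the entry back to PM_STATE_ACTIVE.
 */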
static struct pm_state_info z_cpus_pm_forced_state[] = {
	LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,))
};

static struct k_spinlock pm_forced_state_lock;
static struct k_spinlock pm_notifier_lock;

/*
 * Function called to notify when the system is entering / exiting a
 * power state
 */
static inline void pm_state_notify(bool entering_state)
{
	struct pm_notifier *notifier;
	k_spinlock_key_t pm_notifier_key;
	void (*callback)(enum pm_state state);

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	SYS_SLIST_FOR_EACH_CONTAINER(&pm_notifiers, notifier, _node) {
		if (entering_state) {
			callback = notifier->state_entry;
		} else {
			callback = notifier->state_exit;
		}

		if (callback) {
			callback(z_cpus_pm_state[_current_cpu->id].state);
		}
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

void pm_system_resume(void)
{
	uint8_t id = _current_cpu->id;

	/*
	 * This notification is called from the ISR of the event
	 * that caused exit from kernel idling after PM operations.
	 *
	 * Some CPU low power states require interrupts to be enabled
	 * atomically when entering those states. The wake-up from
	 * such a state first executes code in the ISR of the interrupt
	 * that caused the wake-up, so this hook will be called from
	 * that ISR. For such CPU low power states, do post operations
	 * and restores here. The kernel scheduler will get control
	 * after the ISR finishes and it may schedule another thread.
	 */
	if (atomic_test_and_clear_bit(z_post_ops_required, id)) {
#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
		if (atomic_add(&_cpus_active, 1) == 0) {
			if ((z_cpus_pm_state[id].state != PM_STATE_RUNTIME_IDLE) &&
					!z_cpus_pm_state[id].pm_device_disabled) {
				pm_resume_devices();
			}
		}
#endif
		pm_state_exit_post_ops(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id);
		pm_state_notify(false);
#ifdef CONFIG_SYS_CLOCK_EXISTS
		sys_clock_idle_exit();
#endif /* CONFIG_SYS_CLOCK_EXISTS */
		z_cpus_pm_state[id] = (struct pm_state_info){PM_STATE_ACTIVE,
			0, 0};
	}
}

bool pm_state_force(uint8_t cpu, const struct pm_state_info *info)
{
	k_spinlock_key_t key;

	__ASSERT(info->state < PM_STATE_COUNT,
		 "Invalid power state %d!", info->state);

	key = k_spin_lock(&pm_forced_state_lock);
	z_cpus_pm_forced_state[cpu] = *info;
	k_spin_unlock(&pm_forced_state_lock, key);

	return true;
}
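
/* Illustrative sketch of forcing a state: assuming the SoC supports
 * PM_STATE_SOFT_OFF, the next pm_system_suspend() on CPU 0 can be
 * steered into it, bypassing the policy:
 *
 *	static const struct pm_state_info off = {
 *		.state = PM_STATE_SOFT_OFF,
 *	};
 *
 *	(void)pm_state_force(0U, &off);
 */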

bool pm_system_suspend(int32_t ticks)
{
	uint8_t id = _current_cpu->id;
	k_spinlock_key_t key;

	SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, ticks);

	key = k_spin_lock(&pm_forced_state_lock);
	if (z_cpus_pm_forced_state[id].state != PM_STATE_ACTIVE) {
		z_cpus_pm_state[id] = z_cpus_pm_forced_state[id];
		z_cpus_pm_forced_state[id].state = PM_STATE_ACTIVE;
	} else {
		const struct pm_state_info *info;

		info = pm_policy_next_state(id, ticks);
		if (info != NULL) {
			z_cpus_pm_state[id] = *info;
		} else {
			z_cpus_pm_state[id].state = PM_STATE_ACTIVE;
		}
	}
	k_spin_unlock(&pm_forced_state_lock, key);

	if (z_cpus_pm_state[id].state == PM_STATE_ACTIVE) {
		LOG_DBG("No PM operations done.");
		SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
				   z_cpus_pm_state[id].state);
		return false;
	}

#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
	if (atomic_sub(&_cpus_active, 1) == 1) {
		if ((z_cpus_pm_state[id].state != PM_STATE_RUNTIME_IDLE) &&
		    !z_cpus_pm_state[id].pm_device_disabled) {
			if (!pm_suspend_devices()) {
				pm_resume_devices();
				z_cpus_pm_state[id].state = PM_STATE_ACTIVE;
				(void)atomic_add(&_cpus_active, 1);
				SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
							   z_cpus_pm_state[id].state);
				return false;
			}
		}
	}
#endif

	if ((z_cpus_pm_state[id].exit_latency_us != 0) &&
	    (ticks != K_TICKS_FOREVER)) {
		/*
		 * We need to set the timer to interrupt a little bit early to
		 * accommodate the time required by the CPU to fully wake up.
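		 * For example (hypothetical figures): with a 10 kHz tick
		 * rate (100 us per tick) and exit_latency_us = 500, the
		 * timeout below fires 5 ticks ahead of the deadline.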
		 */
		sys_clock_set_timeout(
			ticks - k_us_to_ticks_ceil32(z_cpus_pm_state[id].exit_latency_us),
			true);
	}

	/*
	 * This function runs with interrupts locked, and the SoC is
	 * expected to unlock them in pm_state_exit_post_ops() when
	 * returning to the active state. We don't want to be scheduled
	 * out yet: first we need to send a notification about leaving
	 * the idle state. So we lock the scheduler here and unlock it
	 * just after the notification has been sent in pm_system_resume().
	 */
	k_sched_lock();
	pm_stats_start();
	/* Enter power state */
	pm_state_notify(true);
	atomic_set_bit(z_post_ops_required, id);
	pm_state_set(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id);
	pm_stats_stop();

	/* Wake up sequence starts here */
	pm_stats_update(z_cpus_pm_state[id].state);
	pm_system_resume();
	k_sched_unlock();
	SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
				   z_cpus_pm_state[id].state);

	return true;
}

void pm_notifier_register(struct pm_notifier *notifier)
{
	k_spinlock_key_t pm_notifier_key = k_spin_lock(&pm_notifier_lock);

	sys_slist_append(&pm_notifiers, &notifier->_node);
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}
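
/* Illustrative sketch of registering a notifier (callback names are
 * hypothetical); either callback may be left NULL if unused, since
 * pm_state_notify() checks for NULL before invoking it:
 *
 *	static void on_state_entry(enum pm_state state)
 *	{
 *		LOG_DBG("entering state %d", state);
 *	}
 *
 *	static struct pm_notifier notifier = {
 *		.state_entry = on_state_entry,
 *		.state_exit = NULL,
 *	};
 *
 *	pm_notifier_register(&notifier);
 */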

int pm_notifier_unregister(struct pm_notifier *notifier)
{
	int ret = -EINVAL;
	k_spinlock_key_t pm_notifier_key;

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	if (sys_slist_find_and_remove(&pm_notifiers, &(notifier->_node))) {
		ret = 0;
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);

	return ret;
}

const struct pm_state_info *pm_state_next_get(uint8_t cpu)
{
	return &z_cpus_pm_state[cpu];
}

void z_pm_save_idle_exit(void)
{
	/* Some CPU low power states require notification at the ISR
	 * level to allow any operations that need to be done before
	 * the kernel switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
}