/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/state.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys_clock.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/toolchain.h>
#include <zephyr/pm/device.h>

#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)

#define DT_SUB_LOCK_INIT(node_id)				\
	{ .state = PM_STATE_DT_INIT(node_id),			\
	  .substate_id = DT_PROP_OR(node_id, substate_id, 0),	\
	  .lock = ATOMIC_INIT(0),				\
	},

/**
 * State and substate lock structure.
 *
 * This struct associates a reference count with each <state, substate>
 * pair, to be used with the pm_policy_state_lock_* functions.
 *
 * Operations on this array are O(n) in the number of power states, mostly
 * because the substate value is arbitrary (it can be anything from a small
 * integer to a bitmask). A hashmap could probably do better.
 */
static struct {
	enum pm_state state;
	uint8_t substate_id;
	atomic_t lock;
} substate_lock_t[] = {
	DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT)
};

#endif
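
/*
 * Illustrative sketch only (not generated code): assuming a hypothetical
 * devicetree power state node such as
 *
 *     state0: state0 {
 *             compatible = "zephyr,power-state";
 *             power-state-name = "suspend-to-idle";
 *             substate-id = <1>;
 *     };
 *
 * DT_SUB_LOCK_INIT(DT_NODELABEL(state0)) would contribute an entry roughly
 * equivalent to
 *
 *     { .state = PM_STATE_SUSPEND_TO_IDLE, .substate_id = 1, .lock = ATOMIC_INIT(0) },
 *
 * to the substate_lock_t[] array above.
 */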

#if defined(CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)

struct pm_state_device_constraint {
	const struct device *const dev;
	size_t pm_constraints_size;
	struct pm_state_constraint *constraints;
};

/**
 * @brief Synthesize the name of the object that holds a device pm constraint.
 *
 * @param node_id Node identifier.
 */
#define PM_CONSTRAINTS_NAME(node_id) _CONCAT(__devicepmconstraints_, node_id)

/**
 * @brief Initialize a device pm constraint with information from devicetree.
 *
 * @param node_id Node identifier.
 */
#define PM_STATE_CONSTRAINT_INIT(node_id)                                     \
	{                                                                     \
		.state = PM_STATE_DT_INIT(node_id),                           \
		.substate_id = DT_PROP_OR(node_id, substate_id, 0),           \
	}

/**
 * @brief Helper macro to define a device pm constraint.
 */
#define PM_STATE_CONSTRAINT_DEFINE(i, node_id)                                  \
	COND_CODE_1(DT_NODE_HAS_STATUS(DT_PHANDLE_BY_IDX(node_id,               \
		zephyr_disabling_power_states, i), okay),                       \
		(PM_STATE_CONSTRAINT_INIT(DT_PHANDLE_BY_IDX(node_id,            \
		zephyr_disabling_power_states, i)),), ())

/**
 * @brief Helper macro to generate a list of device pm constraints.
 */
#define PM_STATE_CONSTRAINTS_DEFINE(node_id)                                           \
	{                                                                              \
		LISTIFY(DT_PROP_LEN_OR(node_id, zephyr_disabling_power_states, 0),     \
			PM_STATE_CONSTRAINT_DEFINE, (), node_id)                       \
	}

/**
 * @brief Helper macro to define an array of device pm constraints.
 */
#define CONSTRAINTS_DEFINE(node_id)                         \
	Z_DECL_ALIGN(struct pm_state_constraint)            \
		PM_CONSTRAINTS_NAME(node_id)[] =            \
		PM_STATE_CONSTRAINTS_DEFINE(node_id);

#define DEVICE_CONSTRAINTS_DEFINE(node_id)                                           \
	COND_CODE_0(DT_NODE_HAS_PROP(node_id, zephyr_disabling_power_states), (),    \
		(CONSTRAINTS_DEFINE(node_id)))

DT_FOREACH_STATUS_OKAY_NODE(DEVICE_CONSTRAINTS_DEFINE)

/**
 * @brief Helper macro to initialize a pm state device constraint.
 */
#define PM_STATE_DEVICE_CONSTRAINT_INIT(node_id)                                              \
	{                                                                                     \
		.dev = DEVICE_DT_GET(node_id),                                                \
		.pm_constraints_size = DT_PROP_LEN(node_id, zephyr_disabling_power_states),   \
		.constraints = PM_CONSTRAINTS_NAME(node_id),                                  \
	},

/**
 * @brief Helper macro to conditionally initialize a pm state device constraint,
 *        only for nodes with a zephyr,disabling-power-states property.
 */
#define PM_STATE_DEVICE_CONSTRAINT_DEFINE(node_id)                                      \
	COND_CODE_0(DT_NODE_HAS_PROP(node_id, zephyr_disabling_power_states), (),       \
		(PM_STATE_DEVICE_CONSTRAINT_INIT(node_id)))

static struct pm_state_device_constraint _devices_constraints[] = {
	DT_FOREACH_STATUS_OKAY_NODE(PM_STATE_DEVICE_CONSTRAINT_DEFINE)
};

#endif /* CONFIG_PM_POLICY_DEVICE_CONSTRAINTS */
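
/*
 * Illustrative sketch only: assuming a hypothetical device node that lists
 * the power states it cannot tolerate, e.g.
 *
 *     my_uart: uart@40000000 {
 *             ...
 *             zephyr,disabling-power-states = <&state0 &state1>;
 *     };
 *
 * the macros above would produce a PM_CONSTRAINTS_NAME(my_uart) array with
 * one pm_state_constraint entry per referenced state, plus one
 * _devices_constraints[] entry binding DEVICE_DT_GET(my_uart) to that array.
 * pm_policy_device_power_lock_get()/put() below then translate a device
 * handle into state lock get/put calls on those states.
 */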

/** Lock to synchronize access to the latency request list. */
static struct k_spinlock latency_lock;
/** List of maximum latency requests. */
static sys_slist_t latency_reqs;
/** Maximum CPU latency in us */
static int32_t max_latency_us = SYS_FOREVER_US;
/** Maximum CPU latency in cycles */
static int32_t max_latency_cyc = -1;
/** List of latency change subscribers. */
static sys_slist_t latency_subs;

/** Lock to synchronize access to the events list. */
static struct k_spinlock events_lock;
/** List of events. */
static sys_slist_t events_list;
/** Next event, in absolute cycles (<0: none, [0, UINT32_MAX]: cycles) */
static int64_t next_event_cyc = -1;

/** @brief Update maximum allowed latency. */
static void update_max_latency(void)
{
	int32_t new_max_latency_us = SYS_FOREVER_US;
	struct pm_policy_latency_request *req;

	SYS_SLIST_FOR_EACH_CONTAINER(&latency_reqs, req, node) {
		if ((new_max_latency_us == SYS_FOREVER_US) ||
		    ((int32_t)req->value_us < new_max_latency_us)) {
			new_max_latency_us = (int32_t)req->value_us;
		}
	}

	if (max_latency_us != new_max_latency_us) {
		struct pm_policy_latency_subscription *sreq;
		int32_t new_max_latency_cyc = -1;

		SYS_SLIST_FOR_EACH_CONTAINER(&latency_subs, sreq, node) {
			sreq->cb(new_max_latency_us);
		}

		if (new_max_latency_us != SYS_FOREVER_US) {
			new_max_latency_cyc = (int32_t)k_us_to_cyc_ceil32(new_max_latency_us);
		}

		max_latency_us = new_max_latency_us;
		max_latency_cyc = new_max_latency_cyc;
	}
}

/** @brief Update next event. */
static void update_next_event(uint32_t cyc)
{
	int64_t new_next_event_cyc = -1;
	struct pm_policy_event *evt;

	SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) {
		uint64_t cyc_evt = evt->value_cyc;

		/*
		 * cyc value is a 32-bit rolling counter:
		 *
		 * |---------------->-----------------------|
		 * 0               cyc                  UINT32_MAX
		 *
		 * Values from [0, cyc) are events happening later than
		 * [cyc, UINT32_MAX], so pad [0, cyc) with UINT32_MAX + 1 to do
		 * the comparison.
		 */
		if (cyc_evt < cyc) {
			cyc_evt += (uint64_t)UINT32_MAX + 1U;
		}

		if ((new_next_event_cyc < 0) ||
		    (cyc_evt < new_next_event_cyc)) {
			new_next_event_cyc = cyc_evt;
		}
	}

	/* undo padding for events in the [0, cyc) range */
	if (new_next_event_cyc > UINT32_MAX) {
		new_next_event_cyc -= (uint64_t)UINT32_MAX + 1U;
	}

	next_event_cyc = new_next_event_cyc;
}
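
/*
 * Worked example of the padding above (illustrative values): with
 * cyc = 0xFFFF0000 and an event registered at value_cyc = 0x00000100, the
 * event is really 0x10100 cycles in the future even though its raw value is
 * numerically smaller. Padding it to 0x100000100 lets it compare correctly
 * against an unwrapped event at, say, 0xFFFF8000 (which is only 0x8000
 * cycles away and therefore wins); the padding is undone before storing
 * next_event_cyc.
 */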

#ifdef CONFIG_PM_POLICY_DEFAULT
const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
{
	int64_t cyc = -1;
	uint8_t num_cpu_states;
	const struct pm_state_info *cpu_states;

#ifdef CONFIG_PM_NEED_ALL_DEVICES_IDLE
	if (pm_device_is_any_busy()) {
		return NULL;
	}
#endif

	if (ticks != K_TICKS_FOREVER) {
		cyc = k_ticks_to_cyc_ceil32(ticks);
	}

	num_cpu_states = pm_state_cpu_get_all(cpu, &cpu_states);

	if (next_event_cyc >= 0) {
		uint32_t cyc_curr = k_cycle_get_32();
		int64_t cyc_evt = next_event_cyc - cyc_curr;

		/* event happens after the cycle counter wraps around, pad */
		if (next_event_cyc <= cyc_curr) {
			cyc_evt += UINT32_MAX;
		}

		if (cyc_evt > 0) {
			/* if there is no system wakeup, the policy event
			 * always wins; otherwise, whichever comes first wins
			 */
			if (cyc < 0) {
				cyc = cyc_evt;
			} else {
				cyc = MIN(cyc, cyc_evt);
			}
		}
	}

	for (int16_t i = (int16_t)num_cpu_states - 1; i >= 0; i--) {
		const struct pm_state_info *state = &cpu_states[i];
		uint32_t min_residency_cyc, exit_latency_cyc;

		/* check if there is a lock on state + substate */
		if (pm_policy_state_lock_is_active(state->state, state->substate_id)) {
			continue;
		}

		min_residency_cyc = k_us_to_cyc_ceil32(state->min_residency_us);
		exit_latency_cyc = k_us_to_cyc_ceil32(state->exit_latency_us);

		/* skip state if it brings too much latency */
		if ((max_latency_cyc >= 0) &&
		    (exit_latency_cyc >= max_latency_cyc)) {
			continue;
		}

		if ((cyc < 0) ||
		    (cyc >= (min_residency_cyc + exit_latency_cyc))) {
			return state;
		}
	}

	return NULL;
}
#endif
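
/*
 * In short, the default policy walks the CPU's states from deepest to
 * shallowest and returns the first one that is not locked, whose exit
 * latency fits within the current latency budget, and whose
 * min_residency + exit_latency fits within the expected idle time.
 * Illustrative numbers (not from any real SoC): with a 10 ms idle window, a
 * state with min-residency-us = 5000 and exit-latency-us = 1000 qualifies,
 * while one with min-residency-us = 20000 does not, so the policy falls back
 * to a shallower state or returns NULL (stay active).
 */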

void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
		if (substate_lock_t[i].state == state &&
		   (substate_lock_t[i].substate_id == substate_id ||
		    substate_id == PM_ALL_SUBSTATES)) {
			atomic_inc(&substate_lock_t[i].lock);
		}
	}
#endif
}

void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
		if (substate_lock_t[i].state == state &&
		   (substate_lock_t[i].substate_id == substate_id ||
		    substate_id == PM_ALL_SUBSTATES)) {
			atomic_t cnt = atomic_dec(&substate_lock_t[i].lock);

			ARG_UNUSED(cnt);

			__ASSERT(cnt >= 1, "Unbalanced state lock get/put");
		}
	}
#endif
}

bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
		if (substate_lock_t[i].state == state &&
		   (substate_lock_t[i].substate_id == substate_id ||
		    substate_id == PM_ALL_SUBSTATES)) {
			return (atomic_get(&substate_lock_t[i].lock) != 0);
		}
	}
#endif

	return false;
}
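
/*
 * Usage sketch (illustrative, not part of this file): an application or
 * driver that cannot tolerate a given state while doing work would wrap the
 * work in a get/put pair, e.g.
 *
 *     pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
 *     do_time_critical_work();
 *     pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
 *
 * do_time_critical_work() is a placeholder. The lock is reference counted,
 * so nested get/put pairs are allowed as long as they stay balanced.
 */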

void pm_policy_latency_request_add(struct pm_policy_latency_request *req,
				   uint32_t value_us)
{
	req->value_us = value_us;

	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	sys_slist_append(&latency_reqs, &req->node);
	update_max_latency();

	k_spin_unlock(&latency_lock, key);
}

void pm_policy_latency_request_update(struct pm_policy_latency_request *req,
				      uint32_t value_us)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	req->value_us = value_us;
	update_max_latency();

	k_spin_unlock(&latency_lock, key);
}

void pm_policy_latency_request_remove(struct pm_policy_latency_request *req)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	(void)sys_slist_find_and_remove(&latency_reqs, &req->node);
	update_max_latency();

	k_spin_unlock(&latency_lock, key);
}
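
/*
 * Usage sketch (illustrative): a driver that needs the system to resume
 * within 50 us while a transfer is in flight could do
 *
 *     static struct pm_policy_latency_request lat_req;
 *
 *     pm_policy_latency_request_add(&lat_req, 50);
 *     ... run the transfer ...
 *     pm_policy_latency_request_remove(&lat_req);
 *
 * The request object must stay valid (e.g. static) while it is on the list;
 * the effective limit is the minimum across all active requests.
 */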

void pm_policy_latency_changed_subscribe(struct pm_policy_latency_subscription *req,
					 pm_policy_latency_changed_cb_t cb)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	req->cb = cb;
	sys_slist_append(&latency_subs, &req->node);

	k_spin_unlock(&latency_lock, key);
}

void pm_policy_latency_changed_unsubscribe(struct pm_policy_latency_subscription *req)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	(void)sys_slist_find_and_remove(&latency_subs, &req->node);

	k_spin_unlock(&latency_lock, key);
}
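
/*
 * Usage sketch (illustrative): a subscriber receives the new limit in
 * microseconds (SYS_FOREVER_US when no request is active), as invoked from
 * update_max_latency() above. Note that the callback runs with the latency
 * spinlock held, so it must be short and must not block.
 *
 *     static void on_latency_changed(int32_t latency_us)
 *     {
 *             // reconfigure hardware for the new latency budget
 *     }
 *
 *     static struct pm_policy_latency_subscription sub;
 *
 *     pm_policy_latency_changed_subscribe(&sub, on_latency_changed);
 */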

void pm_policy_event_register(struct pm_policy_event *evt, uint32_t time_us)
{
	k_spinlock_key_t key = k_spin_lock(&events_lock);
	uint32_t cyc = k_cycle_get_32();

	evt->value_cyc = cyc + k_us_to_cyc_ceil32(time_us);
	sys_slist_append(&events_list, &evt->node);
	update_next_event(cyc);

	k_spin_unlock(&events_lock, key);
}

void pm_policy_event_update(struct pm_policy_event *evt, uint32_t time_us)
{
	k_spinlock_key_t key = k_spin_lock(&events_lock);
	uint32_t cyc = k_cycle_get_32();

	evt->value_cyc = cyc + k_us_to_cyc_ceil32(time_us);
	update_next_event(cyc);

	k_spin_unlock(&events_lock, key);
}

void pm_policy_event_unregister(struct pm_policy_event *evt)
{
	k_spinlock_key_t key = k_spin_lock(&events_lock);

	(void)sys_slist_find_and_remove(&events_list, &evt->node);
	update_next_event(k_cycle_get_32());

	k_spin_unlock(&events_lock, key);
}
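
/*
 * Usage sketch (illustrative): a driver expecting an interrupt in roughly
 * 2 ms can hint the policy so it does not pick a deep state that is not
 * worth entering for such a short window:
 *
 *     static struct pm_policy_event rx_evt;
 *
 *     pm_policy_event_register(&rx_evt, 2000);
 *     ...
 *     pm_policy_event_unregister(&rx_evt);
 *
 * time_us is relative to "now" as implemented above; the event object must
 * remain valid while registered.
 */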

void pm_policy_device_power_lock_get(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state) && defined(CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)
	for (size_t i = 0; i < ARRAY_SIZE(_devices_constraints); i++) {
		if (_devices_constraints[i].dev == dev) {
			for (size_t j = 0; j < _devices_constraints[i].pm_constraints_size; j++) {
				pm_policy_state_lock_get(
						_devices_constraints[i].constraints[j].state,
						_devices_constraints[i].constraints[j].substate_id);
			}
			break;
		}
	}
#endif
}

void pm_policy_device_power_lock_put(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state) && defined(CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)
	for (size_t i = 0; i < ARRAY_SIZE(_devices_constraints); i++) {
		if (_devices_constraints[i].dev == dev) {
			for (size_t j = 0; j < _devices_constraints[i].pm_constraints_size; j++) {
				pm_policy_state_lock_put(
						_devices_constraints[i].constraints[j].state,
						_devices_constraints[i].constraints[j].substate_id);
			}
			break;
		}
	}
#endif
}
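
/*
 * Usage sketch (illustrative): a driver can block every power state listed
 * in its zephyr,disabling-power-states property for the duration of an
 * operation, without knowing which states those are:
 *
 *     pm_policy_device_power_lock_get(dev);
 *     ... perform the operation ...
 *     pm_policy_device_power_lock_put(dev);
 *
 * where dev is the device's own handle (e.g. from DEVICE_DT_GET()).
 */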