/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_PM_POLICY_H_
#define ZEPHYR_INCLUDE_PM_POLICY_H_

#include <stdbool.h>
#include <stdint.h>

#include <zephyr/device.h>
#include <zephyr/pm/state.h>
#include <zephyr/sys/slist.h>
#include <zephyr/toolchain.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief System Power Management Policy API
 * @defgroup subsys_pm_sys_policy Policy
 * @ingroup subsys_pm_sys
 * @{
 */

/**
 * @brief Callback to notify when maximum latency changes.
 *
 * @param latency New maximum latency. Positive value represents latency in
 * microseconds. SYS_FOREVER_US value lifts the latency constraint. Other values
 * are forbidden.
 */
typedef void (*pm_policy_latency_changed_cb_t)(int32_t latency);

/**
 * @brief Latency change subscription.
 *
 * @note All fields in this structure are meant for private usage.
 */
struct pm_policy_latency_subscription {
	/** @cond INTERNAL_HIDDEN */
	sys_snode_t node;
	pm_policy_latency_changed_cb_t cb;
	/** @endcond */
};

/**
 * @brief Latency request.
 *
 * @note All fields in this structure are meant for private usage.
 */
struct pm_policy_latency_request {
	/** @cond INTERNAL_HIDDEN */
	sys_snode_t node;
	uint32_t value_us;
	/** @endcond */
};

/**
 * @brief Event.
 *
 * @note All fields in this structure are meant for private usage.
 */
struct pm_policy_event {
	/** @cond INTERNAL_HIDDEN */
	sys_snode_t node;
	int64_t uptime_ticks;
	/** @endcond */
};

/** @cond INTERNAL_HIDDEN */

/**
 * @brief Function to get the next PM state
 *
 * This function is called by the power subsystem when the system is
 * idle and returns the most appropriate state based on the number of
 * ticks to the next event.
 *
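 * When the application provides a custom policy (for example with
 * CONFIG_PM_POLICY_CUSTOM, assumed here), it implements this function itself.
 * A minimal sketch, with a hypothetical state table standing in for the
 * devicetree-generated one:
 *
 * @code{.c}
 * // Hypothetical table of allowed states for this example.
 * static const struct pm_state_info app_states[] = {
 *	{.state = PM_STATE_RUNTIME_IDLE, .min_residency_us = 100},
 *	{.state = PM_STATE_SUSPEND_TO_RAM, .min_residency_us = 5000},
 * };
 *
 * const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
 * {
 *	ARG_UNUSED(cpu);
 *
 *	// Walk from deepest to shallowest and pick the first state whose
 *	// minimum residency fits in the idle window.
 *	for (int i = ARRAY_SIZE(app_states) - 1; i >= 0; i--) {
 *		int32_t min_ticks =
 *			(int32_t)k_us_to_ticks_ceil32(app_states[i].min_residency_us);
 *
 *		if ((ticks == K_TICKS_FOREVER) || (ticks >= min_ticks)) {
 *			return &app_states[i];
 *		}
 *	}
 *
 *	return NULL; // stay in PM_STATE_ACTIVE
 * }
 * @endcode
 *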
 * @param cpu CPU index.
 * @param ticks The number of ticks to the next scheduled event.
 *
 * @return The power state the system should use for the given CPU. The
 * function returns NULL if the system should remain in PM_STATE_ACTIVE.
 */
const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks);

/** @endcond */

/** Special value for 'all substates'. */
#define PM_ALL_SUBSTATES (UINT8_MAX)

#if defined(CONFIG_PM) || defined(__DOXYGEN__)
/**
 * @brief Increase a power state lock counter.
 *
 * A power state will no longer be allowed after the first call to
 * pm_policy_state_lock_get(). Subsequent calls just increase a reference
 * count, so this API can be safely used concurrently. The state will be
 * allowed again after pm_policy_state_lock_put() has been called as many
 * times as pm_policy_state_lock_get().
 *
 * Note that the PM_STATE_ACTIVE state is always allowed, so calling this API
 * with PM_STATE_ACTIVE will have no effect.
 *
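 * A minimal sketch (the state used here is a placeholder for whatever states
 * the SoC actually defines):
 *
 * @code{.c}
 * // Prevent suspend-to-RAM (all substates) around a timing-critical section.
 * pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
 *
 * do_timing_critical_work(); // hypothetical application function
 *
 * pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
 * @endcode
 *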
 * @param state Power state.
 * @param substate_id Power substate ID. Use PM_ALL_SUBSTATES to affect all the
 *		      substates in the given power state.
 *
 * @see pm_policy_state_lock_put()
 */
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id);

/**
 * @brief Decrease a power state lock counter.
 *
 * @param state Power state.
 * @param substate_id Power substate ID. Use PM_ALL_SUBSTATES to affect all the
 *		      substates in the given power state.
 *
 * @see pm_policy_state_lock_get()
 */
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id);

/**
 * @brief Check if a power state lock is active (not allowed).
 *
 * @param state Power state.
 * @param substate_id Power substate ID. Use PM_ALL_SUBSTATES to affect all the
 *		      substates in the given power state.
 *
 * @retval true if power state lock is active.
 * @retval false if power state lock is not active.
 */
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id);

/**
 * @brief Register an event.
 *
 * Events in the power-management policy context are defined as any source that
 * will wake up the system at a known time in the future. By registering such
 * an event, the policy manager will be able to decide whether certain power
 * states are worth entering or not.
 *
 * The CPU is woken up before the time passed in @p uptime_ticks to minimize
 * event handling latency. Once woken up, the CPU will be kept awake until the
 * event has been handled, which is signaled by pm_policy_event_unregister() or
 * by moving the event into the future using pm_policy_event_update().
 *
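 * A minimal sketch, assuming a hypothetical driver that expects its DMA
 * transfer to complete in roughly 10 ms:
 *
 * @code{.c}
 * static struct pm_policy_event dma_done_evt;
 *
 * // Tell the policy manager when the wake-up is expected.
 * pm_policy_event_register(&dma_done_evt,
 *			    k_uptime_ticks() + k_ms_to_ticks_ceil64(10));
 *
 * // If the estimate changes, move the event instead of re-registering it.
 * pm_policy_event_update(&dma_done_evt,
 *			  k_uptime_ticks() + k_ms_to_ticks_ceil64(15));
 *
 * // Once the event has been handled:
 * pm_policy_event_unregister(&dma_done_evt);
 * @endcode
 *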
 * @param evt Event.
 * @param uptime_ticks When the event will occur, in uptime ticks.
 *
 * @see pm_policy_event_unregister()
 */
void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks);

/**
 * @brief Update an event.
 *
 * This shortcut allows moving the time an event will occur without the need
 * for an unregister + register cycle.
 *
 * @param evt Event.
 * @param uptime_ticks When the event will occur, in uptime ticks.
 *
 * @see pm_policy_event_register
 */
void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks);

/**
 * @brief Unregister an event.
 *
 * @param evt Event.
 *
 * @see pm_policy_event_register
 */
void pm_policy_event_unregister(struct pm_policy_event *evt);

/**
 * @brief Increase power state locks.
 *
 * Set power state locks in all power states that disable power to the given
 * device.
 *
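 * A minimal sketch, guarding a transaction on some device (the node label is a
 * placeholder):
 *
 * @code{.c}
 * const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(my_sensor));
 *
 * // Keep the device powered while it is being used.
 * pm_policy_device_power_lock_get(dev);
 * read_sensor_samples(dev); // hypothetical application function
 * pm_policy_device_power_lock_put(dev);
 * @endcode
 *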
 * @param dev Device reference.
 *
 * @see pm_policy_device_power_lock_put()
 * @see pm_policy_state_lock_get()
 */
void pm_policy_device_power_lock_get(const struct device *dev);

/**
 * @brief Decrease power state locks.
 *
 * Remove power state locks from all power states that disable power to the
 * given device.
 *
 * @param dev Device reference.
 *
 * @see pm_policy_device_power_lock_get()
 * @see pm_policy_state_lock_put()
 */
void pm_policy_device_power_lock_put(const struct device *dev);

/**
 * @brief Returns the ticks until the next event.
 *
 * If an event is registered, this returns the number of ticks until the next
 * (i.e. earliest) registered event. If that event is already in the past, it
 * returns 0. If no event is registered, it returns -1.
 *
 * @retval >0 If next registered event is in the future
 * @retval 0 If next registered event is now or in the past
 * @retval -1 Otherwise
 */
int64_t pm_policy_next_event_ticks(void);

#else
static inline void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(state);
	ARG_UNUSED(substate_id);
}

static inline void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(state);
	ARG_UNUSED(substate_id);
}

static inline bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(state);
	ARG_UNUSED(substate_id);

	return false;
}

static inline void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks)
{
	ARG_UNUSED(evt);
	ARG_UNUSED(uptime_ticks);
}

static inline void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks)
{
	ARG_UNUSED(evt);
	ARG_UNUSED(uptime_ticks);
}

static inline void pm_policy_event_unregister(struct pm_policy_event *evt)
{
	ARG_UNUSED(evt);
}

static inline void pm_policy_device_power_lock_get(const struct device *dev)
{
	ARG_UNUSED(dev);
}

static inline void pm_policy_device_power_lock_put(const struct device *dev)
{
	ARG_UNUSED(dev);
}

static inline int64_t pm_policy_next_event_ticks(void)
{
	return -1;
}

#endif /* CONFIG_PM */


#if defined(CONFIG_PM) || defined(CONFIG_PM_POLICY_LATENCY_STANDALONE) || defined(__DOXYGEN__)
/**
 * @brief Add a new latency requirement.
 *
 * The system will not enter any power state that would make the system exceed
 * the given latency value.
 *
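 * A minimal sketch, assuming a driver that cannot tolerate more than 20 us of
 * wake-up latency while a transfer is in flight:
 *
 * @code{.c}
 * static struct pm_policy_latency_request latency_req;
 *
 * // Constrain resume latency for the duration of the transfer.
 * pm_policy_latency_request_add(&latency_req, 20);
 *
 * start_transfer();          // hypothetical application functions
 * wait_for_transfer_done();
 *
 * pm_policy_latency_request_remove(&latency_req);
 * @endcode
 *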
 * @param req Latency request.
 * @param value_us Maximum allowed latency in microseconds.
 */
void pm_policy_latency_request_add(struct pm_policy_latency_request *req,
				   uint32_t value_us);

/**
 * @brief Update a latency requirement.
 *
 * @param req Latency request.
 * @param value_us New maximum allowed latency in microseconds.
 */
void pm_policy_latency_request_update(struct pm_policy_latency_request *req,
				      uint32_t value_us);

/**
 * @brief Remove a latency requirement.
 *
 * @param req Latency request.
 */
void pm_policy_latency_request_remove(struct pm_policy_latency_request *req);

/**
 * @brief Subscribe to maximum latency changes.
 *
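 * A minimal sketch, with an application-defined callback:
 *
 * @code{.c}
 * static void on_max_latency_changed(int32_t latency)
 * {
 *	// latency is in microseconds, or SYS_FOREVER_US when unconstrained.
 *	printk("Maximum latency is now %d us\n", latency);
 * }
 *
 * static struct pm_policy_latency_subscription latency_sub;
 *
 * pm_policy_latency_changed_subscribe(&latency_sub, on_max_latency_changed);
 * @endcode
 *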
 * @param req Subscription request.
 * @param cb Callback function (NULL to disable).
 */
void pm_policy_latency_changed_subscribe(struct pm_policy_latency_subscription *req,
					 pm_policy_latency_changed_cb_t cb);

/**
 * @brief Unsubscribe from maximum latency changes.
 *
 * @param req Subscription request.
 */
void pm_policy_latency_changed_unsubscribe(struct pm_policy_latency_subscription *req);
#else
static inline void pm_policy_latency_request_add(
	struct pm_policy_latency_request *req, uint32_t value_us)
{
	ARG_UNUSED(req);
	ARG_UNUSED(value_us);
}

static inline void pm_policy_latency_request_update(
	struct pm_policy_latency_request *req, uint32_t value_us)
{
	ARG_UNUSED(req);
	ARG_UNUSED(value_us);
}

static inline void pm_policy_latency_request_remove(
	struct pm_policy_latency_request *req)
{
	ARG_UNUSED(req);
}
#endif /* CONFIG_PM || CONFIG_PM_POLICY_LATENCY_STANDALONE */

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_PM_POLICY_H_ */