/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2021 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);

#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN
#define PM_DOMAIN(_pm) \
	(_pm)->domain
#else
#define PM_DOMAIN(_pm) NULL
#endif

#define EVENT_STATE_ACTIVE	BIT(PM_DEVICE_STATE_ACTIVE)
#define EVENT_STATE_SUSPENDED	BIT(PM_DEVICE_STATE_SUSPENDED)

#define EVENT_MASK		(EVENT_STATE_ACTIVE | EVENT_STATE_SUSPENDED)
/**
 * @brief Suspend a device
 *
 * @note Asynchronous operations are not supported when in pre-kernel mode. In
 * this case, the async flag will always be forced to false, and so the
 * function will be blocking.
 *
 * @funcprops \pre_kernel_ok
 *
 * @param dev Device instance.
 * @param async Perform operation asynchronously.
 *
 * @retval 0 If device has been suspended or queued for suspend.
 * @retval -EALREADY If device is already suspended (can only happen if get/put
 * calls are unbalanced).
 * @retval -EBUSY If the device is busy.
 * @retval -errno Other negative errno, result of the action callback.
 */
static int runtime_suspend(const struct device *dev, bool async)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (k_is_pre_kernel()) {
		async = false;
	} else {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EBUSY;
		}
	}

	if (pm->usage == 0U) {
		LOG_WRN("Unbalanced suspend");
		ret = -EALREADY;
		goto unlock;
	}

	pm->usage--;
	if (pm->usage > 0U) {
		goto unlock;
	}

	if (async && !k_is_pre_kernel()) {
		/* queue suspend */
		pm->state = PM_DEVICE_STATE_SUSPENDING;
		(void)k_work_schedule(&pm->work, K_NO_WAIT);
	} else {
		/* suspend now */
		ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			pm->usage++;
			goto unlock;
		}

		pm->state = PM_DEVICE_STATE_SUSPENDED;
	}

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	return ret;
}

static void runtime_suspend_work(struct k_work *work)
{
	int ret;
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);

	ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);

	(void)k_sem_take(&pm->lock, K_FOREVER);
	if (ret < 0) {
		pm->usage++;
		pm->state = PM_DEVICE_STATE_ACTIVE;
	} else {
		pm->state = PM_DEVICE_STATE_SUSPENDED;
	}
	k_event_set(&pm->event, BIT(pm->state));
	k_sem_give(&pm->lock);

	/*
	 * On an async put, the domain can only be suspended once the device
	 * has finished its own suspend operation.
	 */
	if (PM_DOMAIN(pm) != NULL) {
		(void)pm_device_runtime_put(PM_DOMAIN(pm));
	}

	__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}

int pm_device_runtime_get(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	if (pm == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (!k_is_pre_kernel()) {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EWOULDBLOCK;
		}
	}

	if (k_is_in_isr() && (pm->state == PM_DEVICE_STATE_SUSPENDING)) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}

	/*
	 * If the device is under a power domain, the domain has to be
	 * claimed first.
	 */
	if (PM_DOMAIN(pm) != NULL) {
		ret = pm_device_runtime_get(PM_DOMAIN(pm));
		if (ret != 0) {
			goto unlock;
		}
		/* Check if powering up this device failed */
		if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) {
			(void)pm_device_runtime_put(PM_DOMAIN(pm));
			ret = -EAGAIN;
			goto unlock;
		}
		/* Power domain successfully claimed */
		atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_PD_CLAIMED);
	}

	pm->usage++;

	if (!k_is_pre_kernel()) {
		/* wait until possible async suspend is completed */
		while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}

	if (pm->usage > 1U) {
		goto unlock;
	}

	ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
	if (ret < 0) {
		pm->usage--;
		goto unlock;
	}

	pm->state = PM_DEVICE_STATE_ACTIVE;

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);

	return ret;
}

int pm_device_runtime_put(const struct device *dev)
{
	int ret;

	__ASSERT(!k_is_in_isr(), "use pm_device_runtime_put_async() in ISR");

	if (dev->pm == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev);
	ret = runtime_suspend(dev, false);

	/*
	 * Now put the domain
	 */
	if ((ret == 0) &&
	    atomic_test_and_clear_bit(&dev->pm->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		ret = pm_device_runtime_put(PM_DOMAIN(dev->pm));
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret);

	return ret;
}

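/*
 * Typical balanced usage of the get/put pair from thread context (a minimal
 * sketch; my_dev and my_transfer() are hypothetical, not part of this file):
 *
 *	int err = pm_device_runtime_get(my_dev);
 *	if (err < 0) {
 *		return err;
 *	}
 *	my_transfer(my_dev);	<- device is guaranteed resumed here
 *	err = pm_device_runtime_put(my_dev);
 *
 * Each successful get() must be matched by exactly one put() or put_async();
 * an extra put hits the -EALREADY "Unbalanced suspend" path in
 * runtime_suspend().
 */
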
int pm_device_runtime_put_async(const struct device *dev)
{
	int ret;

	if (dev->pm == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev);
	ret = runtime_suspend(dev, true);
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, ret);

	return ret;
}

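/*
 * Sketch of an asynchronous put from interrupt context, where the blocking
 * pm_device_runtime_put() is not allowed (my_isr() and my_dev are
 * hypothetical):
 *
 *	void my_isr(const void *arg)
 *	{
 *		const struct device *my_dev = arg;
 *
 *		(void)pm_device_runtime_put_async(my_dev);
 *	}
 *
 * The suspend itself then runs later in runtime_suspend_work() on the system
 * work queue rather than in the ISR.
 */
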
__boot_func
int pm_device_runtime_auto_enable(const struct device *dev)
{
	struct pm_device *pm = dev->pm;

	/* No action needed if PM_DEVICE_FLAG_RUNTIME_AUTO is not enabled */
	if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) {
		return 0;
	}
	return pm_device_runtime_enable(dev);
}

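/*
 * The PM_DEVICE_FLAG_RUNTIME_AUTO flag checked above is normally set from
 * devicetree; a minimal sketch, assuming a node that supports the
 * zephyr,pm-device-runtime-auto property:
 *
 *	&my_device {
 *		zephyr,pm-device-runtime-auto;
 *	};
 *
 * Devices marked this way get runtime PM enabled automatically at boot
 * through this function, without an explicit pm_device_runtime_enable() call
 * in the driver.
 */
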
int pm_device_runtime_enable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	if (pm == NULL) {
		return -ENOTSUP;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_enable, dev);

	if (pm_device_state_is_locked(dev)) {
		ret = -EPERM;
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto unlock;
	}

	/* lazy init of PM fields */
	if (pm->dev == NULL) {
		pm->dev = dev;
		k_work_init_delayable(&pm->work, runtime_suspend_work);
	}

	if (pm->state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}
		pm->state = PM_DEVICE_STATE_SUSPENDED;
	}

	pm->usage = 0U;

	atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_enable, dev, ret);
	return ret;
}

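/*
 * A driver typically enables runtime PM at the end of its init function (a
 * minimal sketch; my_driver_init() is hypothetical):
 *
 *	static int my_driver_init(const struct device *dev)
 *	{
 *		(hardware setup here)
 *
 *		return pm_device_runtime_enable(dev);
 *	}
 *
 * Note that enabling suspends an ACTIVE device and resets the usage count to
 * zero, so it must happen before any get()/put() traffic on the device.
 */
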
int pm_device_runtime_disable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	if (pm == NULL) {
		return -ENOTSUP;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto unlock;
	}

	/* wait until possible async suspend is completed */
	if (!k_is_pre_kernel()) {
		while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}

	/* wake up the device if suspended */
	if (pm->state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->state = PM_DEVICE_STATE_ACTIVE;
	}

	atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);

	return ret;
}

bool pm_device_runtime_is_enabled(const struct device *dev)
{
	struct pm_device *pm = dev->pm;

	return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
}