/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2021 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);

#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN
#define PM_DOMAIN(_pm) \
	(_pm)->domain
#else
#define PM_DOMAIN(_pm) NULL
#endif

#define EVENT_STATE_ACTIVE	BIT(PM_DEVICE_STATE_ACTIVE)
#define EVENT_STATE_SUSPENDED	BIT(PM_DEVICE_STATE_SUSPENDED)

#define EVENT_MASK		(EVENT_STATE_ACTIVE | EVENT_STATE_SUSPENDED)

/**
 * @brief Suspend a device
 *
 * @note Asynchronous operations are not supported when in pre-kernel mode. In
 * this case, the async flag will always be forced to false, so the function
 * will be blocking.
 *
 * @funcprops \pre_kernel_ok
 *
 * @param dev Device instance.
 * @param async Perform operation asynchronously.
 * @param delay Period to delay the asynchronous operation.
 *
 * @retval 0 If device has been suspended or queued for suspend.
 * @retval -EALREADY If device is already suspended (can only happen if get/put
 * calls are unbalanced).
 * @retval -EBUSY If the device is busy.
 * @retval -errno Other negative errno, result of the action callback.
 */
static int runtime_suspend(const struct device *dev, bool async,
			k_timeout_t delay)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (k_is_pre_kernel()) {
		async = false;
	} else {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EBUSY;
		}
	}

	if (pm->base.usage == 0U) {
		LOG_WRN("Unbalanced suspend");
		ret = -EALREADY;
		goto unlock;
	}

	pm->base.usage--;
	if (pm->base.usage > 0U) {
		goto unlock;
	}

	if (async) {
		/* queue suspend */
		pm->base.state = PM_DEVICE_STATE_SUSPENDING;
		(void)k_work_schedule(&pm->work, delay);
	} else {
		/* suspend now */
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			pm->base.usage++;
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	return ret;
}

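/**
 * @brief Work handler for deferred (asynchronous) suspend requests
 *
 * Scheduled by runtime_suspend() when @c async is true. Runs the SUSPEND
 * action, updates the device state and usage count accordingly, and notifies
 * any waiters through the event object. On success, a previously claimed
 * power domain is released as well.
 */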
static void runtime_suspend_work(struct k_work *work)
{
	int ret;
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);

	(void)k_sem_take(&pm->lock, K_FOREVER);
	if (ret < 0) {
		pm->base.usage++;
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}
	k_event_set(&pm->event, BIT(pm->base.state));
	k_sem_give(&pm->lock);

	/*
	 * On async put, we have to suspend the domain when the device
	 * finishes its operation.
	 */
	if ((ret == 0) &&
	    atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		(void)pm_device_runtime_put(PM_DOMAIN(&pm->base));
	}

	__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}

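/**
 * @brief Resume an ISR-safe device (called with the device spinlock held)
 *
 * On the first reference, the claimed power domain, if any, is resumed first
 * (it must itself be ISR-safe, otherwise -EWOULDBLOCK is returned) and the
 * RESUME action is run. The usage count is incremented on success.
 */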
static int get_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (pm->base.usage == 0) {
		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = pm_device_runtime_get(domain);
				if (ret < 0) {
					return ret;
				}
			} else {
				return -EWOULDBLOCK;
			}
		}

		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.usage++;

	return ret;
}

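/*
 * Resume a device, or just increment its usage count if it is already
 * active. Typical driver usage is a balanced get/put pair around each
 * device access, e.g.:
 *
 *	ret = pm_device_runtime_get(dev);
 *	if (ret < 0) {
 *		return ret;
 *	}
 *	... access the device ...
 *	(void)pm_device_runtime_put(dev);
 */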
int pm_device_runtime_get(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	if (pm == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = get_sync_locked(dev);
		k_spin_unlock(&pm_sync->lock, k);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EWOULDBLOCK;
		}
	}

	if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}

	/*
	 * If the device is under a power domain, the domain has to be claimed
	 * first.
	 */
	const struct device *domain = PM_DOMAIN(&pm->base);

	if (domain != NULL) {
		ret = pm_device_runtime_get(domain);
		if (ret != 0) {
			goto unlock;
		}
		/* Check if powering up this device failed */
		if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) {
			(void)pm_device_runtime_put(domain);
			ret = -EAGAIN;
			goto unlock;
		}
		/* Power domain successfully claimed */
		atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED);
	}

	pm->base.usage++;

	/*
	 * Check if the device has a pending suspend operation (not started
	 * yet) and cancel it. This way we avoid unnecessary operations because
	 * the device is actually active.
	 */
	if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
		((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
		goto unlock;
	}

	if (!k_is_pre_kernel()) {
		/*
		 * If the device is already suspending there is
		 * nothing else we can do but wait until it finishes.
		 */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}

	if (pm->base.usage > 1U) {
		goto unlock;
	}

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
	if (ret < 0) {
		pm->base.usage--;
		goto unlock;
	}

	pm->base.state = PM_DEVICE_STATE_ACTIVE;

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);

	return ret;
}

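/**
 * @brief Release one reference on an ISR-safe device (spinlock held)
 *
 * When the usage count drops to zero the SUSPEND action is run and, if a
 * power domain was claimed, the domain reference is released as well
 * (provided the domain is ISR-safe too).
 */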
static int put_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (!(flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) {
		return 0;
	}

	if (pm->base.usage == 0U) {
		return -EALREADY;
	}

	pm->base.usage--;
	if (pm->base.usage == 0U) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;

		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = put_sync_locked(domain);
			} else {
				ret = -EWOULDBLOCK;
			}
		}
	} else {
		ret = 0;
	}

	return ret;
}

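/*
 * Release a reference on a device, suspending it synchronously when the
 * usage count drops to zero. ISR-safe devices use the spinlock-protected
 * path; otherwise the suspend may block and, on success, the claimed power
 * domain is released as well.
 */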
int pm_device_runtime_put(const struct device *dev)
{
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev);

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, false, K_NO_WAIT);

		/*
		 * Now put the domain.
		 */
		if ((ret == 0) &&
		    atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
			ret = pm_device_runtime_put(PM_DOMAIN(dev->pm_base));
		}
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret);

	return ret;
}

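/*
 * Release a reference on a device, deferring the suspend to the work queue
 * after the given delay. ISR-safe devices do not use the work queue and are
 * suspended immediately under their spinlock.
 */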
int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay)
{
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay);
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, true, delay);
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret);

	return ret;
}

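/*
 * Enable runtime PM at boot for devices that requested it via
 * PM_DEVICE_FLAG_RUNTIME_AUTO; a no-op for all other devices.
 */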
__boot_func
int pm_device_runtime_auto_enable(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	/* No action needed if PM_DEVICE_FLAG_RUNTIME_AUTO is not enabled */
	if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) {
		return 0;
	}
	return pm_device_runtime_enable(dev);
}

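/**
 * @brief Enable runtime PM on an ISR-safe device
 *
 * Suspends the device if it is currently active, resets the usage count and
 * sets the runtime-enabled flag, all under the device spinlock.
 */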
static int runtime_enable_sync(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	} else {
		ret = 0;
	}

	pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
	pm->base.usage = 0U;
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

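/*
 * Enable runtime PM on a device: lazily initialize the PM work item,
 * suspend the device if it is still active and reset the usage count, so
 * that the device stays suspended until the first pm_device_runtime_get().
 */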
int pm_device_runtime_enable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_enable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (pm_device_state_is_locked(dev)) {
		ret = -EPERM;
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_enable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	/* lazy init of PM fields */
	if (pm->dev == NULL) {
		pm->dev = dev;
		k_work_init_delayable(&pm->work, runtime_suspend_work);
	}

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

	pm->base.usage = 0U;

	atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_enable, dev, ret);
	return ret;
}

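/**
 * @brief Disable runtime PM on an ISR-safe device
 *
 * Resumes the device if it is currently suspended and clears the
 * runtime-enabled flag, all under the device spinlock.
 */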
static int runtime_disable_sync(const struct device *dev)
{
	struct pm_device_isr *pm = dev->pm_isr;
	int ret;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

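/*
 * Disable runtime PM on a device: cancel or wait out any pending async
 * suspend, resume the device if it is suspended and clear the
 * runtime-enabled flag.
 */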
int pm_device_runtime_disable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_disable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	if (!k_is_pre_kernel()) {
		if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
			((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
			pm->base.state = PM_DEVICE_STATE_ACTIVE;
			goto clear_bit;
		}

		/* wait until possible async suspend is completed */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}

	/* wake up the device if suspended */
	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	}

clear_bit:
	atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);

	return ret;
}

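/* Report whether runtime PM is currently enabled for the device. */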
bool pm_device_runtime_is_enabled(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
}