/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2021 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);

#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN
#define PM_DOMAIN(_pm) \
	(_pm)->domain
#else
#define PM_DOMAIN(_pm) NULL
#endif

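/*
 * Event bits used to signal completion of an asynchronous state transition
 * to threads waiting for the device to leave PM_DEVICE_STATE_SUSPENDING.
 */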
#define EVENT_STATE_ACTIVE	BIT(PM_DEVICE_STATE_ACTIVE)
#define EVENT_STATE_SUSPENDED	BIT(PM_DEVICE_STATE_SUSPENDED)

#define EVENT_MASK		(EVENT_STATE_ACTIVE | EVENT_STATE_SUSPENDED)

/**
 * @brief Suspend a device
 *
 * @note Asynchronous operations are not supported when in pre-kernel mode. In
 * this case, the async flag is always forced to false, so the function
 * becomes blocking.
 *
 * @funcprops \pre_kernel_ok
 *
 * @param dev Device instance.
 * @param async Perform operation asynchronously.
 * @param delay Period to delay the asynchronous operation.
 *
 * @retval 0 If device has been suspended or queued for suspend.
 * @retval -EALREADY If device is already suspended (can only happen if get/put
 * calls are unbalanced).
 * @retval -EBUSY If the device is busy.
 * @retval -errno Other negative errno, result of the action callback.
 */
static int runtime_suspend(const struct device *dev, bool async,
			k_timeout_t delay)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (k_is_pre_kernel()) {
		async = false;
	} else {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EBUSY;
		}
	}

	if (pm->base.usage == 0U) {
		LOG_WRN("Unbalanced suspend");
		ret = -EALREADY;
		goto unlock;
	}

	pm->base.usage--;
	if (pm->base.usage > 0U) {
		goto unlock;
	}

	if (async) {
		/* queue suspend */
		pm->base.state = PM_DEVICE_STATE_SUSPENDING;
		(void)k_work_schedule(&pm->work, delay);
	} else {
		/* suspend now */
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			pm->base.usage++;
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	return ret;
}

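/*
 * Handler for the delayable work item scheduled by runtime_suspend() when an
 * asynchronous put is requested. It runs the SUSPEND action, updates the
 * device state accordingly, wakes up any thread waiting for the transition to
 * complete and, on success, releases the claimed power domain.
 */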
static void runtime_suspend_work(struct k_work *work)
{
	int ret;
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);

	(void)k_sem_take(&pm->lock, K_FOREVER);
	if (ret < 0) {
		pm->base.usage++;
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}
	k_event_set(&pm->event, BIT(pm->base.state));
	k_sem_give(&pm->lock);

	/*
	 * On an asynchronous put, the claimed power domain has to be released
	 * once the device has finished its own suspend operation.
	 */
	if ((ret == 0) &&
	    atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		(void)pm_device_runtime_put(PM_DOMAIN(&pm->base));
	}

	__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}

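/*
 * ISR-safe variant of the "get" operation, called with the device's PM
 * spinlock held. When the usage count is zero it resumes the device, first
 * getting its claimed power domain (which must itself be ISR-safe).
 */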
static int get_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (pm->base.usage == 0) {
		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = pm_device_runtime_get(domain);
				if (ret < 0) {
					return ret;
				}
			} else {
				return -EWOULDBLOCK;
			}
		}

		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.usage++;

	return ret;
}

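/*
 * Typical driver-side usage of the get/put pair (illustrative sketch only;
 * "my_dev" and my_transfer() are hypothetical and not part of this file):
 *
 *	int my_transfer(const struct device *my_dev)
 *	{
 *		int ret = pm_device_runtime_get(my_dev);
 *
 *		if (ret < 0) {
 *			return ret;
 *		}
 *
 *		// ... access the now-resumed hardware ...
 *
 *		return pm_device_runtime_put(my_dev);
 *	}
 */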
int pm_device_runtime_get(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	if (pm == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

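	/* ISR-safe devices are handled through a spinlock-protected fast path. */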
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = get_sync_locked(dev);
		k_spin_unlock(&pm_sync->lock, k);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EWOULDBLOCK;
		}
	}

	if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}

	/*
	 * If the device is under a power domain, the domain has to be
	 * claimed first.
	 */
	const struct device *domain = PM_DOMAIN(&pm->base);

	if (domain != NULL) {
		ret = pm_device_runtime_get(domain);
		if (ret != 0) {
			goto unlock;
		}
		/* Check if powering up this device failed */
		if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) {
			(void)pm_device_runtime_put(domain);
			ret = -EAGAIN;
			goto unlock;
		}
		/* Power domain successfully claimed */
		atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED);
	}

	pm->base.usage++;

	/*
	 * Check if the device has a pending suspend operation (not started
	 * yet) and cancel it. This way we avoid unnecessary operations because
	 * the device is actually active.
	 */
	if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
		((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
		goto unlock;
	}

	if (!k_is_pre_kernel()) {
		/*
		 * If the device is already suspending there is
		 * nothing else we can do but wait until it finishes.
		 */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_event_clear(&pm->event, EVENT_MASK);
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}

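	/*
	 * Only the first user actually resumes the device; additional users
	 * just keep the usage count above zero.
	 */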
	if (pm->base.usage > 1U) {
		goto unlock;
	}

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
	if (ret < 0) {
		pm->base.usage--;
		goto unlock;
	}

	pm->base.state = PM_DEVICE_STATE_ACTIVE;

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);

	return ret;
}

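/*
 * ISR-safe variant of the "put" operation, called with the device's PM
 * spinlock held. When the usage count drops to zero it suspends the device
 * and releases its claimed power domain (which must itself be ISR-safe).
 */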
static int put_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (!(flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) {
		return 0;
	}

	if (pm->base.usage == 0U) {
		return -EALREADY;
	}

	pm->base.usage--;
	if (pm->base.usage == 0U) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;

		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = put_sync_locked(domain);
			} else {
				ret = -EWOULDBLOCK;
			}
		}
	} else {
		ret = 0;
	}

	return ret;
}

int pm_device_runtime_put(const struct device *dev)
{
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev);

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, false, K_NO_WAIT);

		/*
		 * Also release the power domain that was claimed when the
		 * device was resumed.
		 */
		if ((ret == 0) &&
		    atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
			ret = pm_device_runtime_put(PM_DOMAIN(dev->pm_base));
		}
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret);

	return ret;
}

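/*
 * A driver that wants to keep the device powered for a short grace period
 * after its last access can use the asynchronous put below, for example
 * (hypothetical device handle and delay):
 *
 *	(void)pm_device_runtime_put_async(my_dev, K_MSEC(100));
 */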
int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay)
{
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay);
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, true, delay);
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret);

	return ret;
}

__boot_func
int pm_device_runtime_auto_enable(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	/* No action needed if PM_DEVICE_FLAG_RUNTIME_AUTO is not enabled */
	if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) {
		return 0;
	}
	return pm_device_runtime_enable(dev);
}

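/*
 * Enable runtime PM on an ISR-safe device: under the device's PM spinlock,
 * suspend the device if it is currently active and reset the usage count.
 */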
static int runtime_enable_sync(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	} else {
		ret = 0;
	}

	pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
	pm->base.usage = 0U;
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

int pm_device_runtime_enable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_enable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (pm_device_is_busy(dev)) {
		ret = -EBUSY;
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_enable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	/* lazy init of PM fields */
	if (pm->dev == NULL) {
		pm->dev = dev;
		k_work_init_delayable(&pm->work, runtime_suspend_work);
	}

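	/*
	 * Runtime PM starts with the device suspended and the usage count at
	 * zero: users must call pm_device_runtime_get() before accessing it.
	 */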
	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

	pm->base.usage = 0U;

	atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_enable, dev, ret);
	return ret;
}

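/*
 * Disable runtime PM on an ISR-safe device: under the device's PM spinlock,
 * resume the device if it is currently suspended and clear the
 * RUNTIME_ENABLED flag.
 */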
static int runtime_disable_sync(const struct device *dev)
{
	struct pm_device_isr *pm = dev->pm_isr;
	int ret;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

int pm_device_runtime_disable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_disable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	if (!k_is_pre_kernel()) {
		if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
			((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
			pm->base.state = PM_DEVICE_STATE_ACTIVE;
			goto clear_bit;
		}

		/* wait until possible async suspend is completed */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_event_clear(&pm->event, EVENT_MASK);
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}

	/* wake up the device if suspended */
	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	}

clear_bit:
	atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);

	return ret;
}

bool pm_device_runtime_is_enabled(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
}

int pm_device_runtime_usage(const struct device *dev)
{
	if (!pm_device_runtime_is_enabled(dev)) {
		return -ENOTSUP;
	}

	return dev->pm_base->usage;
}