/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2021 Nordic Semiconductor ASA.
 * Copyright (c) 2025 HubbleNetwork.
 * Copyright (c) 2025 NXP.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);

#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN
#define PM_DOMAIN(_pm) \
	(_pm)->domain
#else
#define PM_DOMAIN(_pm) NULL
#endif

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ
K_THREAD_STACK_DEFINE(pm_device_runtime_stack, CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_STACK_SIZE);
static struct k_work_q pm_device_runtime_wq;
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

#define EVENT_STATE_ACTIVE	BIT(PM_DEVICE_STATE_ACTIVE)
#define EVENT_STATE_SUSPENDED	BIT(PM_DEVICE_STATE_SUSPENDED)

#define EVENT_MASK		(EVENT_STATE_ACTIVE | EVENT_STATE_SUSPENDED)
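
/*
 * Note: these event bits mirror the ACTIVE/SUSPENDED device states so that
 * callers can block until an in-flight asynchronous suspend settles. A
 * minimal sketch of the pattern used further below (not a separate API):
 *
 *	k_event_clear(&pm->event, EVENT_MASK);
 *	k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);
 *
 * while the suspend work item reports the final state with
 * k_event_set(&pm->event, BIT(pm->base.state)).
 */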

/**
 * @brief Suspend a device
 *
 * @note Asynchronous operations are not supported when in pre-kernel mode. In
 * this case, the async flag will always be forced to false, so the function
 * will block.
 *
 * @funcprops \pre_kernel_ok
 *
 * @param dev Device instance.
 * @param async Perform operation asynchronously.
 * @param delay Period to delay the asynchronous operation.
 *
 * @retval 0 If device has been suspended or queued for suspend.
 * @retval -EALREADY If device is already suspended (can only happen if get/put
 * calls are unbalanced).
 * @retval -EBUSY If the device is busy.
 * @retval -errno Other negative errno, result of the action callback.
 */
static int runtime_suspend(const struct device *dev, bool async,
			k_timeout_t delay)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (k_is_pre_kernel()) {
		async = false;
	} else {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EBUSY;
		}
	}

	if (pm->base.usage == 0U) {
		LOG_WRN("Unbalanced suspend");
		ret = -EALREADY;
		goto unlock;
	}

	pm->base.usage--;
	if (pm->base.usage > 0U) {
		goto unlock;
	}

	if (async) {
		/* queue suspend */
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		pm->base.state = PM_DEVICE_STATE_SUSPENDING;
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ
		(void)k_work_schedule(&pm->work, delay);
#else
		(void)k_work_schedule_for_queue(&pm_device_runtime_wq, &pm->work, delay);
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	} else {
		/* suspend now */
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			pm->base.usage++;
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;

		/* Now put the domain */
		if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
			(void)pm_device_runtime_put(PM_DOMAIN(dev->pm_base));
			atomic_clear_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED);
		}
	}

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	return ret;
}
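
/*
 * Usage note: runtime_suspend() is the common backend for the public put
 * calls below: pm_device_runtime_put() uses async = false (suspend happens
 * inline), while pm_device_runtime_put_async() uses async = true and defers
 * the suspend to a work queue after `delay`.
 */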

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
static void runtime_suspend_work(struct k_work *work)
{
	int ret;
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);

	(void)k_sem_take(&pm->lock, K_FOREVER);
	if (ret < 0) {
		pm->base.usage++;
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}
	k_event_set(&pm->event, BIT(pm->base.state));
	k_sem_give(&pm->lock);

	/*
	 * On async put, we have to suspend the domain when the device
	 * finishes its operation
	 */
	if ((ret == 0) &&
	    atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		(void)pm_device_runtime_put(PM_DOMAIN(&pm->base));
		atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED);
	}

	__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

static int get_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (pm->base.usage == 0) {
		if ((flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) == 0) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain != NULL) {
				if ((domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) != 0) {
					ret = pm_device_runtime_get(domain);
					if (ret < 0) {
						return ret;
					}
					/* Power domain successfully claimed */
					pm->base.flags |= BIT(PM_DEVICE_FLAG_PD_CLAIMED);
				} else {
					return -EWOULDBLOCK;
				}
			}
		}

		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.usage++;

	return ret;
}
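
/*
 * Note: get_sync_locked() runs with the ISR-safe spinlock held, so it never
 * blocks; if the device sits in a power domain that is not itself marked
 * ISR-safe, it bails out with -EWOULDBLOCK instead of waiting.
 */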

int pm_device_runtime_get(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	if (pm == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = get_sync_locked(dev);
		k_spin_unlock(&pm_sync->lock, k);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EWOULDBLOCK;
		}
	}

	if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}

	/*
	 * If the device is under a power domain, the domain has to be
	 * claimed first.
	 */
	const struct device *domain = PM_DOMAIN(&pm->base);

	if (domain != NULL && !atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		ret = pm_device_runtime_get(domain);
		if (ret != 0) {
			goto unlock;
		}
		/* Check if powering up this device failed */
		if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) {
			(void)pm_device_runtime_put(domain);
			ret = -EAGAIN;
			goto unlock;
		}
		/* Power domain successfully claimed */
		atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED);
	}

	pm->base.usage++;

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	/*
	 * Check if the device has a pending suspend operation (not started
	 * yet) and cancel it. This way we avoid unnecessary operations because
	 * the device is actually active.
	 */
	if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
		((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
		goto unlock;
	}

	if (!k_is_pre_kernel()) {
		/*
		 * If the device is already suspending there is
		 * nothing else we can do but wait until it finishes.
		 */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_event_clear(&pm->event, EVENT_MASK);
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

	if (pm->base.usage > 1U) {
		goto unlock;
	}

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
	if (ret < 0) {
		pm->base.usage--;
		if (domain != NULL) {
			(void)pm_device_runtime_put(domain);
		}
		goto unlock;
	}

	pm->base.state = PM_DEVICE_STATE_ACTIVE;

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);

	return ret;
}
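
/*
 * Illustrative usage (a sketch, not part of this file's build): a driver
 * typically brackets hardware access with get/put. The device pointer and
 * transfer helper below are hypothetical.
 *
 *	static int my_transfer(const struct device *dev, uint8_t *buf, size_t len)
 *	{
 *		int err = pm_device_runtime_get(dev);
 *
 *		if (err < 0) {
 *			return err;
 *		}
 *
 *		err = do_hw_transfer(dev, buf, len);
 *
 *		(void)pm_device_runtime_put(dev);
 *
 *		return err;
 *	}
 */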

static int put_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (!(flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) {
		return 0;
	}

	if (pm->base.usage == 0U) {
		return -EALREADY;
	}

	pm->base.usage--;
	if (pm->base.usage == 0U) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			pm->base.usage++;
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;

		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = put_sync_locked(domain);
				pm->base.flags &= ~BIT(PM_DEVICE_FLAG_PD_CLAIMED);
			} else {
				ret = -EWOULDBLOCK;
			}
		}
	} else {
		ret = 0;
	}

	return ret;
}

int pm_device_runtime_put(const struct device *dev)
{
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev);

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, false, K_NO_WAIT);
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret);

	return ret;
}
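
/*
 * Note: get/put calls are reference counted; only the put that drops the
 * usage count to zero actually suspends the device, and an extra put with a
 * zero count is reported as an unbalanced suspend (-EALREADY) by
 * runtime_suspend() above.
 */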

int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay)
{
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay);
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, true, delay);
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret);

	return ret;
#else
	LOG_WRN("Function not available");
	return -ENOSYS;
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
}
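
/*
 * Illustrative usage (a sketch, assuming CONFIG_PM_DEVICE_RUNTIME_ASYNC=y):
 * defer the suspend so a quick follow-up access does not pay a resume cost.
 * The device pointer and delay value are hypothetical.
 *
 *	(void)pm_device_runtime_put_async(dev, K_MSEC(100));
 *
 * Without CONFIG_PM_DEVICE_RUNTIME_ASYNC the call simply returns -ENOSYS.
 */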

__boot_func
int pm_device_runtime_auto_enable(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	if (!pm) {
		return 0;
	}

	if (!IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME_DEFAULT_ENABLE) &&
	    !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) {
		return 0;
	}

	return pm_device_runtime_enable(dev);
}
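
/*
 * Note: pm_device_runtime_auto_enable() is meant to run during device
 * initialization; it only enables runtime PM for devices that either build
 * with CONFIG_PM_DEVICE_RUNTIME_DEFAULT_ENABLE or carry the
 * PM_DEVICE_FLAG_RUNTIME_AUTO flag (typically set from the
 * zephyr,pm-device-runtime-auto devicetree property).
 */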

static int runtime_enable_sync(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	} else {
		ret = 0;
	}

	pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
	pm->base.usage = 0U;
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

int pm_device_runtime_enable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_enable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (pm_device_is_busy(dev)) {
		ret = -EBUSY;
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_enable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	/* lazy init of PM fields */
	if (pm->dev == NULL) {
		pm->dev = dev;
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		k_work_init_delayable(&pm->work, runtime_suspend_work);
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	}

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

	pm->base.usage = 0U;

	atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_enable, dev, ret);
	return ret;
}
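
/*
 * Illustrative usage (a sketch): a driver that manages its own power usually
 * enables runtime PM at the end of its init function so the device starts in
 * the suspended state. The init function name is hypothetical.
 *
 *	static int my_dev_init(const struct device *dev)
 *	{
 *		... configure hardware ...
 *
 *		return pm_device_runtime_enable(dev);
 *	}
 */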

static int runtime_disable_sync(const struct device *dev)
{
	struct pm_device_isr *pm = dev->pm_isr;
	int ret;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

int pm_device_runtime_disable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_disable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	if (!k_is_pre_kernel()) {
		if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
			((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
			pm->base.state = PM_DEVICE_STATE_ACTIVE;
			goto clear_bit;
		}

		/* wait until possible async suspend is completed */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_event_clear(&pm->event, EVENT_MASK);
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

	/* wake up the device if suspended */
	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	}
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
clear_bit:
#endif
	atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);

	return ret;
}
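
/*
 * Note: disabling runtime PM resumes a suspended device (and first waits for
 * any in-flight asynchronous suspend), so after pm_device_runtime_disable(dev)
 * returns 0 the device is active and stays powered regardless of later
 * get/put calls.
 */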

bool pm_device_runtime_is_enabled(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
}

int pm_device_runtime_usage(const struct device *dev)
{
	if (!pm_device_runtime_is_enabled(dev)) {
		return -ENOTSUP;
	}

	return dev->pm_base->usage;
}
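
/*
 * Illustrative usage (a sketch, hypothetical device pointer): the usage
 * counter can be inspected for debugging or assertions.
 *
 *	int usage = pm_device_runtime_usage(dev);
 *
 *	if (usage >= 0) {
 *		LOG_DBG("%s usage count: %d", dev->name, usage);
 *	} else {
 *		... runtime PM is not enabled for this device (-ENOTSUP) ...
 *	}
 */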

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ

static int pm_device_runtime_wq_init(void)
{
	const struct k_work_queue_config cfg = {.name = "PM DEVICE RUNTIME WQ"};

	k_work_queue_init(&pm_device_runtime_wq);

	k_work_queue_start(&pm_device_runtime_wq, pm_device_runtime_stack,
			   K_THREAD_STACK_SIZEOF(pm_device_runtime_stack),
			   CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_PRIO, &cfg);

	return 0;
}

SYS_INIT(pm_device_runtime_wq_init, POST_KERNEL,
	CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_INIT_PRIO);
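
/*
 * Configuration sketch (an assumed prj.conf fragment, values are examples
 * only): route asynchronous suspend work to this dedicated queue instead of
 * the system work queue.
 *
 *	CONFIG_PM_DEVICE_RUNTIME=y
 *	CONFIG_PM_DEVICE_RUNTIME_ASYNC=y
 *	CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ=y
 *	CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_STACK_SIZE=1024
 *	CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_PRIO=5
 */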

#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */