Lines matching full:pm (search hits from Zephyr's device runtime power management code; each hit is shown with its source line number and enclosing function)

8 #include <zephyr/pm/device.h>
9 #include <zephyr/pm/device_runtime.h>
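
The two pm headers matched above are what a driver pulls in to take part in device runtime PM. For context, here is a minimal sketch of the other prerequisite: the PM action callback that the runtime code below ends up invoking through pm->base.action_cb. The compatible, the names and the empty bodies are hypothetical, not taken from this file:

    /* Hedged sketch of a driver skeleton; "vnd,mydev" does not exist upstream. */
    #define DT_DRV_COMPAT vnd_mydev

    #include <errno.h>
    #include <zephyr/device.h>
    #include <zephyr/pm/device.h>
    #include <zephyr/pm/device_runtime.h>

    static int my_dev_pm_action(const struct device *dev, enum pm_device_action action)
    {
        switch (action) {
        case PM_DEVICE_ACTION_SUSPEND:
            /* Gate clocks, park the peripheral, save context. */
            return 0;
        case PM_DEVICE_ACTION_RESUME:
            /* Ungate clocks and restore context. */
            return 0;
        default:
            return -ENOTSUP;
        }
    }

    static int my_dev_init(const struct device *dev)
    {
        /* Runtime PM is enabled later; see pm_device_runtime_enable() below. */
        return 0;
    }

    /* Registers the callback as the device's action_cb and hands the PM
     * context to the device definition.
     */
    PM_DEVICE_DT_INST_DEFINE(0, my_dev_pm_action);

    DEVICE_DT_INST_DEFINE(0, my_dev_init, PM_DEVICE_DT_INST_GET(0),
                          NULL, NULL, POST_KERNEL,
                          CONFIG_KERNEL_INIT_PRIORITY_DEVICE, NULL);
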
50 struct pm_device *pm = dev->pm; in runtime_suspend() local
55 if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { in runtime_suspend()
62 ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER); in runtime_suspend()
68 if (pm->base.usage == 0U) { in runtime_suspend()
74 pm->base.usage--; in runtime_suspend()
75 if (pm->base.usage > 0U) { in runtime_suspend()
81 pm->base.state = PM_DEVICE_STATE_SUSPENDING; in runtime_suspend()
82 (void)k_work_schedule(&pm->work, delay); in runtime_suspend()
85 ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); in runtime_suspend()
87 pm->base.usage++; in runtime_suspend()
91 pm->base.state = PM_DEVICE_STATE_SUSPENDED; in runtime_suspend()
96 k_sem_give(&pm->lock); in runtime_suspend()
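
The usage counter handling in the lines above is what gives the API its reference-counting behaviour: only the 1 -> 0 transition triggers the SUSPEND action, everything else just adjusts the count. A hedged illustration, assuming dev points at a bound device with runtime PM enabled:

    static void refcount_demo(const struct device *dev)
    {
        (void)pm_device_runtime_get(dev); /* usage 0 -> 1: action_cb(RESUME) runs  */
        (void)pm_device_runtime_get(dev); /* usage 1 -> 2: count only              */
        (void)pm_device_runtime_put(dev); /* usage 2 -> 1: device stays active     */
        (void)pm_device_runtime_put(dev); /* usage 1 -> 0: action_cb(SUSPEND) runs */
    }
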
106 struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work); in runtime_suspend_work() local
108 ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); in runtime_suspend_work()
110 (void)k_sem_take(&pm->lock, K_FOREVER); in runtime_suspend_work()
112 pm->base.usage++; in runtime_suspend_work()
113 pm->base.state = PM_DEVICE_STATE_ACTIVE; in runtime_suspend_work()
115 pm->base.state = PM_DEVICE_STATE_SUSPENDED; in runtime_suspend_work()
117 k_event_set(&pm->event, BIT(pm->base.state)); in runtime_suspend_work()
118 k_sem_give(&pm->lock); in runtime_suspend_work()
125 atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) { in runtime_suspend_work()
126 (void)pm_device_runtime_put(PM_DOMAIN(&pm->base)); in runtime_suspend_work()
135 struct pm_device_isr *pm = dev->pm_isr; in get_sync_locked() local
136 uint32_t flags = pm->base.flags; in get_sync_locked()
138 if (pm->base.usage == 0) { in get_sync_locked()
140 const struct device *domain = PM_DOMAIN(&pm->base); in get_sync_locked()
152 ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); in get_sync_locked()
156 pm->base.state = PM_DEVICE_STATE_ACTIVE; in get_sync_locked()
161 pm->base.usage++; in get_sync_locked()
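
get_sync_locked() is the spinlock-based resume path used for devices whose PM context is struct pm_device_isr, i.e. devices declared ISR-safe so that get/put may be called from interrupt context. A hedged sketch of such a declaration and an ISR-side use; the node label, the callback and the PM_DEVICE_ISR_SAFE flag name are stated from memory of the public headers, not from this file:

    /* Hypothetical: the "fastdev" node label and callback are made up. */
    PM_DEVICE_DT_DEFINE(DT_NODELABEL(fastdev), fastdev_pm_action, PM_DEVICE_ISR_SAFE);

    static void fastdev_isr(const struct device *dev)
    {
        /* For an ISR-safe device these calls take the spinlock-protected
         * *_sync_locked() paths instead of the semaphore/work-queue ones.
         */
        if (pm_device_runtime_get(dev) == 0) {
            /* ... service the interrupt while the device is active ... */
            (void)pm_device_runtime_put(dev);
        }
    }
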
169 struct pm_device *pm = dev->pm; in pm_device_runtime_get() local
171 if (pm == NULL) { in pm_device_runtime_get()
175 SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev); in pm_device_runtime_get()
180 if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { in pm_device_runtime_get()
194 ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER); in pm_device_runtime_get()
200 if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) { in pm_device_runtime_get()
209 const struct device *domain = PM_DOMAIN(&pm->base); in pm_device_runtime_get()
217 if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) { in pm_device_runtime_get()
223 atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED); in pm_device_runtime_get()
226 pm->base.usage++; in pm_device_runtime_get()
233 if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) && in pm_device_runtime_get()
234 ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) { in pm_device_runtime_get()
235 pm->base.state = PM_DEVICE_STATE_ACTIVE; in pm_device_runtime_get()
244 while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) { in pm_device_runtime_get()
245 k_event_clear(&pm->event, EVENT_MASK); in pm_device_runtime_get()
246 k_sem_give(&pm->lock); in pm_device_runtime_get()
248 k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER); in pm_device_runtime_get()
250 (void)k_sem_take(&pm->lock, K_FOREVER); in pm_device_runtime_get()
254 if (pm->base.usage > 1U) { in pm_device_runtime_get()
258 ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME); in pm_device_runtime_get()
260 pm->base.usage--; in pm_device_runtime_get()
264 pm->base.state = PM_DEVICE_STATE_ACTIVE; in pm_device_runtime_get()
268 k_sem_give(&pm->lock); in pm_device_runtime_get()
272 SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret); in pm_device_runtime_get()
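
Seen from a driver, the whole resume path above collapses into a single call made before touching the hardware, balanced by a put once the transaction is done. A hedged usage sketch; the read function and its body are hypothetical:

    static int my_dev_read(const struct device *dev, uint8_t *buf, size_t len)
    {
        int ret;

        /* Resumes the device, or only bumps the usage count if it is already
         * active; may block while a pending suspend finishes.
         */
        ret = pm_device_runtime_get(dev);
        if (ret < 0) {
            return ret;
        }

        /* ... perform the transfer while the device is guaranteed active ... */

        /* Balance the get; the device suspends once the count reaches zero. */
        return pm_device_runtime_put(dev);
    }
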
281 struct pm_device_isr *pm = dev->pm_isr; in put_sync_locked() local
282 uint32_t flags = pm->base.flags; in put_sync_locked()
288 if (pm->base.usage == 0U) { in put_sync_locked()
292 pm->base.usage--; in put_sync_locked()
293 if (pm->base.usage == 0U) { in put_sync_locked()
294 ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND); in put_sync_locked()
298 pm->base.state = PM_DEVICE_STATE_SUSPENDED; in put_sync_locked()
301 const struct device *domain = PM_DOMAIN(&pm->base); in put_sync_locked()
324 SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev); in pm_device_runtime_put()
344 SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret); in pm_device_runtime_put()
357 SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay); in pm_device_runtime_put_async()
368 SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret); in pm_device_runtime_put_async()
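
pm_device_runtime_put_async() defers the SUSPEND action to the delayable work item set up in pm_device_runtime_enable() below, which makes it usable from contexts that must not block. A hedged sketch with a hypothetical transfer-complete callback and an arbitrary grace period:

    static void my_dev_xfer_done(const struct device *dev)
    {
        /* Drops the reference without blocking; the device moves to
         * SUSPENDING and the work item runs the SUSPEND action after 10 ms,
         * unless a new get() cancels it first.
         */
        (void)pm_device_runtime_put_async(dev, K_MSEC(10));
    }
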
376 struct pm_device_base *pm = dev->pm_base; in pm_device_runtime_auto_enable() local
379 if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) { in pm_device_runtime_auto_enable()
388 struct pm_device_isr *pm = dev->pm_isr; in runtime_enable_sync() local
389 k_spinlock_key_t k = k_spin_lock(&pm->lock); in runtime_enable_sync()
391 if (pm->base.state == PM_DEVICE_STATE_ACTIVE) { in runtime_enable_sync()
392 ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND); in runtime_enable_sync()
397 pm->base.state = PM_DEVICE_STATE_SUSPENDED; in runtime_enable_sync()
402 pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED); in runtime_enable_sync()
403 pm->base.usage = 0U; in runtime_enable_sync()
405 k_spin_unlock(&pm->lock, k); in runtime_enable_sync()
412 struct pm_device *pm = dev->pm; in pm_device_runtime_enable() local
414 SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_enable, dev); in pm_device_runtime_enable()
416 if (pm == NULL) { in pm_device_runtime_enable()
421 if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { in pm_device_runtime_enable()
436 (void)k_sem_take(&pm->lock, K_FOREVER); in pm_device_runtime_enable()
439 /* lazy init of PM fields */ in pm_device_runtime_enable()
440 if (pm->dev == NULL) { in pm_device_runtime_enable()
441 pm->dev = dev; in pm_device_runtime_enable()
442 k_work_init_delayable(&pm->work, runtime_suspend_work); in pm_device_runtime_enable()
445 if (pm->base.state == PM_DEVICE_STATE_ACTIVE) { in pm_device_runtime_enable()
446 ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); in pm_device_runtime_enable()
450 pm->base.state = PM_DEVICE_STATE_SUSPENDED; in pm_device_runtime_enable()
453 pm->base.usage = 0U; in pm_device_runtime_enable()
455 atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); in pm_device_runtime_enable()
459 k_sem_give(&pm->lock); in pm_device_runtime_enable()
463 SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_enable, dev, ret); in pm_device_runtime_enable()
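
A driver normally calls pm_device_runtime_enable() once at the end of its init function; as the lines above show, enabling suspends a still-active device and resets the usage count to zero. A hedged sketch with a hypothetical init function (alternatively, if I recall the bindings correctly, setting the zephyr,pm-device-runtime-auto devicetree property lets pm_device_runtime_auto_enable() above do this automatically after init):

    static int my_uart_init(const struct device *dev)
    {
        /* ... bring the hardware to a known, active state ... */

        /* Hand control to runtime PM: the device is suspended here and is
         * only resumed again by the first pm_device_runtime_get().
         */
        return pm_device_runtime_enable(dev);
    }
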
469 struct pm_device_isr *pm = dev->pm_isr; in runtime_disable_sync() local
471 k_spinlock_key_t k = k_spin_lock(&pm->lock); in runtime_disable_sync()
473 if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) { in runtime_disable_sync()
474 ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); in runtime_disable_sync()
479 pm->base.state = PM_DEVICE_STATE_ACTIVE; in runtime_disable_sync()
484 pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED); in runtime_disable_sync()
486 k_spin_unlock(&pm->lock, k); in runtime_disable_sync()
493 struct pm_device *pm = dev->pm; in pm_device_runtime_disable() local
495 SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev); in pm_device_runtime_disable()
497 if (pm == NULL) { in pm_device_runtime_disable()
502 if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { in pm_device_runtime_disable()
512 (void)k_sem_take(&pm->lock, K_FOREVER); in pm_device_runtime_disable()
516 if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) && in pm_device_runtime_disable()
517 ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) { in pm_device_runtime_disable()
518 pm->base.state = PM_DEVICE_STATE_ACTIVE; in pm_device_runtime_disable()
523 while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) { in pm_device_runtime_disable()
524 k_event_clear(&pm->event, EVENT_MASK); in pm_device_runtime_disable()
525 k_sem_give(&pm->lock); in pm_device_runtime_disable()
527 k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER); in pm_device_runtime_disable()
529 (void)k_sem_take(&pm->lock, K_FOREVER); in pm_device_runtime_disable()
534 if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) { in pm_device_runtime_disable()
535 ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); in pm_device_runtime_disable()
540 pm->base.state = PM_DEVICE_STATE_ACTIVE; in pm_device_runtime_disable()
544 atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); in pm_device_runtime_disable()
548 k_sem_give(&pm->lock); in pm_device_runtime_disable()
552 SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret); in pm_device_runtime_disable()
559 struct pm_device_base *pm = dev->pm_base; in pm_device_runtime_is_enabled() local
561 return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); in pm_device_runtime_is_enabled()
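
Code that needs to take manual control of a device's power state, for example before a system power-off sequence or in a diagnostic path, can use the last two functions together. A hedged sketch with a hypothetical helper:

    static int my_dev_take_manual_control(const struct device *dev)
    {
        if (!pm_device_runtime_is_enabled(dev)) {
            return 0; /* nothing to undo */
        }

        /* Waits for any in-flight async suspend, resumes the device if it is
         * suspended, and clears the RUNTIME_ENABLED flag.
         */
        return pm_device_runtime_disable(dev);
    }
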