/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <kernel.h>
#include <device.h>
#include <sys/__assert.h>
#include <pm/device_runtime.h>
#include <spinlock.h>

#define LOG_LEVEL CONFIG_PM_LOG_LEVEL /* From power module Kconfig */
#include <logging/log.h>
LOG_MODULE_DECLARE(power);

/* Device PM request type */
#define PM_DEVICE_SYNC BIT(0)
#define PM_DEVICE_ASYNC BIT(1)

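/*
 * Apply the state transition implied by the current usage count:
 * suspend the device when the count has dropped to zero, resume it
 * when the count is non-zero or runtime PM is being disabled. Must be
 * called with pm->lock held; wakes any threads waiting on the condvar
 * once the transition is done.
 */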
static void pm_device_runtime_state_set(struct pm_device *pm)
{
	const struct device *dev = pm->dev;
	int ret = 0;

	/* Clear transitioning flags */
	atomic_clear_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING);

	switch (dev->pm->state) {
	case PM_DEVICE_STATE_ACTIVE:
		if ((dev->pm->usage == 0) && dev->pm->enable) {
			ret = pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
		}
		break;
	case PM_DEVICE_STATE_SUSPENDED:
		if ((dev->pm->usage > 0) || !dev->pm->enable) {
			ret = pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
		}
		break;
	default:
		LOG_ERR("Invalid state!");
	}

	__ASSERT(ret == 0, "Set Power state error");

	/*
	 * k_condvar_broadcast() returns the number of woken threads on
	 * success. There is nothing useful we can do with that
	 * information here, so just ignore it.
	 */
	(void)k_condvar_broadcast(&dev->pm->condvar);
}

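/*
 * Delayed work handler used for asynchronous requests: take the PM
 * lock and apply whatever state transition is currently pending.
 */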
static void pm_work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);

	(void)k_mutex_lock(&pm->lock, K_FOREVER);
	pm_device_runtime_state_set(pm);
	(void)k_mutex_unlock(&pm->lock);
}

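/*
 * Common implementation behind pm_device_get()/pm_device_put() and
 * their asynchronous variants: adjust the usage count and, when it
 * crosses the 0/1 boundary, trigger the corresponding state change
 * either synchronously or through the work queue.
 */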
static int pm_device_request(const struct device *dev,
			     enum pm_device_state state, uint32_t pm_flags)
{
	int ret = 0;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_request, dev, state);

	__ASSERT((state == PM_DEVICE_STATE_ACTIVE) ||
		 (state == PM_DEVICE_STATE_SUSPENDED),
		 "Invalid device PM state requested");

	if (k_is_pre_kernel()) {
		if (state == PM_DEVICE_STATE_ACTIVE) {
			dev->pm->usage++;
		} else {
			dev->pm->usage--;
		}

		/* If we are being called before the kernel was initialized
		 * we can assume that the system took care of initializing
		 * devices properly. It means that all dependencies were
		 * satisfied and this call just incremented the reference
		 * count for this device.
		 */

		/* Unfortunately this is not what is happening yet. There are
		 * cases, for example, like the pinmux being initialized before
		 * the gpio. Let's just power on/off the device.
		 */
		if (dev->pm->usage == 1) {
			(void)pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
		} else if (dev->pm->usage == 0) {
			(void)pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
		}
		goto out;
	}

	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);

	if (!dev->pm->enable) {
		ret = -ENOTSUP;
		goto out_unlock;
	}

	if (state == PM_DEVICE_STATE_ACTIVE) {
		dev->pm->usage++;
		if (dev->pm->usage > 1) {
			goto out_unlock;
		}
	} else {
		/* Check if it is already 0 to avoid an underflow */
		if (dev->pm->usage == 0) {
			goto out_unlock;
		}

		dev->pm->usage--;
		if (dev->pm->usage > 0) {
			goto out_unlock;
		}
	}

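	/*
	 * The usage count just crossed the 0/1 boundary, so a state
	 * transition is actually required.
	 */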
	/* Return in case of Async request */
	if (pm_flags & PM_DEVICE_ASYNC) {
		atomic_set_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING);
		(void)k_work_schedule(&dev->pm->work, K_NO_WAIT);
		goto out_unlock;
	}

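	/*
	 * Synchronous request: wait for any transition already in
	 * flight (scheduled work or a pending async request) to finish
	 * before applying this one.
	 */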
	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
	       atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
				     K_FOREVER);
		if (ret != 0) {
			break;
		}
	}

	pm_device_runtime_state_set(dev->pm);

	/*
	 * dev->pm->state was set in pm_device_runtime_state_set(). The
	 * device may not have reached the requested state, e.g. because
	 * the transition failed or another thread changed it, so check
	 * it here before returning.
	 */
	ret = state == dev->pm->state ? 0 : -EIO;

out_unlock:
	(void)k_mutex_unlock(&dev->pm->lock);
out:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_request, dev, ret);
	return ret;
}

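/* Synchronously claim the device: resume it if this is the first user. */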
int pm_device_get(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_ACTIVE, 0);
}

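/* Claim the device; any needed resume happens from the work queue. */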
int pm_device_get_async(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_ACTIVE, PM_DEVICE_ASYNC);
}

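/* Release the device: suspend it synchronously once no user is left. */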
int pm_device_put(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_SUSPENDED, 0);
}

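/* Release the device; any needed suspend happens from the work queue. */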
int pm_device_put_async(const struct device *dev)
{
	return pm_device_request(dev, PM_DEVICE_STATE_SUSPENDED, PM_DEVICE_ASYNC);
}

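/*
 * Enable device runtime PM. Devices without a PM control hook are left
 * with runtime PM disabled. Before the kernel is up this only sets up
 * the PM bookkeeping (the device starts out suspended); later on it
 * may also schedule the work item so the device state is re-evaluated
 * against the current usage count.
 */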
void pm_device_enable(const struct device *dev)
{
	SYS_PORT_TRACING_FUNC_ENTER(pm, device_enable, dev);
	if (k_is_pre_kernel()) {
		dev->pm->dev = dev;
		if (dev->pm_control != NULL) {
			dev->pm->enable = true;
			dev->pm->state = PM_DEVICE_STATE_SUSPENDED;
			k_work_init_delayable(&dev->pm->work, pm_work_handler);
		}
		goto out;
	}

	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
	if (dev->pm_control == NULL) {
		dev->pm->enable = false;
		goto out_unlock;
	}

	dev->pm->enable = true;

	/* During driver initialization the device can set the PM state
	 * itself. For the later cases we need to check the usage count
	 * and set the device PM state accordingly.
	 */
	if (!dev->pm->dev) {
		dev->pm->dev = dev;
		dev->pm->state = PM_DEVICE_STATE_SUSPENDED;
		k_work_init_delayable(&dev->pm->work, pm_work_handler);
	} else {
		k_work_schedule(&dev->pm->work, K_NO_WAIT);
	}

out_unlock:
	(void)k_mutex_unlock(&dev->pm->lock);
out:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_enable, dev);
}

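/*
 * Disable device runtime PM. The device is resumed first (via the work
 * item) so it is left active once runtime PM no longer manages it.
 */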
void pm_device_disable(const struct device *dev)
{
	SYS_PORT_TRACING_FUNC_ENTER(pm, device_disable, dev);
	__ASSERT(k_is_pre_kernel() == false, "Device should not be disabled "
		 "before kernel is initialized");

	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
	if (dev->pm->enable) {
		dev->pm->enable = false;
		/* Bring up the device before disabling the Idle PM */
		k_work_schedule(&dev->pm->work, K_NO_WAIT);
	}
	(void)k_mutex_unlock(&dev->pm->lock);
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_disable, dev);
}

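/*
 * Block until any pending or in-progress state transition for the
 * device has completed, or until the timeout expires.
 */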
int pm_device_wait(const struct device *dev, k_timeout_t timeout)
{
	int ret = 0;

	k_mutex_lock(&dev->pm->lock, K_FOREVER);
	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
	       atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
				     timeout);
		if (ret != 0) {
			break;
		}
	}
	k_mutex_unlock(&dev->pm->lock);

	return ret;
}