/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>

#include <zephyr/drivers/pinctrl.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/onoff.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/util.h>

#include <hal/nrf_gpio.h>
#include <nrf/gpd.h>
#include <nrfs_gdpwr.h>
#include <nrfs_backend_ipc_service.h>

LOG_MODULE_REGISTER(gpd, CONFIG_SOC_LOG_LEVEL);

/* Enforce alignment between devicetree and nrfs global power domain IDs */
BUILD_ASSERT(GDPWR_GD_FAST_ACTIVE_0 == NRF_GPD_FAST_ACTIVE0);
BUILD_ASSERT(GDPWR_GD_FAST_ACTIVE_1 == NRF_GPD_FAST_ACTIVE1);
BUILD_ASSERT(GDPWR_GD_FAST_MAIN == NRF_GPD_FAST_MAIN);
BUILD_ASSERT(GDPWR_GD_SLOW_ACTIVE == NRF_GPD_SLOW_ACTIVE);
BUILD_ASSERT(GDPWR_GD_SLOW_MAIN == NRF_GPD_SLOW_MAIN);

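/*
 * Per-domain state: the embedded onoff manager tracks reference counts for
 * the domain, while the mutex, semaphore and result field let
 * nrf_gpd_request() block in thread context until the asynchronous nrfs
 * GDPWR transition completes.
 */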
struct gpd_onoff_manager {
	struct onoff_manager mgr;
	onoff_notify_fn notify;
	uint8_t id;
	struct k_mutex lock;
	struct k_sem sem;
	int res;
};

static void start(struct onoff_manager *mgr, onoff_notify_fn notify);
static void stop(struct onoff_manager *mgr, onoff_notify_fn notify);

#define GPD_READY_TIMEOUT_MS 1000

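/*
 * Service state flags: GPD_SERVICE_READY is set once nrf_gpd_post_init()
 * completes, GPD_SERVICE_ERROR latches an initialization failure, and the
 * REQ_OK/REQ_ERR bits carry per-request results polled by nrf_gpd_sync().
 */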
#define GPD_SERVICE_READY BIT(0)
#define GPD_SERVICE_ERROR BIT(1)
#define GPD_SERVICE_REQ_OK BIT(2)
#define GPD_SERVICE_REQ_ERR BIT(3)
static atomic_t gpd_service_status = ATOMIC_INIT(0);

static struct gpd_onoff_manager fast_active0 = {
	.id = NRF_GPD_FAST_ACTIVE0,
	.lock = Z_MUTEX_INITIALIZER(fast_active0.lock),
	.sem = Z_SEM_INITIALIZER(fast_active0.sem, 0, 1),
};
static struct gpd_onoff_manager fast_active1 = {
	.id = NRF_GPD_FAST_ACTIVE1,
	.lock = Z_MUTEX_INITIALIZER(fast_active1.lock),
	.sem = Z_SEM_INITIALIZER(fast_active1.sem, 0, 1),
};
static struct gpd_onoff_manager fast_main = {
	.id = NRF_GPD_FAST_MAIN,
	.lock = Z_MUTEX_INITIALIZER(fast_main.lock),
	.sem = Z_SEM_INITIALIZER(fast_main.sem, 0, 1),
};
static struct gpd_onoff_manager slow_active = {
	.id = NRF_GPD_SLOW_ACTIVE,
	.lock = Z_MUTEX_INITIALIZER(slow_active.lock),
	.sem = Z_SEM_INITIALIZER(slow_active.sem, 0, 1),
};
static struct gpd_onoff_manager slow_main = {
	.id = NRF_GPD_SLOW_MAIN,
	.lock = Z_MUTEX_INITIALIZER(slow_main.lock),
	.sem = Z_SEM_INITIALIZER(slow_main.sem, 0, 1),
};

static const struct onoff_transitions transitions =
	ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL);

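/* Map a global power domain ID to its onoff manager instance. */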
static struct gpd_onoff_manager *get_mgr(uint8_t id)
{
	switch (id) {
	case NRF_GPD_FAST_ACTIVE0:
		return &fast_active0;
	case NRF_GPD_FAST_ACTIVE1:
		return &fast_active1;
	case NRF_GPD_FAST_MAIN:
		return &fast_main;
	case NRF_GPD_SLOW_ACTIVE:
		return &slow_active;
	case NRF_GPD_SLOW_MAIN:
		return &slow_main;
	default:
		return NULL;
	}
}

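/*
 * Completion callback used by nrf_gpd_request() in thread context: record
 * the transition result and wake the waiting requester.
 */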
static void request_cb(struct onoff_manager *mgr_, struct onoff_client *cli, uint32_t state,
		       int res)
{
	ARG_UNUSED(cli);
	ARG_UNUSED(state);

	struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr_, struct gpd_onoff_manager, mgr);

	gpd_mgr->res = res;
	k_sem_give(&gpd_mgr->sem);
}

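/*
 * Submit the domain's current reference state (set if referenced, clear
 * otherwise) to the GDPWR service. This runs before GPD_SERVICE_READY is
 * set, so completions arrive via the REQ_OK/REQ_ERR status bits, which are
 * busy-waited on here rather than via onoff notifications.
 */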
static int nrf_gpd_sync(struct gpd_onoff_manager *gpd_mgr)
{
	int64_t start;
	nrfs_err_t err;
	k_spinlock_key_t key;
	gdpwr_request_type_t request;

	key = k_spin_lock(&gpd_mgr->mgr.lock);

	if (gpd_mgr->mgr.refs == 0) {
		request = GDPWR_POWER_REQUEST_CLEAR;
	} else {
		request = GDPWR_POWER_REQUEST_SET;
	}

	k_spin_unlock(&gpd_mgr->mgr.lock, key);

	atomic_clear_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR);
	atomic_clear_bit(&gpd_service_status, GPD_SERVICE_REQ_OK);

	err = nrfs_gdpwr_power_request(gpd_mgr->id, request, gpd_mgr);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	start = k_uptime_get();
	while (k_uptime_get() - start < GPD_READY_TIMEOUT_MS) {
		if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR)) {
			return -EIO;
		}

		if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_REQ_OK)) {
			return 0;
		}
	}

	LOG_ERR("nRFs GDPWR request timed out");

	return -ETIMEDOUT;
}

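/*
 * nrfs GDPWR event handler: once the service is marked ready, a completion
 * finishes the pending onoff transition for the domain passed as context;
 * before that, results are recorded in the status bits polled by
 * nrf_gpd_sync().
 */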
static void evt_handler(nrfs_gdpwr_evt_t const *p_evt, void *context)
{
	if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) {
		struct gpd_onoff_manager *gpd_mgr = context;

		switch (p_evt->type) {
		case NRFS_GDPWR_REQ_APPLIED:
			gpd_mgr->notify(&gpd_mgr->mgr, 0);
			break;
		default:
			LOG_ERR("nRFs GDPWR request not applied");
			gpd_mgr->notify(&gpd_mgr->mgr, -EIO);
			break;
		}
	} else {
		switch (p_evt->type) {
		case NRFS_GDPWR_REQ_APPLIED:
			atomic_set_bit(&gpd_service_status, GPD_SERVICE_REQ_OK);
			break;
		default:
			LOG_ERR("nRFs GDPWR request not applied");
			atomic_set_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR);
			break;
		}
	}
}

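/*
 * onoff start transition: before the service is ready, succeed immediately
 * and let nrf_gpd_post_init() sync the accumulated reference counts later;
 * otherwise submit a SET request, completed asynchronously by evt_handler().
 */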
static void start(struct onoff_manager *mgr, onoff_notify_fn notify)
{
	struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr, struct gpd_onoff_manager, mgr);

	gpd_mgr->notify = notify;

	if (!atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) {
		notify(mgr, 0);
	} else {
		nrfs_err_t err;

		err = nrfs_gdpwr_power_request(gpd_mgr->id, GDPWR_POWER_REQUEST_SET, gpd_mgr);
		if (err != NRFS_SUCCESS) {
			LOG_ERR("nRFs GDPWR request failed (%d)", err);
			notify(mgr, -EIO);
		}
	}
}

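/*
 * onoff stop transition: the mirror image of start(), submitting a CLEAR
 * request once the service is ready.
 */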
static void stop(struct onoff_manager *mgr, onoff_notify_fn notify)
{
	struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr, struct gpd_onoff_manager, mgr);

	gpd_mgr->notify = notify;

	if (!atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) {
		notify(mgr, 0);
	} else {
		nrfs_err_t err;

		err = nrfs_gdpwr_power_request(gpd_mgr->id, GDPWR_POWER_REQUEST_CLEAR, gpd_mgr);
		if (err != NRFS_SUCCESS) {
			LOG_ERR("nRFs GDPWR request failed (%d)", err);
			notify(mgr, -EIO);
		}
	}
}

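/*
 * Take a reference on a global power domain, e.g. from a device driver's
 * power management hook. Pre-kernel, the result is spin-waited; in thread
 * context, the caller blocks on the semaphore signalled by request_cb().
 */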
int nrf_gpd_request(uint8_t id)
{
	int ret;
	struct onoff_client client;
	struct gpd_onoff_manager *gpd_mgr;

	gpd_mgr = get_mgr(id);
	if (gpd_mgr == NULL) {
		return -EINVAL;
	}

	if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_ERROR)) {
		LOG_ERR("GPD service did not initialize properly");
		return -EIO;
	}

	if (k_is_pre_kernel()) {
		sys_notify_init_spinwait(&client.notify);

		ret = onoff_request(&gpd_mgr->mgr, &client);
		if (ret < 0) {
			return ret;
		}

		while (sys_notify_fetch_result(&client.notify, &ret) == -EAGAIN) {
		}
	} else {
		sys_notify_init_callback(&client.notify, request_cb);
		k_mutex_lock(&gpd_mgr->lock, K_FOREVER);

		ret = onoff_request(&gpd_mgr->mgr, &client);
		if (ret >= 0) {
			(void)k_sem_take(&gpd_mgr->sem, K_FOREVER);
			ret = gpd_mgr->res;
		}

		k_mutex_unlock(&gpd_mgr->lock);
	}

	return ret;
}

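/* Drop a reference previously taken with nrf_gpd_request(). */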
int nrf_gpd_release(uint8_t id)
{
	struct gpd_onoff_manager *gpd_mgr;

	gpd_mgr = get_mgr(id);
	if (gpd_mgr == NULL) {
		return -EINVAL;
	}

	if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_ERROR)) {
		LOG_ERR("GPD service did not initialize properly");
		return -EIO;
	}

	return onoff_release(&gpd_mgr->mgr);
}

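/*
 * Enable or disable pad retention for every connected pin in the device's
 * default pinctrl state, so that pad state can be preserved while the
 * domain supplying the pins is powered down.
 */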
int nrf_gpd_retain_pins_set(const struct pinctrl_dev_config *pcfg, bool retain)
{
	const struct pinctrl_state *state;
	int ret;

	ret = pinctrl_lookup_state(pcfg, PINCTRL_STATE_DEFAULT, &state);
	if (ret < 0) {
		return ret;
	}

	for (uint8_t i = 0U; i < state->pin_cnt; i++) {
		uint32_t pin = NRF_GET_PIN(state->pins[i]);

		if (pin == NRF_PIN_DISCONNECTED) {
			continue;
		}

		if (retain) {
			nrf_gpio_pin_retain_enable(pin);
		} else {
			nrf_gpio_pin_retain_disable(pin);
		}
	}

	return 0;
}

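/* Initialize the onoff managers for all global power domains (PRE_KERNEL_1). */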
static int nrf_gpd_pre_init(void)
{
	int ret;

	ret = onoff_manager_init(&fast_active0.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&fast_active1.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&fast_main.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&slow_active.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&slow_main.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

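/*
 * Connect to the nrfs backend, initialize the GDPWR service, and sync the
 * requests accumulated before the service came up; mark the service ready
 * on success or latch the error flag on failure.
 */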
static int nrf_gpd_post_init(void)
{
	nrfs_err_t err;
	int ret;

	err = nrfs_backend_wait_for_connection(K_FOREVER);
	if (err != NRFS_SUCCESS) {
		ret = -EIO;
		goto err;
	}

	err = nrfs_gdpwr_init(evt_handler);
	if (err != NRFS_SUCCESS) {
		ret = -EIO;
		goto err;
	}

	/* Submit a request for each domain now so the service state matches
	 * the reference counts collected while it was unavailable.
	 */
	ret = nrf_gpd_sync(&fast_active0);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&fast_active1);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&fast_main);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&slow_active);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&slow_main);
	if (ret < 0) {
		goto err;
	}

	atomic_set_bit(&gpd_service_status, GPD_SERVICE_READY);

	return 0;

err:
	atomic_set_bit(&gpd_service_status, GPD_SERVICE_ERROR);

	return ret;
}

SYS_INIT(nrf_gpd_pre_init, PRE_KERNEL_1, 0);
SYS_INIT(nrf_gpd_post_init, APPLICATION, CONFIG_APPLICATION_INIT_PRIORITY);