/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/onoff.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/util.h>

#include <hal/nrf_gpio.h>
#include <nrf/gpd.h>
#include <nrfs_gdpwr.h>
#include <nrfs_backend_ipc_service.h>

LOG_MODULE_REGISTER(gpd, CONFIG_SOC_LOG_LEVEL);

/* enforce alignment between the DT and nrfs power domain identifiers */
BUILD_ASSERT(GDPWR_GD_FAST_ACTIVE_0 == NRF_GPD_FAST_ACTIVE0);
BUILD_ASSERT(GDPWR_GD_FAST_ACTIVE_1 == NRF_GPD_FAST_ACTIVE1);
BUILD_ASSERT(GDPWR_GD_FAST_MAIN == NRF_GPD_FAST_MAIN);
BUILD_ASSERT(GDPWR_GD_SLOW_ACTIVE == NRF_GPD_SLOW_ACTIVE);
BUILD_ASSERT(GDPWR_GD_SLOW_MAIN == NRF_GPD_SLOW_MAIN);

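/*
 * Per-domain on-off manager. The embedded onoff_manager provides the
 * reference counting; the mutex, semaphore and result field serialize and
 * resolve blocking requests made through nrf_gpd_request().
 */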
struct gpd_onoff_manager {
	struct onoff_manager mgr;
	onoff_notify_fn notify;
	uint8_t id;
	struct k_mutex lock;
	struct k_sem sem;
	int res;
};

static void start(struct onoff_manager *mgr, onoff_notify_fn notify);
static void stop(struct onoff_manager *mgr, onoff_notify_fn notify);

#define GPD_READY_TIMEOUT_MS 1000

#define GPD_SERVICE_READY   BIT(0)
#define GPD_SERVICE_ERROR   BIT(1)
#define GPD_SERVICE_REQ_OK  BIT(2)
#define GPD_SERVICE_REQ_ERR BIT(3)
static atomic_t gpd_service_status = ATOMIC_INIT(0);

static struct gpd_onoff_manager fast_active0 = {
	.id = NRF_GPD_FAST_ACTIVE0,
	.lock = Z_MUTEX_INITIALIZER(fast_active0.lock),
	.sem = Z_SEM_INITIALIZER(fast_active0.sem, 0, 1),
};
static struct gpd_onoff_manager fast_active1 = {
	.id = NRF_GPD_FAST_ACTIVE1,
	.lock = Z_MUTEX_INITIALIZER(fast_active1.lock),
	.sem = Z_SEM_INITIALIZER(fast_active1.sem, 0, 1),
};
static struct gpd_onoff_manager fast_main = {
	.id = NRF_GPD_FAST_MAIN,
	.lock = Z_MUTEX_INITIALIZER(fast_main.lock),
	.sem = Z_SEM_INITIALIZER(fast_main.sem, 0, 1),
};
static struct gpd_onoff_manager slow_active = {
	.id = NRF_GPD_SLOW_ACTIVE,
	.lock = Z_MUTEX_INITIALIZER(slow_active.lock),
	.sem = Z_SEM_INITIALIZER(slow_active.sem, 0, 1),
};
static struct gpd_onoff_manager slow_main = {
	.id = NRF_GPD_SLOW_MAIN,
	.lock = Z_MUTEX_INITIALIZER(slow_main.lock),
	.sem = Z_SEM_INITIALIZER(slow_main.sem, 0, 1),
};

static const struct onoff_transitions transitions =
	ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL);

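/* Map a NRF_GPD_* domain identifier to its on-off manager instance. */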
static struct gpd_onoff_manager *get_mgr(uint8_t id)
{
	switch (id) {
	case NRF_GPD_FAST_ACTIVE0:
		return &fast_active0;
	case NRF_GPD_FAST_ACTIVE1:
		return &fast_active1;
	case NRF_GPD_FAST_MAIN:
		return &fast_main;
	case NRF_GPD_SLOW_ACTIVE:
		return &slow_active;
	case NRF_GPD_SLOW_MAIN:
		return &slow_main;
	default:
		return NULL;
	}
}

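/*
 * Completion callback for blocking nrf_gpd_request() calls: record the
 * transition result and wake the waiting thread.
 */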
static void request_cb(struct onoff_manager *mgr_, struct onoff_client *cli, uint32_t state,
		       int res)
{
	ARG_UNUSED(cli);
	ARG_UNUSED(state);

	struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr_, struct gpd_onoff_manager, mgr);

	gpd_mgr->res = res;
	k_sem_give(&gpd_mgr->sem);
}

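/*
 * Submit the domain state implied by the current reference count to the
 * GDPWR service, then poll (yielding) until the request is confirmed,
 * rejected, or GPD_READY_TIMEOUT_MS elapses.
 */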
static int nrf_gpd_sync(struct gpd_onoff_manager *gpd_mgr)
{
	int64_t start;
	nrfs_err_t err;
	k_spinlock_key_t key;
	gdpwr_request_type_t request;

	key = k_spin_lock(&gpd_mgr->mgr.lock);

	if (gpd_mgr->mgr.refs == 0) {
		request = GDPWR_POWER_REQUEST_CLEAR;
	} else {
		request = GDPWR_POWER_REQUEST_SET;
	}

	k_spin_unlock(&gpd_mgr->mgr.lock, key);

	atomic_clear_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR);
	atomic_clear_bit(&gpd_service_status, GPD_SERVICE_REQ_OK);

	err = nrfs_gdpwr_power_request(gpd_mgr->id, request, gpd_mgr);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	start = k_uptime_get();
	while (k_uptime_get() - start < GPD_READY_TIMEOUT_MS) {
		if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR)) {
			return -EIO;
		}

		if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_REQ_OK)) {
			return 0;
		}

		k_yield();
	}

	LOG_ERR("nRFs GDPWR request timed out");

	return -ETIMEDOUT;
}

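/*
 * GDPWR event handler. Until the service is marked ready, results are
 * reported through the gpd_service_status bits polled by nrf_gpd_sync();
 * afterwards they are forwarded to the on-off manager notify callback.
 */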
static void evt_handler(nrfs_gdpwr_evt_t const *p_evt, void *context)
{
	if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) {
		struct gpd_onoff_manager *gpd_mgr = context;

		switch (p_evt->type) {
		case NRFS_GDPWR_REQ_APPLIED:
			gpd_mgr->notify(&gpd_mgr->mgr, 0);
			break;
		default:
			LOG_ERR("nRFs GDPWR request not applied");
			gpd_mgr->notify(&gpd_mgr->mgr, -EIO);
			break;
		}
	} else {
		switch (p_evt->type) {
		case NRFS_GDPWR_REQ_APPLIED:
			atomic_set_bit(&gpd_service_status, GPD_SERVICE_REQ_OK);
			break;
		default:
			LOG_ERR("nRFs GDPWR request not applied");
			atomic_set_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR);
			break;
		}
	}
}

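/* On-off manager "start" transition: request the domain to be powered. */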
static void start(struct onoff_manager *mgr, onoff_notify_fn notify)
{
	struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr, struct gpd_onoff_manager, mgr);

	gpd_mgr->notify = notify;

	if (!atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) {
		/* service not up yet: report success now, the accumulated
		 * reference counts are replayed by nrf_gpd_sync() at post-init
		 */
		notify(mgr, 0);
	} else {
		nrfs_err_t err;

		err = nrfs_gdpwr_power_request(gpd_mgr->id, GDPWR_POWER_REQUEST_SET, gpd_mgr);
		if (err != NRFS_SUCCESS) {
			LOG_ERR("nRFs GDPWR request failed (%d)", err);
			notify(mgr, -EIO);
		}
	}
}

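/* On-off manager "stop" transition: release the power request for the domain. */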
static void stop(struct onoff_manager *mgr, onoff_notify_fn notify)
{
	struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr, struct gpd_onoff_manager, mgr);

	gpd_mgr->notify = notify;

	if (!atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) {
		/* service not up yet: report success now, the accumulated
		 * reference counts are replayed by nrf_gpd_sync() at post-init
		 */
		notify(mgr, 0);
	} else {
		nrfs_err_t err;

		err = nrfs_gdpwr_power_request(gpd_mgr->id, GDPWR_POWER_REQUEST_CLEAR, gpd_mgr);
		if (err != NRFS_SUCCESS) {
			LOG_ERR("nRFs GDPWR request failed (%d)", err);
			notify(mgr, -EIO);
		}
	}
}

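/*
 * Request a global power domain. Pre-kernel, spin-wait on the notification;
 * otherwise block on the manager's semaphore until the transition completes.
 */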
int nrf_gpd_request(uint8_t id)
{
	int ret;
	struct onoff_client client;
	struct gpd_onoff_manager *gpd_mgr;

	gpd_mgr = get_mgr(id);
	if (gpd_mgr == NULL) {
		return -EINVAL;
	}

	if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_ERROR)) {
		LOG_ERR("GPD service did not initialize properly");
		return -EIO;
	}

	if (k_is_pre_kernel()) {
		sys_notify_init_spinwait(&client.notify);

		ret = onoff_request(&gpd_mgr->mgr, &client);
		if (ret < 0) {
			return ret;
		}

		while (sys_notify_fetch_result(&client.notify, &ret) == -EAGAIN) {
		}
	} else {
		sys_notify_init_callback(&client.notify, request_cb);
		k_mutex_lock(&gpd_mgr->lock, K_FOREVER);

		ret = onoff_request(&gpd_mgr->mgr, &client);
		if (ret >= 0) {
			(void)k_sem_take(&gpd_mgr->sem, K_FOREVER);
			ret = gpd_mgr->res;
		}

		k_mutex_unlock(&gpd_mgr->lock);
	}

	return ret;
}

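/* Release a previously requested global power domain. */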
int nrf_gpd_release(uint8_t id)
{
	struct gpd_onoff_manager *gpd_mgr;

	gpd_mgr = get_mgr(id);
	if (gpd_mgr == NULL) {
		return -EINVAL;
	}

	if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_ERROR)) {
		LOG_ERR("GPD service did not initialize properly");
		return -EIO;
	}

	return onoff_release(&gpd_mgr->mgr);
}

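/*
 * Enable or disable pad retention for every connected pin in a device's
 * default pinctrl state, so that pin levels are held while the domain
 * powering the pins is switched off.
 */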
int nrf_gpd_retain_pins_set(const struct pinctrl_dev_config *pcfg, bool retain)
{
	const struct pinctrl_state *state;
	int ret;

	ret = pinctrl_lookup_state(pcfg, PINCTRL_STATE_DEFAULT, &state);
	if (ret < 0) {
		return ret;
	}

	for (uint8_t i = 0U; i < state->pin_cnt; i++) {
		uint32_t pin = NRF_GET_PIN(state->pins[i]);

		if (pin == NRF_PIN_DISCONNECTED) {
			continue;
		}

		if (retain) {
			nrf_gpio_pin_retain_enable(pin);
		} else {
			nrf_gpio_pin_retain_disable(pin);
		}
	}

	return 0;
}

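/* Initialize the on-off managers for all global power domains (PRE_KERNEL_1). */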
static int nrf_gpd_pre_init(void)
{
	int ret;

	ret = onoff_manager_init(&fast_active0.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&fast_active1.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&fast_main.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&slow_active.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	ret = onoff_manager_init(&slow_main.mgr, &transitions);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

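/*
 * Once the nrfs backend is connected, initialize the GDPWR service, replay
 * the power requests accumulated while the service was unavailable, and mark
 * the service as ready (or as failed, which makes later requests fail fast).
 */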
static int nrf_gpd_post_init(void)
{
	nrfs_err_t err;
	int ret;

	err = nrfs_backend_wait_for_connection(K_FOREVER);
	if (err != NRFS_SUCCESS) {
		ret = -EIO;
		goto err;
	}

	err = nrfs_gdpwr_init(evt_handler);
	if (err != NRFS_SUCCESS) {
		ret = -EIO;
		goto err;
	}

	/* submit GD requests now to align collected statuses */
	ret = nrf_gpd_sync(&fast_active0);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&fast_active1);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&fast_main);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&slow_active);
	if (ret < 0) {
		goto err;
	}

	ret = nrf_gpd_sync(&slow_main);
	if (ret < 0) {
		goto err;
	}

	atomic_set_bit(&gpd_service_status, GPD_SERVICE_READY);

	return 0;

err:
	atomic_set_bit(&gpd_service_status, GPD_SERVICE_ERROR);

	return ret;
}

SYS_INIT(nrf_gpd_pre_init, PRE_KERNEL_1, 0);
SYS_INIT(nrf_gpd_post_init, APPLICATION, CONFIG_APPLICATION_INIT_PRIORITY);