/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nordic_nrf_hsfll_global

#include "clock_control_nrf2_common.h"
#include <zephyr/devicetree.h>
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#include <nrfs_gdfs.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(clock_control_nrf2, CONFIG_CLOCK_CONTROL_LOG_LEVEL);

#define GLOBAL_HSFLL_CLOCK_FREQUENCIES \
	DT_INST_PROP(0, supported_clock_frequencies)

#define GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(idx) \
	DT_INST_PROP_BY_IDX(0, supported_clock_frequencies, idx)

#define GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE \
	DT_INST_PROP_LEN(0, supported_clock_frequencies)

#define GLOBAL_HSFLL_FREQ_REQ_TIMEOUT \
	K_MSEC(CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL_TIMEOUT_MS)

#define GLOBAL_HSFLL_INIT_LOW_REQ \
	CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL_REQ_LOW_FREQ

BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE == 4);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(0) == 64000000);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(1) == 128000000);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(2) == 256000000);
BUILD_ASSERT(GLOBAL_HSFLL_CLOCK_FREQUENCIES_IDX(3) == 320000000);
BUILD_ASSERT(GDFS_FREQ_COUNT == 4);
BUILD_ASSERT(GDFS_FREQ_HIGH == 0);
BUILD_ASSERT(GDFS_FREQ_MEDHIGH == 1);
BUILD_ASSERT(GDFS_FREQ_MEDLOW == 2);
BUILD_ASSERT(GDFS_FREQ_LOW == 3);

struct global_hsfll_dev_config {
	uint32_t clock_frequencies[GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE];
};

struct global_hsfll_dev_data {
	STRUCT_CLOCK_CONFIG(global_hsfll, GLOBAL_HSFLL_CLOCK_FREQUENCIES_SIZE) clk_cfg;
	const struct device *dev;
	struct k_work evt_work;
	nrfs_gdfs_evt_type_t evt;
	struct k_work_delayable timeout_dwork;

#if GLOBAL_HSFLL_INIT_LOW_REQ
	struct k_sem evt_sem;
#endif /* GLOBAL_HSFLL_INIT_LOW_REQ */
};

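/*
 * The supported-clock-frequencies devicetree property is sorted in ascending
 * order (enforced by the BUILD_ASSERTs above), so the last array entry is the
 * highest frequency the global HSFLL supports.
 */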
static uint32_t global_hsfll_get_max_clock_frequency(const struct device *dev)
{
	const struct global_hsfll_dev_config *dev_config = dev->config;

	return dev_config->clock_frequencies[ARRAY_SIZE(dev_config->clock_frequencies) - 1];
}

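/*
 * Map a clock specification to the onoff manager of the lowest supported
 * frequency that still satisfies the request. A NULL spec selects the lowest
 * frequency (index 0), NRF_CLOCK_CONTROL_FREQUENCY_MAX selects the highest,
 * and requests specifying accuracy or precision are rejected.
 */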
static struct onoff_manager *global_hsfll_find_mgr(const struct device *dev,
						   const struct nrf_clock_spec *spec)
{
	struct global_hsfll_dev_data *dev_data = dev->data;
	const struct global_hsfll_dev_config *dev_config = dev->config;
	uint32_t frequency;

	if (!spec) {
		return &dev_data->clk_cfg.onoff[0].mgr;
	}

	if (spec->accuracy || spec->precision) {
		LOG_ERR("invalid specification of accuracy or precision");
		return NULL;
	}

	frequency = spec->frequency == NRF_CLOCK_CONTROL_FREQUENCY_MAX
		  ? global_hsfll_get_max_clock_frequency(dev)
		  : spec->frequency;

	for (uint8_t i = 0; i < ARRAY_SIZE(dev_config->clock_frequencies); i++) {
		if (dev_config->clock_frequencies[i] < frequency) {
			continue;
		}

		return &dev_data->clk_cfg.onoff[i].mgr;
	}

	LOG_ERR("invalid frequency");
	return NULL;
}

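/*
 * Clock control API wrappers: resolve the requested specification to the
 * matching onoff manager and forward the request, release, or
 * cancel-or-release operation to it.
 */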
static int api_request_global_hsfll(const struct device *dev,
				    const struct nrf_clock_spec *spec,
				    struct onoff_client *cli)
{
	struct onoff_manager *mgr = global_hsfll_find_mgr(dev, spec);

	if (mgr) {
		return onoff_request(mgr, cli);
	}

	return -EINVAL;
}

static int api_release_global_hsfll(const struct device *dev,
				    const struct nrf_clock_spec *spec)
{
	struct onoff_manager *mgr = global_hsfll_find_mgr(dev, spec);

	if (mgr) {
		return onoff_release(mgr);
	}

	return -EINVAL;
}

static int api_cancel_or_release_global_hsfll(const struct device *dev,
					      const struct nrf_clock_spec *spec,
					      struct onoff_client *cli)
{
	struct onoff_manager *mgr = global_hsfll_find_mgr(dev, spec);

	if (mgr) {
		return onoff_cancel_or_release(mgr, cli);
	}

	return -EINVAL;
}

static struct nrf_clock_control_driver_api driver_api = {
	.std_api = {
		.on = api_nosys_on_off,
		.off = api_nosys_on_off,
	},
	.request = api_request_global_hsfll,
	.release = api_release_global_hsfll,
	.cancel_or_release = api_cancel_or_release_global_hsfll,
};

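/*
 * Convert an index into clock_frequencies[] (ascending order) into the
 * corresponding nrfs GDFS setting. The GDFS enumeration runs in the opposite
 * direction (GDFS_FREQ_HIGH == 0 .. GDFS_FREQ_LOW == 3, see the BUILD_ASSERTs
 * above), so the index is simply mirrored.
 */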
static enum gdfs_frequency_setting global_hsfll_freq_idx_to_nrfs_freq(const struct device *dev,
								      uint8_t freq_idx)
{
	const struct global_hsfll_dev_config *dev_config = dev->config;

	return ARRAY_SIZE(dev_config->clock_frequencies) - 1 - freq_idx;
}

static const char *global_hsfll_gdfs_freq_to_str(enum gdfs_frequency_setting freq)
{
	switch (freq) {
	case GDFS_FREQ_HIGH:
		return "GDFS_FREQ_HIGH";
	case GDFS_FREQ_MEDHIGH:
		return "GDFS_FREQ_MEDHIGH";
	case GDFS_FREQ_MEDLOW:
		return "GDFS_FREQ_MEDLOW";
	case GDFS_FREQ_LOW:
		return "GDFS_FREQ_LOW";
	default:
		break;
	}

	return "UNKNOWN";
}

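/*
 * Work item run by the common clock configuration machinery whenever the
 * aggregated frequency requirement changes. It forwards the new target
 * frequency to the nrfs GDFS service and arms a timeout in case no response
 * arrives.
 */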
static void global_hsfll_work_handler(struct k_work *work)
{
	struct global_hsfll_dev_data *dev_data =
		CONTAINER_OF(work, struct global_hsfll_dev_data, clk_cfg.work);
	const struct device *dev = dev_data->dev;
	uint8_t freq_idx;
	enum gdfs_frequency_setting target_freq;
	nrfs_err_t err;

	freq_idx = clock_config_update_begin(work);
	target_freq = global_hsfll_freq_idx_to_nrfs_freq(dev, freq_idx);

	LOG_DBG("requesting %s", global_hsfll_gdfs_freq_to_str(target_freq));
	err = nrfs_gdfs_request_freq(target_freq, dev_data);
	if (err != NRFS_SUCCESS) {
		clock_config_update_end(&dev_data->clk_cfg, -EIO);
		return;
	}

	k_work_schedule(&dev_data->timeout_dwork, GLOBAL_HSFLL_FREQ_REQ_TIMEOUT);
}

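/*
 * Work item submitted from the nrfs GDFS event handler. It cancels the
 * pending timeout and completes the ongoing clock configuration update,
 * reporting -EIO unless the frequency change was confirmed.
 */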
static void global_hsfll_evt_handler(struct k_work *work)
{
	struct global_hsfll_dev_data *dev_data =
		CONTAINER_OF(work, struct global_hsfll_dev_data, evt_work);
	int rc;

	k_work_cancel_delayable(&dev_data->timeout_dwork);
	rc = dev_data->evt == NRFS_GDFS_EVT_FREQ_CONFIRMED ? 0 : -EIO;
	clock_config_update_end(&dev_data->clk_cfg, rc);
}

#if GLOBAL_HSFLL_INIT_LOW_REQ
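/*
 * Event handler used only while initializing: it records the event and wakes
 * the init routine, which blocks on evt_sem waiting for the response to the
 * initial low-frequency request.
 */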
static void global_hfsll_nrfs_gdfs_init_evt_handler(nrfs_gdfs_evt_t const *p_evt, void *context)
{
	struct global_hsfll_dev_data *dev_data = context;

	dev_data->evt = p_evt->type;
	k_sem_give(&dev_data->evt_sem);
}
#endif /* GLOBAL_HSFLL_INIT_LOW_REQ */

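/*
 * Runtime nrfs GDFS event handler: records the event type and defers
 * processing to evt_work. If a previously submitted evt_work has not run yet,
 * the new event is dropped.
 */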
static void global_hfsll_nrfs_gdfs_evt_handler(nrfs_gdfs_evt_t const *p_evt, void *context)
{
	struct global_hsfll_dev_data *dev_data = context;

	if (k_work_is_pending(&dev_data->evt_work)) {
		return;
	}

	dev_data->evt = p_evt->type;
	k_work_submit(&dev_data->evt_work);
}

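/*
 * Delayed work that fires if the GDFS service does not respond to a frequency
 * request within GLOBAL_HSFLL_FREQ_REQ_TIMEOUT; the pending clock
 * configuration update is then completed with -ETIMEDOUT.
 */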
static void global_hsfll_timeout_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct global_hsfll_dev_data *dev_data =
		CONTAINER_OF(dwork, struct global_hsfll_dev_data, timeout_dwork);

	clock_config_update_end(&dev_data->clk_cfg, -ETIMEDOUT);
}

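/*
 * Driver init: set up the work items, optionally make a single blocking
 * low-frequency request at boot (GLOBAL_HSFLL_INIT_LOW_REQ) using the
 * init-only event handler, then initialize the onoff managers and register
 * the runtime GDFS event handler.
 */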
static int global_hfsll_init(const struct device *dev)
{
	struct global_hsfll_dev_data *dev_data = dev->data;
	nrfs_err_t err;
	int rc;

	k_work_init_delayable(&dev_data->timeout_dwork, global_hsfll_timeout_handler);
	k_work_init(&dev_data->evt_work, global_hsfll_evt_handler);

#if GLOBAL_HSFLL_INIT_LOW_REQ
	k_sem_init(&dev_data->evt_sem, 0, 1);

	err = nrfs_gdfs_init(global_hfsll_nrfs_gdfs_init_evt_handler);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	LOG_DBG("initial request %s", global_hsfll_gdfs_freq_to_str(GDFS_FREQ_LOW));
	err = nrfs_gdfs_request_freq(GDFS_FREQ_LOW, dev_data);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	rc = k_sem_take(&dev_data->evt_sem, GLOBAL_HSFLL_FREQ_REQ_TIMEOUT);
	if (rc) {
		return -EIO;
	}

	if (dev_data->evt != NRFS_GDFS_EVT_FREQ_CONFIRMED) {
		return -EIO;
	}

	nrfs_gdfs_uninit();
#endif /* GLOBAL_HSFLL_INIT_LOW_REQ */

	rc = clock_config_init(&dev_data->clk_cfg,
			       ARRAY_SIZE(dev_data->clk_cfg.onoff),
			       global_hsfll_work_handler);
	if (rc < 0) {
		return rc;
	}

	err = nrfs_gdfs_init(global_hfsll_nrfs_gdfs_evt_handler);
	if (err != NRFS_SUCCESS) {
		return -EIO;
	}

	return 0;
}

static struct global_hsfll_dev_data driver_data = {
	.dev = DEVICE_DT_INST_GET(0),
};

static const struct global_hsfll_dev_config driver_config = {
	GLOBAL_HSFLL_CLOCK_FREQUENCIES
};

DEVICE_DT_INST_DEFINE(
	0,
	global_hfsll_init,
	NULL,
	&driver_data,
	&driver_config,
	POST_KERNEL,
	CONFIG_CLOCK_CONTROL_NRF2_GLOBAL_HSFLL_INIT_PRIORITY,
	&driver_api
);