/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */
5 
6 #include "clock_control_nrf2_common.h"
7 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
8 #include <hal/nrf_bicr.h>
9 
10 #include <zephyr/logging/log.h>
11 LOG_MODULE_REGISTER(clock_control_nrf2, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
12 
13 #define FLAG_UPDATE_IN_PROGRESS BIT(FLAGS_COMMON_BITS - 1)
14 #define FLAG_UPDATE_NEEDED      BIT(FLAGS_COMMON_BITS - 2)
15 
16 #define ONOFF_CNT_MAX (FLAGS_COMMON_BITS - 2)
17 
18 #define CONTAINER_OF_ITEM(ptr, idx, type, array) \
19 	(type *)((char *)ptr - \
20 		 (idx * sizeof(array[0])) - \
21 		 offsetof(type, array[0]))
22 
23 #define BICR (NRF_BICR_Type *)DT_REG_ADDR(DT_NODELABEL(bicr))
24 
25 /*
26  * Definition of `struct clock_config_generic`.
27  * Used to access `clock_config_*` structures in a common way.
28  */
29 STRUCT_CLOCK_CONFIG(generic, ONOFF_CNT_MAX);
30 
31 /* Structure used for synchronous clock request. */
32 struct sync_req {
33 	struct onoff_client cli;
34 	struct k_sem sem;
35 	int res;
36 };
37 
update_config(struct clock_config_generic * cfg)38 static void update_config(struct clock_config_generic *cfg)
39 {
40 	atomic_val_t prev_flags = atomic_or(&cfg->flags, FLAG_UPDATE_NEEDED);
41 
42 	/* If the update work is already scheduled (FLAG_UPDATE_NEEDED was
43 	 * set before the above OR operation) or is currently being executed,
44 	 * it is not to be submitted again. In the latter case, it will be
45 	 * submitted by clock_config_update_end().
46 	 */
47 	if (prev_flags & (FLAG_UPDATE_NEEDED | FLAG_UPDATE_IN_PROGRESS)) {
48 		return;
49 	}
50 
51 	k_work_submit(&cfg->work);
52 }
53 
/* onoff "start" hook: mark the given clock option as requested and
 * trigger a configuration update. The notification is deferred until
 * the update actually completes (see clock_config_update_end()).
 */
static void onoff_start_option(struct onoff_manager *mgr,
			       onoff_notify_fn notify)
{
	struct clock_onoff *option = CONTAINER_OF(mgr, struct clock_onoff, mgr);
	struct clock_config_generic *config =
		CONTAINER_OF_ITEM(option, option->idx,
				  struct clock_config_generic, onoff);

	/* Remember the callback; it is invoked from the update work once
	 * the new configuration has been applied.
	 */
	option->notify = notify;

	(void)atomic_or(&config->flags, BIT(option->idx));
	update_config(config);
}
68 
/* onoff "stop" hook: clear the given clock option's "active" bit and
 * trigger a configuration update. Stopping completes immediately.
 */
static void onoff_stop_option(struct onoff_manager *mgr,
			      onoff_notify_fn notify)
{
	struct clock_onoff *option = CONTAINER_OF(mgr, struct clock_onoff, mgr);
	struct clock_config_generic *config =
		CONTAINER_OF_ITEM(option, option->idx,
				  struct clock_config_generic, onoff);

	(void)atomic_and(&config->flags, ~BIT(option->idx));
	update_config(config);

	/* Unlike starting, stopping an option does not need to wait for
	 * the update to finish.
	 */
	notify(mgr, 0);
}
83 
/* Return the bit position (0..31) of the most significant bit set in
 * `value`, or 0 when `value` is 0.
 */
static inline uint8_t get_index_of_highest_bit(uint32_t value)
{
	if (value == 0U) {
		return 0U;
	}

	return (uint8_t)(31 - __builtin_clz(value));
}
88 
/* Read the LFOSC accuracy stored in BICR and convert it to ppm.
 *
 * @param accuracy Output: accuracy in ppm.
 *
 * @retval 0 on success.
 * @retval -EINVAL if BICR contains an unknown accuracy value.
 */
int lfosc_get_accuracy(uint16_t *accuracy)
{
	/* Map each BICR accuracy enumerator to its numeric ppm value. */
#define HANDLE_PPM(ppm)					\
	case NRF_BICR_LFOSC_ACCURACY_##ppm##PPM:	\
		*accuracy = ppm;			\
		break

	switch (nrf_bicr_lfosc_accuracy_get(BICR)) {
	HANDLE_PPM(500);
	HANDLE_PPM(250);
	HANDLE_PPM(150);
	HANDLE_PPM(100);
	HANDLE_PPM(75);
	HANDLE_PPM(50);
	HANDLE_PPM(30);
	HANDLE_PPM(20);
	default:
		return -EINVAL;
	}
#undef HANDLE_PPM

	return 0;
}
122 
/* Initialize the common part of a clock_config_* structure: one onoff
 * manager per clock option plus the work item that applies updates.
 *
 * @param clk_cfg             Pointer to a clock_config_* structure.
 * @param onoff_cnt           Number of clock options; must not exceed
 *                            ONOFF_CNT_MAX.
 * @param update_work_handler Work handler that applies a new configuration.
 *
 * @return 0 on success, or a negative error code propagated from
 *         onoff_manager_init().
 */
int clock_config_init(void *clk_cfg, uint8_t onoff_cnt, k_work_handler_t update_work_handler)
{
	static const struct onoff_transitions transitions = {
		.start = onoff_start_option,
		.stop  = onoff_stop_option
	};
	struct clock_config_generic *cfg = clk_cfg;
	int rc;

	__ASSERT_NO_MSG(onoff_cnt <= ONOFF_CNT_MAX);

	for (uint8_t idx = 0; idx < onoff_cnt; ++idx) {
		rc = onoff_manager_init(&cfg->onoff[idx].mgr, &transitions);
		if (rc < 0) {
			return rc;
		}

		/* Each manager stores its own index so that the containing
		 * configuration can be recovered via CONTAINER_OF_ITEM().
		 */
		cfg->onoff[idx].idx = idx;
	}

	cfg->onoff_cnt = onoff_cnt;

	k_work_init(&cfg->work, update_work_handler);

	return 0;
}
150 
/* Called from a clock_config_* work handler when it starts applying a
 * new configuration.
 *
 * Marks the update as in progress, clears the "update needed" flag, and
 * snapshots the flags so that clock_config_update_end() notifies exactly
 * the clients waiting on this update.
 *
 * @return Index of the highest-indexed active clock option, or 0 if
 *         none is active (see get_index_of_highest_bit()).
 */
uint8_t clock_config_update_begin(struct k_work *work)
{
	struct clock_config_generic *cfg =
		CONTAINER_OF(work, struct clock_config_generic, work);
	uint32_t active_options;

	/* Order matters here: IN_PROGRESS is set before NEEDED is cleared,
	 * so a concurrent update_config() call either sees NEEDED still set
	 * (and does nothing) or sees IN_PROGRESS, in which case the work is
	 * resubmitted from clock_config_update_end().
	 */
	(void)atomic_or(&cfg->flags, FLAG_UPDATE_IN_PROGRESS);
	cfg->flags_snapshot = atomic_and(&cfg->flags, ~FLAG_UPDATE_NEEDED);

	/* Only the per-option bits select the configuration; mask off the
	 * control flags in the top bits.
	 */
	active_options = cfg->flags_snapshot & BIT_MASK(ONOFF_CNT_MAX);
	return get_index_of_highest_bit(active_options);
}
163 
/* Called by a clock_config_* module when a configuration update has
 * finished (successfully or not).
 *
 * Notifies every client that was waiting on the update captured by the
 * last clock_config_update_begin() call and, if another update was
 * requested in the meantime, resubmits the work item.
 *
 * @param clk_cfg Pointer to a clock_config_* structure.
 * @param status  Result of the update (0 on success, negative on error);
 *                forwarded to the waiting clients' notify callbacks.
 */
void clock_config_update_end(void *clk_cfg, int status)
{
	struct clock_config_generic *cfg = clk_cfg;
	atomic_val_t prev_flags;

	prev_flags = atomic_and(&cfg->flags, ~FLAG_UPDATE_IN_PROGRESS);
	/* Ignore the call if no update was in progress (e.g. a spurious
	 * or repeated invocation).
	 */
	if (!(prev_flags & FLAG_UPDATE_IN_PROGRESS)) {
		return;
	}

	for (int i = 0; i < cfg->onoff_cnt; ++i) {
		if (cfg->flags_snapshot & BIT(i)) {
			onoff_notify_fn notify = cfg->onoff[i].notify;

			if (notify) {
				/* If an option was to be activated now
				 * (it is waiting for a notification) and
				 * the activation failed, this option's flag
				 * must be cleared (the option can no longer
				 * be considered active).
				 */
				if (status < 0) {
					(void)atomic_and(&cfg->flags, ~BIT(i));
				}

				/* Clear the callback before invoking it so
				 * the client may issue a new request right
				 * from the notification.
				 */
				cfg->onoff[i].notify = NULL;
				notify(&cfg->onoff[i].mgr, status);
			}
		}
	}

	/* A new update was requested while this one was running - run the
	 * work item again.
	 */
	if (prev_flags & FLAG_UPDATE_NEEDED) {
		k_work_submit(&cfg->work);
	}
}
199 
/* Placeholder for the on/off entries of clock_control driver APIs that
 * do not support plain on/off requests; unconditionally reports the
 * operation as unsupported.
 */
int api_nosys_on_off(const struct device *dev, clock_control_subsys_t sys)
{
	ARG_UNUSED(sys);
	ARG_UNUSED(dev);

	return -ENOSYS;
}
207 
/* onoff completion callback used by synchronous requests: records the
 * result and wakes the thread blocked in nrf_clock_control_request_sync().
 */
static void sync_cb(struct onoff_manager *mgr, struct onoff_client *cli, uint32_t state, int res)
{
	struct sync_req *request = CONTAINER_OF(cli, struct sync_req, cli);

	request->res = res;
	k_sem_give(&request->sem);
}
215 
/* Request a clock according to the given specification and block until
 * the request completes or the timeout expires.
 *
 * @param dev     Clock controller device.
 * @param spec    Requested clock specification.
 * @param timeout Maximum time to wait for the request to complete.
 *
 * @retval 0 on success.
 * @retval -EWOULDBLOCK if called from an ISR (blocking is not allowed).
 * @return A negative error code from nrf_clock_control_request(), from
 *         k_sem_take() (in which case the pending request is canceled or
 *         released before returning), or the completion result reported
 *         via sync_cb().
 */
int nrf_clock_control_request_sync(const struct device *dev,
				   const struct nrf_clock_spec *spec,
				   k_timeout_t timeout)
{
	struct sync_req req = {0};
	int err;

	/* Blocking on the semaphore is not possible in interrupt context. */
	if (k_is_in_isr()) {
		return -EWOULDBLOCK;
	}

	/* Use the public k_sem_init() API rather than the Zephyr-internal
	 * Z_SEM_INITIALIZER macro (Z_-prefixed symbols are private).
	 */
	k_sem_init(&req.sem, 0, 1);
	sys_notify_init_callback(&req.cli.notify, sync_cb);

	err = nrf_clock_control_request(dev, spec, &req.cli);
	if (err < 0) {
		return err;
	}

	err = k_sem_take(&req.sem, timeout);
	if (err < 0) {
		/* The request did not complete in time - withdraw it. */
		nrf_clock_control_cancel_or_release(dev, spec, &req.cli);
		return err;
	}

	return req.res;
}
244