1 /*
2  * Copyright (c) 2016-2020 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <soc.h>
9 #include <zephyr/sys/onoff.h>
10 #include <zephyr/drivers/clock_control.h>
11 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
12 #include "nrf_clock_calibration.h"
13 #include <nrfx_clock.h>
14 #include <zephyr/logging/log.h>
15 #include <zephyr/shell/shell.h>
16 #include <zephyr/irq.h>
17 
18 LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
19 
20 #define DT_DRV_COMPAT nordic_nrf_clock
21 
22 
23 #define CTX_ONOFF		BIT(6)
24 #define CTX_API			BIT(7)
25 #define CTX_MASK (CTX_ONOFF | CTX_API)
26 
27 #define STATUS_MASK		0x7
28 #define GET_STATUS(flags)	(flags & STATUS_MASK)
29 #define GET_CTX(flags)		(flags & CTX_MASK)
30 
31 /* Used only by HF clock */
32 #define HF_USER_BT		BIT(0)
33 #define HF_USER_GENERIC		BIT(1)
34 
35 /* Helper logging macros which prepends subsys name to the log. */
36 #ifdef CONFIG_LOG
37 #define CLOCK_LOG(lvl, dev, subsys, ...) \
38 	LOG_##lvl("%s: " GET_ARG_N(1, __VA_ARGS__), \
39 		get_sub_config(dev, (enum clock_control_nrf_type)subsys)->name \
40 		COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
41 				(), (, GET_ARGS_LESS_N(1, __VA_ARGS__))))
42 #else
43 #define CLOCK_LOG(...)
44 #endif
45 
46 #define ERR(dev, subsys, ...) CLOCK_LOG(ERR, dev, subsys, __VA_ARGS__)
47 #define WRN(dev, subsys, ...) CLOCK_LOG(WRN, dev, subsys, __VA_ARGS__)
48 #define INF(dev, subsys, ...) CLOCK_LOG(INF, dev, subsys, __VA_ARGS__)
49 #define DBG(dev, subsys, ...) CLOCK_LOG(DBG, dev, subsys, __VA_ARGS__)
50 
51 /* Clock subsys structure */
52 struct nrf_clock_control_sub_data {
53 	clock_control_cb_t cb;
54 	void *user_data;
55 	uint32_t flags;
56 };
57 
58 typedef void (*clk_ctrl_func_t)(void);
59 
60 /* Clock subsys static configuration */
61 struct nrf_clock_control_sub_config {
62 	clk_ctrl_func_t start;		/* Clock start function */
63 	clk_ctrl_func_t stop;		/* Clock stop function */
64 #ifdef CONFIG_LOG
65 	const char *name;
66 #endif
67 };
68 
69 struct nrf_clock_control_data {
70 	struct onoff_manager mgr[CLOCK_CONTROL_NRF_TYPE_COUNT];
71 	struct nrf_clock_control_sub_data subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
72 };
73 
74 struct nrf_clock_control_config {
75 	struct nrf_clock_control_sub_config
76 					subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
77 };
78 
79 static atomic_t hfclk_users;
80 static uint64_t hf_start_tstamp;
81 static uint64_t hf_stop_tstamp;
82 #if CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH
83 /* Client to request HFXO to synthesize low frequency clock. */
84 static struct onoff_client lfsynth_cli;
85 #endif
86 
get_sub_data(const struct device * dev,enum clock_control_nrf_type type)87 static struct nrf_clock_control_sub_data *get_sub_data(const struct device *dev,
88 						       enum clock_control_nrf_type type)
89 {
90 	struct nrf_clock_control_data *data = dev->data;
91 
92 	return &data->subsys[type];
93 }
94 
get_sub_config(const struct device * dev,enum clock_control_nrf_type type)95 static const struct nrf_clock_control_sub_config *get_sub_config(const struct device *dev,
96 								 enum clock_control_nrf_type type)
97 {
98 	const struct nrf_clock_control_config *config =
99 						dev->config;
100 
101 	return &config->subsys[type];
102 }
103 
get_onoff_manager(const struct device * dev,enum clock_control_nrf_type type)104 static struct onoff_manager *get_onoff_manager(const struct device *dev,
105 					       enum clock_control_nrf_type type)
106 {
107 	struct nrf_clock_control_data *data = dev->data;
108 
109 	return &data->mgr[type];
110 }
111 
112 
113 #define CLOCK_DEVICE DEVICE_DT_GET(DT_NODELABEL(clock))
114 
/* Public accessor: map a clock_control subsystem id to its onoff manager. */
struct onoff_manager *z_nrf_clock_control_get_onoff(clock_control_subsys_t sys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)sys;

	return get_onoff_manager(CLOCK_DEVICE, type);
}
120 
/* clock_control API: report the current status (OFF/STARTING/ON) of a clock.
 * The status is kept in the low bits of the subsystem's flags word.
 */
static enum clock_control_status get_status(const struct device *dev,
					    clock_control_subsys_t subsys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	return GET_STATUS(get_sub_data(dev, type)->flags);
}
130 
/* Atomically transition a clock to OFF.
 *
 * Only the context that originally started the clock (or nobody, if it is
 * already off) may stop it; otherwise -EPERM is returned and the flags word
 * is left untouched.
 */
static int set_off_state(uint32_t *flags, uint32_t ctx)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int err;

	if ((owner == 0) || (owner == ctx)) {
		*flags = CLOCK_CONTROL_STATUS_OFF;
		err = 0;
	} else {
		err = -EPERM;
	}

	irq_unlock(key);

	return err;
}
147 
/* Atomically transition a clock from OFF to STARTING, recording the owner.
 *
 * Returns 0 on a successful transition, -EALREADY if this context already
 * owns the (starting or running) clock, and -EPERM if a different context
 * owns it.
 */
static int set_starting_state(uint32_t *flags, uint32_t ctx)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int err;

	if (GET_STATUS(*flags) == CLOCK_CONTROL_STATUS_OFF) {
		*flags = CLOCK_CONTROL_STATUS_STARTING | ctx;
		err = 0;
	} else if (owner == ctx) {
		err = -EALREADY;
	} else {
		err = -EPERM;
	}

	irq_unlock(key);

	return err;
}
166 
/* Atomically mark a clock as running, preserving the owning context bits. */
static void set_on_state(uint32_t *flags)
{
	unsigned int key = irq_lock();

	*flags = GET_CTX(*flags) | CLOCK_CONTROL_STATUS_ON;
	irq_unlock(key);
}
174 
175 #ifdef CONFIG_CLOCK_CONTROL_NRF_HFINT_CALIBRATION
176 
/* Apply the nRF54L errata 30 HFINT calibration workaround.
 *
 * Runs a hardware-assisted calibration loop against an undocumented register
 * window (addresses 0x5012xxxx below) until the measured frequency offset
 * drops under 24 counts, nudging the 6-bit trim value one step per iteration.
 *
 * NOTE(review): the magic addresses and bit layouts come from the errata
 * workaround; they are not named in any public header. Do not reorder the
 * register accesses.
 */
static void nrf54l_errata_30_workaround(void)
{
	/* Busy-wait until the XO is reported as running before calibrating. */
	while (FIELD_GET(CLOCK_XO_STAT_STATE_Msk, NRF_CLOCK->XO.STAT) !=
	       CLOCK_XO_STAT_STATE_Running) {
	}
	/* Preserve the non-trim bits of the calibration register. */
	const uint32_t higher_bits = *((volatile uint32_t *)0x50120820UL) & 0xFFFFFFC0;
	*((volatile uint32_t *)0x50120864UL) = 1 | BIT(31);
	*((volatile uint32_t *)0x50120848UL) = 1;
	/* Seed with the threshold so the loop body runs at least once. */
	uint32_t off_abs = 24;

	while (off_abs >= 24) {
		/* Trigger one measurement and wait for it to complete. */
		*((volatile uint32_t *)0x50120844UL) = 1;
		while (((*((volatile uint32_t *)0x50120840UL)) & (1 << 16)) != 0) {
		}
		const uint32_t current_cal = *((volatile uint32_t *)0x50120820UL) & 0x3F;
		const uint32_t cal_result = *((volatile uint32_t *)0x50120840UL) & 0x7FF;
		/* 1024 is the ideal (zero-offset) measurement value. */
		int32_t off = 1024 - cal_result;

		off_abs = (off < 0) ? -off : off;

		/* Step the 6-bit trim towards the target, clamped to [0, 0x3F]. */
		if (off >= 24 && current_cal < 0x3F) {
			*((volatile uint32_t *)0x50120820UL) = higher_bits | (current_cal + 1);
		} else if (off <= -24 && current_cal > 0) {
			*((volatile uint32_t *)0x50120820UL) = higher_bits | (current_cal - 1);
		}
	}

	/* Disable the calibration machinery again. */
	*((volatile uint32_t *)0x50120848UL) = 0;
	*((volatile uint32_t *)0x50120864UL) = 0;
}
207 
208 #if CONFIG_CLOCK_CONTROL_NRF_HFINT_CALIBRATION_PERIOD
209 
210 static struct onoff_client hf_cal_cli;
211 
/* onoff completion callback: the HFXO was requested only for the duration of
 * one calibration cycle, so release (or cancel) the request immediately.
 */
static void calibration_finished_callback(struct onoff_manager *mgr,
					  struct onoff_client *cli,
					  uint32_t state,
					  int res)
{
	(void)onoff_cancel_or_release(mgr, cli);
}
219 
calibration_handler(struct k_timer * timer)220 static void calibration_handler(struct k_timer *timer)
221 {
222 	nrf_clock_hfclk_t clk_src;
223 
224 	bool ret = nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, &clk_src);
225 
226 	if (ret && (clk_src == NRF_CLOCK_HFCLK_HIGH_ACCURACY)) {
227 		return;
228 	}
229 
230 	sys_notify_init_callback(&hf_cal_cli.notify, calibration_finished_callback);
231 	(void)onoff_request(z_nrf_clock_control_get_onoff(CLOCK_CONTROL_NRF_SUBSYS_HF),
232 			    &hf_cal_cli);
233 }
234 
/* Periodic timer that schedules HFINT calibration cycles. */
static K_TIMER_DEFINE(calibration_timer, calibration_handler, NULL);

/* Start the periodic calibration timer; first cycle runs immediately. */
static int calibration_init(void)
{
	k_timer_start(&calibration_timer,
		      K_NO_WAIT,
		      K_MSEC(CONFIG_CLOCK_CONTROL_NRF_HFINT_CALIBRATION_PERIOD));

	return 0;
}

/* Deferred to APPLICATION level so the kernel and this driver are up first. */
SYS_INIT(calibration_init, APPLICATION, 0);
247 
248 #endif /* CONFIG_CLOCK_CONTROL_NRF_HFINT_CALIBRATION_PERIOD */
249 #endif /* CONFIG_CLOCK_CONTROL_NRF_HFINT_CALIBRATION */
250 
clkstarted_handle(const struct device * dev,enum clock_control_nrf_type type)251 static void clkstarted_handle(const struct device *dev,
252 			      enum clock_control_nrf_type type)
253 {
254 #if CONFIG_CLOCK_CONTROL_NRF_HFINT_CALIBRATION
255 	if (nrf54l_errata_30() && (type == CLOCK_CONTROL_NRF_TYPE_HFCLK)) {
256 		nrf54l_errata_30_workaround();
257 	}
258 #endif
259 	struct nrf_clock_control_sub_data *sub_data = get_sub_data(dev, type);
260 	clock_control_cb_t callback = sub_data->cb;
261 	void *user_data = sub_data->user_data;
262 
263 	sub_data->cb = NULL;
264 	set_on_state(&sub_data->flags);
265 	DBG(dev, type, "Clock started");
266 
267 	if (callback) {
268 		callback(dev, (clock_control_subsys_t)type, user_data);
269 	}
270 }
271 
/* nRF52 anomaly 132 workaround: busy-wait once before the first LFCLK start.
 * Subsequent calls are no-ops. Compiled out when the delay is configured as 0.
 */
static inline void anomaly_132_workaround(void)
{
#if (CONFIG_NRF52_ANOMALY_132_DELAY_US - 0)
	static bool applied;

	if (applied) {
		return;
	}

	k_busy_wait(CONFIG_NRF52_ANOMALY_132_DELAY_US);
	applied = true;
#endif
}
283 
/* Start the LF clock. When the LF source is synthesized from the HF clock,
 * an HFXO request is held for as long as the LF clock stays on (released in
 * lfclk_stop()).
 */
static void lfclk_start(void)
{
	if (IS_ENABLED(CONFIG_NRF52_ANOMALY_132_WORKAROUND)) {
		anomaly_132_workaround();
	}

#if CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH
	sys_notify_init_spinwait(&lfsynth_cli.notify);
	(void)onoff_request(z_nrf_clock_control_get_onoff(CLOCK_CONTROL_NRF_SUBSYS_HF),
			    &lfsynth_cli);
#endif

	nrfx_clock_lfclk_start();
}
298 
/* Stop the LF clock, notifying the calibration module first and releasing
 * the HFXO request held for the SYNTH source afterwards.
 */
static void lfclk_stop(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
		z_nrf_clock_calibration_lfclk_stopped();
	}

	nrfx_clock_lfclk_stop();

#if CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH
	(void)onoff_cancel_or_release(z_nrf_clock_control_get_onoff(CLOCK_CONTROL_NRF_SUBSYS_HF),
				      &lfsynth_cli);
#endif
}
312 
/* Start the HF clock, recording the start time for the shell status command. */
static void hfclk_start(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
		hf_start_tstamp = k_uptime_get();
	}

	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLK);
}
321 
/* Stop the HF clock, recording the stop time for the shell status command. */
static void hfclk_stop(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
		hf_stop_tstamp = k_uptime_get();
	}

	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLK);
}
330 
#if NRF_CLOCK_HAS_HFCLK24M
/* Thin start/stop wrappers for the HFCLK24M domain (SoCs that have it). */
static void hfclk24m_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLK24M);
}

static void hfclk24m_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLK24M);
}
#endif
342 
#if NRF_CLOCK_HAS_HFCLK192M
/* Thin start/stop wrappers for the HFCLK192M domain (SoCs that have it). */
static void hfclk192m_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLK192M);
}

static void hfclk192m_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLK192M);
}
#endif
354 
#if NRF_CLOCK_HAS_HFCLKAUDIO
/* Thin start/stop wrappers for the HFCLKAUDIO domain (SoCs that have it). */
static void hfclkaudio_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}

static void hfclkaudio_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}
#endif
366 
get_hf_flags(void)367 static uint32_t *get_hf_flags(void)
368 {
369 	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
370 
371 	return &data->subsys[CLOCK_CONTROL_NRF_TYPE_HFCLK].flags;
372 }
373 
/* Start the HF clock on behalf of the generic (non-BT) user.
 *
 * If the BT controller already requested the clock and the high-accuracy
 * source is running, the hardware start is skipped and the started handling
 * is performed directly. The user-bookkeeping and the running check must be
 * done under irq_lock so a concurrent BT release cannot slip in between.
 */
static void generic_hfclk_start(void)
{
	nrf_clock_hfclk_t type;
	bool already_started = false;
	unsigned int key = irq_lock();

	hfclk_users |= HF_USER_GENERIC;
	if (hfclk_users & HF_USER_BT) {
		/* NOTE(review): assumes nrfx_clock_is_running() always writes
		 * 'type' when given a non-NULL pointer - confirm against the
		 * nrfx_clock API for the targeted nrfx version.
		 */
		(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, &type);
		if (type == NRF_CLOCK_HFCLK_HIGH_ACCURACY) {
			already_started = true;
			/* Set on state in case clock interrupt comes and we
			 * want to avoid handling that.
			 */
			set_on_state(get_hf_flags());
		}
	}

	irq_unlock(key);

	if (already_started) {
		/* Clock already started by z_nrf_clock_bt_ctlr_hf_request */
		clkstarted_handle(CLOCK_DEVICE,
				  CLOCK_CONTROL_NRF_TYPE_HFCLK);
		return;
	}

	hfclk_start();
}
403 
/* Drop the generic user's HF clock request; physically stop the clock only
 * when the BT controller is not holding its own request.
 */
static void generic_hfclk_stop(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * as the thread could be preempted right after that function but
	 * before hfclk_stop() is called and the preempting code could request
	 * the HFCLK again. Then, the HFCLK would be stopped inappropriately
	 * and hfclk_user would be left with an incorrect value.
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_GENERIC;
	/* Skip stopping if BT is still requesting the clock. */
	if (!(hfclk_users & HF_USER_BT)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
422 
423 
z_nrf_clock_bt_ctlr_hf_request(void)424 void z_nrf_clock_bt_ctlr_hf_request(void)
425 {
426 	if (atomic_or(&hfclk_users, HF_USER_BT) & HF_USER_GENERIC) {
427 		/* generic request already activated clock. */
428 		return;
429 	}
430 
431 	hfclk_start();
432 }
433 
/* BT controller fast-path HF clock release; stops the clock only when the
 * generic user is not holding its own request.
 */
void z_nrf_clock_bt_ctlr_hf_release(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * see the explanation in generic_hfclk_stop().
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_BT;
	/* Skip stopping if generic is still requesting the clock. */
	if (!(hfclk_users & HF_USER_GENERIC)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
449 
#if DT_NODE_EXISTS(DT_NODELABEL(hfxo))
/* Report the HFXO startup time (microseconds) from the devicetree node. */
uint32_t z_nrf_clock_bt_ctlr_hf_get_startup_time_us(void)
{
	return DT_PROP(DT_NODELABEL(hfxo), startup_time_us);
}
#endif
456 
/* Stop the given clock subsystem on behalf of context @p ctx.
 *
 * Returns -EPERM if another context owns the running clock; on success the
 * state transitions to OFF before the hardware stop routine is invoked.
 */
static int stop(const struct device *dev, clock_control_subsys_t subsys,
		uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *sub = get_sub_data(dev, type);
	int err;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	err = set_off_state(&sub->flags, ctx);
	if (err == 0) {
		get_sub_config(dev, type)->stop();
	}

	return err;
}
475 
/* clock_control API: stop a clock in the API-caller context. */
static int api_stop(const struct device *dev, clock_control_subsys_t subsys)
{
	return stop(dev, subsys, CTX_API);
}
480 
/* Begin an asynchronous clock start on behalf of context @p ctx.
 *
 * On success the one-shot @p cb (with @p user_data) is invoked from the
 * clock event handler once the clock is running. Returns -EPERM/-EALREADY
 * from the state transition without touching the callback fields.
 */
static int async_start(const struct device *dev, clock_control_subsys_t subsys,
			clock_control_cb_t cb, void *user_data, uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *sub = get_sub_data(dev, type);
	int err = set_starting_state(&sub->flags, ctx);

	if (err < 0) {
		return err;
	}

	sub->cb = cb;
	sub->user_data = user_data;

	get_sub_config(dev, type)->start();

	return 0;
}
500 
/* clock_control API: start a clock asynchronously in the API-caller context. */
static int api_start(const struct device *dev, clock_control_subsys_t subsys,
		     clock_control_cb_t cb, void *user_data)
{
	return async_start(dev, subsys, cb, user_data, CTX_API);
}
506 
/* Start-completion callback used by the blocking API: releases the
 * semaphore the requesting thread is waiting on.
 */
static void blocking_start_callback(const struct device *dev,
				    clock_control_subsys_t subsys,
				    void *user_data)
{
	k_sem_give((struct k_sem *)user_data);
}
515 
/* clock_control API: start a clock and block until it is running.
 *
 * Requires multithreading (waits on a semaphore). Returns the async_start()
 * error, 0 on success, or -EAGAIN if the clock did not start within 500 ms.
 *
 * NOTE(review): on timeout the pending start still holds a pointer to the
 * stack-allocated semaphore via blocking_start_callback(); if the clock
 * starts after this function returns, the callback touches a dead stack
 * frame - confirm whether callers can hit the timeout in practice.
 */
static int api_blocking_start(const struct device *dev,
			      clock_control_subsys_t subsys)
{
	struct k_sem sem = Z_SEM_INITIALIZER(sem, 0, 1);
	int err;

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		return -ENOTSUP;
	}

	err = api_start(dev, subsys, blocking_start_callback, &sem);
	if (err < 0) {
		return err;
	}

	return k_sem_take(&sem, K_MSEC(500));
}
533 
get_subsys(struct onoff_manager * mgr)534 static clock_control_subsys_t get_subsys(struct onoff_manager *mgr)
535 {
536 	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
537 	size_t offset = (size_t)(mgr - data->mgr);
538 
539 	return (clock_control_subsys_t)offset;
540 }
541 
/* onoff service stop transition: stop synchronously and report the result. */
static void onoff_stop(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	notify(mgr, stop(CLOCK_DEVICE, get_subsys(mgr), CTX_ONOFF));
}
550 
/* Bridge a clock-started event back into the onoff service: the notify
 * function was smuggled through the start request's user_data pointer.
 */
static void onoff_started_callback(const struct device *dev,
				   clock_control_subsys_t sys,
				   void *user_data)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)sys;
	onoff_notify_fn notify = user_data;

	notify(get_onoff_manager(dev, type), 0);
}
561 
/* onoff service start transition: kick off an async start; on success the
 * notification is delivered later via onoff_started_callback(), on failure
 * it is delivered immediately with the error code.
 */
static void onoff_start(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	int err = async_start(CLOCK_DEVICE, get_subsys(mgr),
			      onoff_started_callback, notify, CTX_ONOFF);

	if (err < 0) {
		notify(mgr, err);
	}
}
573 
/** @brief Wait for LF clock availability or stability.
 *
 * If LF clock source is SYNTH or RC then there is no distinction between
 * availability and stability. In case of XTAL source clock, system is initially
 * starting RC and then seamlessly switches to XTAL. Running RC means clock
 * availability and running target source means stability. That is because of
 * the significant difference in startup time (<1ms vs >200ms).
 *
 * In order to get event/interrupt when RC is ready (allowing CPU sleeping) two
 * stage startup sequence is used. Initially, LF source is set to RC and when
 * LFSTARTED event is handled it is reconfigured to the target source clock.
 * This approach is implemented in nrfx_clock driver and utilized here.
 *
 * @param mode Start mode.
 */
static void lfclk_spinwait(enum nrf_lfclk_start_mode mode)
{
	static const nrf_clock_domain_t d = NRF_CLOCK_DOMAIN_LFCLK;
	static const nrf_clock_lfclk_t target_type =
		/* For sources XTAL, EXT_LOW_SWING, and EXT_FULL_SWING,
		 * NRF_CLOCK_LFCLK_XTAL is returned as the type of running clock.
		 */
		(IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_XTAL) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_LOW_SWING) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_FULL_SWING))
		? NRF_CLOCK_LFCLK_XTAL
		: CLOCK_CONTROL_NRF_K32SRC;
	nrf_clock_lfclk_t type;

	if ((mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE) &&
	    (target_type == NRF_CLOCK_LFCLK_XTAL) &&
	    (nrf_clock_lf_srccopy_get(NRF_CLOCK) == CLOCK_CONTROL_NRF_K32SRC)) {
		/* If target clock source is using XTAL then due to two-stage
		 * clock startup sequence, RC might already be running.
		 * It can be determined by checking current LFCLK source. If it
		 * is set to the target clock source then it means that RC was
		 * started.
		 */
		return;
	}

	/* In ISR/pre-kernel context interrupts are locked and events are
	 * polled; in thread context the LFSTARTED interrupt is masked instead
	 * so this function (not the ISR) performs the second startup stage.
	 */
	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
	int key = isr_mode ? irq_lock() : 0;

	if (!isr_mode) {
		nrf_clock_int_disable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}

	/* Spin until the LF domain runs the required source: any source for
	 * AVAILABLE mode, the configured target source for STABLE mode.
	 */
	while (!(nrfx_clock_is_running(d, (void *)&type)
		 && ((type == target_type)
		     || (mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE)))) {
		/* Synth source start is almost instant and LFCLKSTARTED may
		 * happen before calling idle. That would lead to deadlock.
		 */
		if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH)) {
			if (isr_mode || !IS_ENABLED(CONFIG_MULTITHREADING)) {
				k_cpu_atomic_idle(key);
			} else {
				k_msleep(1);
			}
		}

		/* Clock interrupt is locked, LFCLKSTARTED is handled here. */
		if ((target_type ==  NRF_CLOCK_LFCLK_XTAL)
		    && (nrf_clock_lf_src_get(NRF_CLOCK) == NRF_CLOCK_LFCLK_RC)
		    && nrf_clock_event_check(NRF_CLOCK,
					     NRF_CLOCK_EVENT_LFCLKSTARTED)) {
			/* RC stage finished: switch the source to the target
			 * and restart, mirroring what the ISR would do.
			 */
			nrf_clock_event_clear(NRF_CLOCK,
					      NRF_CLOCK_EVENT_LFCLKSTARTED);
			nrf_clock_lf_src_set(NRF_CLOCK,
					     CLOCK_CONTROL_NRF_K32SRC);

			/* Clear pending interrupt, otherwise new clock event
			 * would not wake up from idle.
			 */
			NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
			nrf_clock_task_trigger(NRF_CLOCK,
					       NRF_CLOCK_TASK_LFCLKSTART);
		}
	}

	/* Restore interrupt state for the mode we entered with. */
	if (isr_mode) {
		irq_unlock(key);
	} else {
		nrf_clock_int_enable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}
}
661 
/* Request the LF clock to stay on permanently and optionally wait for it.
 *
 * The onoff request is issued exactly once (guarded by the 'on' flag) and
 * never released, so the LF clock remains running for the system lifetime.
 * Depending on @p start_mode the call returns immediately (NOWAIT) or spins
 * until the clock is available/stable.
 */
void z_nrf_clock_control_lf_on(enum nrf_lfclk_start_mode start_mode)
{
	static atomic_t on;
	static struct onoff_client cli;

	if (atomic_set(&on, 1) == 0) {
		int err;
		struct onoff_manager *mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);

		sys_notify_init_spinwait(&cli.notify);
		err = onoff_request(mgr, &cli);
		__ASSERT_NO_MSG(err >= 0);
	}

	/* In case of simulated board leave immediately. */
	if (IS_ENABLED(CONFIG_SOC_SERIES_BSIM_NRFXX)) {
		return;
	}

	switch (start_mode) {
	case CLOCK_CONTROL_NRF_LF_START_AVAILABLE:
	case CLOCK_CONTROL_NRF_LF_START_STABLE:
		lfclk_spinwait(start_mode);
		break;

	case CLOCK_CONTROL_NRF_LF_START_NOWAIT:
		break;

	default:
		__ASSERT_NO_MSG(false);
	}
}
696 
hfclkstarted_handle(const struct device * dev)697 static void hfclkstarted_handle(const struct device *dev)
698 {
699 	struct nrf_clock_control_sub_data *data =
700 			get_sub_data(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);
701 
702 	if (GET_STATUS(data->flags) == CLOCK_CONTROL_STATUS_STARTING) {
703 		/* Handler is called only if state is set. BT specific API
704 		 * does not set this state and does not require handler to
705 		 * be called.
706 		 */
707 		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);
708 	}
709 }
710 
/* Central nrfx_clock event dispatcher (runs in the clock ISR context).
 * Maps each hardware event to the corresponding subsystem's started/done
 * handling. Which cases exist depends on the SoC's clock feature macros.
 */
static void clock_event_handler(nrfx_clock_evt_type_t event)
{
	const struct device *dev = CLOCK_DEVICE;

	switch (event) {
#if NRF_CLOCK_HAS_XO_TUNE
	case NRFX_CLOCK_EVT_XO_TUNED:
		/* With explicit XO tuning, the clock is usable only once
		 * tuning completes - treat XO_TUNED as "started".
		 */
		hfclkstarted_handle(dev);
		break;
	case NRFX_CLOCK_EVT_XO_TUNE_ERROR:
	case NRFX_CLOCK_EVT_XO_TUNE_FAILED:
		/* No processing needed. */
		break;
	case NRFX_CLOCK_EVT_HFCLK_STARTED:
		/* HFCLK is stable after XOTUNED event.
		 * HFCLK_STARTED means only that clock has been started.
		 */
		break;
#else
	/* HFCLK started should be used only if tune operation is done implicitly. */
	case NRFX_CLOCK_EVT_HFCLK_STARTED:
		hfclkstarted_handle(dev);
		break;
#endif
#if NRF_CLOCK_HAS_HFCLK24M
	case NRFX_CLOCK_EVT_HFCLK24M_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK24M);
		break;
#endif
#if NRF_CLOCK_HAS_HFCLK192M
	case NRFX_CLOCK_EVT_HFCLK192M_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK192M);
		break;
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
	case NRFX_CLOCK_EVT_HFCLKAUDIO_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO);
		break;
#endif
	case NRFX_CLOCK_EVT_LFCLK_STARTED:
		/* Inform the calibration module before completing the start. */
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_lfclk_started();
		}
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_LFCLK);
		break;
#if NRF_CLOCK_HAS_CALIBRATION || NRF_LFRC_HAS_CALIBRATION
	case NRFX_CLOCK_EVT_CAL_DONE:
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_done_handler();
		} else {
			/* Should not happen when calibration is disabled. */
			__ASSERT_NO_MSG(false);
		}
		break;
#endif
#if NRF_CLOCK_HAS_PLL
	case NRFX_CLOCK_EVT_PLL_STARTED:
		/* No processing needed. */
		break;
#endif
	default:
		__ASSERT_NO_MSG(0);
		break;
	}
}
776 
/* Configure the HFCLKAUDIO frequency from the devicetree property, if set.
 * Compiles to a no-op when the 'hfclkaudio-frequency' property is absent.
 */
static void hfclkaudio_init(void)
{
#if DT_NODE_HAS_PROP(DT_NODELABEL(clock), hfclkaudio_frequency)
	const uint32_t frequency =
		DT_PROP(DT_NODELABEL(clock), hfclkaudio_frequency);
	/* As specified in the nRF5340 PS:
	 *
	 * FREQ_VALUE = 2^16 * ((12 * f_out / 32M) - 4)
	 *
	 * 384/15625 is 2^16 * 12 / 32M reduced to lowest terms; 262144 is
	 * 2^16 * 4.
	 */
	const uint32_t freq_value =
		(uint32_t)((384ULL * frequency) / 15625) - 262144;

#if NRF_CLOCK_HAS_HFCLKAUDIO
	nrf_clock_hfclkaudio_config_set(NRF_CLOCK, freq_value);
#else
#error "hfclkaudio-frequency specified but HFCLKAUDIO clock is not present."
#endif /* NRF_CLOCK_HAS_HFCLKAUDIO */
#endif
}
796 
/* Device init: hook up the clock IRQ(s), initialize nrfx_clock with our
 * event handler, configure HFCLKAUDIO and calibration, and set up one onoff
 * manager plus an OFF state for every clock subsystem.
 */
static int clk_init(const struct device *dev)
{
	int err;
	static const struct onoff_transitions transitions = {
		.start = onoff_start,
		.stop = onoff_stop
	};

#if NRF_LFRC_HAS_CALIBRATION
	/* SoCs with a separate LFRC peripheral report calibration via its own IRQ. */
	IRQ_CONNECT(LFRC_IRQn, DT_INST_IRQ(0, priority), nrfx_isr, nrfx_power_clock_irq_handler, 0);
#endif

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    nrfx_isr, nrfx_power_clock_irq_handler, 0);

	if (nrfx_clock_init(clock_event_handler) != 0) {
		return -EIO;
	}

	hfclkaudio_init();

	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
		struct nrf_clock_control_data *data = dev->data;

		z_nrf_clock_calibration_init(data->mgr);
	}

	nrfx_clock_enable();

	for (enum clock_control_nrf_type i = 0;
		i < CLOCK_CONTROL_NRF_TYPE_COUNT; i++) {
		struct nrf_clock_control_sub_data *subdata =
						get_sub_data(dev, i);

		err = onoff_manager_init(get_onoff_manager(dev, i),
					 &transitions);
		if (err < 0) {
			return err;
		}

		subdata->flags = CLOCK_CONTROL_STATUS_OFF;
	}

	return 0;
}
842 
/* Standard clock_control driver API vtable. */
static DEVICE_API(clock_control, clock_control_api) = {
	.on = api_blocking_start,
	.off = api_stop,
	.async_on = api_start,
	.get_status = get_status,
};
849 
/* Runtime state (onoff managers + per-subsystem flags), zero-initialized. */
static struct nrf_clock_control_data data;

/* Static per-subsystem configuration: hardware start/stop handlers, and the
 * subsystem name used by the logging macros when CONFIG_LOG is enabled.
 */
static const struct nrf_clock_control_config config = {
	.subsys = {
		[CLOCK_CONTROL_NRF_TYPE_HFCLK] = {
			.start = generic_hfclk_start,
			.stop = generic_hfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk",))
		},
		[CLOCK_CONTROL_NRF_TYPE_LFCLK] = {
			.start = lfclk_start,
			.stop = lfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "lfclk",))
		},
#if NRF_CLOCK_HAS_HFCLK24M
		[CLOCK_CONTROL_NRF_TYPE_HFCLK24M] = {
			.start = hfclk24m_start,
			.stop = hfclk24m_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk24m",))
		},
#endif
#if NRF_CLOCK_HAS_HFCLK192M
		[CLOCK_CONTROL_NRF_TYPE_HFCLK192M] = {
			.start = hfclk192m_start,
			.stop = hfclk192m_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk192m",))
		},
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
		[CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO] = {
			.start = hfclkaudio_start,
			.stop = hfclkaudio_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclkaudio",))
		},
#endif
	}
};

/* Instantiate the clock device early (PRE_KERNEL_1) so other drivers can
 * request clocks during their own initialization.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(clock), clk_init, NULL,
		 &data, &config,
		 PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clock_control_api);
892 
893 #if defined(CONFIG_SHELL)
894 
/* Shell command: print HF/LF clock running state, onoff reference counts,
 * and the last HF start/stop timestamps.
 *
 * Fix: the timestamp snapshots were previously held in uint32_t locals
 * although hf_start_tstamp/hf_stop_tstamp are uint64_t milliseconds; after
 * ~49.7 days of uptime the truncated snapshot made the 64-bit
 * "(now - abs_start)" delta wrap to a huge bogus value. Keep the snapshots
 * and the subtraction in 64 bits and truncate only for printing.
 */
static int cmd_status(const struct shell *sh, size_t argc, char **argv)
{
	nrf_clock_hfclk_t hfclk_src;
	bool hf_status;
	bool lf_status = nrfx_clock_is_running(NRF_CLOCK_DOMAIN_LFCLK, NULL);
	struct onoff_manager *hf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_HFCLK);
	struct onoff_manager *lf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);
	uint64_t abs_start, abs_stop;
	/* Snapshot timestamps atomically with respect to hfclk_start/stop(). */
	unsigned int key = irq_lock();
	uint64_t now = k_uptime_get();

	(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, (void *)&hfclk_src);
	hf_status = (hfclk_src == NRF_CLOCK_HFCLK_HIGH_ACCURACY);

	abs_start = hf_start_tstamp;
	abs_stop = hf_stop_tstamp;
	irq_unlock(key);

	shell_print(sh, "HF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			hf_status ? "" : "not ", hf_mgr->refs);
	shell_print(sh, "\t- last start: %u ms (%u ms ago)",
			(uint32_t)abs_start, (uint32_t)(now - abs_start));
	shell_print(sh, "\t- last stop: %u ms (%u ms ago)",
			(uint32_t)abs_stop, (uint32_t)(now - abs_stop));
	shell_print(sh, "LF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			lf_status ? "" : "not ", lf_mgr->refs);

	return 0;
}
930 
/* Shell registration: "nrf_clock_control status" (status is also the
 * default handler for the bare "nrf_clock_control" command).
 */
SHELL_STATIC_SUBCMD_SET_CREATE(subcmds,
	SHELL_CMD_ARG(status, NULL, "Status", cmd_status, 1, 0),
	SHELL_SUBCMD_SET_END
);

SHELL_COND_CMD_REGISTER(CONFIG_CLOCK_CONTROL_NRF_SHELL,
			nrf_clock_control, &subcmds,
			"Clock control commands",
			cmd_status);
940 
941 #endif /* defined(CONFIG_SHELL) */
942