1 /*
2  * Copyright (c) 2016-2020 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <soc.h>
9 #include <sys/onoff.h>
10 #include <drivers/clock_control.h>
11 #include <drivers/clock_control/nrf_clock_control.h>
12 #include "nrf_clock_calibration.h"
13 #include <nrfx_clock.h>
14 #include <logging/log.h>
15 #include <shell/shell.h>
16 
17 #if defined(CONFIG_SOC_NRF5340_CPUAPP) && \
18 	!defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
19 #include <hal/nrf_gpio.h>
20 #endif
21 
22 LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
23 
24 #define DT_DRV_COMPAT nordic_nrf_clock
25 
26 
/* Context bits stored in the subsystem flags word: they record which
 * interface (onoff manager or direct clock_control API) owns the
 * current clock request, so the other interface cannot stop it.
 */
#define CTX_ONOFF		BIT(6)
#define CTX_API			BIT(7)
#define CTX_MASK (CTX_ONOFF | CTX_API)

/* Low 3 bits of the flags word hold the clock_control_status value. */
#define STATUS_MASK		0x7
#define GET_STATUS(flags)	(flags & STATUS_MASK)
#define GET_CTX(flags)		(flags & CTX_MASK)

/* Used only by HF clock: user bits in the hfclk_users atomic mask. */
#define HF_USER_BT		BIT(0)
#define HF_USER_GENERIC		BIT(1)

/* Helper logging macros which prepend the subsystem name to the log. */
#ifdef CONFIG_LOG
#define CLOCK_LOG(lvl, dev, subsys, ...) \
	LOG_##lvl("%s: " GET_ARG_N(1, __VA_ARGS__), \
		get_sub_config(dev, (enum clock_control_nrf_type)subsys)->name \
		COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
				(), (, GET_ARGS_LESS_N(1, __VA_ARGS__))))
#else
#define CLOCK_LOG(...)
#endif

#define ERR(dev, subsys, ...) CLOCK_LOG(ERR, dev, subsys, __VA_ARGS__)
#define WRN(dev, subsys, ...) CLOCK_LOG(WRN, dev, subsys, __VA_ARGS__)
#define INF(dev, subsys, ...) CLOCK_LOG(INF, dev, subsys, __VA_ARGS__)
#define DBG(dev, subsys, ...) CLOCK_LOG(DBG, dev, subsys, __VA_ARGS__)
54 
/* Per-subsystem runtime state. */
struct nrf_clock_control_sub_data {
	clock_control_cb_t cb;	/* One-shot "started" callback; NULL when idle. */
	void *user_data;	/* Argument passed to cb. */
	uint32_t flags;		/* GET_STATUS() bits | GET_CTX() owner bits. */
};

/* Start/stop hook type for a single clock subsystem. */
typedef void (*clk_ctrl_func_t)(void);

/* Clock subsys static configuration */
struct nrf_clock_control_sub_config {
	clk_ctrl_func_t start;		/* Clock start function */
	clk_ctrl_func_t stop;		/* Clock stop function */
#ifdef CONFIG_LOG
	const char *name;		/* Subsystem name used by CLOCK_LOG(). */
#endif
};

/* Driver runtime data: one onoff manager and one state record per
 * clock subsystem type.
 */
struct nrf_clock_control_data {
	struct onoff_manager mgr[CLOCK_CONTROL_NRF_TYPE_COUNT];
	struct nrf_clock_control_sub_data subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
};

struct nrf_clock_control_config {
	struct nrf_clock_control_sub_config
					subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
};

/* HF_USER_* bits; shared between the generic API path and the BT
 * controller fast path (z_nrf_clock_bt_ctlr_hf_request/release).
 */
static atomic_t hfclk_users;
/* Last HF clock start/stop uptime (ms); only used by the shell command. */
static uint64_t hf_start_tstamp;
static uint64_t hf_stop_tstamp;
86 
get_sub_data(const struct device * dev,enum clock_control_nrf_type type)87 static struct nrf_clock_control_sub_data *get_sub_data(const struct device *dev,
88 						       enum clock_control_nrf_type type)
89 {
90 	struct nrf_clock_control_data *data = dev->data;
91 
92 	return &data->subsys[type];
93 }
94 
get_sub_config(const struct device * dev,enum clock_control_nrf_type type)95 static const struct nrf_clock_control_sub_config *get_sub_config(const struct device *dev,
96 								 enum clock_control_nrf_type type)
97 {
98 	const struct nrf_clock_control_config *config =
99 						dev->config;
100 
101 	return &config->subsys[type];
102 }
103 
get_onoff_manager(const struct device * dev,enum clock_control_nrf_type type)104 static struct onoff_manager *get_onoff_manager(const struct device *dev,
105 					       enum clock_control_nrf_type type)
106 {
107 	struct nrf_clock_control_data *data = dev->data;
108 
109 	return &data->mgr[type];
110 }
111 
112 
/* Handle of the single nRF clock controller device instance. */
#define CLOCK_DEVICE DEVICE_DT_GET(DT_NODELABEL(clock))
114 
/* Public helper: map a clock subsystem id to its onoff manager. */
struct onoff_manager *z_nrf_clock_control_get_onoff(clock_control_subsys_t sys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)sys;

	return get_onoff_manager(CLOCK_DEVICE, type);
}
120 
/* clock_control API: report the current status of a clock subsystem. */
static enum clock_control_status get_status(const struct device *dev,
					    clock_control_subsys_t subsys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)subsys;
	struct nrf_clock_control_sub_data *sub_data;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	sub_data = get_sub_data(dev, type);

	return GET_STATUS(sub_data->flags);
}
130 
/* Atomically move a subsystem to the OFF state.
 *
 * Succeeds only when the clock is unowned or owned by the calling
 * context (CTX_ONOFF vs CTX_API); otherwise returns -EPERM and leaves
 * the flags untouched.
 */
static int set_off_state(uint32_t *flags, uint32_t ctx)
{
	int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int rv;

	if ((owner == 0) || (owner == ctx)) {
		*flags = CLOCK_CONTROL_STATUS_OFF;
		rv = 0;
	} else {
		rv = -EPERM;
	}

	irq_unlock(key);

	return rv;
}
147 
/* Atomically move a subsystem from OFF to STARTING, recording the
 * requesting context as owner.
 *
 * Returns -EPERM when another context owns the clock, -EALREADY when
 * this context already started (or is starting) it.
 */
static int set_starting_state(uint32_t *flags, uint32_t ctx)
{
	int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int rv = 0;

	if (GET_STATUS(*flags) == CLOCK_CONTROL_STATUS_OFF) {
		*flags = CLOCK_CONTROL_STATUS_STARTING | ctx;
	} else if (owner != ctx) {
		rv = -EPERM;
	} else {
		rv = -EALREADY;
	}

	irq_unlock(key);

	return rv;
}
166 
/* Atomically mark a subsystem as running, preserving the owner bits. */
static void set_on_state(uint32_t *flags)
{
	int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);

	*flags = CLOCK_CONTROL_STATUS_ON | owner;
	irq_unlock(key);
}
174 
clkstarted_handle(const struct device * dev,enum clock_control_nrf_type type)175 static void clkstarted_handle(const struct device *dev,
176 			      enum clock_control_nrf_type type)
177 {
178 	struct nrf_clock_control_sub_data *sub_data = get_sub_data(dev, type);
179 	clock_control_cb_t callback = sub_data->cb;
180 	void *user_data = sub_data->user_data;
181 
182 	sub_data->cb = NULL;
183 	set_on_state(&sub_data->flags);
184 	DBG(dev, type, "Clock started");
185 
186 	if (callback) {
187 		callback(dev, (clock_control_subsys_t)type, user_data);
188 	}
189 }
190 
/* nRF52 anomaly 132 workaround: busy-wait once, before the first LFCLK
 * start, for the configured number of microseconds.
 */
static inline void anomaly_132_workaround(void)
{
#if (CONFIG_NRF52_ANOMALY_132_DELAY_US - 0)
	static bool done;

	if (done) {
		return;
	}

	k_busy_wait(CONFIG_NRF52_ANOMALY_132_DELAY_US);
	done = true;
#endif
}
202 
/* Start the LF clock, applying the anomaly 132 delay first if enabled. */
static void lfclk_start(void)
{
	if (IS_ENABLED(CONFIG_NRF52_ANOMALY_132_WORKAROUND)) {
		anomaly_132_workaround();
	}

	nrfx_clock_lfclk_start();
}
211 
/* Stop the LF clock, notifying the RC calibration module beforehand
 * so it can cancel any calibration in progress.
 */
static void lfclk_stop(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION) &&
	    !IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_FORCE_ALT)) {
		z_nrf_clock_calibration_lfclk_stopped();
	}

	nrfx_clock_lfclk_stop();
}
221 
/* Start the HF crystal; start time recorded for the shell status command. */
static void hfclk_start(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
		hf_start_tstamp = k_uptime_get();
	}

	nrfx_clock_hfclk_start();
}
230 
/* Stop the HF crystal; stop time recorded for the shell status command. */
static void hfclk_stop(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
		hf_stop_tstamp = k_uptime_get();
	}

	nrfx_clock_hfclk_stop();
}
239 
#if NRF_CLOCK_HAS_HFCLK192M
/* Start the HFCLK192M domain clock. */
static void hfclk192m_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLK192M);
}

/* Stop the HFCLK192M domain clock. */
static void hfclk192m_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLK192M);
}
#endif
251 
#if NRF_CLOCK_HAS_HFCLKAUDIO
/* Start the HFCLKAUDIO domain clock. */
static void hfclkaudio_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}

/* Stop the HFCLKAUDIO domain clock. */
static void hfclkaudio_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}
#endif
263 
get_hf_flags(void)264 static uint32_t *get_hf_flags(void)
265 {
266 	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
267 
268 	return &data->subsys[CLOCK_CONTROL_NRF_TYPE_HFCLK].flags;
269 }
270 
/* Start HFCLK XTAL on behalf of the generic (non-BT) user.
 *
 * Coordinates with the BT controller fast path: if BT already has the
 * high-accuracy clock running, the "started" handling is done here
 * synchronously instead of via the HFCLKSTARTED interrupt.
 */
static void generic_hfclk_start(void)
{
	nrf_clock_hfclk_t type;
	bool already_started = false;
	/* irq_lock so the user-mask update and the running check are
	 * atomic w.r.t. the BT request/release paths and the clock ISR.
	 */
	int key = irq_lock();

	hfclk_users |= HF_USER_GENERIC;
	if (hfclk_users & HF_USER_BT) {
		(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, &type);
		if (type == NRF_CLOCK_HFCLK_HIGH_ACCURACY) {
			already_started = true;
			/* Set on state in case clock interrupt comes and we
			 * want to avoid handling that.
			 */
			set_on_state(get_hf_flags());
		}
	}

	irq_unlock(key);

	if (already_started) {
		/* Clock already started by z_nrf_clock_bt_ctlr_hf_request */
		clkstarted_handle(CLOCK_DEVICE,
				  CLOCK_CONTROL_NRF_TYPE_HFCLK);
		return;
	}

	hfclk_start();
}
300 
generic_hfclk_stop(void)301 static void generic_hfclk_stop(void)
302 {
303 	if (atomic_and(&hfclk_users, ~HF_USER_GENERIC) & HF_USER_BT) {
304 		/* bt still requesting the clock. */
305 		return;
306 	}
307 
308 	hfclk_stop();
309 }
310 
311 
z_nrf_clock_bt_ctlr_hf_request(void)312 void z_nrf_clock_bt_ctlr_hf_request(void)
313 {
314 	if (atomic_or(&hfclk_users, HF_USER_BT) & HF_USER_GENERIC) {
315 		/* generic request already activated clock. */
316 		return;
317 	}
318 
319 	hfclk_start();
320 }
321 
z_nrf_clock_bt_ctlr_hf_release(void)322 void z_nrf_clock_bt_ctlr_hf_release(void)
323 {
324 	if (atomic_and(&hfclk_users, ~HF_USER_BT) & HF_USER_GENERIC) {
325 		/* generic still requesting the clock. */
326 		return;
327 	}
328 
329 	hfclk_stop();
330 }
331 
/* Stop a clock subsystem on behalf of the given context.
 *
 * @param dev    Clock control device.
 * @param subsys Clock subsystem id (enum clock_control_nrf_type).
 * @param ctx    CTX_ONOFF or CTX_API; must match the context which
 *               started the clock.
 *
 * @retval 0 on success.
 * @retval -EPERM if the clock is owned by a different context.
 */
static int stop(const struct device *dev, clock_control_subsys_t subsys,
		uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)subsys;
	struct nrf_clock_control_sub_data *subdata;
	int err;

	/* Validate the id before it is used to index the per-subsystem
	 * arrays (previously the check ran only after indexing).
	 */
	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	subdata = get_sub_data(dev, type);

	err = set_off_state(&subdata->flags, ctx);
	if (err < 0) {
		return err;
	}

	get_sub_config(dev, type)->stop();

	return 0;
}
350 
/* clock_control API: stop a subsystem in the direct-API context. */
static int api_stop(const struct device *dev, clock_control_subsys_t subsys)
{
	return stop(dev, subsys, CTX_API);
}
355 
/* Start a clock subsystem asynchronously on behalf of the given context.
 *
 * @param dev       Clock control device.
 * @param subsys    Clock subsystem id (enum clock_control_nrf_type).
 * @param cb        One-shot callback invoked when the clock has started.
 * @param user_data Argument passed to @p cb.
 * @param ctx       CTX_ONOFF or CTX_API; recorded as the owner.
 *
 * @retval 0 on success.
 * @retval -EPERM if the clock is owned by a different context.
 * @retval -EALREADY if this context already started (or is starting) it.
 */
static int async_start(const struct device *dev, clock_control_subsys_t subsys,
			clock_control_cb_t cb, void *user_data, uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)subsys;
	struct nrf_clock_control_sub_data *subdata;
	int err;

	/* Validate the id before indexing, consistent with stop() and
	 * get_status().
	 */
	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	subdata = get_sub_data(dev, type);

	err = set_starting_state(&subdata->flags, ctx);
	if (err < 0) {
		return err;
	}

	/* Store the callback before triggering the start: the started
	 * event may be handled from the ISR immediately afterwards.
	 */
	subdata->cb = cb;
	subdata->user_data = user_data;

	get_sub_config(dev, type)->start();

	return 0;
}
375 
/* clock_control API: asynchronous start in the direct-API context. */
static int api_start(const struct device *dev, clock_control_subsys_t subsys,
		     clock_control_cb_t cb, void *user_data)
{
	return async_start(dev, subsys, cb, user_data, CTX_API);
}
381 
/* Started-callback used by api_blocking_start(): wakes the waiting thread. */
static void blocking_start_callback(const struct device *dev,
				    clock_control_subsys_t subsys,
				    void *user_data)
{
	struct k_sem *start_sem = user_data;

	k_sem_give(start_sem);
}
390 
/* clock_control API: synchronous start; waits up to 500 ms for the
 * clock to report started. Requires the scheduler, so it is not
 * supported without CONFIG_MULTITHREADING.
 */
static int api_blocking_start(const struct device *dev,
			      clock_control_subsys_t subsys)
{
	struct k_sem sem = Z_SEM_INITIALIZER(sem, 0, 1);
	int rv;

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		return -ENOTSUP;
	}

	rv = api_start(dev, subsys, blocking_start_callback, &sem);

	return (rv < 0) ? rv : k_sem_take(&sem, K_MSEC(500));
}
408 
get_subsys(struct onoff_manager * mgr)409 static clock_control_subsys_t get_subsys(struct onoff_manager *mgr)
410 {
411 	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
412 	size_t offset = (size_t)(mgr - data->mgr);
413 
414 	return (clock_control_subsys_t)offset;
415 }
416 
/* onoff manager stop transition: delegate to stop() and report result. */
static void onoff_stop(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	notify(mgr, stop(CLOCK_DEVICE, get_subsys(mgr), CTX_ONOFF));
}
425 
/* Clock-started callback used by onoff_start(): completes the onoff
 * transition with success.
 */
static void onoff_started_callback(const struct device *dev,
				   clock_control_subsys_t sys,
				   void *user_data)
{
	onoff_notify_fn notify = user_data;
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)sys;

	notify(get_onoff_manager(dev, type), 0);
}
436 
/* onoff manager start transition: kick off an async start; on immediate
 * failure report it to the manager right away (otherwise the started
 * callback notifies later).
 */
static void onoff_start(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	int rv = async_start(CLOCK_DEVICE, get_subsys(mgr),
			     onoff_started_callback, notify, CTX_ONOFF);

	if (rv < 0) {
		notify(mgr, rv);
	}
}
448 
/** @brief Wait for LF clock availability or stability.
 *
 * If the LF clock source is SYNTH or RC then there is no distinction between
 * availability and stability. In case of XTAL source clock, the system is
 * initially starting RC and then seamlessly switches to XTAL. Running RC means
 * clock availability and running the target source means stability. That is
 * because of the significant difference in startup time (<1 ms vs >200 ms).
 *
 * In order to get an event/interrupt when RC is ready (allowing CPU sleeping)
 * a two stage startup sequence is used. Initially, LF source is set to RC and
 * when the LFSTARTED event is handled it is reconfigured to the target source
 * clock. This approach is implemented in the nrfx_clock driver and utilized
 * here.
 *
 * @param mode Start mode.
 */
static void lfclk_spinwait(enum nrf_lfclk_start_mode mode)
{
	static const nrf_clock_domain_t d = NRF_CLOCK_DOMAIN_LFCLK;
	static const nrf_clock_lfclk_t target_type =
		/* For sources XTAL, EXT_LOW_SWING, and EXT_FULL_SWING,
		 * NRF_CLOCK_LFCLK_Xtal is returned as the type of running clock.
		 */
		(IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_XTAL) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_LOW_SWING) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_FULL_SWING))
		? NRF_CLOCK_LFCLK_Xtal
		: CLOCK_CONTROL_NRF_K32SRC;
	nrf_clock_lfclk_t type;

	if ((mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE) &&
	    (target_type == NRF_CLOCK_LFCLK_Xtal) &&
	    (nrf_clock_lf_srccopy_get(NRF_CLOCK) == CLOCK_CONTROL_NRF_K32SRC)) {
		/* If target clock source is using XTAL then due to two-stage
		 * clock startup sequence, RC might already be running.
		 * It can be determined by checking current LFCLK source. If it
		 * is set to the target clock source then it means that RC was
		 * started.
		 */
		return;
	}

	/* In ISR or pre-kernel context sleeping is not possible: run the
	 * wait loop with interrupts locked and use atomic idle instead.
	 */
	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
	int key = isr_mode ? irq_lock() : 0;

	if (!isr_mode) {
		/* Keep the clock ISR from consuming LFCLKSTARTED; the
		 * event is handled inside this loop instead.
		 */
		nrf_clock_int_disable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}

	while (!(nrfx_clock_is_running(d, (void *)&type)
		 && ((type == target_type)
		     || (mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE)))) {
		/* Synth source start is almost instant and LFCLKSTARTED may
		 * happen before calling idle. That would lead to deadlock.
		 */
		if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH)) {
			if (isr_mode || !IS_ENABLED(CONFIG_MULTITHREADING)) {
				k_cpu_atomic_idle(key);
			} else {
				k_msleep(1);
			}
		}

		/* Clock interrupt is locked, LFCLKSTARTED is handled here. */
		if ((target_type ==  NRF_CLOCK_LFCLK_Xtal)
		    && (nrf_clock_lf_src_get(NRF_CLOCK) == NRF_CLOCK_LFCLK_RC)
		    && nrf_clock_event_check(NRF_CLOCK,
					     NRF_CLOCK_EVENT_LFCLKSTARTED)) {
			/* Stage 1 (RC) done: switch the source to the target
			 * and restart; the loop then waits for stage 2.
			 */
			nrf_clock_event_clear(NRF_CLOCK,
					      NRF_CLOCK_EVENT_LFCLKSTARTED);
			nrf_clock_lf_src_set(NRF_CLOCK,
					     CLOCK_CONTROL_NRF_K32SRC);

			/* Clear pending interrupt, otherwise new clock event
			 * would not wake up from idle.
			 */
			NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
			nrf_clock_task_trigger(NRF_CLOCK,
					       NRF_CLOCK_TASK_LFCLKSTART);
		}
	}

	if (isr_mode) {
		irq_unlock(key);
	} else {
		nrf_clock_int_enable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}
}
536 
/* Request the LF clock through the onoff manager (first call only) and
 * optionally wait until it is available or stable.
 *
 * The onoff request is issued exactly once per boot; no matching
 * release exists in this file, so the LF clock stays requested.
 */
void z_nrf_clock_control_lf_on(enum nrf_lfclk_start_mode start_mode)
{
	static atomic_t on;
	static struct onoff_client cli;

	/* atomic_set returns the previous value: only the first caller
	 * issues the onoff request.
	 */
	if (atomic_set(&on, 1) == 0) {
		int err;
		struct onoff_manager *mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);

		sys_notify_init_spinwait(&cli.notify);
		err = onoff_request(mgr, &cli);
		__ASSERT_NO_MSG(err >= 0);
	}

	/* In case of simulated board leave immediately. */
	if (IS_ENABLED(CONFIG_SOC_SERIES_BSIM_NRFXX)) {
		return;
	}

	switch (start_mode) {
	case CLOCK_CONTROL_NRF_LF_START_AVAILABLE:
	case CLOCK_CONTROL_NRF_LF_START_STABLE:
		lfclk_spinwait(start_mode);
		break;

	case CLOCK_CONTROL_NRF_LF_START_NOWAIT:
		break;

	default:
		__ASSERT_NO_MSG(false);
	}
}
571 
/* nrfx_clock event handler (runs in ISR context): routes "started"
 * events to the matching subsystem and calibration completion to the
 * calibration module.
 */
static void clock_event_handler(nrfx_clock_evt_type_t event)
{
	const struct device *dev = CLOCK_DEVICE;

	switch (event) {
	case NRFX_CLOCK_EVT_HFCLK_STARTED:
	{
		struct nrf_clock_control_sub_data *data =
				get_sub_data(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);

		/* Check needed due to anomaly 201:
		 * HFCLKSTARTED may be generated twice.
		 */
		if (GET_STATUS(data->flags) == CLOCK_CONTROL_STATUS_STARTING) {
			clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);
		}

		break;
	}
#if NRF_CLOCK_HAS_HFCLK192M
	case NRFX_CLOCK_EVT_HFCLK192M_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK192M);
		break;
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
	case NRFX_CLOCK_EVT_HFCLKAUDIO_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO);
		break;
#endif
	case NRFX_CLOCK_EVT_LFCLK_STARTED:
		/* Let the calibration module know first so it can begin
		 * its periodic work before users see the clock as ON.
		 */
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION) &&
		    !IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_FORCE_ALT)) {
			z_nrf_clock_calibration_lfclk_started();
		}
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_LFCLK);
		break;
	case NRFX_CLOCK_EVT_CAL_DONE:
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION) &&
		    !IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_FORCE_ALT)) {
			z_nrf_clock_calibration_done_handler();
		} else {
			/* Should not happen when calibration is disabled. */
			__ASSERT_NO_MSG(false);
		}
		break;
	default:
		__ASSERT_NO_MSG(0);
		break;
	}
}
622 
/* Program the HFCLKAUDIO frequency from the devicetree property, if set. */
static void hfclkaudio_init(void)
{
#if DT_NODE_HAS_PROP(DT_NODELABEL(clock), hfclkaudio_frequency)
	const uint32_t frequency =
		DT_PROP(DT_NODELABEL(clock), hfclkaudio_frequency);
	/* As specified in the nRF5340 PS:
	 *
	 * FREQ_VALUE = 2^16 * ((12 * f_out / 32M) - 4)
	 *
	 * Reduced to integer math: 2^16 * 12 / 32M == 384 / 15625 and
	 * 4 * 2^16 == 262144.
	 */
	const uint32_t freq_value =
		(uint32_t)((384ULL * frequency) / 15625) - 262144;

#if NRF_CLOCK_HAS_HFCLKAUDIO
	nrf_clock_hfclkaudio_config_set(NRF_CLOCK, freq_value);
#else
#error "hfclkaudio-frequency specified but HFCLKAUDIO clock is not present."
#endif /* NRF_CLOCK_HAS_HFCLKAUDIO */
#endif
}
642 
/* Device init: connect and enable the CLOCK IRQ, initialize the nrfx
 * clock driver, apply optional HFCLKAUDIO frequency and RC calibration
 * setup, then initialize one onoff manager per subsystem and mark each
 * subsystem OFF.
 *
 * @return 0 on success, -EIO if nrfx_clock_init fails, or the error
 *         returned by onoff_manager_init.
 */
static int clk_init(const struct device *dev)
{
	nrfx_err_t nrfx_err;
	int err;
	static const struct onoff_transitions transitions = {
		.start = onoff_start,
		.stop = onoff_stop
	};

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    nrfx_isr, nrfx_power_clock_irq_handler, 0);
	irq_enable(DT_INST_IRQN(0));

	nrfx_err = nrfx_clock_init(clock_event_handler);
	if (nrfx_err != NRFX_SUCCESS) {
		return -EIO;
	}

	hfclkaudio_init();

	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION) &&
	    !IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_FORCE_ALT)) {
		struct nrf_clock_control_data *data = dev->data;

		/* Calibration works on the LFCLK onoff manager. */
		z_nrf_clock_calibration_init(data->mgr);
	}

	nrfx_clock_enable();

	for (enum clock_control_nrf_type i = 0;
		i < CLOCK_CONTROL_NRF_TYPE_COUNT; i++) {
		struct nrf_clock_control_sub_data *subdata =
						get_sub_data(dev, i);

		err = onoff_manager_init(get_onoff_manager(dev, i),
					 &transitions);
		if (err < 0) {
			return err;
		}

		subdata->flags = CLOCK_CONTROL_STATUS_OFF;
	}

	return 0;
}
688 
/* clock_control driver API vtable for the nRF CLOCK peripheral. */
static const struct clock_control_driver_api clock_control_api = {
	.on = api_blocking_start,
	.off = api_stop,
	.async_on = api_start,
	.get_status = get_status,
};
695 
/* Single driver instance: runtime data plus per-subsystem start/stop
 * hooks (entries for HFCLK192M/HFCLKAUDIO only on SoCs that have them).
 */
static struct nrf_clock_control_data data;

static const struct nrf_clock_control_config config = {
	.subsys = {
		[CLOCK_CONTROL_NRF_TYPE_HFCLK] = {
			.start = generic_hfclk_start,
			.stop = generic_hfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk",))
		},
		[CLOCK_CONTROL_NRF_TYPE_LFCLK] = {
			.start = lfclk_start,
			.stop = lfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "lfclk",))
		},
#if NRF_CLOCK_HAS_HFCLK192M
		[CLOCK_CONTROL_NRF_TYPE_HFCLK192M] = {
			.start = hfclk192m_start,
			.stop = hfclk192m_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk192m",))
		},
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
		[CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO] = {
			.start = hfclkaudio_start,
			.stop = hfclkaudio_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclkaudio",))
		},
#endif
	}
};

/* Register the device at PRE_KERNEL_1 so clocks are usable early. */
DEVICE_DT_DEFINE(DT_NODELABEL(clock), clk_init, NULL,
		 &data, &config,
		 PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
		 &clock_control_api);
731 
/* Shell command: print HF/LF clock running state, onoff user counts and
 * the last HF start/stop timestamps.
 */
static int cmd_status(const struct shell *shell, size_t argc, char **argv)
{
	nrf_clock_hfclk_t hfclk_src;
	bool hf_status;
	bool lf_status = nrfx_clock_is_running(NRF_CLOCK_DOMAIN_LFCLK, NULL);
	struct onoff_manager *hf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_HFCLK);
	struct onoff_manager *lf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);
	uint32_t abs_start, abs_stop;
	/* Snapshot shared timestamps with interrupts locked so they are
	 * consistent with 'now'. Values are truncated to 32 bits for
	 * printing, so displayed times wrap after ~49 days of uptime.
	 */
	int key = irq_lock();
	uint64_t now = k_uptime_get();

	(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, (void *)&hfclk_src);
	hf_status = (hfclk_src == NRF_CLOCK_HFCLK_HIGH_ACCURACY);

	abs_start = hf_start_tstamp;
	abs_stop = hf_stop_tstamp;
	irq_unlock(key);

	shell_print(shell, "HF clock:");
	shell_print(shell, "\t- %srunning (users: %u)",
			hf_status ? "" : "not ", hf_mgr->refs);
	shell_print(shell, "\t- last start: %u ms (%u ms ago)",
			(uint32_t)abs_start, (uint32_t)(now - abs_start));
	shell_print(shell, "\t- last stop: %u ms (%u ms ago)",
			(uint32_t)abs_stop, (uint32_t)(now - abs_stop));
	shell_print(shell, "LF clock:");
	shell_print(shell, "\t- %srunning (users: %u)",
			lf_status ? "" : "not ", lf_mgr->refs);

	return 0;
}
767 
768 SHELL_STATIC_SUBCMD_SET_CREATE(subcmds,
769 	SHELL_CMD_ARG(status, NULL, "Status", cmd_status, 1, 0),
770 	SHELL_SUBCMD_SET_END
771 );
772 
773 SHELL_COND_CMD_REGISTER(CONFIG_CLOCK_CONTROL_NRF_SHELL,
774 			nrf_clock_control, &subcmds,
775 			"Clock control commmands",
776 			cmd_status);
777