1 /*
2  * Copyright (c) 2016-2020 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <soc.h>
9 #include <zephyr/sys/onoff.h>
10 #include <zephyr/drivers/clock_control.h>
11 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
12 #include "nrf_clock_calibration.h"
13 #include <nrfx_clock.h>
14 #include <zephyr/logging/log.h>
15 #include <zephyr/shell/shell.h>
16 #include <zephyr/irq.h>
17 
18 LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
19 
20 #define DT_DRV_COMPAT nordic_nrf_clock
21 
22 
23 #define CTX_ONOFF		BIT(6)
24 #define CTX_API			BIT(7)
25 #define CTX_MASK (CTX_ONOFF | CTX_API)
26 
27 #define STATUS_MASK		0x7
28 #define GET_STATUS(flags)	(flags & STATUS_MASK)
29 #define GET_CTX(flags)		(flags & CTX_MASK)
30 
31 /* Used only by HF clock */
32 #define HF_USER_BT		BIT(0)
33 #define HF_USER_GENERIC		BIT(1)
34 
35 /* Helper logging macros which prepends subsys name to the log. */
36 #ifdef CONFIG_LOG
37 #define CLOCK_LOG(lvl, dev, subsys, ...) \
38 	LOG_##lvl("%s: " GET_ARG_N(1, __VA_ARGS__), \
39 		get_sub_config(dev, (enum clock_control_nrf_type)subsys)->name \
40 		COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
41 				(), (, GET_ARGS_LESS_N(1, __VA_ARGS__))))
42 #else
43 #define CLOCK_LOG(...)
44 #endif
45 
46 #define ERR(dev, subsys, ...) CLOCK_LOG(ERR, dev, subsys, __VA_ARGS__)
47 #define WRN(dev, subsys, ...) CLOCK_LOG(WRN, dev, subsys, __VA_ARGS__)
48 #define INF(dev, subsys, ...) CLOCK_LOG(INF, dev, subsys, __VA_ARGS__)
49 #define DBG(dev, subsys, ...) CLOCK_LOG(DBG, dev, subsys, __VA_ARGS__)
50 
51 /* Clock subsys structure */
52 struct nrf_clock_control_sub_data {
53 	clock_control_cb_t cb;
54 	void *user_data;
55 	uint32_t flags;
56 };
57 
58 typedef void (*clk_ctrl_func_t)(void);
59 
60 /* Clock subsys static configuration */
61 struct nrf_clock_control_sub_config {
62 	clk_ctrl_func_t start;		/* Clock start function */
63 	clk_ctrl_func_t stop;		/* Clock stop function */
64 #ifdef CONFIG_LOG
65 	const char *name;
66 #endif
67 };
68 
69 struct nrf_clock_control_data {
70 	struct onoff_manager mgr[CLOCK_CONTROL_NRF_TYPE_COUNT];
71 	struct nrf_clock_control_sub_data subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
72 };
73 
74 struct nrf_clock_control_config {
75 	struct nrf_clock_control_sub_config
76 					subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
77 };
78 
79 static atomic_t hfclk_users;
80 static uint64_t hf_start_tstamp;
81 static uint64_t hf_stop_tstamp;
82 
get_sub_data(const struct device * dev,enum clock_control_nrf_type type)83 static struct nrf_clock_control_sub_data *get_sub_data(const struct device *dev,
84 						       enum clock_control_nrf_type type)
85 {
86 	struct nrf_clock_control_data *data = dev->data;
87 
88 	return &data->subsys[type];
89 }
90 
get_sub_config(const struct device * dev,enum clock_control_nrf_type type)91 static const struct nrf_clock_control_sub_config *get_sub_config(const struct device *dev,
92 								 enum clock_control_nrf_type type)
93 {
94 	const struct nrf_clock_control_config *config =
95 						dev->config;
96 
97 	return &config->subsys[type];
98 }
99 
get_onoff_manager(const struct device * dev,enum clock_control_nrf_type type)100 static struct onoff_manager *get_onoff_manager(const struct device *dev,
101 					       enum clock_control_nrf_type type)
102 {
103 	struct nrf_clock_control_data *data = dev->data;
104 
105 	return &data->mgr[type];
106 }
107 
108 
109 #define CLOCK_DEVICE DEVICE_DT_GET(DT_NODELABEL(clock))
110 
/* Public accessor: onoff manager for a subsystem of the clock device. */
struct onoff_manager *z_nrf_clock_control_get_onoff(clock_control_subsys_t sys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)sys;

	return get_onoff_manager(CLOCK_DEVICE, type);
}
116 
/* clock_control API: report whether a subsystem is off/starting/on. */
static enum clock_control_status get_status(const struct device *dev,
					    clock_control_subsys_t subsys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	const struct nrf_clock_control_sub_data *subdata;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	subdata = get_sub_data(dev, type);

	return GET_STATUS(subdata->flags);
}
126 
/* Transition subsystem flags to OFF.
 *
 * Only succeeds when the subsystem is unowned or owned by @p ctx;
 * returns -EPERM when another context holds it.
 */
static int set_off_state(uint32_t *flags, uint32_t ctx)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int ret = -EPERM;

	if ((owner == 0) || (owner == ctx)) {
		*flags = CLOCK_CONTROL_STATUS_OFF;
		ret = 0;
	}

	irq_unlock(key);

	return ret;
}
143 
/* Transition subsystem flags from OFF to STARTING, recording the owner.
 *
 * Returns -EALREADY when @p ctx already owns a non-off subsystem and
 * -EPERM when a different context owns it.
 */
static int set_starting_state(uint32_t *flags, uint32_t ctx)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int ret;

	if (GET_STATUS(*flags) == CLOCK_CONTROL_STATUS_OFF) {
		*flags = CLOCK_CONTROL_STATUS_STARTING | ctx;
		ret = 0;
	} else {
		ret = (owner == ctx) ? -EALREADY : -EPERM;
	}

	irq_unlock(key);

	return ret;
}
162 
/* Transition subsystem flags to ON while preserving the owning context. */
static void set_on_state(uint32_t *flags)
{
	unsigned int key = irq_lock();
	uint32_t ctx = GET_CTX(*flags);

	*flags = ctx | CLOCK_CONTROL_STATUS_ON;
	irq_unlock(key);
}
170 
/* Mark a subsystem as running and invoke the pending start callback.
 *
 * The callback pointer is read and cleared before the state is set to
 * ON and before the callback runs, so each stored callback fires at
 * most once per start request.
 */
static void clkstarted_handle(const struct device *dev,
			      enum clock_control_nrf_type type)
{
	struct nrf_clock_control_sub_data *sub_data = get_sub_data(dev, type);
	clock_control_cb_t callback = sub_data->cb;
	void *user_data = sub_data->user_data;

	sub_data->cb = NULL;
	set_on_state(&sub_data->flags);
	DBG(dev, type, "Clock started");

	if (callback) {
		callback(dev, (clock_control_subsys_t)type, user_data);
	}
}
186 
/* nRF52 anomaly 132 workaround: busy-wait delay applied once, before the
 * first LFCLK start. Compiled out when the configured delay is zero.
 */
static inline void anomaly_132_workaround(void)
{
#if (CONFIG_NRF52_ANOMALY_132_DELAY_US - 0)
	static bool once;

	if (!once) {
		k_busy_wait(CONFIG_NRF52_ANOMALY_132_DELAY_US);
		once = true;
	}
#endif
}
198 
lfclk_start(void)199 static void lfclk_start(void)
200 {
201 	if (IS_ENABLED(CONFIG_NRF52_ANOMALY_132_WORKAROUND)) {
202 		anomaly_132_workaround();
203 	}
204 
205 	nrfx_clock_lfclk_start();
206 }
207 
lfclk_stop(void)208 static void lfclk_stop(void)
209 {
210 	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
211 		z_nrf_clock_calibration_lfclk_stopped();
212 	}
213 
214 	nrfx_clock_lfclk_stop();
215 }
216 
hfclk_start(void)217 static void hfclk_start(void)
218 {
219 	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
220 		hf_start_tstamp = k_uptime_get();
221 	}
222 
223 	nrfx_clock_hfclk_start();
224 }
225 
hfclk_stop(void)226 static void hfclk_stop(void)
227 {
228 	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
229 		hf_stop_tstamp = k_uptime_get();
230 	}
231 
232 	nrfx_clock_hfclk_stop();
233 }
234 
#if NRF_CLOCK_HAS_HFCLK192M
/* Start/stop wrappers for the HFCLK192M domain (nRF5340 and similar). */
static void hfclk192m_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLK192M);
}

static void hfclk192m_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLK192M);
}
#endif
246 
#if NRF_CLOCK_HAS_HFCLKAUDIO
/* Start/stop wrappers for the HFCLKAUDIO domain. */
static void hfclkaudio_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}

static void hfclkaudio_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}
#endif
258 
get_hf_flags(void)259 static uint32_t *get_hf_flags(void)
260 {
261 	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
262 
263 	return &data->subsys[CLOCK_CONTROL_NRF_TYPE_HFCLK].flags;
264 }
265 
/* Start HFCLK for the generic (non-Bluetooth) user.
 *
 * If the BT controller has already requested the clock and the crystal
 * is running, the "started" handling is invoked directly instead of
 * starting the hardware again. The user-flag update and the running
 * check are done under irq_lock so they cannot interleave with the BT
 * request/release paths.
 */
static void generic_hfclk_start(void)
{
	nrf_clock_hfclk_t type;
	bool already_started = false;
	unsigned int key = irq_lock();

	hfclk_users |= HF_USER_GENERIC;
	if (hfclk_users & HF_USER_BT) {
		(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, &type);
		if (type == NRF_CLOCK_HFCLK_HIGH_ACCURACY) {
			already_started = true;
			/* Set on state in case clock interrupt comes and we
			 * want to avoid handling that.
			 */
			set_on_state(get_hf_flags());
		}
	}

	irq_unlock(key);

	if (already_started) {
		/* Clock already started by z_nrf_clock_bt_ctlr_hf_request */
		clkstarted_handle(CLOCK_DEVICE,
				  CLOCK_CONTROL_NRF_TYPE_HFCLK);
		return;
	}

	hfclk_start();
}
295 
/* Release the generic user's HFCLK request; stops the clock only when the
 * Bluetooth controller is not also holding it.
 */
static void generic_hfclk_stop(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * as the thread could be preempted right after that function but
	 * before hfclk_stop() is called and the preempting code could request
	 * the HFCLK again. Then, the HFCLK would be stopped inappropriately
	 * and hfclk_user would be left with an incorrect value.
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_GENERIC;
	/* Skip stopping if BT is still requesting the clock. */
	if (!(hfclk_users & HF_USER_BT)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
314 
315 
/* Request HFCLK on behalf of the Bluetooth controller.
 *
 * The atomic_or makes the flag update and the old-value check a single
 * step: the hardware start is skipped when the generic user already
 * requested the clock.
 */
void z_nrf_clock_bt_ctlr_hf_request(void)
{
	if (atomic_or(&hfclk_users, HF_USER_BT) & HF_USER_GENERIC) {
		/* generic request already activated clock. */
		return;
	}

	hfclk_start();
}
325 
/* Release the Bluetooth controller's HFCLK request; stops the clock only
 * when the generic user is not also holding it.
 */
void z_nrf_clock_bt_ctlr_hf_release(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * see the explanation in generic_hfclk_stop().
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_BT;
	/* Skip stopping if generic is still requesting the clock. */
	if (!(hfclk_users & HF_USER_GENERIC)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
341 
/* Stop a clock subsystem on behalf of the given context.
 *
 * Fails with the error from set_off_state() (-EPERM) when another
 * context owns the subsystem; otherwise invokes the subsystem's stop
 * function and returns 0.
 */
static int stop(const struct device *dev, clock_control_subsys_t subsys,
		uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *subdata = get_sub_data(dev, type);
	int ret;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	ret = set_off_state(&subdata->flags, ctx);
	if (ret == 0) {
		get_sub_config(dev, type)->stop();
	}

	return ret;
}
360 
/* clock_control API entry point: stop in the API context. */
static int api_stop(const struct device *dev, clock_control_subsys_t subsys)
{
	return stop(dev, subsys, CTX_API);
}
365 
/* Start a clock subsystem asynchronously on behalf of the given context.
 *
 * Marks the subsystem as STARTING, stores the user callback and
 * triggers the subsystem's start function. The callback is invoked
 * from clkstarted_handle() once the clock reports started.
 *
 * @param dev Clock device.
 * @param subsys Clock subsystem identifier.
 * @param cb Callback invoked when the clock has started (may be NULL).
 * @param user_data Opaque pointer passed to @p cb.
 * @param ctx Requesting context (CTX_API or CTX_ONOFF).
 *
 * @retval 0 on success.
 * @retval -EPERM if another context owns the subsystem.
 * @retval -EALREADY if this context already started it.
 */
static int async_start(const struct device *dev, clock_control_subsys_t subsys,
			clock_control_cb_t cb, void *user_data, uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *subdata = get_sub_data(dev, type);
	int err;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	err = set_starting_state(&subdata->flags, ctx);
	if (err < 0) {
		return err;
	}

	/* Store the callback before starting the clock: the started
	 * interrupt may fire immediately after the start call.
	 */
	subdata->cb = cb;
	subdata->user_data = user_data;

	get_sub_config(dev, type)->start();

	return 0;
}
385 
/* clock_control API entry point: asynchronous start in the API context. */
static int api_start(const struct device *dev, clock_control_subsys_t subsys,
		     clock_control_cb_t cb, void *user_data)
{
	return async_start(dev, subsys, cb, user_data, CTX_API);
}
391 
/* Started-callback used by api_blocking_start(): wakes the waiting thread. */
static void blocking_start_callback(const struct device *dev,
				    clock_control_subsys_t subsys,
				    void *user_data)
{
	k_sem_give((struct k_sem *)user_data);
}
400 
/* clock_control API entry point: start a subsystem and block (up to 500 ms)
 * until it reports started. Requires multithreading because it sleeps on a
 * semaphore.
 *
 * NOTE(review): on a k_sem_take() timeout the started callback may still
 * fire later and give a semaphore that lived on this (now unwound) stack —
 * confirm whether a timeout is reachable in practice.
 */
static int api_blocking_start(const struct device *dev,
			      clock_control_subsys_t subsys)
{
	struct k_sem sem = Z_SEM_INITIALIZER(sem, 0, 1);
	int err;

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		return -ENOTSUP;
	}

	err = api_start(dev, subsys, blocking_start_callback, &sem);
	if (err < 0) {
		return err;
	}

	return k_sem_take(&sem, K_MSEC(500));
}
418 
get_subsys(struct onoff_manager * mgr)419 static clock_control_subsys_t get_subsys(struct onoff_manager *mgr)
420 {
421 	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
422 	size_t offset = (size_t)(mgr - data->mgr);
423 
424 	return (clock_control_subsys_t)offset;
425 }
426 
/* onoff service stop transition: stop the clock and report the result. */
static void onoff_stop(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	int ret = stop(CLOCK_DEVICE, get_subsys(mgr), CTX_ONOFF);

	notify(mgr, ret);
}
435 
/* Started-callback used by onoff_start(): forwards success to the onoff
 * service. The notify function pointer travels in user_data.
 */
static void onoff_started_callback(const struct device *dev,
				   clock_control_subsys_t sys,
				   void *user_data)
{
	onoff_notify_fn notify = user_data;
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)sys;

	notify(get_onoff_manager(dev, type), 0);
}
446 
/* onoff service start transition: kick off an asynchronous start and
 * report an immediate failure; success is reported from the callback.
 */
static void onoff_start(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	int ret = async_start(CLOCK_DEVICE, get_subsys(mgr),
			      onoff_started_callback, notify, CTX_ONOFF);

	if (ret < 0) {
		notify(mgr, ret);
	}
}
458 
/** @brief Wait for LF clock availability or stability.
 *
 * If LF clock source is SYNTH or RC then there is no distinction between
 * availability and stability. In case of XTAL source clock, system is initially
 * starting RC and then seamlessly switches to XTAL. Running RC means clock
 * availability and running target source means stability, That is because
 * significant difference in startup time (<1ms vs >200ms).
 *
 * In order to get event/interrupt when RC is ready (allowing CPU sleeping) two
 * stage startup sequence is used. Initially, LF source is set to RC and when
 * LFSTARTED event is handled it is reconfigured to the target source clock.
 * This approach is implemented in nrfx_clock driver and utilized here.
 *
 * @param mode Start mode.
 */
static void lfclk_spinwait(enum nrf_lfclk_start_mode mode)
{
	static const nrf_clock_domain_t d = NRF_CLOCK_DOMAIN_LFCLK;
	static const nrf_clock_lfclk_t target_type =
		/* For sources XTAL, EXT_LOW_SWING, and EXT_FULL_SWING,
		 * NRF_CLOCK_LFCLK_XTAL is returned as the type of running clock.
		 */
		(IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_XTAL) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_LOW_SWING) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_FULL_SWING))
		? NRF_CLOCK_LFCLK_XTAL
		: CLOCK_CONTROL_NRF_K32SRC;
	nrf_clock_lfclk_t type;

	if ((mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE) &&
	    (target_type == NRF_CLOCK_LFCLK_XTAL) &&
	    (nrf_clock_lf_srccopy_get(NRF_CLOCK) == CLOCK_CONTROL_NRF_K32SRC)) {
		/* If target clock source is using XTAL then due to two-stage
		 * clock startup sequence, RC might already be running.
		 * It can be determined by checking current LFCLK source. If it
		 * is set to the target clock source then it means that RC was
		 * started.
		 */
		return;
	}

	/* In ISR/pre-kernel context all interrupts are locked for the wait;
	 * in thread context only the LFSTARTED interrupt is masked so this
	 * function (not the ISR) consumes the event.
	 */
	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
	int key = isr_mode ? irq_lock() : 0;

	if (!isr_mode) {
		nrf_clock_int_disable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}

	/* Poll until the LFCLK runs from the target source (STABLE mode) or
	 * from any source (AVAILABLE mode).
	 */
	while (!(nrfx_clock_is_running(d, (void *)&type)
		 && ((type == target_type)
		     || (mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE)))) {
		/* Synth source start is almost instant and LFCLKSTARTED may
		 * happen before calling idle. That would lead to deadlock.
		 */
		if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH)) {
			if (isr_mode || !IS_ENABLED(CONFIG_MULTITHREADING)) {
				k_cpu_atomic_idle(key);
			} else {
				k_msleep(1);
			}
		}

		/* Clock interrupt is locked, LFCLKSTARTED is handled here. */
		if ((target_type ==  NRF_CLOCK_LFCLK_XTAL)
		    && (nrf_clock_lf_src_get(NRF_CLOCK) == NRF_CLOCK_LFCLK_RC)
		    && nrf_clock_event_check(NRF_CLOCK,
					     NRF_CLOCK_EVENT_LFCLKSTARTED)) {
			nrf_clock_event_clear(NRF_CLOCK,
					      NRF_CLOCK_EVENT_LFCLKSTARTED);
			nrf_clock_lf_src_set(NRF_CLOCK,
					     CLOCK_CONTROL_NRF_K32SRC);

			/* Clear pending interrupt, otherwise new clock event
			 * would not wake up from idle.
			 */
			NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
			nrf_clock_task_trigger(NRF_CLOCK,
					       NRF_CLOCK_TASK_LFCLKSTART);
		}
	}

	if (isr_mode) {
		irq_unlock(key);
	} else {
		nrf_clock_int_enable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}
}
546 
/* Ensure the LF clock has been requested, optionally waiting until it is
 * available or stable.
 *
 * The onoff request is issued only once (guarded by the static 'on' flag)
 * and is never released, so the LF clock remains requested afterwards.
 */
void z_nrf_clock_control_lf_on(enum nrf_lfclk_start_mode start_mode)
{
	static atomic_t on;
	static struct onoff_client cli;

	if (atomic_set(&on, 1) == 0) {
		int err;
		struct onoff_manager *mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);

		sys_notify_init_spinwait(&cli.notify);
		err = onoff_request(mgr, &cli);
		__ASSERT_NO_MSG(err >= 0);
	}

	/* In case of simulated board leave immediately. */
	if (IS_ENABLED(CONFIG_SOC_SERIES_BSIM_NRFXX)) {
		return;
	}

	switch (start_mode) {
	case CLOCK_CONTROL_NRF_LF_START_AVAILABLE:
	case CLOCK_CONTROL_NRF_LF_START_STABLE:
		lfclk_spinwait(start_mode);
		break;

	case CLOCK_CONTROL_NRF_LF_START_NOWAIT:
		break;

	default:
		__ASSERT_NO_MSG(false);
	}
}
581 
/* nrfx clock event handler: dispatches started/calibration events to the
 * matching subsystem's started handling or to the calibration module.
 */
static void clock_event_handler(nrfx_clock_evt_type_t event)
{
	const struct device *dev = CLOCK_DEVICE;

	switch (event) {
	case NRFX_CLOCK_EVT_HFCLK_STARTED:
	{
		struct nrf_clock_control_sub_data *data =
				get_sub_data(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);

		/* Check needed due to anomaly 201:
		 * HFCLKSTARTED may be generated twice.
		 */
		if (GET_STATUS(data->flags) == CLOCK_CONTROL_STATUS_STARTING) {
			clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);
		}

		break;
	}
#if NRF_CLOCK_HAS_HFCLK192M
	case NRFX_CLOCK_EVT_HFCLK192M_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK192M);
		break;
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
	case NRFX_CLOCK_EVT_HFCLKAUDIO_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO);
		break;
#endif
	case NRFX_CLOCK_EVT_LFCLK_STARTED:
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_lfclk_started();
		}
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_LFCLK);
		break;
	case NRFX_CLOCK_EVT_CAL_DONE:
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_done_handler();
		} else {
			/* Should not happen when calibration is disabled. */
			__ASSERT_NO_MSG(false);
		}
		break;
	default:
		__ASSERT_NO_MSG(0);
		break;
	}
}
630 
/* Configure the HFCLKAUDIO PLL frequency from the devicetree property.
 * No-op when the property is absent; compile error when the property is
 * present but the SoC has no HFCLKAUDIO.
 */
static void hfclkaudio_init(void)
{
#if DT_NODE_HAS_PROP(DT_NODELABEL(clock), hfclkaudio_frequency)
	const uint32_t frequency =
		DT_PROP(DT_NODELABEL(clock), hfclkaudio_frequency);
	/* As specified in the nRF5340 PS:
	 *
	 * FREQ_VALUE = 2^16 * ((12 * f_out / 32M) - 4)
	 */
	const uint32_t freq_value =
		(uint32_t)((384ULL * frequency) / 15625) - 262144;

#if NRF_CLOCK_HAS_HFCLKAUDIO
	nrf_clock_hfclkaudio_config_set(NRF_CLOCK, freq_value);
#else
#error "hfclkaudio-frequency specified but HFCLKAUDIO clock is not present."
#endif /* NRF_CLOCK_HAS_HFCLKAUDIO */
#endif
}
650 
/* Device init: connect the clock IRQ, initialize the nrfx clock driver,
 * the HFCLKAUDIO frequency and (optionally) calibration, then set up an
 * onoff manager and OFF state for every clock subsystem.
 *
 * @retval 0 on success, -EIO if nrfx init fails, or the onoff manager
 * init error.
 */
static int clk_init(const struct device *dev)
{
	nrfx_err_t nrfx_err;
	int err;
	static const struct onoff_transitions transitions = {
		.start = onoff_start,
		.stop = onoff_stop
	};

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    nrfx_isr, nrfx_power_clock_irq_handler, 0);

	nrfx_err = nrfx_clock_init(clock_event_handler);
	if (nrfx_err != NRFX_SUCCESS) {
		return -EIO;
	}

	hfclkaudio_init();

	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
		struct nrf_clock_control_data *data = dev->data;

		z_nrf_clock_calibration_init(data->mgr);
	}

	nrfx_clock_enable();

	for (enum clock_control_nrf_type i = 0;
		i < CLOCK_CONTROL_NRF_TYPE_COUNT; i++) {
		struct nrf_clock_control_sub_data *subdata =
						get_sub_data(dev, i);

		err = onoff_manager_init(get_onoff_manager(dev, i),
					 &transitions);
		if (err < 0) {
			return err;
		}

		subdata->flags = CLOCK_CONTROL_STATUS_OFF;
	}

	return 0;
}
694 
/* clock_control driver API vtable. */
static DEVICE_API(clock_control, clock_control_api) = {
	.on = api_blocking_start,
	.off = api_stop,
	.async_on = api_start,
	.get_status = get_status,
};
701 
/* Runtime state for all clock subsystems. */
static struct nrf_clock_control_data data;

/* Per-subsystem start/stop functions; names included only when logging
 * is enabled.
 */
static const struct nrf_clock_control_config config = {
	.subsys = {
		[CLOCK_CONTROL_NRF_TYPE_HFCLK] = {
			.start = generic_hfclk_start,
			.stop = generic_hfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk",))
		},
		[CLOCK_CONTROL_NRF_TYPE_LFCLK] = {
			.start = lfclk_start,
			.stop = lfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "lfclk",))
		},
#if NRF_CLOCK_HAS_HFCLK192M
		[CLOCK_CONTROL_NRF_TYPE_HFCLK192M] = {
			.start = hfclk192m_start,
			.stop = hfclk192m_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk192m",))
		},
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
		[CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO] = {
			.start = hfclkaudio_start,
			.stop = hfclkaudio_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclkaudio",))
		},
#endif
	}
};

/* Register the clock device; initialized in PRE_KERNEL_1 so clocks are
 * usable before threads run.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(clock), clk_init, NULL,
		 &data, &config,
		 PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clock_control_api);
737 
738 #if defined(CONFIG_SHELL)
739 
/* Shell command: print HF/LF clock running state, onoff reference counts
 * and the last HFCLK start/stop timestamps. Timestamps are sampled under
 * irq_lock so they are consistent with 'now'.
 */
static int cmd_status(const struct shell *sh, size_t argc, char **argv)
{
	nrf_clock_hfclk_t hfclk_src;
	bool hf_status;
	bool lf_status = nrfx_clock_is_running(NRF_CLOCK_DOMAIN_LFCLK, NULL);
	struct onoff_manager *hf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_HFCLK);
	struct onoff_manager *lf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);
	uint32_t abs_start, abs_stop;
	unsigned int key = irq_lock();
	uint64_t now = k_uptime_get();

	(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, (void *)&hfclk_src);
	hf_status = (hfclk_src == NRF_CLOCK_HFCLK_HIGH_ACCURACY);

	abs_start = hf_start_tstamp;
	abs_stop = hf_stop_tstamp;
	irq_unlock(key);

	shell_print(sh, "HF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			hf_status ? "" : "not ", hf_mgr->refs);
	shell_print(sh, "\t- last start: %u ms (%u ms ago)",
			(uint32_t)abs_start, (uint32_t)(now - abs_start));
	shell_print(sh, "\t- last stop: %u ms (%u ms ago)",
			(uint32_t)abs_stop, (uint32_t)(now - abs_stop));
	shell_print(sh, "LF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			lf_status ? "" : "not ", lf_mgr->refs);

	return 0;
}
775 
/* Register the 'nrf_clock_control' shell command (gated on the shell
 * Kconfig option); bare invocation runs the status subcommand.
 */
SHELL_STATIC_SUBCMD_SET_CREATE(subcmds,
	SHELL_CMD_ARG(status, NULL, "Status", cmd_status, 1, 0),
	SHELL_SUBCMD_SET_END
);

SHELL_COND_CMD_REGISTER(CONFIG_CLOCK_CONTROL_NRF_SHELL,
			nrf_clock_control, &subcmds,
			"Clock control commands",
			cmd_status);
785 
786 #endif /* defined(CONFIG_SHELL) */
787