1 /*
2  * Copyright (c) 2016-2020 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <soc.h>
9 #include <zephyr/sys/onoff.h>
10 #include <zephyr/drivers/clock_control.h>
11 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
12 #include "nrf_clock_calibration.h"
13 #include <nrfx_clock.h>
14 #include <zephyr/logging/log.h>
15 #include <zephyr/shell/shell.h>
16 #include <zephyr/irq.h>
17 
18 LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
19 
20 #define DT_DRV_COMPAT nordic_nrf_clock
21 
22 
23 #define CTX_ONOFF		BIT(6)
24 #define CTX_API			BIT(7)
25 #define CTX_MASK (CTX_ONOFF | CTX_API)
26 
27 #define STATUS_MASK		0x7
28 #define GET_STATUS(flags)	(flags & STATUS_MASK)
29 #define GET_CTX(flags)		(flags & CTX_MASK)
30 
31 /* Used only by HF clock */
32 #define HF_USER_BT		BIT(0)
33 #define HF_USER_GENERIC		BIT(1)
34 
/* Helper logging macros which prepend the subsystem name to the log. */
36 #ifdef CONFIG_LOG
37 #define CLOCK_LOG(lvl, dev, subsys, ...) \
38 	LOG_##lvl("%s: " GET_ARG_N(1, __VA_ARGS__), \
39 		get_sub_config(dev, (enum clock_control_nrf_type)subsys)->name \
40 		COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
41 				(), (, GET_ARGS_LESS_N(1, __VA_ARGS__))))
42 #else
43 #define CLOCK_LOG(...)
44 #endif
45 
46 #define ERR(dev, subsys, ...) CLOCK_LOG(ERR, dev, subsys, __VA_ARGS__)
47 #define WRN(dev, subsys, ...) CLOCK_LOG(WRN, dev, subsys, __VA_ARGS__)
48 #define INF(dev, subsys, ...) CLOCK_LOG(INF, dev, subsys, __VA_ARGS__)
49 #define DBG(dev, subsys, ...) CLOCK_LOG(DBG, dev, subsys, __VA_ARGS__)
50 
/* Per-subsystem runtime state (one instance per nrf clock type). */
struct nrf_clock_control_sub_data {
	clock_control_cb_t cb;	/* One-shot "clock started" callback; cleared before use. */
	void *user_data;	/* Opaque argument passed back to cb. */
	uint32_t flags;		/* Status bits (STATUS_MASK) ORed with owner bits (CTX_MASK). */
};
57 
/* Start/stop handler invoked for a clock subsystem. */
typedef void (*clk_ctrl_func_t)(void);

/* Clock subsys static configuration */
struct nrf_clock_control_sub_config {
	clk_ctrl_func_t start;		/* Clock start function */
	clk_ctrl_func_t stop;		/* Clock stop function */
#ifdef CONFIG_LOG
	const char *name;	/* Subsystem name used by the CLOCK_LOG helpers. */
#endif
};
68 
/* Mutable driver data: one onoff manager and one state record per subsystem.
 * Indexed by enum clock_control_nrf_type.
 */
struct nrf_clock_control_data {
	struct onoff_manager mgr[CLOCK_CONTROL_NRF_TYPE_COUNT];
	struct nrf_clock_control_sub_data subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
};

/* Immutable driver configuration: start/stop handlers per subsystem. */
struct nrf_clock_control_config {
	struct nrf_clock_control_sub_config
					subsys[CLOCK_CONTROL_NRF_TYPE_COUNT];
};
78 
/* Bitmask of HFCLK requesters (HF_USER_BT | HF_USER_GENERIC). */
static atomic_t hfclk_users;
/* Timestamps (k_uptime ms) of the last HFCLK start/stop; shell diagnostics only. */
static uint64_t hf_start_tstamp;
static uint64_t hf_stop_tstamp;
#if CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH
/* Client to request HFXO to synthesize low frequency clock. */
static struct onoff_client lfsynth_cli;
#endif
86 
get_sub_data(const struct device * dev,enum clock_control_nrf_type type)87 static struct nrf_clock_control_sub_data *get_sub_data(const struct device *dev,
88 						       enum clock_control_nrf_type type)
89 {
90 	struct nrf_clock_control_data *data = dev->data;
91 
92 	return &data->subsys[type];
93 }
94 
get_sub_config(const struct device * dev,enum clock_control_nrf_type type)95 static const struct nrf_clock_control_sub_config *get_sub_config(const struct device *dev,
96 								 enum clock_control_nrf_type type)
97 {
98 	const struct nrf_clock_control_config *config =
99 						dev->config;
100 
101 	return &config->subsys[type];
102 }
103 
get_onoff_manager(const struct device * dev,enum clock_control_nrf_type type)104 static struct onoff_manager *get_onoff_manager(const struct device *dev,
105 					       enum clock_control_nrf_type type)
106 {
107 	struct nrf_clock_control_data *data = dev->data;
108 
109 	return &data->mgr[type];
110 }
111 
112 
113 #define CLOCK_DEVICE DEVICE_DT_GET(DT_NODELABEL(clock))
114 
/* Public accessor: map a subsystem id onto its onoff manager. */
struct onoff_manager *z_nrf_clock_control_get_onoff(clock_control_subsys_t sys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)sys;

	return get_onoff_manager(CLOCK_DEVICE, type);
}
120 
/* clock_control API: report the current status of a subsystem. */
static enum clock_control_status get_status(const struct device *dev,
					    clock_control_subsys_t subsys)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	/* Status is kept in the low bits of the subsystem flags word. */
	struct nrf_clock_control_sub_data *sub = get_sub_data(dev, type);

	return GET_STATUS(sub->flags);
}
130 
/* Transition a subsystem to OFF, allowed only for the owning context
 * (or when unowned). Returns 0 on success, -EPERM otherwise.
 */
static int set_off_state(uint32_t *flags, uint32_t ctx)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int rv = ((owner == 0) || (owner == ctx)) ? 0 : -EPERM;

	if (rv == 0) {
		/* OFF state carries no owner bits. */
		*flags = CLOCK_CONTROL_STATUS_OFF;
	}

	irq_unlock(key);

	return rv;
}
147 
/* Transition a subsystem from OFF to STARTING, tagging it with the
 * requesting context. Returns -EALREADY if this context already holds the
 * clock, -EPERM if another context does.
 */
static int set_starting_state(uint32_t *flags, uint32_t ctx)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);
	int rv;

	if (GET_STATUS(*flags) == CLOCK_CONTROL_STATUS_OFF) {
		*flags = CLOCK_CONTROL_STATUS_STARTING | ctx;
		rv = 0;
	} else {
		rv = (owner == ctx) ? -EALREADY : -EPERM;
	}

	irq_unlock(key);

	return rv;
}
166 
/* Mark a subsystem as ON while preserving its owner context bits. */
static void set_on_state(uint32_t *flags)
{
	unsigned int key = irq_lock();
	uint32_t owner = GET_CTX(*flags);

	*flags = CLOCK_CONTROL_STATUS_ON | owner;
	irq_unlock(key);
}
174 
/* Common "clock started" completion path: mark the subsystem ON and invoke
 * the one-shot user callback, if any was registered by async_start().
 */
static void clkstarted_handle(const struct device *dev,
			      enum clock_control_nrf_type type)
{
	struct nrf_clock_control_sub_data *sub_data = get_sub_data(dev, type);
	clock_control_cb_t callback = sub_data->cb;
	void *user_data = sub_data->user_data;

	/* Clear the callback before invoking it so the slot is free for the
	 * next request (the callback itself may re-request the clock).
	 */
	sub_data->cb = NULL;
	set_on_state(&sub_data->flags);
	DBG(dev, type, "Clock started");

	if (callback) {
		callback(dev, (clock_control_subsys_t)type, user_data);
	}
}
190 
/* One-time busy-wait applied before the first LFCLK start, as a workaround
 * for nRF52 anomaly 132. The delay length comes from
 * CONFIG_NRF52_ANOMALY_132_DELAY_US; with a zero/undefined delay the whole
 * body compiles out.
 */
static inline void anomaly_132_workaround(void)
{
#if (CONFIG_NRF52_ANOMALY_132_DELAY_US - 0)
	static bool once;

	if (!once) {
		k_busy_wait(CONFIG_NRF52_ANOMALY_132_DELAY_US);
		once = true;
	}
#endif
}
202 
/* Start the LFCLK. When the LF source is synthesized from HFCLK, the HFCLK
 * is requested first (via the onoff manager) and held for as long as the
 * LFCLK runs; see lfclk_stop() for the matching release.
 */
static void lfclk_start(void)
{
	if (IS_ENABLED(CONFIG_NRF52_ANOMALY_132_WORKAROUND)) {
		anomaly_132_workaround();
	}

#if CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH
	sys_notify_init_spinwait(&lfsynth_cli.notify);
	(void)onoff_request(z_nrf_clock_control_get_onoff(CLOCK_CONTROL_NRF_SUBSYS_HF),
			    &lfsynth_cli);
#endif

	nrfx_clock_lfclk_start();
}
217 
/* Stop the LFCLK. Notifies the calibration module first (so it can stop
 * using the clock), then releases the HFCLK held for LF synthesis, if any.
 */
static void lfclk_stop(void)
{
	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
		z_nrf_clock_calibration_lfclk_stopped();
	}

	nrfx_clock_lfclk_stop();

#if CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH
	(void)onoff_cancel_or_release(z_nrf_clock_control_get_onoff(CLOCK_CONTROL_NRF_SUBSYS_HF),
				      &lfsynth_cli);
#endif
}
231 
hfclk_start(void)232 static void hfclk_start(void)
233 {
234 	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
235 		hf_start_tstamp = k_uptime_get();
236 	}
237 
238 	nrfx_clock_hfclk_start();
239 }
240 
hfclk_stop(void)241 static void hfclk_stop(void)
242 {
243 	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_SHELL)) {
244 		hf_stop_tstamp = k_uptime_get();
245 	}
246 
247 	nrfx_clock_hfclk_stop();
248 }
249 
#if NRF_CLOCK_HAS_HFCLK192M
/* Start the 192 MHz high-frequency clock domain. */
static void hfclk192m_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLK192M);
}

/* Stop the 192 MHz high-frequency clock domain. */
static void hfclk192m_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLK192M);
}
#endif
261 
#if NRF_CLOCK_HAS_HFCLKAUDIO
/* Start the audio clock domain (HFCLKAUDIO). */
static void hfclkaudio_start(void)
{
	nrfx_clock_start(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}

/* Stop the audio clock domain (HFCLKAUDIO). */
static void hfclkaudio_stop(void)
{
	nrfx_clock_stop(NRF_CLOCK_DOMAIN_HFCLKAUDIO);
}
#endif
273 
get_hf_flags(void)274 static uint32_t *get_hf_flags(void)
275 {
276 	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
277 
278 	return &data->subsys[CLOCK_CONTROL_NRF_TYPE_HFCLK].flags;
279 }
280 
/* Start the HFCLK on behalf of a generic (non-Bluetooth) user. If the BT
 * controller already brought the crystal up, skip the hardware start and
 * complete immediately via clkstarted_handle(). The user bookkeeping and the
 * running-state check must be done atomically, hence the irq lock.
 */
static void generic_hfclk_start(void)
{
	nrf_clock_hfclk_t type;
	bool already_started = false;
	unsigned int key = irq_lock();

	hfclk_users |= HF_USER_GENERIC;
	if (hfclk_users & HF_USER_BT) {
		(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, &type);
		if (type == NRF_CLOCK_HFCLK_HIGH_ACCURACY) {
			already_started = true;
			/* Set on state in case clock interrupt comes and we
			 * want to avoid handling that.
			 */
			set_on_state(get_hf_flags());
		}
	}

	irq_unlock(key);

	if (already_started) {
		/* Clock already started by z_nrf_clock_bt_ctlr_hf_request */
		clkstarted_handle(CLOCK_DEVICE,
				  CLOCK_CONTROL_NRF_TYPE_HFCLK);
		return;
	}

	hfclk_start();
}
310 
/* Release the generic user's HFCLK request; the clock is actually stopped
 * only when the BT controller is not also holding it.
 */
static void generic_hfclk_stop(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * as the thread could be preempted right after that function but
	 * before hfclk_stop() is called and the preempting code could request
	 * the HFCLK again. Then, the HFCLK would be stopped inappropriately
	 * and hfclk_user would be left with an incorrect value.
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_GENERIC;
	/* Skip stopping if BT is still requesting the clock. */
	if (!(hfclk_users & HF_USER_BT)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
329 
330 
/* Fast-path HFCLK request for the Bluetooth controller. Starts the crystal
 * only if the generic user has not already done so; the atomic_or both
 * registers the BT user and reads the previous user mask in one step.
 */
void z_nrf_clock_bt_ctlr_hf_request(void)
{
	if (atomic_or(&hfclk_users, HF_USER_BT) & HF_USER_GENERIC) {
		/* generic request already activated clock. */
		return;
	}

	hfclk_start();
}
340 
/* Release the Bluetooth controller's HFCLK request; stops the crystal only
 * when no generic user still needs it.
 */
void z_nrf_clock_bt_ctlr_hf_release(void)
{
	/* It's not enough to use only atomic_and() here for synchronization,
	 * see the explanation in generic_hfclk_stop().
	 */
	unsigned int key = irq_lock();

	hfclk_users &= ~HF_USER_BT;
	/* Skip stopping if generic is still requesting the clock. */
	if (!(hfclk_users & HF_USER_GENERIC)) {
		hfclk_stop();
	}

	irq_unlock(key);
}
356 
/* Stop a clock subsystem on behalf of the given context (CTX_API or
 * CTX_ONOFF). Fails with -EPERM when a different context owns the clock.
 *
 * @return 0 on success, negative errno otherwise.
 */
static int stop(const struct device *dev, clock_control_subsys_t subsys,
		uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *subdata = get_sub_data(dev, type);
	int err;

	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	err = set_off_state(&subdata->flags, ctx);
	if (err < 0) {
		return err;
	}

	get_sub_config(dev, type)->stop();

	return 0;
}
375 
/* clock_control API "off" entry point; requests are tagged CTX_API. */
static int api_stop(const struct device *dev, clock_control_subsys_t subsys)
{
	return stop(dev, subsys, CTX_API);
}
380 
/* Asynchronously start a clock subsystem on behalf of the given context.
 * On success the subsystem enters STARTING state and @p cb (if non-NULL)
 * is invoked from clkstarted_handle() once the hardware reports the clock
 * as running.
 *
 * @return 0 on success, -EALREADY/-EPERM from set_starting_state() otherwise.
 */
static int async_start(const struct device *dev, clock_control_subsys_t subsys,
			clock_control_cb_t cb, void *user_data, uint32_t ctx)
{
	enum clock_control_nrf_type type = (enum clock_control_nrf_type)(size_t)subsys;
	struct nrf_clock_control_sub_data *subdata = get_sub_data(dev, type);
	int err;

	/* Validate the subsystem id, consistent with stop()/get_status(). */
	__ASSERT_NO_MSG(type < CLOCK_CONTROL_NRF_TYPE_COUNT);

	err = set_starting_state(&subdata->flags, ctx);
	if (err < 0) {
		return err;
	}

	/* Register the completion callback before kicking the hardware so the
	 * started interrupt cannot race past an unset callback.
	 */
	subdata->cb = cb;
	subdata->user_data = user_data;

	get_sub_config(dev, type)->start();

	return 0;
}
400 
/* clock_control API "async_on" entry point; requests are tagged CTX_API. */
static int api_start(const struct device *dev, clock_control_subsys_t subsys,
		     clock_control_cb_t cb, void *user_data)
{
	return async_start(dev, subsys, cb, user_data, CTX_API);
}
406 
/* Completion callback used by api_blocking_start(): wakes the waiter. */
static void blocking_start_callback(const struct device *dev,
				    clock_control_subsys_t subsys,
				    void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(subsys);

	k_sem_give((struct k_sem *)user_data);
}
415 
/* clock_control API "on" entry point: start the clock and block (up to
 * 500 ms) until it is running. Requires multithreading, since the wait is
 * implemented with a semaphore signalled from the clock interrupt path.
 *
 * @return 0 on success, -ENOTSUP without multithreading, -EAGAIN on
 *         timeout, or a negative errno from api_start().
 */
static int api_blocking_start(const struct device *dev,
			      clock_control_subsys_t subsys)
{
	struct k_sem sem = Z_SEM_INITIALIZER(sem, 0, 1);
	int err;

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		return -ENOTSUP;
	}

	err = api_start(dev, subsys, blocking_start_callback, &sem);
	if (err < 0) {
		return err;
	}

	return k_sem_take(&sem, K_MSEC(500));
}
433 
/* Recover the subsystem id from an onoff manager pointer. Managers are
 * stored in a per-type array, so the pointer difference from the array base
 * is exactly the enum clock_control_nrf_type index.
 */
static clock_control_subsys_t get_subsys(struct onoff_manager *mgr)
{
	struct nrf_clock_control_data *data = CLOCK_DEVICE->data;
	size_t offset = (size_t)(mgr - data->mgr);

	return (clock_control_subsys_t)offset;
}
441 
/* onoff manager stop transition: stop the clock and report the result. */
static void onoff_stop(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	notify(mgr, stop(CLOCK_DEVICE, get_subsys(mgr), CTX_ONOFF));
}
450 
/* Bridge from the clock-started callback to the onoff notification:
 * user_data carries the onoff_notify_fn captured in onoff_start().
 */
static void onoff_started_callback(const struct device *dev,
				   clock_control_subsys_t sys,
				   void *user_data)
{
	onoff_notify_fn notify = user_data;
	enum clock_control_nrf_type type =
		(enum clock_control_nrf_type)(size_t)sys;

	notify(get_onoff_manager(dev, type), 0);
}
461 
/* onoff manager start transition: kick off an async start; on immediate
 * failure report it now, otherwise onoff_started_callback() reports later.
 */
static void onoff_start(struct onoff_manager *mgr,
			onoff_notify_fn notify)
{
	int rc = async_start(CLOCK_DEVICE, get_subsys(mgr),
			     onoff_started_callback, notify, CTX_ONOFF);

	if (rc < 0) {
		notify(mgr, rc);
	}
}
473 
474 /** @brief Wait for LF clock availability or stability.
475  *
476  * If LF clock source is SYNTH or RC then there is no distinction between
477  * availability and stability. In case of XTAL source clock, system is initially
478  * starting RC and then seamlessly switches to XTAL. Running RC means clock
 * availability and running the target source means stability. That is because
 * of the significant difference in startup time (<1 ms vs >200 ms).
481  *
482  * In order to get event/interrupt when RC is ready (allowing CPU sleeping) two
483  * stage startup sequence is used. Initially, LF source is set to RC and when
484  * LFSTARTED event is handled it is reconfigured to the target source clock.
485  * This approach is implemented in nrfx_clock driver and utilized here.
486  *
487  * @param mode Start mode.
488  */
static void lfclk_spinwait(enum nrf_lfclk_start_mode mode)
{
	static const nrf_clock_domain_t d = NRF_CLOCK_DOMAIN_LFCLK;
	static const nrf_clock_lfclk_t target_type =
		/* For sources XTAL, EXT_LOW_SWING, and EXT_FULL_SWING,
		 * NRF_CLOCK_LFCLK_XTAL is returned as the type of running clock.
		 */
		(IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_XTAL) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_LOW_SWING) ||
		 IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_EXT_FULL_SWING))
		? NRF_CLOCK_LFCLK_XTAL
		: CLOCK_CONTROL_NRF_K32SRC;
	nrf_clock_lfclk_t type;

	if ((mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE) &&
	    (target_type == NRF_CLOCK_LFCLK_XTAL) &&
	    (nrf_clock_lf_srccopy_get(NRF_CLOCK) == CLOCK_CONTROL_NRF_K32SRC)) {
		/* If target clock source is using XTAL then due to two-stage
		 * clock startup sequence, RC might already be running.
		 * It can be determined by checking current LFCLK source. If it
		 * is set to the target clock source then it means that RC was
		 * started.
		 */
		return;
	}

	/* In ISR/pre-kernel context interrupts are locked and the LFCLKSTARTED
	 * event is polled here; otherwise only the clock interrupt is masked
	 * so the driver's handler does not consume the event we wait for.
	 */
	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
	int key = isr_mode ? irq_lock() : 0;

	if (!isr_mode) {
		nrf_clock_int_disable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}

	/* Spin until the LFCLK runs; for STABLE mode also until the running
	 * source matches the configured target source.
	 */
	while (!(nrfx_clock_is_running(d, (void *)&type)
		 && ((type == target_type)
		     || (mode == CLOCK_CONTROL_NRF_LF_START_AVAILABLE)))) {
		/* Synth source start is almost instant and LFCLKSTARTED may
		 * happen before calling idle. That would lead to deadlock.
		 */
		if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_SYNTH)) {
			if (isr_mode || !IS_ENABLED(CONFIG_MULTITHREADING)) {
				k_cpu_atomic_idle(key);
			} else {
				k_msleep(1);
			}
		}

		/* Clock interrupt is locked, LFCLKSTARTED is handled here. */
		if ((target_type ==  NRF_CLOCK_LFCLK_XTAL)
		    && (nrf_clock_lf_src_get(NRF_CLOCK) == NRF_CLOCK_LFCLK_RC)
		    && nrf_clock_event_check(NRF_CLOCK,
					     NRF_CLOCK_EVENT_LFCLKSTARTED)) {
			nrf_clock_event_clear(NRF_CLOCK,
					      NRF_CLOCK_EVENT_LFCLKSTARTED);
			/* Stage two of the startup: switch from RC to the
			 * target source and restart the LFCLK.
			 */
			nrf_clock_lf_src_set(NRF_CLOCK,
					     CLOCK_CONTROL_NRF_K32SRC);

			/* Clear pending interrupt, otherwise new clock event
			 * would not wake up from idle.
			 */
			NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
			nrf_clock_task_trigger(NRF_CLOCK,
					       NRF_CLOCK_TASK_LFCLKSTART);
		}
	}

	if (isr_mode) {
		irq_unlock(key);
	} else {
		nrf_clock_int_enable(NRF_CLOCK, NRF_CLOCK_INT_LF_STARTED_MASK);
	}
}
561 
/* Request the LFCLK to be permanently on. The onoff request is issued only
 * once (guarded by the atomic flag) and is never released; depending on
 * @p start_mode the call may spin until the clock is available or stable.
 */
void z_nrf_clock_control_lf_on(enum nrf_lfclk_start_mode start_mode)
{
	static atomic_t on;
	static struct onoff_client cli;

	if (atomic_set(&on, 1) == 0) {
		int err;
		struct onoff_manager *mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);

		sys_notify_init_spinwait(&cli.notify);
		err = onoff_request(mgr, &cli);
		__ASSERT_NO_MSG(err >= 0);
	}

	/* In case of simulated board leave immediately. */
	if (IS_ENABLED(CONFIG_SOC_SERIES_BSIM_NRFXX)) {
		return;
	}

	switch (start_mode) {
	case CLOCK_CONTROL_NRF_LF_START_AVAILABLE:
	case CLOCK_CONTROL_NRF_LF_START_STABLE:
		lfclk_spinwait(start_mode);
		break;

	case CLOCK_CONTROL_NRF_LF_START_NOWAIT:
		break;

	default:
		__ASSERT_NO_MSG(false);
	}
}
596 
/* nrfx_clock event handler: dispatches hardware clock events to the
 * per-subsystem started handling and the calibration module.
 */
static void clock_event_handler(nrfx_clock_evt_type_t event)
{
	const struct device *dev = CLOCK_DEVICE;

	switch (event) {
	case NRFX_CLOCK_EVT_HFCLK_STARTED:
	{
		struct nrf_clock_control_sub_data *data =
				get_sub_data(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);

		/* Check needed due to anomaly 201:
		 * HFCLKSTARTED may be generated twice.
		 */
		if (GET_STATUS(data->flags) == CLOCK_CONTROL_STATUS_STARTING) {
			clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK);
		}

		break;
	}
#if NRF_CLOCK_HAS_HFCLK192M
	case NRFX_CLOCK_EVT_HFCLK192M_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLK192M);
		break;
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
	case NRFX_CLOCK_EVT_HFCLKAUDIO_STARTED:
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO);
		break;
#endif
	case NRFX_CLOCK_EVT_LFCLK_STARTED:
		/* Calibration is notified first so it can begin tracking the
		 * newly running LFCLK before user callbacks run.
		 */
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_lfclk_started();
		}
		clkstarted_handle(dev, CLOCK_CONTROL_NRF_TYPE_LFCLK);
		break;
#if NRF_CLOCK_HAS_CALIBRATION
	case NRFX_CLOCK_EVT_CAL_DONE:
		if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
			z_nrf_clock_calibration_done_handler();
		} else {
			/* Should not happen when calibration is disabled. */
			__ASSERT_NO_MSG(false);
		}
		break;
#endif
#if NRF_CLOCK_HAS_PLL
	case NRFX_CLOCK_EVT_PLL_STARTED:
#endif
#if NRF_CLOCK_HAS_XO_TUNE
	case NRFX_CLOCK_EVT_XO_TUNED:
	case NRFX_CLOCK_EVT_XO_TUNE_ERROR:
	case NRFX_CLOCK_EVT_XO_TUNE_FAILED:
#endif
	{
		/* unhandled event */
		break;
	}
	default:
		__ASSERT_NO_MSG(0);
		break;
	}
}
659 
/* Program the HFCLKAUDIO frequency from the devicetree property, if set.
 * Compiles to a no-op when the clock node has no hfclkaudio-frequency.
 */
static void hfclkaudio_init(void)
{
#if DT_NODE_HAS_PROP(DT_NODELABEL(clock), hfclkaudio_frequency)
	const uint32_t frequency =
		DT_PROP(DT_NODELABEL(clock), hfclkaudio_frequency);
	/* As specified in the nRF5340 PS:
	 *
	 * FREQ_VALUE = 2^16 * ((12 * f_out / 32M) - 4)
	 *
	 * The expression below is that formula reduced to integer
	 * arithmetic: 2^16 * 12 / 32M == 384 / 15625.
	 */
	const uint32_t freq_value =
		(uint32_t)((384ULL * frequency) / 15625) - 262144;

#if NRF_CLOCK_HAS_HFCLKAUDIO
	nrf_clock_hfclkaudio_config_set(NRF_CLOCK, freq_value);
#else
#error "hfclkaudio-frequency specified but HFCLKAUDIO clock is not present."
#endif /* NRF_CLOCK_HAS_HFCLKAUDIO */
#endif
}
679 
/* Device init: hook the clock interrupt, initialize the nrfx clock driver,
 * configure HFCLKAUDIO, start calibration support, and set up one onoff
 * manager plus OFF state per clock subsystem.
 *
 * @return 0 on success, -EIO if nrfx init fails, or an onoff init error.
 */
static int clk_init(const struct device *dev)
{
	nrfx_err_t nrfx_err;
	int err;
	static const struct onoff_transitions transitions = {
		.start = onoff_start,
		.stop = onoff_stop
	};

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    nrfx_isr, nrfx_power_clock_irq_handler, 0);

	nrfx_err = nrfx_clock_init(clock_event_handler);
	if (nrfx_err != NRFX_SUCCESS) {
		return -EIO;
	}

	hfclkaudio_init();

	if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_DRIVER_CALIBRATION)) {
		struct nrf_clock_control_data *data = dev->data;

		z_nrf_clock_calibration_init(data->mgr);
	}

	nrfx_clock_enable();

	for (enum clock_control_nrf_type i = 0;
		i < CLOCK_CONTROL_NRF_TYPE_COUNT; i++) {
		struct nrf_clock_control_sub_data *subdata =
						get_sub_data(dev, i);

		err = onoff_manager_init(get_onoff_manager(dev, i),
					 &transitions);
		if (err < 0) {
			return err;
		}

		subdata->flags = CLOCK_CONTROL_STATUS_OFF;
	}

	return 0;
}
723 
/* clock_control driver API vtable. */
static DEVICE_API(clock_control, clock_control_api) = {
	.on = api_blocking_start,
	.off = api_stop,
	.async_on = api_start,
	.get_status = get_status,
};
730 
static struct nrf_clock_control_data data;

/* Static start/stop handler table, indexed by clock type; entries for
 * optional clock domains are compiled in only when the SoC has them.
 */
static const struct nrf_clock_control_config config = {
	.subsys = {
		[CLOCK_CONTROL_NRF_TYPE_HFCLK] = {
			.start = generic_hfclk_start,
			.stop = generic_hfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk",))
		},
		[CLOCK_CONTROL_NRF_TYPE_LFCLK] = {
			.start = lfclk_start,
			.stop = lfclk_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "lfclk",))
		},
#if NRF_CLOCK_HAS_HFCLK192M
		[CLOCK_CONTROL_NRF_TYPE_HFCLK192M] = {
			.start = hfclk192m_start,
			.stop = hfclk192m_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclk192m",))
		},
#endif
#if NRF_CLOCK_HAS_HFCLKAUDIO
		[CLOCK_CONTROL_NRF_TYPE_HFCLKAUDIO] = {
			.start = hfclkaudio_start,
			.stop = hfclkaudio_stop,
			IF_ENABLED(CONFIG_LOG, (.name = "hfclkaudio",))
		},
#endif
	}
};

/* Instantiate the clock device early (PRE_KERNEL_1) so other drivers can
 * request clocks during their own init.
 */
DEVICE_DT_DEFINE(DT_NODELABEL(clock), clk_init, NULL,
		 &data, &config,
		 PRE_KERNEL_1, CONFIG_CLOCK_CONTROL_INIT_PRIORITY,
		 &clock_control_api);
766 
767 #if defined(CONFIG_SHELL)
768 
/* Shell command: print HF/LF clock running state, onoff reference counts,
 * and last HFCLK start/stop times. The timestamp snapshot is taken under an
 * irq lock so start/stop/now are mutually consistent.
 */
static int cmd_status(const struct shell *sh, size_t argc, char **argv)
{
	nrf_clock_hfclk_t hfclk_src;
	bool hf_status;
	bool lf_status = nrfx_clock_is_running(NRF_CLOCK_DOMAIN_LFCLK, NULL);
	struct onoff_manager *hf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_HFCLK);
	struct onoff_manager *lf_mgr =
				get_onoff_manager(CLOCK_DEVICE,
						  CLOCK_CONTROL_NRF_TYPE_LFCLK);
	/* Keep full 64-bit timestamps so the "ms ago" deltas stay correct
	 * after 2^32 ms (~49.7 days) of uptime; the previous uint32_t
	 * snapshot wrapped and produced bogus deltas.
	 */
	uint64_t abs_start, abs_stop;
	unsigned int key = irq_lock();
	uint64_t now = k_uptime_get();

	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

	(void)nrfx_clock_is_running(NRF_CLOCK_DOMAIN_HFCLK, (void *)&hfclk_src);
	hf_status = (hfclk_src == NRF_CLOCK_HFCLK_HIGH_ACCURACY);

	abs_start = hf_start_tstamp;
	abs_stop = hf_stop_tstamp;
	irq_unlock(key);

	shell_print(sh, "HF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			hf_status ? "" : "not ", hf_mgr->refs);
	shell_print(sh, "\t- last start: %u ms (%u ms ago)",
			(uint32_t)abs_start, (uint32_t)(now - abs_start));
	shell_print(sh, "\t- last stop: %u ms (%u ms ago)",
			(uint32_t)abs_stop, (uint32_t)(now - abs_stop));
	shell_print(sh, "LF clock:");
	shell_print(sh, "\t- %srunning (users: %u)",
			lf_status ? "" : "not ", lf_mgr->refs);

	return 0;
}
804 
/* Shell registration: "nrf_clock_control status" (also the default when the
 * root command is invoked without a subcommand).
 */
SHELL_STATIC_SUBCMD_SET_CREATE(subcmds,
	SHELL_CMD_ARG(status, NULL, "Status", cmd_status, 1, 0),
	SHELL_SUBCMD_SET_END
);

SHELL_COND_CMD_REGISTER(CONFIG_CLOCK_CONTROL_NRF_SHELL,
			nrf_clock_control, &subcmds,
			"Clock control commands",
			cmd_status);
814 
815 #endif /* defined(CONFIG_SHELL) */
816