1 /*
2  * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3  * Copyright (c) 2018 Intel Corporation
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/init.h>
9 #include <soc.h>
10 #include <zephyr/drivers/clock_control.h>
11 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
12 #include <zephyr/drivers/timer/system_timer.h>
13 #include <zephyr/drivers/timer/nrf_rtc_timer.h>
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys_clock.h>
16 #include <zephyr/sys/barrier.h>
17 #include <haly/nrfy_rtc.h>
18 #include <zephyr/irq.h>
19 
20 #define RTC_PRETICK (IS_ENABLED(CONFIG_SOC_NRF53_RTC_PRETICK) && \
21 		     IS_ENABLED(CONFIG_SOC_NRF5340_CPUNET))
22 
23 #define EXT_CHAN_COUNT CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT
24 #define CHAN_COUNT (EXT_CHAN_COUNT + 1)
25 
26 #define RTC NRF_RTC1
27 #define RTC_IRQn NRFX_IRQ_NUMBER_GET(RTC)
28 #define RTC_LABEL rtc1
29 #define CHAN_COUNT_MAX (RTC1_CC_NUM - (RTC_PRETICK ? 1 : 0))
30 
31 BUILD_ASSERT(CHAN_COUNT <= CHAN_COUNT_MAX, "Not enough compare channels");
32 /* Ensure that counter driver for RTC1 is not enabled. */
33 BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(RTC_LABEL), disabled),
34 	     "Counter for RTC1 must be disabled");
35 
36 #define COUNTER_BIT_WIDTH 24U
37 #define COUNTER_SPAN BIT(COUNTER_BIT_WIDTH)
38 #define COUNTER_MAX (COUNTER_SPAN - 1U)
39 #define COUNTER_HALF_SPAN (COUNTER_SPAN / 2U)
40 #define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
41 		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
42 #define MAX_TICKS ((COUNTER_HALF_SPAN - CYC_PER_TICK) / CYC_PER_TICK)
43 #define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
44 
45 #define OVERFLOW_RISK_RANGE_END (COUNTER_SPAN / 16)
46 #define ANCHOR_RANGE_START (COUNTER_SPAN / 8)
47 #define ANCHOR_RANGE_END (7 * COUNTER_SPAN / 8)
48 #define TARGET_TIME_INVALID (UINT64_MAX)
49 
50 extern void rtc_pretick_rtc1_isr_hook(void);
51 
52 static volatile uint32_t overflow_cnt;
53 static volatile uint64_t anchor;
54 static uint64_t last_count;
55 static bool sys_busy;
56 
57 struct z_nrf_rtc_timer_chan_data {
58 	z_nrf_rtc_timer_compare_handler_t callback;
59 	void *user_context;
60 	volatile uint64_t target_time;
61 };
62 
63 static struct z_nrf_rtc_timer_chan_data cc_data[CHAN_COUNT];
64 static atomic_t int_mask;
65 static atomic_t alloc_mask;
66 static atomic_t force_isr_mask;
67 
counter_sub(uint32_t a,uint32_t b)68 static uint32_t counter_sub(uint32_t a, uint32_t b)
69 {
70 	return (a - b) & COUNTER_MAX;
71 }
72 
set_comparator(int32_t chan,uint32_t cyc)73 static void set_comparator(int32_t chan, uint32_t cyc)
74 {
75 	nrfy_rtc_cc_set(RTC, chan, cyc & COUNTER_MAX);
76 }
77 
/* Return true if the COMPARE event for @p chan is currently set. */
static bool event_check(int32_t chan)
{
	return nrfy_rtc_event_check(RTC, NRF_RTC_CHANNEL_EVENT_ADDR(chan));
}
82 
/* Clear a pending COMPARE event for @p chan. */
static void event_clear(int32_t chan)
{
	nrfy_rtc_event_clear(RTC, NRF_RTC_CHANNEL_EVENT_ADDR(chan));
}
87 
/* Enable event routing for the COMPARE event of @p chan. */
static void event_enable(int32_t chan)
{
	nrfy_rtc_event_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
}
92 
/* Disable event routing for the COMPARE event of @p chan. */
static void event_disable(int32_t chan)
{
	nrfy_rtc_event_disable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
}
97 
counter(void)98 static uint32_t counter(void)
99 {
100 	return nrfy_rtc_counter_get(RTC);
101 }
102 
absolute_time_to_cc(uint64_t absolute_time)103 static uint32_t absolute_time_to_cc(uint64_t absolute_time)
104 {
105 	/* 24 least significant bits represent target CC value */
106 	return absolute_time & COUNTER_MAX;
107 }
108 
full_int_lock(void)109 static uint32_t full_int_lock(void)
110 {
111 	uint32_t mcu_critical_state;
112 
113 	if (IS_ENABLED(CONFIG_NRF_RTC_TIMER_LOCK_ZERO_LATENCY_IRQS)) {
114 		mcu_critical_state = __get_PRIMASK();
115 		__disable_irq();
116 	} else {
117 		mcu_critical_state = irq_lock();
118 	}
119 
120 	return mcu_critical_state;
121 }
122 
full_int_unlock(uint32_t mcu_critical_state)123 static void full_int_unlock(uint32_t mcu_critical_state)
124 {
125 	if (IS_ENABLED(CONFIG_NRF_RTC_TIMER_LOCK_ZERO_LATENCY_IRQS)) {
126 		__set_PRIMASK(mcu_critical_state);
127 	} else {
128 		irq_unlock(mcu_critical_state);
129 	}
130 }
131 
/* Return the address of the COMPARE event register associated with @p chan. */
uint32_t z_nrf_rtc_timer_compare_evt_address_get(int32_t chan)
{
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);
	return nrfy_rtc_event_address_get(RTC, nrfy_rtc_compare_event_get(chan));
}
137 
/* Return the address of the CAPTURE task register for @p chan, or 0 when
 * the RTC peripheral has no capture tasks or when @p chan is the system
 * clock channel (0), which has no capture task exposed.
 */
uint32_t z_nrf_rtc_timer_capture_task_address_get(int32_t chan)
{
#if defined(RTC_TASKS_CAPTURE_TASKS_CAPTURE_Msk)
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);
	/* Channel 0 is reserved for the system clock. */
	if (chan == 0) {
		return 0;
	}

	return nrfy_rtc_task_address_get(RTC, nrfy_rtc_capture_task_get(chan));
#else
	ARG_UNUSED(chan);
	return 0;
#endif
}
152 
/* Disable the COMPARE interrupt for @p chan and report whether it was
 * enabled before (the result serves as the key for compare_int_unlock()).
 */
static bool compare_int_lock(int32_t chan)
{
	atomic_val_t prev = atomic_and(&int_mask, ~BIT(chan));

	nrfy_rtc_int_disable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));

	/* Make sure the interrupt disable has taken effect before returning,
	 * so the caller can safely treat the channel as locked.
	 */
	barrier_dmem_fence_full();
	barrier_isync_fence_full();

	return prev & BIT(chan);
}
164 
165 
/* Public wrapper for compare_int_lock(); channel 0 (system clock) is
 * excluded from this API.
 */
bool z_nrf_rtc_timer_compare_int_lock(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_int_lock(chan);
}
172 
/* Re-enable the COMPARE interrupt for @p chan if @p key says it was enabled
 * when the matching compare_int_lock() was taken. If ISR processing was
 * forced for this channel while locked, pend the RTC IRQ so the forced
 * handling occurs now.
 */
static void compare_int_unlock(int32_t chan, bool key)
{
	if (key) {
		atomic_or(&int_mask, BIT(chan));
		nrfy_rtc_int_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
		if (atomic_get(&force_isr_mask) & BIT(chan)) {
			NVIC_SetPendingIRQ(RTC_IRQn);
		}
	}
}
183 
/* Public wrapper for compare_int_unlock(); channel 0 (system clock) is
 * excluded from this API.
 */
void z_nrf_rtc_timer_compare_int_unlock(int32_t chan, bool key)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	compare_int_unlock(chan, key);
}
190 
/* Read the current CC register value of @p chan. */
uint32_t z_nrf_rtc_timer_compare_read(int32_t chan)
{
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);

	return nrfy_rtc_cc_get(RTC, chan);
}
197 
/* Convert a kernel timeout to a 64-bit absolute RTC time usable with
 * z_nrf_rtc_timer_set(). Returns -EINVAL (as uint64_t) when the timeout is
 * further away than COUNTER_SPAN cycles.
 */
uint64_t z_nrf_rtc_timer_get_ticks(k_timeout_t t)
{
	uint64_t curr_time;
	int64_t curr_tick;
	int64_t result;
	int64_t abs_ticks;

	/* Read RTC time and kernel tick count coherently: retry until the
	 * RTC time does not change across the pair of reads.
	 */
	do {
		curr_time = z_nrf_rtc_timer_read();
		curr_tick = sys_clock_tick_get();
	} while (curr_time != z_nrf_rtc_timer_read());

	abs_ticks = Z_TICK_ABS(t.ticks);
	if (abs_ticks < 0) {
		/* relative timeout */
		return (t.ticks > COUNTER_SPAN) ?
			-EINVAL : (curr_time + t.ticks);
	}

	/* absolute timeout */
	result = abs_ticks - curr_tick;

	if (result > COUNTER_SPAN) {
		/* Target is further away than the RTC span can schedule. */
		return -EINVAL;
	}

	return curr_time + result;
}
226 
227 /** @brief Function safely sets an alarm.
228  *
229  * It assumes that provided value is at most COUNTER_HALF_SPAN cycles from now
230  * (other values are considered to be from the past). It detects late setting
231  * and properly adjusts CC values that are too near in the future to guarantee
232  * triggering a COMPARE event soon, not after 512 seconds when the RTC wraps
233  * around first.
234  *
235  * @param[in] chan A channel for which a new CC value is to be set.
236  *
237  * @param[in] req_cc Requested CC register value to be set.
238  *
239  * @param[in] exact Use @c false to allow CC adjustment if @c req_cc value is
240  *                  close to the current value of the timer.
241  *                  Use @c true to disallow CC adjustment. The function can
242  *                  fail with -EINVAL result if @p req_cc is too close to the
243  *                  current value.
244  *
245  * @retval 0 The requested CC has been set successfully.
246  * @retval -EINVAL The requested CC value could not be reliably set.
247  */
static int set_alarm(int32_t chan, uint32_t req_cc, bool exact)
{
	int ret = 0;

	/* Ensure that the value exposed in this driver API is consistent with
	 * assumptions of this function.
	 */
	BUILD_ASSERT(NRF_RTC_TIMER_MAX_SCHEDULE_SPAN <= COUNTER_HALF_SPAN);

	/* According to product specifications, when the current counter value
	 * is N, a value of N+2 written to the CC register is guaranteed to
	 * trigger a COMPARE event at N+2, but tests show that this compare
	 * value can be missed when the previous CC value is N+1 and the write
	 * occurs in the second half of the RTC clock cycle (such situation can
	 * be provoked by test_next_cycle_timeouts in the nrf_rtc_timer suite).
	 * This never happens when the written value is N+3. Use 3 cycles as
	 * the nearest possible scheduling then.
	 */
	enum { MIN_CYCLES_FROM_NOW = 3 };
	uint32_t cc_val = req_cc;
	uint32_t cc_inc = MIN_CYCLES_FROM_NOW;

	/* Disable event routing for the channel to avoid getting a COMPARE
	 * event for the previous CC value before the new one takes effect
	 * (however, even if such spurious event was generated, it would be
	 * properly filtered out in process_channel(), where the target time
	 * is checked).
	 * Clear also the event as it may already be generated at this point.
	 */
	event_disable(chan);
	event_clear(chan);

	for (;;) {
		uint32_t now;

		set_comparator(chan, cc_val);
		/* Enable event routing after the required CC value was set.
		 * Even though the above operation may get repeated (see below),
		 * there is no need to disable event routing in every iteration
		 * of the loop, as the COMPARE event resulting from any attempt
		 * of setting the CC register is acceptable (as mentioned above,
		 * process_channel() does the proper filtering).
		 */
		event_enable(chan);

		now = counter();

		/* Check if the CC register was successfully set to a value
		 * that will for sure trigger a COMPARE event as expected.
		 * If not, try again, adjusting the CC value accordingly.
		 * Increase the CC value by a larger number of cycles in each
		 * trial to avoid spending too much time in this loop if it
		 * continuously gets interrupted and delayed by something.
		 */
		if (counter_sub(cc_val, now + MIN_CYCLES_FROM_NOW) >
		    (COUNTER_HALF_SPAN - MIN_CYCLES_FROM_NOW)) {
			/* If the COMPARE event turns out to be already
			 * generated, check if the loop can be finished.
			 */
			if (event_check(chan)) {
				/* If the current counter value has not yet
				 * reached the requested CC value, the event
				 * must come from the previously set CC value
				 * (the alarm is apparently rescheduled).
				 * The event needs to be cleared then and the
				 * loop needs to be continued.
				 */
				now = counter();
				if (counter_sub(now, req_cc) > COUNTER_HALF_SPAN) {
					event_clear(chan);
					if (exact) {
						ret = -EINVAL;
						break;
					}
				} else {
					/* Event is for the requested CC; the
					 * alarm is effectively set.
					 */
					break;
				}
			} else if (exact) {
				ret = -EINVAL;
				break;
			}

			/* Retry with a CC value pushed further from "now";
			 * the increment grows on every iteration.
			 */
			cc_val = now + cc_inc;
			cc_inc++;
		} else {
			break;
		}
	}

	return ret;
}
339 
/* Configure @p chan to fire at absolute time @p target_time (64-bit RTC
 * time). Caller must hold the channel's compare interrupt lock.
 *
 * If the target time is already in the past: with @p exact false, ISR
 * handling is forced so the handler still runs; with @p exact true,
 * -EINVAL is returned. Returns -EINVAL also when the target is more than
 * COUNTER_HALF_SPAN cycles in the future.
 */
static int compare_set_nolocks(int32_t chan, uint64_t target_time,
			z_nrf_rtc_timer_compare_handler_t handler,
			void *user_data, bool exact)
{
	int ret = 0;
	uint32_t cc_value = absolute_time_to_cc(target_time);
	uint64_t curr_time = z_nrf_rtc_timer_read();

	if (curr_time < target_time) {
		if (target_time - curr_time > COUNTER_HALF_SPAN) {
			/* Target time is too distant. */
			return -EINVAL;
		}

		if (target_time != cc_data[chan].target_time) {
			/* Target time is valid and is different than currently set.
			 * Set CC value.
			 */
			ret = set_alarm(chan, cc_value, exact);
		}
	} else if (!exact) {
		/* Force ISR handling when exiting from critical section. */
		atomic_or(&force_isr_mask, BIT(chan));
	} else {
		ret = -EINVAL;
	}

	if (ret == 0) {
		/* Record the channel state only on success. */
		cc_data[chan].target_time = target_time;
		cc_data[chan].callback = handler;
		cc_data[chan].user_context = user_data;
	}

	return ret;
}
375 
/* Lock the channel's COMPARE interrupt, program the alarm via
 * compare_set_nolocks(), and restore the interrupt state.
 */
static int compare_set(int32_t chan, uint64_t target_time,
			z_nrf_rtc_timer_compare_handler_t handler,
			void *user_data, bool exact)
{
	int ret;
	bool key = compare_int_lock(chan);

	ret = compare_set_nolocks(chan, target_time, handler, user_data, exact);
	compare_int_unlock(chan, key);

	return ret;
}
390 
/* Set an alarm on a user channel; if @p target_time is already in the past
 * the handler is still invoked (non-exact mode).
 */
int z_nrf_rtc_timer_set(int32_t chan, uint64_t target_time,
			 z_nrf_rtc_timer_compare_handler_t handler,
			 void *user_data)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_set(chan, target_time, handler, user_data, false);
}
399 
/* Set an alarm on a user channel in exact mode: fails with -EINVAL instead
 * of adjusting when @p target_time cannot be hit precisely.
 */
int z_nrf_rtc_timer_exact_set(int32_t chan, uint64_t target_time,
			      z_nrf_rtc_timer_compare_handler_t handler,
			      void *user_data)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_set(chan, target_time, handler, user_data, true);
}
408 
/* Cancel any alarm pending on @p chan: invalidate its target time, clear
 * and disable its COMPARE event, and drop any forced-ISR request.
 */
void z_nrf_rtc_timer_abort(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	bool key = compare_int_lock(chan);

	cc_data[chan].target_time = TARGET_TIME_INVALID;
	event_clear(chan);
	event_disable(chan);
	(void)atomic_and(&force_isr_mask, ~BIT(chan));

	compare_int_unlock(chan, key);
}
422 
/* Return the 64-bit RTC time: `overflow_cnt` in the upper bits combined
 * with the live 24-bit counter, corrected for an overflow that has occurred
 * but has not been counted yet.
 */
uint64_t z_nrf_rtc_timer_read(void)
{
	uint64_t val = ((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH;

	/* Order the overflow_cnt read before the counter read. */
	barrier_dmem_fence_full();

	uint32_t cntr = counter();

	val += cntr;

	if (cntr < OVERFLOW_RISK_RANGE_END) {
		/* `overflow_cnt` can have incorrect value due to still unhandled overflow or
		 * due to possibility that this code preempted overflow interrupt before final write
		 * of `overflow_cnt`. Update of `anchor` occurs far in time from this moment, so
		 * `anchor` is considered valid and stable. Because of this timing there is no risk
		 * of incorrect `anchor` value caused by non-atomic read of 64-bit `anchor`.
		 */
		if (val < anchor) {
			/* Unhandled overflow, detected, let's add correction */
			val += COUNTER_SPAN;
		}
	} else {
		/* `overflow_cnt` is considered valid and stable in this range, no need to
		 * check validity using `anchor`
		 */
	}

	return val;
}
452 
in_anchor_range(uint32_t cc_value)453 static inline bool in_anchor_range(uint32_t cc_value)
454 {
455 	return (cc_value >= ANCHOR_RANGE_START) && (cc_value < ANCHOR_RANGE_END);
456 }
457 
/* Refresh `anchor` with the current 64-bit time derived from @p cc_value,
 * but only when the CC value is far from a counter overflow.
 */
static inline void anchor_update(uint32_t cc_value)
{
	/* Update anchor when far from overflow */
	if (in_anchor_range(cc_value)) {
		/* In this range `overflow_cnt` is considered valid and stable.
		 * Write of 64-bit `anchor` is non atomic. However it happens
		 * far in time from the moment the `anchor` is read in
		 * `z_nrf_rtc_timer_read`.
		 */
		anchor = (((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH) + cc_value;
	}
}
470 
/* Handler for the system clock channel (0): advances `last_count` by the
 * whole ticks that elapsed since the previous announcement, refreshes the
 * anchor, re-arms the next tick in ticked mode, and announces the elapsed
 * ticks to the kernel.
 */
static void sys_clock_timeout_handler(int32_t chan,
				      uint64_t expire_time,
				      void *user_data)
{
	uint32_t cc_value = absolute_time_to_cc(expire_time);
	/* Whole ticks elapsed since the last announcement. */
	uint32_t dticks = (uint32_t)(expire_time - last_count) / CYC_PER_TICK;

	last_count += dticks * CYC_PER_TICK;

	anchor_update(cc_value);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* protection is not needed because we are in the RTC interrupt
		 * so it won't get preempted by the interrupt.
		 */
		compare_set(chan, last_count + CYC_PER_TICK,
					  sys_clock_timeout_handler, NULL, false);
	}

	sys_clock_announce(dticks);
}
492 
/* Decide whether @p chan needs servicing in the ISR. Returns true (and
 * clears the COMPARE event) when the channel's interrupt is enabled and
 * either its COMPARE event fired or processing was forced; clears the
 * force flag as a side effect.
 */
static bool channel_processing_check_and_clear(int32_t chan)
{
	if (nrfy_rtc_int_enable_check(RTC, NRF_RTC_CHANNEL_INT_MASK(chan))) {
		/* The processing of channel can be caused by CC match
		 * or be forced.
		 */
		if ((atomic_and(&force_isr_mask, ~BIT(chan)) & BIT(chan)) ||
		    event_check(chan)) {
			event_clear(chan);
			return true;
		}
	}

	return false;
}
508 
/* Service one compare channel from the ISR: if its target time has been
 * reached, consume the channel state and run the user callback (outside
 * the critical section).
 */
static void process_channel(int32_t chan)
{
	if (channel_processing_check_and_clear(chan)) {
		void *user_context;
		uint32_t mcu_critical_state;
		uint64_t curr_time;
		uint64_t expire_time;
		z_nrf_rtc_timer_compare_handler_t handler = NULL;

		curr_time = z_nrf_rtc_timer_read();

		/* This critical section is used to provide atomic access to
		 * cc_data structure and prevent higher priority contexts
		 * (including ZLIs) from overwriting it.
		 */
		mcu_critical_state = full_int_lock();

		/* If target_time is in the past or is equal to current time
		 * value, execute the handler.
		 */
		expire_time = cc_data[chan].target_time;
		if (curr_time >= expire_time) {
			handler = cc_data[chan].callback;
			user_context = cc_data[chan].user_context;
			cc_data[chan].callback = NULL;
			cc_data[chan].target_time = TARGET_TIME_INVALID;
			event_disable(chan);
			/* Because of the way set_alarm() sets the CC register,
			 * it may turn out that another COMPARE event has been
			 * generated for the same alarm. Make sure the event
			 * is cleared, so that the ISR is not executed again
			 * unnecessarily.
			 */
			event_clear(chan);
		}

		full_int_unlock(mcu_critical_state);

		/* Run the callback outside the critical section. */
		if (handler) {
			handler(chan, expire_time, user_context);
		}
	}
}
552 
553 /* Note: this function has public linkage, and MUST have this
554  * particular name.  The platform architecture itself doesn't care,
555  * but there is a test (tests/arch/arm_irq_vector_table) that needs
556  * to find it to it can set it in a custom vector table.  Should
557  * probably better abstract that at some point (e.g. query and reset
558  * it by pointer at runtime, maybe?) so we don't have this leaky
559  * symbol.
560  */
/* RTC1 interrupt service routine: counts overflows and services every
 * compare channel.
 */
void rtc_nrf_isr(const void *arg)
{
	ARG_UNUSED(arg);

	if (RTC_PRETICK) {
		rtc_pretick_rtc1_isr_hook();
	}

	/* Count an overflow if its interrupt is enabled and its event fired. */
	if (nrfy_rtc_int_enable_check(RTC, NRF_RTC_INT_OVERFLOW_MASK) &&
	    nrfy_rtc_events_process(RTC, NRF_RTC_INT_OVERFLOW_MASK)) {
		overflow_cnt++;
	}

	for (int32_t chan = 0; chan < CHAN_COUNT; chan++) {
		process_channel(chan);
	}
}
578 
z_nrf_rtc_timer_chan_alloc(void)579 int32_t z_nrf_rtc_timer_chan_alloc(void)
580 {
581 	int32_t chan;
582 	atomic_val_t prev;
583 	do {
584 		chan = alloc_mask ? 31 - __builtin_clz(alloc_mask) : -1;
585 		if (chan < 0) {
586 			return -ENOMEM;
587 		}
588 		prev = atomic_and(&alloc_mask, ~BIT(chan));
589 	} while (!(prev & BIT(chan)));
590 
591 	return chan;
592 }
593 
/* Return a channel obtained from z_nrf_rtc_timer_chan_alloc() to the pool. */
void z_nrf_rtc_timer_chan_free(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	atomic_or(&alloc_mask, BIT(chan));
}
600 
601 
/* Force an RTC counter overflow (test/diagnostic aid). Only supported when
 * CONFIG_NRF_RTC_TIMER_TRIGGER_OVERFLOW is enabled and no user channels are
 * configured. Fails with -EBUSY while a system timeout is active and with
 * -EAGAIN when the counter is about to overflow naturally.
 */
int z_nrf_rtc_timer_trigger_overflow(void)
{
	uint32_t mcu_critical_state;
	int err = 0;

	if (!IS_ENABLED(CONFIG_NRF_RTC_TIMER_TRIGGER_OVERFLOW) ||
	    (CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT > 0)) {
		return -ENOTSUP;
	}

	mcu_critical_state = full_int_lock();
	if (sys_busy) {
		err = -EBUSY;
		goto bail;
	}

	if (counter() >= (COUNTER_SPAN - 100)) {
		err = -EAGAIN;
		goto bail;
	}

	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_TRIGGER_OVERFLOW);
	/* Wait for the overflow task to take effect before reading back. */
	k_busy_wait(80);

	uint64_t now = z_nrf_rtc_timer_read();

	/* NOTE(review): err is always 0 here — the error paths above jump
	 * straight to bail — so this check looks redundant.
	 */
	if (err == 0) {
		sys_clock_timeout_handler(0, now, NULL);
	}
bail:
	full_int_unlock(mcu_critical_state);

	return err;
}
636 
/* Kernel hook: program channel 0 so the next sys_clock_announce() happens
 * after @p ticks ticks (tickless mode only; no-op otherwise).
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);
	uint32_t cyc;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	if (ticks == K_TICKS_FOREVER) {
		/* No timeout requested: schedule as far out as allowed. */
		cyc = MAX_TICKS * CYC_PER_TICK;
		sys_busy = false;
	} else {
		/* Value of ticks can be zero or negative, what means "announce
		 * the next tick" (the same as ticks equal to 1).
		 */
		cyc = CLAMP(ticks, 1, (int32_t)MAX_TICKS);
		cyc *= CYC_PER_TICK;
		sys_busy = true;
	}

	uint32_t unannounced = z_nrf_rtc_timer_read() - last_count;

	/* If we haven't announced for more than half the 24-bit wrap
	 * duration, then force an announce to avoid loss of a wrap
	 * event.  This can happen if new timeouts keep being set
	 * before the existing one triggers the interrupt.
	 */
	if (unannounced >= COUNTER_HALF_SPAN) {
		cyc = 0;
	}

	/* Get the cycles from last_count to the tick boundary after
	 * the requested ticks have passed starting now.
	 */
	cyc += unannounced;
	cyc = DIV_ROUND_UP(cyc, CYC_PER_TICK) * CYC_PER_TICK;

	/* Due to elapsed time the calculation above might produce a
	 * duration that laps the counter.  Don't let it.
	 * This limitation also guarantees that the anchor will be properly
	 * updated before every overflow (see anchor_update()).
	 */
	if (cyc > MAX_CYCLES) {
		cyc = MAX_CYCLES;
	}

	uint64_t target_time = cyc + last_count;

	compare_set(0, target_time, sys_clock_timeout_handler, NULL, false);
}
688 
sys_clock_elapsed(void)689 uint32_t sys_clock_elapsed(void)
690 {
691 	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
692 		return 0;
693 	}
694 
695 	return (z_nrf_rtc_timer_read() - last_count) / CYC_PER_TICK;
696 }
697 
/* Kernel hook: 32-bit cycle counter, the low word of the 64-bit RTC time. */
uint32_t sys_clock_cycle_get_32(void)
{
	uint64_t now = z_nrf_rtc_timer_read();

	return (uint32_t)now;
}
702 
int_event_disable_rtc(void)703 static void int_event_disable_rtc(void)
704 {
705 	uint32_t mask = NRF_RTC_INT_TICK_MASK     |
706 			NRF_RTC_INT_OVERFLOW_MASK |
707 			NRF_RTC_INT_COMPARE0_MASK |
708 			NRF_RTC_INT_COMPARE1_MASK |
709 			NRF_RTC_INT_COMPARE2_MASK |
710 			NRF_RTC_INT_COMPARE3_MASK;
711 
712 	/* Reset interrupt enabling to expected reset values */
713 	nrfy_rtc_int_disable(RTC, mask);
714 
715 	/* Reset event routing enabling to expected reset values */
716 	nrfy_rtc_event_disable(RTC, mask);
717 }
718 
sys_clock_disable(void)719 void sys_clock_disable(void)
720 {
721 	nrf_rtc_task_trigger(RTC, NRF_RTC_TASK_STOP);
722 	irq_disable(RTC_IRQn);
723 	int_event_disable_rtc();
724 	NVIC_ClearPendingIRQ(RTC_IRQn);
725 }
726 
/* One-time driver initialization: reset RTC interrupt/event state, enable
 * compare and overflow interrupts, connect and enable the IRQ, start the
 * counter, set the initial system timeout on channel 0, and request the
 * LF clock.
 */
static int sys_clock_driver_init(void)
{
	/* LF clock start mode derived from the configured wait policy. */
	static const enum nrf_lfclk_start_mode mode =
		IS_ENABLED(CONFIG_SYSTEM_CLOCK_NO_WAIT) ?
			CLOCK_CONTROL_NRF_LF_START_NOWAIT :
			(IS_ENABLED(CONFIG_SYSTEM_CLOCK_WAIT_FOR_AVAILABILITY) ?
			CLOCK_CONTROL_NRF_LF_START_AVAILABLE :
			CLOCK_CONTROL_NRF_LF_START_STABLE);

	int_event_disable_rtc();

	/* TODO: replace with counter driver to access RTC */
	nrfy_rtc_prescaler_set(RTC, 0);
	for (int32_t chan = 0; chan < CHAN_COUNT; chan++) {
		cc_data[chan].target_time = TARGET_TIME_INVALID;
		nrfy_rtc_int_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
	}

	nrfy_rtc_int_enable(RTC, NRF_RTC_INT_OVERFLOW_MASK);

	NVIC_ClearPendingIRQ(RTC_IRQn);

	IRQ_CONNECT(RTC_IRQn, DT_IRQ(DT_NODELABEL(RTC_LABEL), priority),
		    rtc_nrf_isr, 0, 0);
	irq_enable(RTC_IRQn);

	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_CLEAR);
	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_START);

	/* All channels start unlocked; user channels 1..EXT_CHAN_COUNT are
	 * marked free for allocation.
	 */
	int_mask = BIT_MASK(CHAN_COUNT);
	if (CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT) {
		alloc_mask = BIT_MASK(EXT_CHAN_COUNT) << 1;
	}

	uint32_t initial_timeout = IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
		MAX_CYCLES : CYC_PER_TICK;

	compare_set(0, initial_timeout, sys_clock_timeout_handler, NULL, false);

	z_nrf_clock_control_lf_on(mode);

	return 0;
}
770 
/* Register the timer driver initialization at PRE_KERNEL_2. */
SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
773