1 /*
2  * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3  * Copyright (c) 2018 Intel Corporation
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/init.h>
9 #include <soc.h>
10 #include <zephyr/drivers/clock_control.h>
11 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
12 #include <zephyr/drivers/timer/system_timer.h>
13 #include <zephyr/drivers/timer/nrf_rtc_timer.h>
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys_clock.h>
16 #include <zephyr/sys/barrier.h>
17 #include <haly/nrfy_rtc.h>
18 #include <zephyr/irq.h>
19 
20 #define RTC_BIT_WIDTH 24
21 
22 #if (CONFIG_NRF_RTC_COUNTER_BIT_WIDTH < RTC_BIT_WIDTH)
23 #define CUSTOM_COUNTER_BIT_WIDTH 1
24 #define WRAP_CH 0
25 #define SYS_CLOCK_CH 1
26 #include "helpers/nrfx_gppi.h"
27 #else
28 #define CUSTOM_COUNTER_BIT_WIDTH 0
29 #define SYS_CLOCK_CH 0
30 #endif
31 
32 #define RTC_PRETICK (IS_ENABLED(CONFIG_SOC_NRF53_RTC_PRETICK) && \
33 		     IS_ENABLED(CONFIG_SOC_NRF5340_CPUNET))
34 
35 #define EXT_CHAN_COUNT CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT
36 #define CHAN_COUNT (EXT_CHAN_COUNT + 1 + CUSTOM_COUNTER_BIT_WIDTH)
37 
38 #define RTC NRF_RTC1
39 #define RTC_IRQn NRFX_IRQ_NUMBER_GET(RTC)
40 #define RTC_LABEL rtc1
41 #define CHAN_COUNT_MAX (RTC1_CC_NUM - (RTC_PRETICK ? 1 : 0))
42 
43 BUILD_ASSERT(CHAN_COUNT <= CHAN_COUNT_MAX, "Not enough compare channels");
44 /* Ensure that counter driver for RTC1 is not enabled. */
45 BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(RTC_LABEL), disabled),
46 	     "Counter for RTC1 must be disabled");
47 
48 #define COUNTER_BIT_WIDTH CONFIG_NRF_RTC_COUNTER_BIT_WIDTH
49 #define COUNTER_SPAN BIT(COUNTER_BIT_WIDTH)
50 #define COUNTER_MAX (COUNTER_SPAN - 1U)
51 #define COUNTER_HALF_SPAN (COUNTER_SPAN / 2U)
52 #define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
53 		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
54 #define MAX_TICKS ((COUNTER_HALF_SPAN - CYC_PER_TICK) / CYC_PER_TICK)
55 #define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
56 
57 #define OVERFLOW_RISK_RANGE_END (COUNTER_SPAN / 16)
58 #define ANCHOR_RANGE_START (COUNTER_SPAN / 8)
59 #define ANCHOR_RANGE_END (7 * COUNTER_SPAN / 8)
60 #define TARGET_TIME_INVALID (UINT64_MAX)
61 
62 extern void rtc_pretick_rtc1_isr_hook(void);
63 
/* Number of RTC counter wrap-arounds observed; combined with the counter
 * value this forms the 64-bit absolute time base.
 */
static volatile uint32_t overflow_cnt;
/* Absolute time at which overflow_cnt was last known consistent with the
 * counter; used by z_nrf_rtc_timer_read() to detect an unhandled overflow.
 */
static volatile uint64_t anchor;
/* Absolute time (in RTC cycles) of the most recently announced tick. */
static uint64_t last_count;
/* True while a finite system-clock timeout is pending (tickless mode). */
static bool sys_busy;

/* Per-channel state for the compare channels managed by this driver. */
struct z_nrf_rtc_timer_chan_data {
	z_nrf_rtc_timer_compare_handler_t callback; /* invoked from the RTC ISR */
	void *user_context; /* opaque pointer passed to the callback */
	volatile uint64_t target_time; /* absolute expiry; TARGET_TIME_INVALID when unset */
};

/* State of every compare channel (system clock channel included). */
static struct z_nrf_rtc_timer_chan_data cc_data[CHAN_COUNT];
/* Bitmask of channels whose COMPARE interrupt is currently enabled. */
static atomic_t int_mask;
/* Bitmask of channels currently free for allocation by users. */
static atomic_t alloc_mask;
/* Bitmask of channels for which ISR processing is forced (late alarms). */
static atomic_t force_isr_mask;
79 
/* Forward distance from @p b to @p a, modulo the counter span. */
static uint32_t counter_sub(uint32_t a, uint32_t b)
{
	uint32_t diff = a - b;

	return diff & COUNTER_MAX;
}
84 
/* Write @p cyc (masked to the counter width) into the channel's CC register. */
static void set_comparator(int32_t chan, uint32_t cyc)
{
	nrfy_rtc_cc_set(RTC, chan, cyc & COUNTER_MAX);
}
89 
/* Return true if the COMPARE event for @p chan is pending. */
static bool event_check(int32_t chan)
{
	return nrfy_rtc_event_check(RTC, NRF_RTC_CHANNEL_EVENT_ADDR(chan));
}
94 
/* Clear a pending COMPARE event for @p chan. */
static void event_clear(int32_t chan)
{
	nrfy_rtc_event_clear(RTC, NRF_RTC_CHANNEL_EVENT_ADDR(chan));
}
99 
/* Enable event routing for @p chan's COMPARE event (the interrupt enable
 * itself is managed separately via compare_int_lock()/unlock()).
 */
static void event_enable(int32_t chan)
{
	nrfy_rtc_event_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
}
104 
/* Disable event routing for @p chan's COMPARE event. */
static void event_disable(int32_t chan)
{
	nrfy_rtc_event_disable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
}
109 
/* Read the current RTC COUNTER value. */
static uint32_t counter(void)
{
	return nrfy_rtc_counter_get(RTC);
}
114 
/* Extract the CC register value corresponding to an absolute time. */
static uint32_t absolute_time_to_cc(uint64_t absolute_time)
{
	/* The COUNTER_BIT_WIDTH least significant bits represent the target
	 * CC value (24 by default, fewer with a custom counter bit width).
	 */
	return absolute_time & COUNTER_MAX;
}
120 
/* Lock interrupts, including zero-latency IRQs when so configured.
 *
 * @return Key to be passed to full_int_unlock().
 */
static uint32_t full_int_lock(void)
{
	uint32_t mcu_critical_state;

	if (IS_ENABLED(CONFIG_NRF_RTC_TIMER_LOCK_ZERO_LATENCY_IRQS)) {
		/* Mask all exceptions via PRIMASK so even zero-latency
		 * interrupts cannot preempt the critical section.
		 */
		mcu_critical_state = __get_PRIMASK();
		__disable_irq();
	} else {
		mcu_critical_state = irq_lock();
	}

	return mcu_critical_state;
}
134 
/* Restore the interrupt state saved by full_int_lock(). */
static void full_int_unlock(uint32_t mcu_critical_state)
{
	if (IS_ENABLED(CONFIG_NRF_RTC_TIMER_LOCK_ZERO_LATENCY_IRQS)) {
		__set_PRIMASK(mcu_critical_state);
	} else {
		irq_unlock(mcu_critical_state);
	}
}
143 
/* Return the register address of the COMPARE event for @p chan (for PPI use). */
uint32_t z_nrf_rtc_timer_compare_evt_address_get(int32_t chan)
{
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);
	return nrfy_rtc_event_address_get(RTC, nrfy_rtc_compare_event_get(chan));
}
149 
/* Return the register address of the CAPTURE task for @p chan.
 *
 * Returns 0 for the system clock channel or when the RTC peripheral has no
 * capture tasks.
 */
uint32_t z_nrf_rtc_timer_capture_task_address_get(int32_t chan)
{
#if defined(RTC_TASKS_CAPTURE_TASKS_CAPTURE_Msk)
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);
	if (chan == SYS_CLOCK_CH) {
		return 0;
	}

	return nrfy_rtc_task_address_get(RTC, nrfy_rtc_capture_task_get(chan));
#else
	ARG_UNUSED(chan);
	return 0;
#endif
}
164 
/* Disable the COMPARE interrupt for @p chan.
 *
 * @return True if the interrupt was enabled before this call (lock key).
 */
static bool compare_int_lock(int32_t chan)
{
	atomic_val_t prev = atomic_and(&int_mask, ~BIT(chan));

	nrfy_rtc_int_disable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));

	/* Make sure the interrupt disable takes effect before returning. */
	barrier_dmem_fence_full();
	barrier_isync_fence_full();

	return prev & BIT(chan);
}
176 
177 
/* Public wrapper for compare_int_lock(); user channels only (chan > 0). */
bool z_nrf_rtc_timer_compare_int_lock(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_int_lock(chan);
}
184 
/* Re-enable the COMPARE interrupt for @p chan if @p key says it was enabled
 * before the matching compare_int_lock(). If ISR processing was forced for
 * the channel while locked, pend the RTC IRQ so it is handled promptly.
 */
static void compare_int_unlock(int32_t chan, bool key)
{
	if (key) {
		atomic_or(&int_mask, BIT(chan));
		nrfy_rtc_int_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
		if (atomic_get(&force_isr_mask) & BIT(chan)) {
			NVIC_SetPendingIRQ(RTC_IRQn);
		}
	}
}
195 
/* Public wrapper for compare_int_unlock(); user channels only (chan > 0). */
void z_nrf_rtc_timer_compare_int_unlock(int32_t chan, bool key)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	compare_int_unlock(chan, key);
}
202 
/* Read the current CC register value of @p chan. */
uint32_t z_nrf_rtc_timer_compare_read(int32_t chan)
{
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);

	return nrfy_rtc_cc_get(RTC, chan);
}
209 
/* Convert a kernel timeout into an absolute 64-bit RTC cycle value.
 *
 * @return Absolute RTC cycles, or -EINVAL (cast to uint64_t) when the
 *         timeout cannot be represented (too distant, or overflow).
 */
uint64_t z_nrf_rtc_timer_get_ticks(k_timeout_t t)
{
	int64_t abs_ticks;

	abs_ticks = Z_TICK_ABS(t.ticks);
	if (Z_IS_TIMEOUT_RELATIVE(t)) {
		/* Relative timeout: add to current time, but reject spans
		 * exceeding one full counter wrap.
		 */
		return (t.ticks > COUNTER_SPAN) ?
			-EINVAL : (z_nrf_rtc_timer_read() + t.ticks * CYC_PER_TICK);
	}

	/* absolute timeout */
	/* abs_ticks is int64_t so it has 63 bits. If CYC_PER_TICK is <=2 then
	 * any abs_ticks will fit in 64 bits after multiplying by CYC_PER_TICK
	 * but if CYC_PER_TICK is higher then it is possible that abs_ticks cannot
	 * be converted to RTC ticks and check for overflow is needed.
	 */
	if ((CYC_PER_TICK > 2) && (abs_ticks > (UINT64_MAX / CYC_PER_TICK))) {
		return -EINVAL;
	}

	return abs_ticks * CYC_PER_TICK;
}
232 
/** @brief Function safely sets an alarm.
 *
 * It assumes that provided value is at most COUNTER_HALF_SPAN cycles from now
 * (other values are considered to be from the past). It detects late setting
 * and properly adjusts CC values that are too near in the future to guarantee
 * triggering a COMPARE event soon, not after 512 seconds when the RTC wraps
 * around first.
 *
 * @param[in] chan A channel for which a new CC value is to be set.
 *
 * @param[in] req_cc Requested CC register value to be set.
 *
 * @param[in] exact Use @c false to allow CC adjustment if @c req_cc value is
 *                  close to the current value of the timer.
 *                  Use @c true to disallow CC adjustment. The function can
 *                  fail with -EINVAL result if @p req_cc is too close to the
 *                  current value.
 *
 * @retval 0 The requested CC has been set successfully.
 * @retval -EINVAL The requested CC value could not be reliably set.
 */
static int set_alarm(int32_t chan, uint32_t req_cc, bool exact)
{
	int ret = 0;

	/* Ensure that the value exposed in this driver API is consistent with
	 * assumptions of this function.
	 */
	BUILD_ASSERT(NRF_RTC_TIMER_MAX_SCHEDULE_SPAN <= COUNTER_HALF_SPAN);

	/* According to product specifications, when the current counter value
	 * is N, a value of N+2 written to the CC register is guaranteed to
	 * trigger a COMPARE event at N+2, but tests show that this compare
	 * value can be missed when the previous CC value is N+1 and the write
	 * occurs in the second half of the RTC clock cycle (such situation can
	 * be provoked by test_next_cycle_timeouts in the nrf_rtc_timer suite).
	 * This never happens when the written value is N+3. Use 3 cycles as
	 * the nearest possible scheduling then.
	 */
	enum { MIN_CYCLES_FROM_NOW = 3 };
	uint32_t cc_val = req_cc;
	uint32_t cc_inc = MIN_CYCLES_FROM_NOW;

	/* Disable event routing for the channel to avoid getting a COMPARE
	 * event for the previous CC value before the new one takes effect
	 * (however, even if such spurious event was generated, it would be
	 * properly filtered out in process_channel(), where the target time
	 * is checked).
	 * Clear also the event as it may already be generated at this point.
	 */
	event_disable(chan);
	event_clear(chan);

	for (;;) {
		uint32_t now;

#if CUSTOM_COUNTER_BIT_WIDTH
		/* If a CC value is 0 when a CLEAR task is set, this will not
		 * trigger a COMPARE event. Need to use 1 instead.
		 */
		if ((cc_val & COUNTER_MAX) == 0) {
			cc_val = 1;
		}
#endif
		set_comparator(chan, cc_val);
		/* Enable event routing after the required CC value was set.
		 * Even though the above operation may get repeated (see below),
		 * there is no need to disable event routing in every iteration
		 * of the loop, as the COMPARE event resulting from any attempt
		 * of setting the CC register is acceptable (as mentioned above,
		 * process_channel() does the proper filtering).
		 */
		event_enable(chan);

		now = counter();

		/* Check if the CC register was successfully set to a value
		 * that will for sure trigger a COMPARE event as expected.
		 * If not, try again, adjusting the CC value accordingly.
		 * Increase the CC value by a larger number of cycles in each
		 * trial to avoid spending too much time in this loop if it
		 * continuously gets interrupted and delayed by something.
		 */
		if (counter_sub(cc_val, now + MIN_CYCLES_FROM_NOW) >
		    (COUNTER_HALF_SPAN - MIN_CYCLES_FROM_NOW)) {
			/* If the COMPARE event turns out to be already
			 * generated, check if the loop can be finished.
			 */
			if (event_check(chan)) {
				/* If the current counter value has not yet
				 * reached the requested CC value, the event
				 * must come from the previously set CC value
				 * (the alarm is apparently rescheduled).
				 * The event needs to be cleared then and the
				 * loop needs to be continued.
				 */
				now = counter();
				if (counter_sub(now, req_cc) > COUNTER_HALF_SPAN) {
					event_clear(chan);
					if (exact) {
						ret = -EINVAL;
						break;
					}
				} else {
					break;
				}
			} else if (exact) {
				ret = -EINVAL;
				break;
			}

			cc_val = now + cc_inc;
			cc_inc++;
		} else {
			break;
		}
	}

	return ret;
}
353 
/* Program a compare channel to fire at @p target_time (no locking).
 *
 * Caller must hold the channel's compare interrupt lock. On success the
 * channel's target time, handler and user context are stored.
 *
 * @retval 0 on success.
 * @retval -EINVAL if the target time is too distant, or (for exact alarms)
 *         already in the past or too close to be set reliably.
 */
static int compare_set_nolocks(int32_t chan, uint64_t target_time,
			z_nrf_rtc_timer_compare_handler_t handler,
			void *user_data, bool exact)
{
	int ret = 0;
	uint32_t cc_value = absolute_time_to_cc(target_time);
	uint64_t curr_time = z_nrf_rtc_timer_read();

	if (curr_time < target_time) {
		if (target_time - curr_time > COUNTER_HALF_SPAN) {
			/* Target time is too distant. */
			return -EINVAL;
		}

		if (target_time != cc_data[chan].target_time) {
			/* Target time is valid and is different than currently set.
			 * Set CC value.
			 */
			ret = set_alarm(chan, cc_value, exact);
		}
	} else if (!exact) {
		/* Force ISR handling when exiting from critical section. */
		atomic_or(&force_isr_mask, BIT(chan));
	} else {
		ret = -EINVAL;
	}

	if (ret == 0) {
		cc_data[chan].target_time = target_time;
		cc_data[chan].callback = handler;
		cc_data[chan].user_context = user_data;
	}

	return ret;
}
389 
/* Locked variant of compare_set_nolocks(): takes the channel's compare
 * interrupt lock around the update to guard against concurrent access.
 */
static int compare_set(int32_t chan, uint64_t target_time,
			z_nrf_rtc_timer_compare_handler_t handler,
			void *user_data, bool exact)
{
	int ret;
	bool key = compare_int_lock(chan);

	ret = compare_set_nolocks(chan, target_time, handler, user_data, exact);
	compare_int_unlock(chan, key);

	return ret;
}
404 
/* Set an alarm on a user channel, allowing CC adjustment when the target
 * time is very close (non-exact variant).
 */
int z_nrf_rtc_timer_set(int32_t chan, uint64_t target_time,
			 z_nrf_rtc_timer_compare_handler_t handler,
			 void *user_data)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_set(chan, target_time, handler, user_data, false);
}
413 
/* Set an alarm on a user channel; fails with -EINVAL instead of adjusting
 * the CC value when the target time cannot be met exactly.
 */
int z_nrf_rtc_timer_exact_set(int32_t chan, uint64_t target_time,
			      z_nrf_rtc_timer_compare_handler_t handler,
			      void *user_data)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_set(chan, target_time, handler, user_data, true);
}
422 
/* Cancel a pending alarm on a user channel: invalidate the target time,
 * clear and disable the COMPARE event, and drop any forced-ISR request.
 */
void z_nrf_rtc_timer_abort(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	bool key = compare_int_lock(chan);

	cc_data[chan].target_time = TARGET_TIME_INVALID;
	event_clear(chan);
	event_disable(chan);
	(void)atomic_and(&force_isr_mask, ~BIT(chan));

	compare_int_unlock(chan, key);
}
436 
/* Return the current 64-bit absolute time in RTC cycles.
 *
 * Combines overflow_cnt with the hardware counter, compensating for an
 * overflow that has occurred but has not been processed by the ISR yet.
 */
uint64_t z_nrf_rtc_timer_read(void)
{
	uint64_t val = ((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH;

	/* Order the overflow_cnt read before the counter read. */
	barrier_dmem_fence_full();

	uint32_t cntr = counter();

#if CUSTOM_COUNTER_BIT_WIDTH
	/* If counter is equal to its maximum value while val is greater
	 * than anchor, then we can assume that overflow has been recorded
	 * in the overflow_cnt, but clear task has not been triggered yet.
	 * Treat counter as if it has been cleared.
	 */
	if ((cntr == COUNTER_MAX) && (val > anchor)) {
		cntr = 0;
	}
#endif

	val += cntr;

	if (cntr < OVERFLOW_RISK_RANGE_END) {
		/* `overflow_cnt` can have incorrect value due to still unhandled overflow or
		 * due to possibility that this code preempted overflow interrupt before final write
		 * of `overflow_cnt`. Update of `anchor` occurs far in time from this moment, so
		 * `anchor` is considered valid and stable. Because of this timing there is no risk
		 * of incorrect `anchor` value caused by non-atomic read of 64-bit `anchor`.
		 */
		if (val < anchor) {
			/* Unhandled overflow detected, let's add correction */
			val += COUNTER_SPAN;
		}
	} else {
		/* `overflow_cnt` is considered valid and stable in this range, no need to
		 * check validity using `anchor`
		 */
	}

	return val;
}
477 
in_anchor_range(uint32_t cc_value)478 static inline bool in_anchor_range(uint32_t cc_value)
479 {
480 	return (cc_value >= ANCHOR_RANGE_START) && (cc_value < ANCHOR_RANGE_END);
481 }
482 
/* Refresh the anchor from overflow_cnt and @p cc_value, but only when the
 * counter is far from an overflow.
 */
static inline void anchor_update(uint32_t cc_value)
{
	/* Update anchor when far from overflow */
	if (in_anchor_range(cc_value)) {
		/* In this range `overflow_cnt` is considered valid and stable.
		 * Write of 64-bit `anchor` is non atomic. However it happens
		 * far in time from the moment the `anchor` is read in
		 * `z_nrf_rtc_timer_read`.
		 */
		anchor = (((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH) + cc_value;
	}
}
495 
/* Compare handler for the system clock channel: advance last_count to the
 * tick boundary, refresh the anchor, re-arm the next tick when not tickless,
 * and announce the elapsed ticks to the kernel.
 */
static void sys_clock_timeout_handler(int32_t chan,
				      uint64_t expire_time,
				      void *user_data)
{
	uint32_t cc_value = absolute_time_to_cc(expire_time);
	/* Whole ticks elapsed since the last announcement. */
	uint32_t dticks = (uint32_t)(expire_time - last_count) / CYC_PER_TICK;

	last_count += dticks * CYC_PER_TICK;

	anchor_update(cc_value);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* protection is not needed because we are in the RTC interrupt
		 * so it won't get preempted by the interrupt.
		 */
		compare_set(chan, last_count + CYC_PER_TICK,
					  sys_clock_timeout_handler, NULL, false);
	}

	sys_clock_announce(dticks);
}
517 
/* Decide whether @p chan needs servicing in the ISR and consume its event.
 *
 * @return True if the channel's interrupt is enabled and either processing
 *         was forced or its COMPARE event fired (event is cleared then).
 */
static bool channel_processing_check_and_clear(int32_t chan)
{
	if (nrfy_rtc_int_enable_check(RTC, NRF_RTC_CHANNEL_INT_MASK(chan))) {
		/* The processing of channel can be caused by CC match
		 * or be forced.
		 */
		if ((atomic_and(&force_isr_mask, ~BIT(chan)) & BIT(chan)) ||
		    event_check(chan)) {
			event_clear(chan);
			return true;
		}
	}

	return false;
}
533 
/* Service one compare channel from the ISR: if its target time has been
 * reached, consume the channel state and invoke the registered callback
 * (outside the critical section).
 */
static void process_channel(int32_t chan)
{
	if (channel_processing_check_and_clear(chan)) {
		void *user_context;
		uint32_t mcu_critical_state;
		uint64_t curr_time;
		uint64_t expire_time;
		z_nrf_rtc_timer_compare_handler_t handler = NULL;

		curr_time = z_nrf_rtc_timer_read();

		/* This critical section is used to provide atomic access to
		 * cc_data structure and prevent higher priority contexts
		 * (including ZLIs) from overwriting it.
		 */
		mcu_critical_state = full_int_lock();

		/* If target_time is in the past or is equal to current time
		 * value, execute the handler.
		 */
		expire_time = cc_data[chan].target_time;
		if (curr_time >= expire_time) {
			handler = cc_data[chan].callback;
			user_context = cc_data[chan].user_context;
			cc_data[chan].callback = NULL;
			cc_data[chan].target_time = TARGET_TIME_INVALID;
			event_disable(chan);
			/* Because of the way set_alarm() sets the CC register,
			 * it may turn out that another COMPARE event has been
			 * generated for the same alarm. Make sure the event
			 * is cleared, so that the ISR is not executed again
			 * unnecessarily.
			 */
			event_clear(chan);
		}

		full_int_unlock(mcu_critical_state);

		if (handler) {
			handler(chan, expire_time, user_context);
		}
	}
}
577 
/* Note: this function has public linkage, and MUST have this
 * particular name.  The platform architecture itself doesn't care,
 * but there is a test (tests/arch/arm_irq_vector_table) that needs
 * to find it to it can set it in a custom vector table.  Should
 * probably better abstract that at some point (e.g. query and reset
 * it by pointer at runtime, maybe?) so we don't have this leaky
 * symbol.
 */
void rtc_nrf_isr(const void *arg)
{
	ARG_UNUSED(arg);

	if (RTC_PRETICK) {
		rtc_pretick_rtc1_isr_hook();
	}

#if CUSTOM_COUNTER_BIT_WIDTH
	/* With a reduced counter width the wrap channel's COMPARE event
	 * signals the wrap-around instead of the OVERFLOW event.
	 */
	if (nrfy_rtc_int_enable_check(RTC, NRF_RTC_CHANNEL_INT_MASK(WRAP_CH)) &&
	    nrfy_rtc_events_process(RTC, NRF_RTC_CHANNEL_INT_MASK(WRAP_CH))) {
#else
	if (nrfy_rtc_int_enable_check(RTC, NRF_RTC_INT_OVERFLOW_MASK) &&
	    nrfy_rtc_events_process(RTC, NRF_RTC_INT_OVERFLOW_MASK)) {
#endif
		overflow_cnt++;
	}

	/* Service every compare channel, starting with the system clock one. */
	for (int32_t chan = SYS_CLOCK_CH; chan < CHAN_COUNT; chan++) {
		process_channel(chan);
	}
}
608 
/* Allocate a compare channel for application use.
 *
 * @return Allocated channel number, or -ENOMEM when none is free.
 */
int32_t z_nrf_rtc_timer_chan_alloc(void)
{
	int32_t chan;
	atomic_val_t prev;
	do {
		/* Pick the highest free channel from the allocation mask. */
		chan = alloc_mask ? 31 - __builtin_clz(alloc_mask) : -1;
		if (chan < 0) {
			return -ENOMEM;
		}
		prev = atomic_and(&alloc_mask, ~BIT(chan));
		/* Retry if another context claimed the same channel first. */
	} while (!(prev & BIT(chan)));

	return chan;
}
623 
/* Return a previously allocated compare channel to the pool. */
void z_nrf_rtc_timer_chan_free(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	atomic_or(&alloc_mask, BIT(chan));
}
630 
631 
/* Artificially trigger an RTC counter overflow (test/diagnostic helper).
 *
 * @retval 0 on success.
 * @retval -ENOTSUP if the feature is disabled or user channels are in use.
 * @retval -EBUSY if a system clock timeout is currently pending.
 * @retval -EAGAIN if the counter is already close to a natural overflow.
 */
int z_nrf_rtc_timer_trigger_overflow(void)
{
	uint32_t mcu_critical_state;
	int err = 0;

	if (!IS_ENABLED(CONFIG_NRF_RTC_TIMER_TRIGGER_OVERFLOW) ||
	    (CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT > 0)) {
		return -ENOTSUP;
	}

	mcu_critical_state = full_int_lock();
	if (sys_busy) {
		err = -EBUSY;
		goto bail;
	}

	if (counter() >= (COUNTER_SPAN - 100)) {
		err = -EAGAIN;
		goto bail;
	}

	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_TRIGGER_OVERFLOW);
	/* Give the task time to take effect before reading the new time. */
	k_busy_wait(80);

	uint64_t now = z_nrf_rtc_timer_read();

	if (err == 0) {
		/* Announce the jump in time to the kernel. */
		sys_clock_timeout_handler(SYS_CLOCK_CH, now, NULL);
	}
bail:
	full_int_unlock(mcu_critical_state);

	return err;
}
666 
/* Kernel hook: program the next system clock interrupt (tickless only).
 *
 * @param ticks Ticks until the next required announcement; K_TICKS_FOREVER
 *              means no timeout is needed.
 * @param idle Unused.
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);
	uint32_t cyc;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	if (ticks == K_TICKS_FOREVER) {
		cyc = MAX_TICKS * CYC_PER_TICK;
		sys_busy = false;
	} else {
		/* Value of ticks can be zero or negative, what means "announce
		 * the next tick" (the same as ticks equal to 1).
		 */
		cyc = CLAMP(ticks, 1, (int32_t)MAX_TICKS);
		cyc *= CYC_PER_TICK;
		sys_busy = true;
	}

	uint32_t unannounced = z_nrf_rtc_timer_read() - last_count;

	/* If we haven't announced for more than half the 24-bit wrap
	 * duration, then force an announce to avoid loss of a wrap
	 * event.  This can happen if new timeouts keep being set
	 * before the existing one triggers the interrupt.
	 */
	if (unannounced >= COUNTER_HALF_SPAN) {
		cyc = 0;
	}

	/* Get the cycles from last_count to the tick boundary after
	 * the requested ticks have passed starting now.
	 */
	cyc += unannounced;
	cyc = DIV_ROUND_UP(cyc, CYC_PER_TICK) * CYC_PER_TICK;

	/* Due to elapsed time the calculation above might produce a
	 * duration that laps the counter.  Don't let it.
	 * This limitation also guarantees that the anchor will be properly
	 * updated before every overflow (see anchor_update()).
	 */
	if (cyc > MAX_CYCLES) {
		cyc = MAX_CYCLES;
	}

	uint64_t target_time = cyc + last_count;

	compare_set(SYS_CLOCK_CH, target_time, sys_clock_timeout_handler, NULL, false);
}
718 
/* Kernel hook: number of whole ticks elapsed since the last announcement. */
uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	return (z_nrf_rtc_timer_read() - last_count) / CYC_PER_TICK;
}
727 
/* Kernel hook: current cycle count truncated to 32 bits. */
uint32_t sys_clock_cycle_get_32(void)
{
	return (uint32_t)z_nrf_rtc_timer_read();
}
732 
/* Restore RTC interrupt and event routing enables to their reset state. */
static void int_event_disable_rtc(void)
{
	uint32_t mask = NRF_RTC_INT_TICK_MASK     |
#if !CUSTOM_COUNTER_BIT_WIDTH
			NRF_RTC_INT_OVERFLOW_MASK |
#endif
			NRF_RTC_INT_COMPARE0_MASK |
			NRF_RTC_INT_COMPARE1_MASK |
			NRF_RTC_INT_COMPARE2_MASK |
			NRF_RTC_INT_COMPARE3_MASK;

	/* Reset interrupt enabling to expected reset values */
	nrfy_rtc_int_disable(RTC, mask);

	/* Reset event routing enabling to expected reset values */
	nrfy_rtc_event_disable(RTC, mask);
}
750 
751 void sys_clock_disable(void)
752 {
753 	nrf_rtc_task_trigger(RTC, NRF_RTC_TASK_STOP);
754 	irq_disable(RTC_IRQn);
755 	int_event_disable_rtc();
756 	NVIC_ClearPendingIRQ(RTC_IRQn);
757 }
758 
/* Initialize the RTC-based system clock: configure RTC1, connect and enable
 * its ISR, start the counter, request the LF clock and, with a reduced
 * counter width, set up the (D)PPI connection that clears the counter on the
 * wrap compare channel.
 */
static int sys_clock_driver_init(void)
{
	int_event_disable_rtc();

	/* TODO: replace with counter driver to access RTC */
	nrfy_rtc_prescaler_set(RTC, 0);
	for (int32_t chan = 0; chan < CHAN_COUNT; chan++) {
		cc_data[chan].target_time = TARGET_TIME_INVALID;
		nrfy_rtc_int_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
	}

#if !CUSTOM_COUNTER_BIT_WIDTH
	/* With the full-width counter, wrap-arounds are tracked via the
	 * OVERFLOW event.
	 */
	nrfy_rtc_int_enable(RTC, NRF_RTC_INT_OVERFLOW_MASK);
#endif

	NVIC_ClearPendingIRQ(RTC_IRQn);

	IRQ_CONNECT(RTC_IRQn, DT_IRQ(DT_NODELABEL(RTC_LABEL), priority),
		    rtc_nrf_isr, 0, 0);
	irq_enable(RTC_IRQn);

	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_CLEAR);
	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_START);

	int_mask = BIT_MASK(CHAN_COUNT);
	if (CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT) {
		/* Every channel but the system clock one is allocatable. */
		alloc_mask = BIT_MASK(CHAN_COUNT) & ~BIT(SYS_CLOCK_CH);
	}

	uint32_t initial_timeout = IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
		MAX_CYCLES : CYC_PER_TICK;

	compare_set(SYS_CLOCK_CH, initial_timeout, sys_clock_timeout_handler, NULL, false);

#if defined(CONFIG_CLOCK_CONTROL_NRF)
	static const enum nrf_lfclk_start_mode mode =
		IS_ENABLED(CONFIG_SYSTEM_CLOCK_NO_WAIT) ?
			CLOCK_CONTROL_NRF_LF_START_NOWAIT :
			(IS_ENABLED(CONFIG_SYSTEM_CLOCK_WAIT_FOR_AVAILABILITY) ?
			CLOCK_CONTROL_NRF_LF_START_AVAILABLE :
			CLOCK_CONTROL_NRF_LF_START_STABLE);

	z_nrf_clock_control_lf_on(mode);
#endif

#if CUSTOM_COUNTER_BIT_WIDTH
	/* WRAP_CH reserved for wrapping. */
	alloc_mask &= ~BIT(WRAP_CH);

	nrf_rtc_event_t evt = NRF_RTC_CHANNEL_EVENT_ADDR(WRAP_CH);
	int result;
	nrfx_gppi_handle_t handle;

	nrfy_rtc_event_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(WRAP_CH));
	nrfy_rtc_cc_set(RTC, WRAP_CH, COUNTER_MAX);
	uint32_t evt_addr;
	uint32_t task_addr;

	evt_addr = nrfy_rtc_event_address_get(RTC, evt);
	task_addr = nrfy_rtc_task_address_get(RTC, NRF_RTC_TASK_CLEAR);

	/* Hardware-clear the counter when CC[WRAP_CH] matches, so the
	 * counter wraps at the reduced bit width.
	 */
	result = nrfx_gppi_conn_alloc(evt_addr, task_addr, &handle);
	if (result < 0) {
		return result;
	}
	nrfx_gppi_conn_enable(handle);
#endif
	return 0;
}
828 
829 SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
830 	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
831