1 /*
2  * Copyright (c) 2016-2021 Nordic Semiconductor ASA
3  * Copyright (c) 2018 Intel Corporation
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/init.h>
9 #include <soc.h>
10 #include <zephyr/drivers/clock_control.h>
11 #include <zephyr/drivers/clock_control/nrf_clock_control.h>
12 #include <zephyr/drivers/timer/system_timer.h>
13 #include <zephyr/drivers/timer/nrf_rtc_timer.h>
14 #include <zephyr/sys/util.h>
15 #include <zephyr/sys_clock.h>
16 #include <zephyr/sys/barrier.h>
17 #include <haly/nrfy_rtc.h>
18 #include <zephyr/irq.h>
19 
20 #define RTC_PRETICK (IS_ENABLED(CONFIG_SOC_NRF53_RTC_PRETICK) && \
21 		     IS_ENABLED(CONFIG_SOC_NRF5340_CPUNET))
22 
23 #define EXT_CHAN_COUNT CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT
24 #define CHAN_COUNT (EXT_CHAN_COUNT + 1)
25 
26 #define RTC NRF_RTC1
27 #define RTC_IRQn NRFX_IRQ_NUMBER_GET(RTC)
28 #define RTC_LABEL rtc1
29 #define CHAN_COUNT_MAX (RTC1_CC_NUM - (RTC_PRETICK ? 1 : 0))
30 
31 BUILD_ASSERT(CHAN_COUNT <= CHAN_COUNT_MAX, "Not enough compare channels");
32 /* Ensure that counter driver for RTC1 is not enabled. */
33 BUILD_ASSERT(DT_NODE_HAS_STATUS(DT_NODELABEL(RTC_LABEL), disabled),
34 	     "Counter for RTC1 must be disabled");
35 
36 #define COUNTER_BIT_WIDTH 24U
37 #define COUNTER_SPAN BIT(COUNTER_BIT_WIDTH)
38 #define COUNTER_MAX (COUNTER_SPAN - 1U)
39 #define COUNTER_HALF_SPAN (COUNTER_SPAN / 2U)
40 #define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
41 		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
42 #define MAX_TICKS ((COUNTER_HALF_SPAN - CYC_PER_TICK) / CYC_PER_TICK)
43 #define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
44 
45 #define OVERFLOW_RISK_RANGE_END (COUNTER_SPAN / 16)
46 #define ANCHOR_RANGE_START (COUNTER_SPAN / 8)
47 #define ANCHOR_RANGE_END (7 * COUNTER_SPAN / 8)
48 #define TARGET_TIME_INVALID (UINT64_MAX)
49 
50 extern void rtc_pretick_rtc1_isr_hook(void);
51 
/* Number of times the 24-bit RTC counter has wrapped around. */
static volatile uint32_t overflow_cnt;
/* Absolute time (64-bit RTC ticks) recorded while far from an overflow;
 * used by z_nrf_rtc_timer_read() to detect a not-yet-handled overflow.
 */
static volatile uint64_t anchor;
/* Absolute time of the last tick boundary announced to the kernel. */
static uint64_t last_count;
/* True while a finite system timeout is armed (see sys_clock_set_timeout). */
static bool sys_busy;

/* Per-channel compare state. */
struct z_nrf_rtc_timer_chan_data {
	z_nrf_rtc_timer_compare_handler_t callback;
	void *user_context;
	/* Absolute expiration time; TARGET_TIME_INVALID when unused. */
	volatile uint64_t target_time;
};

/* State of each compare channel; channel 0 is the system clock channel. */
static struct z_nrf_rtc_timer_chan_data cc_data[CHAN_COUNT];
/* Bit set = channel's COMPARE interrupt is enabled (not "locked"). */
static atomic_t int_mask;
/* Bit set = channel is free to be allocated by z_nrf_rtc_timer_chan_alloc. */
static atomic_t alloc_mask;
/* Bit set = channel must be processed by the ISR even without a COMPARE event. */
static atomic_t force_isr_mask;
67 
counter_sub(uint32_t a,uint32_t b)68 static uint32_t counter_sub(uint32_t a, uint32_t b)
69 {
70 	return (a - b) & COUNTER_MAX;
71 }
72 
set_comparator(int32_t chan,uint32_t cyc)73 static void set_comparator(int32_t chan, uint32_t cyc)
74 {
75 	nrfy_rtc_cc_set(RTC, chan, cyc & COUNTER_MAX);
76 }
77 
event_check(int32_t chan)78 static bool event_check(int32_t chan)
79 {
80 	return nrfy_rtc_event_check(RTC, NRF_RTC_CHANNEL_EVENT_ADDR(chan));
81 }
82 
event_clear(int32_t chan)83 static void event_clear(int32_t chan)
84 {
85 	nrfy_rtc_event_clear(RTC, NRF_RTC_CHANNEL_EVENT_ADDR(chan));
86 }
87 
event_enable(int32_t chan)88 static void event_enable(int32_t chan)
89 {
90 	nrfy_rtc_event_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
91 }
92 
event_disable(int32_t chan)93 static void event_disable(int32_t chan)
94 {
95 	nrfy_rtc_event_disable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
96 }
97 
counter(void)98 static uint32_t counter(void)
99 {
100 	return nrfy_rtc_counter_get(RTC);
101 }
102 
absolute_time_to_cc(uint64_t absolute_time)103 static uint32_t absolute_time_to_cc(uint64_t absolute_time)
104 {
105 	/* 24 least significant bits represent target CC value */
106 	return absolute_time & COUNTER_MAX;
107 }
108 
full_int_lock(void)109 static uint32_t full_int_lock(void)
110 {
111 	uint32_t mcu_critical_state;
112 
113 	if (IS_ENABLED(CONFIG_NRF_RTC_TIMER_LOCK_ZERO_LATENCY_IRQS)) {
114 		mcu_critical_state = __get_PRIMASK();
115 		__disable_irq();
116 	} else {
117 		mcu_critical_state = irq_lock();
118 	}
119 
120 	return mcu_critical_state;
121 }
122 
full_int_unlock(uint32_t mcu_critical_state)123 static void full_int_unlock(uint32_t mcu_critical_state)
124 {
125 	if (IS_ENABLED(CONFIG_NRF_RTC_TIMER_LOCK_ZERO_LATENCY_IRQS)) {
126 		__set_PRIMASK(mcu_critical_state);
127 	} else {
128 		irq_unlock(mcu_critical_state);
129 	}
130 }
131 
/* Return the address of the channel's COMPARE event register. */
uint32_t z_nrf_rtc_timer_compare_evt_address_get(int32_t chan)
{
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);
	return nrfy_rtc_event_address_get(RTC, nrfy_rtc_compare_event_get(chan));
}
137 
/* Return the address of the channel's CAPTURE task register.
 *
 * Returns 0 when the RTC peripheral has no capture tasks or for channel 0,
 * which is reserved for the system clock.
 */
uint32_t z_nrf_rtc_timer_capture_task_address_get(int32_t chan)
{
#if defined(RTC_TASKS_CAPTURE_TASKS_CAPTURE_Msk)
	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);
	/* No capture task is exposed for the system clock channel. */
	if (chan == 0) {
		return 0;
	}

	return nrfy_rtc_task_address_get(RTC, nrfy_rtc_capture_task_get(chan));
#else
	ARG_UNUSED(chan);
	return 0;
#endif
}
152 
/* Disable the COMPARE interrupt for @p chan.
 *
 * Returns true if the interrupt was enabled before this call; the value is
 * the key to pass to compare_int_unlock() to restore the previous state.
 */
static bool compare_int_lock(int32_t chan)
{
	/* Clear the bit first so the mask never claims an interrupt that is
	 * about to be disabled in hardware.
	 */
	atomic_val_t prev = atomic_and(&int_mask, ~BIT(chan));

	nrfy_rtc_int_disable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));

	/* Ensure the disable has taken effect before the caller starts
	 * touching data shared with the ISR.
	 */
	barrier_dmem_fence_full();
	barrier_isync_fence_full();

	return prev & BIT(chan);
}
164 
165 
/* Public wrapper for compare_int_lock().
 *
 * Channel 0 is reserved for the system clock, hence the chan > 0 assertion.
 */
bool z_nrf_rtc_timer_compare_int_lock(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_int_lock(chan);
}
172 
/* Re-enable the COMPARE interrupt for @p chan if @p key says it was enabled
 * when the matching compare_int_lock() was taken. If ISR processing was
 * forced for this channel while it was locked, make the RTC IRQ pending now
 * so the forced work is not lost.
 */
static void compare_int_unlock(int32_t chan, bool key)
{
	if (key) {
		atomic_or(&int_mask, BIT(chan));
		nrfy_rtc_int_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
		if (atomic_get(&force_isr_mask) & BIT(chan)) {
			NVIC_SetPendingIRQ(RTC_IRQn);
		}
	}
}
183 
/* Public wrapper for compare_int_unlock().
 *
 * Channel 0 is reserved for the system clock, hence the chan > 0 assertion.
 */
void z_nrf_rtc_timer_compare_int_unlock(int32_t chan, bool key)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	compare_int_unlock(chan, key);
}
190 
z_nrf_rtc_timer_compare_read(int32_t chan)191 uint32_t z_nrf_rtc_timer_compare_read(int32_t chan)
192 {
193 	__ASSERT_NO_MSG(chan >= 0 && chan < CHAN_COUNT);
194 
195 	return nrfy_rtc_cc_get(RTC, chan);
196 }
197 
/* Convert a kernel timeout into an absolute RTC time (64-bit tick value).
 *
 * Returns the absolute time corresponding to @p t, or -EINVAL when the
 * timeout lies more than COUNTER_SPAN RTC cycles in the future.
 */
uint64_t z_nrf_rtc_timer_get_ticks(k_timeout_t t)
{
	uint64_t curr_time;
	int64_t curr_tick;
	int64_t result;
	int64_t abs_ticks;

	/* Read the RTC time and the kernel tick count coherently: retry if
	 * the RTC time advanced while the tick count was being read.
	 */
	do {
		curr_time = z_nrf_rtc_timer_read();
		curr_tick = sys_clock_tick_get();
	} while (curr_time != z_nrf_rtc_timer_read());

	abs_ticks = Z_TICK_ABS(t.ticks);
	if (Z_IS_TIMEOUT_RELATIVE(t)) {
		return (t.ticks > COUNTER_SPAN) ?
			-EINVAL : (curr_time + t.ticks);
	}

	/* absolute timeout */
	result = abs_ticks - curr_tick;

	if (result > COUNTER_SPAN) {
		return -EINVAL;
	}

	return curr_time + result;
}
225 
226 /** @brief Function safely sets an alarm.
227  *
228  * It assumes that provided value is at most COUNTER_HALF_SPAN cycles from now
229  * (other values are considered to be from the past). It detects late setting
230  * and properly adjusts CC values that are too near in the future to guarantee
231  * triggering a COMPARE event soon, not after 512 seconds when the RTC wraps
232  * around first.
233  *
234  * @param[in] chan A channel for which a new CC value is to be set.
235  *
236  * @param[in] req_cc Requested CC register value to be set.
237  *
238  * @param[in] exact Use @c false to allow CC adjustment if @c req_cc value is
239  *                  close to the current value of the timer.
240  *                  Use @c true to disallow CC adjustment. The function can
241  *                  fail with -EINVAL result if @p req_cc is too close to the
242  *                  current value.
243  *
244  * @retval 0 The requested CC has been set successfully.
245  * @retval -EINVAL The requested CC value could not be reliably set.
246  */
static int set_alarm(int32_t chan, uint32_t req_cc, bool exact)
{
	int ret = 0;

	/* Ensure that the value exposed in this driver API is consistent with
	 * assumptions of this function.
	 */
	BUILD_ASSERT(NRF_RTC_TIMER_MAX_SCHEDULE_SPAN <= COUNTER_HALF_SPAN);

	/* According to product specifications, when the current counter value
	 * is N, a value of N+2 written to the CC register is guaranteed to
	 * trigger a COMPARE event at N+2, but tests show that this compare
	 * value can be missed when the previous CC value is N+1 and the write
	 * occurs in the second half of the RTC clock cycle (such situation can
	 * be provoked by test_next_cycle_timeouts in the nrf_rtc_timer suite).
	 * This never happens when the written value is N+3. Use 3 cycles as
	 * the nearest possible scheduling then.
	 */
	enum { MIN_CYCLES_FROM_NOW = 3 };
	uint32_t cc_val = req_cc;
	uint32_t cc_inc = MIN_CYCLES_FROM_NOW;

	/* Disable event routing for the channel to avoid getting a COMPARE
	 * event for the previous CC value before the new one takes effect
	 * (however, even if such spurious event was generated, it would be
	 * properly filtered out in process_channel(), where the target time
	 * is checked).
	 * Clear also the event as it may already be generated at this point.
	 */
	event_disable(chan);
	event_clear(chan);

	for (;;) {
		uint32_t now;

		set_comparator(chan, cc_val);
		/* Enable event routing after the required CC value was set.
		 * Even though the above operation may get repeated (see below),
		 * there is no need to disable event routing in every iteration
		 * of the loop, as the COMPARE event resulting from any attempt
		 * of setting the CC register is acceptable (as mentioned above,
		 * process_channel() does the proper filtering).
		 */
		event_enable(chan);

		now = counter();

		/* Check if the CC register was successfully set to a value
		 * that will for sure trigger a COMPARE event as expected.
		 * If not, try again, adjusting the CC value accordingly.
		 * Increase the CC value by a larger number of cycles in each
		 * trial to avoid spending too much time in this loop if it
		 * continuously gets interrupted and delayed by something.
		 */
		if (counter_sub(cc_val, now + MIN_CYCLES_FROM_NOW) >
		    (COUNTER_HALF_SPAN - MIN_CYCLES_FROM_NOW)) {
			/* If the COMPARE event turns out to be already
			 * generated, check if the loop can be finished.
			 */
			if (event_check(chan)) {
				/* If the current counter value has not yet
				 * reached the requested CC value, the event
				 * must come from the previously set CC value
				 * (the alarm is apparently rescheduled).
				 * The event needs to be cleared then and the
				 * loop needs to be continued.
				 */
				now = counter();
				if (counter_sub(now, req_cc) > COUNTER_HALF_SPAN) {
					event_clear(chan);
					if (exact) {
						ret = -EINVAL;
						break;
					}
				} else {
					break;
				}
			} else if (exact) {
				/* A late CC cannot be adjusted in exact mode;
				 * report the failure instead.
				 */
				ret = -EINVAL;
				break;
			}

			/* Retry with a CC value slightly farther from now. */
			cc_val = now + cc_inc;
			cc_inc++;
		} else {
			break;
		}
	}

	return ret;
}
338 
/* Program a compare channel without any locking.
 *
 * The caller must hold the channel's interrupt lock (see compare_set()) so
 * the ISR cannot observe a partially updated cc_data entry.
 */
static int compare_set_nolocks(int32_t chan, uint64_t target_time,
			z_nrf_rtc_timer_compare_handler_t handler,
			void *user_data, bool exact)
{
	int ret = 0;
	uint32_t cc_value = absolute_time_to_cc(target_time);
	uint64_t curr_time = z_nrf_rtc_timer_read();

	if (curr_time < target_time) {
		if (target_time - curr_time > COUNTER_HALF_SPAN) {
			/* Target time is too distant. */
			return -EINVAL;
		}

		if (target_time != cc_data[chan].target_time) {
			/* Target time is valid and is different than currently set.
			 * Set CC value.
			 */
			ret = set_alarm(chan, cc_value, exact);
		}
	} else if (!exact) {
		/* Target time is now or in the past; instead of programming
		 * the hardware, force ISR handling when exiting from critical
		 * section.
		 */
		atomic_or(&force_isr_mask, BIT(chan));
	} else {
		/* In exact mode a target time in the past is an error. */
		ret = -EINVAL;
	}

	if (ret == 0) {
		cc_data[chan].target_time = target_time;
		cc_data[chan].callback = handler;
		cc_data[chan].user_context = user_data;
	}

	return ret;
}
374 
/* Program a compare channel with its COMPARE interrupt masked, so the ISR
 * cannot observe a half-updated cc_data entry.
 */
static int compare_set(int32_t chan, uint64_t target_time,
			z_nrf_rtc_timer_compare_handler_t handler,
			void *user_data, bool exact)
{
	int ret;
	bool key = compare_int_lock(chan);

	ret = compare_set_nolocks(chan, target_time, handler, user_data, exact);
	compare_int_unlock(chan, key);

	return ret;
}
389 
/* Schedule @p handler at absolute time @p target_time on a user channel,
 * allowing the driver to adjust the CC value (or handle the alarm
 * immediately) when the time is too near or already in the past.
 */
int z_nrf_rtc_timer_set(int32_t chan, uint64_t target_time,
			 z_nrf_rtc_timer_compare_handler_t handler,
			 void *user_data)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_set(chan, target_time, handler, user_data, false);
}
398 
/* Schedule @p handler at absolute time @p target_time on a user channel
 * without allowing any CC adjustment; fails with -EINVAL when the time
 * cannot be met exactly.
 */
int z_nrf_rtc_timer_exact_set(int32_t chan, uint64_t target_time,
			      z_nrf_rtc_timer_compare_handler_t handler,
			      void *user_data)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	return compare_set(chan, target_time, handler, user_data, true);
}
407 
/* Cancel any alarm pending on @p chan: invalidate its target time, clear
 * and disable its COMPARE event, and drop any forced-ISR request.
 */
void z_nrf_rtc_timer_abort(int32_t chan)
{
	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);

	/* Lock the channel so the ISR cannot run its callback mid-abort. */
	bool key = compare_int_lock(chan);

	cc_data[chan].target_time = TARGET_TIME_INVALID;
	event_clear(chan);
	event_disable(chan);
	(void)atomic_and(&force_isr_mask, ~BIT(chan));

	compare_int_unlock(chan, key);
}
421 
/* Return the current absolute time: the 24-bit hardware counter extended to
 * 64 bits with the overflow count, corrected for an overflow that may not
 * have been handled yet.
 */
uint64_t z_nrf_rtc_timer_read(void)
{
	uint64_t val = ((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH;

	/* Make sure overflow_cnt is read before the counter value. */
	barrier_dmem_fence_full();

	uint32_t cntr = counter();

	val += cntr;

	if (cntr < OVERFLOW_RISK_RANGE_END) {
		/* `overflow_cnt` can have incorrect value due to still unhandled overflow or
		 * due to possibility that this code preempted overflow interrupt before final write
		 * of `overflow_cnt`. Update of `anchor` occurs far in time from this moment, so
		 * `anchor` is considered valid and stable. Because of this timing there is no risk
		 * of incorrect `anchor` value caused by non-atomic read of 64-bit `anchor`.
		 */
		if (val < anchor) {
			/* Unhandled overflow, detected, let's add correction */
			val += COUNTER_SPAN;
		}
	} else {
		/* `overflow_cnt` is considered valid and stable in this range, no need to
		 * check validity using `anchor`
		 */
	}

	return val;
}
451 
in_anchor_range(uint32_t cc_value)452 static inline bool in_anchor_range(uint32_t cc_value)
453 {
454 	return (cc_value >= ANCHOR_RANGE_START) && (cc_value < ANCHOR_RANGE_END);
455 }
456 
/* Record the current absolute time in `anchor`, but only when @p cc_value
 * lies far from a counter overflow (see z_nrf_rtc_timer_read()).
 */
static inline void anchor_update(uint32_t cc_value)
{
	/* Update anchor when far from overflow */
	if (in_anchor_range(cc_value)) {
		/* In this range `overflow_cnt` is considered valid and stable.
		 * Write of 64-bit `anchor` is non atomic. However it happens
		 * far in time from the moment the `anchor` is read in
		 * `z_nrf_rtc_timer_read`.
		 */
		anchor = (((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH) + cc_value;
	}
}
469 
/* Compare handler for channel 0: announces elapsed ticks to the kernel and,
 * in non-tickless mode, programs the next tick.
 */
static void sys_clock_timeout_handler(int32_t chan,
				      uint64_t expire_time,
				      void *user_data)
{
	uint32_t cc_value = absolute_time_to_cc(expire_time);
	/* Whole ticks elapsed since the last announcement. */
	uint32_t dticks = (uint32_t)(expire_time - last_count) / CYC_PER_TICK;

	/* Advance last_count by whole ticks only, keeping it tick-aligned. */
	last_count += dticks * CYC_PER_TICK;

	anchor_update(cc_value);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* protection is not needed because we are in the RTC interrupt
		 * so it won't get preempted by the interrupt.
		 */
		compare_set(chan, last_count + CYC_PER_TICK,
					  sys_clock_timeout_handler, NULL, false);
	}

	sys_clock_announce(dticks);
}
491 
/* Return true, clearing the COMPARE event, when @p chan needs processing:
 * its interrupt is enabled and either the COMPARE event fired or processing
 * was forced via force_isr_mask (which is consumed here).
 */
static bool channel_processing_check_and_clear(int32_t chan)
{
	if (nrfy_rtc_int_enable_check(RTC, NRF_RTC_CHANNEL_INT_MASK(chan))) {
		/* The processing of channel can be caused by CC match
		 * or be forced.
		 */
		if ((atomic_and(&force_isr_mask, ~BIT(chan)) & BIT(chan)) ||
		    event_check(chan)) {
			event_clear(chan);
			return true;
		}
	}

	return false;
}
507 
/* Handle a single channel from the ISR: when its COMPARE event fired (or
 * processing was forced) and the target time has been reached, consume the
 * channel's state and invoke the user callback.
 */
static void process_channel(int32_t chan)
{
	if (channel_processing_check_and_clear(chan)) {
		void *user_context;
		uint32_t mcu_critical_state;
		uint64_t curr_time;
		uint64_t expire_time;
		z_nrf_rtc_timer_compare_handler_t handler = NULL;

		curr_time = z_nrf_rtc_timer_read();

		/* This critical section is used to provide atomic access to
		 * cc_data structure and prevent higher priority contexts
		 * (including ZLIs) from overwriting it.
		 */
		mcu_critical_state = full_int_lock();

		/* If target_time is in the past or is equal to current time
		 * value, execute the handler.
		 */
		expire_time = cc_data[chan].target_time;
		if (curr_time >= expire_time) {
			handler = cc_data[chan].callback;
			user_context = cc_data[chan].user_context;
			cc_data[chan].callback = NULL;
			cc_data[chan].target_time = TARGET_TIME_INVALID;
			event_disable(chan);
			/* Because of the way set_alarm() sets the CC register,
			 * it may turn out that another COMPARE event has been
			 * generated for the same alarm. Make sure the event
			 * is cleared, so that the ISR is not executed again
			 * unnecessarily.
			 */
			event_clear(chan);
		}

		full_int_unlock(mcu_critical_state);

		/* Call the handler outside the critical section. */
		if (handler) {
			handler(chan, expire_time, user_context);
		}
	}
}
551 
552 /* Note: this function has public linkage, and MUST have this
553  * particular name.  The platform architecture itself doesn't care,
554  * but there is a test (tests/arch/arm_irq_vector_table) that needs
555  * to find it to it can set it in a custom vector table.  Should
556  * probably better abstract that at some point (e.g. query and reset
557  * it by pointer at runtime, maybe?) so we don't have this leaky
558  * symbol.
559  */
/* RTC1 interrupt service routine: counts overflows and processes every
 * compare channel.
 */
void rtc_nrf_isr(const void *arg)
{
	ARG_UNUSED(arg);

	/* Hook used by the nRF53 RTC pretick workaround
	 * (CONFIG_SOC_NRF53_RTC_PRETICK on the network core).
	 */
	if (RTC_PRETICK) {
		rtc_pretick_rtc1_isr_hook();
	}

	/* Count an overflow when its interrupt is enabled and its event
	 * fired (events_process also clears the event).
	 */
	if (nrfy_rtc_int_enable_check(RTC, NRF_RTC_INT_OVERFLOW_MASK) &&
	    nrfy_rtc_events_process(RTC, NRF_RTC_INT_OVERFLOW_MASK)) {
		overflow_cnt++;
	}

	for (int32_t chan = 0; chan < CHAN_COUNT; chan++) {
		process_channel(chan);
	}
}
577 
/* Allocate one of the user compare channels.
 *
 * Returns the allocated channel number, or -ENOMEM when all are in use.
 */
int32_t z_nrf_rtc_timer_chan_alloc(void)
{
	int32_t chan;
	atomic_val_t prev;
	do {
		/* Candidate: highest set bit of the free-channel mask. */
		chan = alloc_mask ? 31 - __builtin_clz(alloc_mask) : -1;
		if (chan < 0) {
			return -ENOMEM;
		}
		/* Claim the bit atomically; retry when another context
		 * claimed the same channel first.
		 */
		prev = atomic_and(&alloc_mask, ~BIT(chan));
	} while (!(prev & BIT(chan)));

	return chan;
}
592 
z_nrf_rtc_timer_chan_free(int32_t chan)593 void z_nrf_rtc_timer_chan_free(int32_t chan)
594 {
595 	__ASSERT_NO_MSG(chan > 0 && chan < CHAN_COUNT);
596 
597 	atomic_or(&alloc_mask, BIT(chan));
598 }
599 
600 
/* Test helper: artificially trigger an RTC counter overflow.
 *
 * Only supported when CONFIG_NRF_RTC_TIMER_TRIGGER_OVERFLOW is enabled and
 * no user channels are configured.
 *
 * Returns 0 on success, -ENOTSUP when unsupported, -EBUSY when a system
 * timeout is armed, -EAGAIN when a natural overflow is imminent.
 */
int z_nrf_rtc_timer_trigger_overflow(void)
{
	uint32_t mcu_critical_state;
	int err = 0;

	if (!IS_ENABLED(CONFIG_NRF_RTC_TIMER_TRIGGER_OVERFLOW) ||
	    (CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT > 0)) {
		return -ENOTSUP;
	}

	mcu_critical_state = full_int_lock();
	if (sys_busy) {
		/* A system timeout is armed; forcing an overflow would
		 * disturb it.
		 */
		err = -EBUSY;
		goto bail;
	}

	if (counter() >= (COUNTER_SPAN - 100)) {
		/* Too close to a natural overflow; let that happen instead. */
		err = -EAGAIN;
		goto bail;
	}

	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_TRIGGER_OVERFLOW);
	/* Busy-wait for the overflow to take effect; presumably 80 us covers
	 * the task-processing latency — confirm against the RTC datasheet.
	 */
	k_busy_wait(80);

	uint64_t now = z_nrf_rtc_timer_read();

	/* NOTE(review): err is necessarily 0 at this point (error paths jump
	 * to bail above), so this check is redundant.
	 */
	if (err == 0) {
		sys_clock_timeout_handler(0, now, NULL);
	}
bail:
	full_int_unlock(mcu_critical_state);

	return err;
}
635 
/* Kernel hook: program channel 0 so sys_clock_announce() runs after
 * @p ticks ticks. Only active in tickless mode; in ticking mode the next
 * tick is programmed by sys_clock_timeout_handler().
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);
	uint32_t cyc;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	if (ticks == K_TICKS_FOREVER) {
		/* No timeout requested: still schedule the farthest allowed
		 * wakeup (see the MAX_CYCLES clamp below).
		 */
		cyc = MAX_TICKS * CYC_PER_TICK;
		sys_busy = false;
	} else {
		/* Value of ticks can be zero or negative, what means "announce
		 * the next tick" (the same as ticks equal to 1).
		 */
		cyc = CLAMP(ticks, 1, (int32_t)MAX_TICKS);
		cyc *= CYC_PER_TICK;
		sys_busy = true;
	}

	uint32_t unannounced = z_nrf_rtc_timer_read() - last_count;

	/* If we haven't announced for more than half the 24-bit wrap
	 * duration, then force an announce to avoid loss of a wrap
	 * event.  This can happen if new timeouts keep being set
	 * before the existing one triggers the interrupt.
	 */
	if (unannounced >= COUNTER_HALF_SPAN) {
		cyc = 0;
	}

	/* Get the cycles from last_count to the tick boundary after
	 * the requested ticks have passed starting now.
	 */
	cyc += unannounced;
	cyc = DIV_ROUND_UP(cyc, CYC_PER_TICK) * CYC_PER_TICK;

	/* Due to elapsed time the calculation above might produce a
	 * duration that laps the counter.  Don't let it.
	 * This limitation also guarantees that the anchor will be properly
	 * updated before every overflow (see anchor_update()).
	 */
	if (cyc > MAX_CYCLES) {
		cyc = MAX_CYCLES;
	}

	uint64_t target_time = cyc + last_count;

	compare_set(0, target_time, sys_clock_timeout_handler, NULL, false);
}
687 
sys_clock_elapsed(void)688 uint32_t sys_clock_elapsed(void)
689 {
690 	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
691 		return 0;
692 	}
693 
694 	return (z_nrf_rtc_timer_read() - last_count) / CYC_PER_TICK;
695 }
696 
/* Return the low 32 bits of the absolute RTC time. */
uint32_t sys_clock_cycle_get_32(void)
{
	uint64_t now = z_nrf_rtc_timer_read();

	return (uint32_t)now;
}
701 
int_event_disable_rtc(void)702 static void int_event_disable_rtc(void)
703 {
704 	uint32_t mask = NRF_RTC_INT_TICK_MASK     |
705 			NRF_RTC_INT_OVERFLOW_MASK |
706 			NRF_RTC_INT_COMPARE0_MASK |
707 			NRF_RTC_INT_COMPARE1_MASK |
708 			NRF_RTC_INT_COMPARE2_MASK |
709 			NRF_RTC_INT_COMPARE3_MASK;
710 
711 	/* Reset interrupt enabling to expected reset values */
712 	nrfy_rtc_int_disable(RTC, mask);
713 
714 	/* Reset event routing enabling to expected reset values */
715 	nrfy_rtc_event_disable(RTC, mask);
716 }
717 
sys_clock_disable(void)718 void sys_clock_disable(void)
719 {
720 	nrf_rtc_task_trigger(RTC, NRF_RTC_TASK_STOP);
721 	irq_disable(RTC_IRQn);
722 	int_event_disable_rtc();
723 	NVIC_ClearPendingIRQ(RTC_IRQn);
724 }
725 
/* Initialize the RTC1-based system timer: reset peripheral enables, set the
 * prescaler, enable channel and overflow interrupts, start the counter, arm
 * the first system timeout and request the LF clock.
 */
static int sys_clock_driver_init(void)
{
	int_event_disable_rtc();

	/* TODO: replace with counter driver to access RTC */
	nrfy_rtc_prescaler_set(RTC, 0);
	for (int32_t chan = 0; chan < CHAN_COUNT; chan++) {
		cc_data[chan].target_time = TARGET_TIME_INVALID;
		nrfy_rtc_int_enable(RTC, NRF_RTC_CHANNEL_INT_MASK(chan));
	}

	nrfy_rtc_int_enable(RTC, NRF_RTC_INT_OVERFLOW_MASK);

	NVIC_ClearPendingIRQ(RTC_IRQn);

	IRQ_CONNECT(RTC_IRQn, DT_IRQ(DT_NODELABEL(RTC_LABEL), priority),
		    rtc_nrf_isr, 0, 0);
	irq_enable(RTC_IRQn);

	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_CLEAR);
	nrfy_rtc_task_trigger(RTC, NRF_RTC_TASK_START);

	/* All channel interrupts start enabled (none locked). */
	int_mask = BIT_MASK(CHAN_COUNT);
	if (CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT) {
		/* Channels 1..EXT_CHAN_COUNT are available to users;
		 * channel 0 is kept for the system clock.
		 */
		alloc_mask = BIT_MASK(EXT_CHAN_COUNT) << 1;
	}

	uint32_t initial_timeout = IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
		MAX_CYCLES : CYC_PER_TICK;

	compare_set(0, initial_timeout, sys_clock_timeout_handler, NULL, false);

#if defined(CONFIG_CLOCK_CONTROL_NRF)
	static const enum nrf_lfclk_start_mode mode =
		IS_ENABLED(CONFIG_SYSTEM_CLOCK_NO_WAIT) ?
			CLOCK_CONTROL_NRF_LF_START_NOWAIT :
			(IS_ENABLED(CONFIG_SYSTEM_CLOCK_WAIT_FOR_AVAILABILITY) ?
			CLOCK_CONTROL_NRF_LF_START_AVAILABLE :
			CLOCK_CONTROL_NRF_LF_START_STABLE);

	z_nrf_clock_control_lf_on(mode);
#endif

	return 0;
}
771 
772 SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
773 	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
774