/*
 * Copyright (c) 2017 Oticon A/S
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * RTC - Real-time counter
 *
 * https://infocenter.nordicsemi.com/topic/ps_nrf52833/rtc.html?cp=5_1_0_5_19
 * https://infocenter.nordicsemi.com/topic/ps_nrf5340/rtc.html?cp=4_0_0_6_27
 */

/*
 * This file provides the implementation of the RTC peripherals,
 * and instantiates N of them, as described in the configuration (NHW_config.h)
 *
 * Notes:
 *
 * * TICK events are NOT modeled
 *
 * * The COUNTER register is only updated when read with the proper HAL function
 *
 * * Unlike the real HW, there is no jitter or (variable) delay in the tasks/events,
 *   operations, or synchronization of configuration:
 *   * Triggering a task (through the PPI or a register write) starts the operation
 *     immediately (in HW it takes between 1/2 and 1+1/2 LFCLKs:
 *     "CLEAR and STOP and TRIGOVRFLW [..] will be delayed as long as it
 *      takes for the peripheral to clock a falling edge and a rising edge of the LFCLK.")
 *   * Events and interrupts are raised immediately once produced (in real HW
 *     they can be raised within +-1/2 LFCLK or +-1/2 PCLK16M of each other)
 *     (In real HW this is due to the 32K-16M clock domain crossing synchronization)
 *   * A STOP task stops the counter immediately.
 *
 * * As the CLEAR task has no delay, a SHORT of COMPARE<n>_CLEAR will cause the
 *   CLEAR to be instantaneous.
 *   In the real HW, as per the spec:
 *   "If the COMPARE[i]_CLEAR short is enabled, the COUNTER will be cleared
 *    *one LFClk after* the COMPARE event"
 *
 * * Unlike in real HW, reading the COUNTER register is instantaneous (in real HW
 *   it takes up to 6 PCLK16M cycles, during which the CPU is stalled).
 *
 * * Writing to the PRESCALER register while the RTC is running is not prevented
 *   (unlike in real HW), but doing so can have unintended consequences.
 *   The PRESCALER is, however, shadowed into an internal register on the tasks
 *   START, CLEAR, and TRIGOVRFLW, as per the spec.
 *
 * * Note that, for nrf52 devices, the spec seems to confusingly state that the LFCLK
 *   must be ready before the RTC can be used, and yet that a TRIGOVRFLW task
 *   will get the RTC to request the LFCLK.
 *   But in real life (and in the model) it seems no task will automatically
 *   request the clock.
 *   For nrf5340 devices, TRIGOVRFLW does not seem to request the clock either,
 *   but START does, as per the spec.
 *
 * * Note this model does not yet automatically request the LFCLK (for an nRF5340)
 *   with the START task
 *
 * * This model assumes that once the LFCLK is started it is never stopped.
 *
 * * Note that, just like for the real RTC, the event generation logic is slightly
 *   different from that of other peripherals
 *
 * Implementation notes:
 *   In a naive (but very simple) implementation the RTC model could be called
 *   every LFCLK tick. But this would be really slow (as we'd need to call it 32768
 *   times per second).
 *   Instead this model checks ahead when the next compare or overflow event would
 *   trigger, and sets a timer for its callback.
 *   There is one common timer exposed to the HW scheduler, and a set of internal timers
 *   per RTC instance (one per CC register, and one for the counter overflow)
 *
 *   The RTC keeps track of time internally with sub-microsecond resolution
 *   (9 fractional bits) to have an exact representation of the LF clock ticks,
 *   and avoid accumulating rounding errors.
 *   With this current implementation, the maximum runtime of the RTC is limited to
 *   2**64/2**9 microseconds from boot (~1142 years)
 *
 *
 * Pending to implement:
 *   * TICK events
 *
 *   * Delay of the TASKs CLEAR, STOP and TRIGOVRFLW
 *
 *   * For nrf5340: with the START task, the RTC should automatically request the LFCLK source with the RC oscillator if the LFCLK is not already running.
 */
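
/*
 * For illustration only (not part of the model), a quick check of the runtime limit
 * quoted above: 2**64 sub-us units / 2**9 sub-us per us = 2**55 us ~= 3.6e16 us
 * ~= 3.6e10 s, and 3.6e10 s / (~3.156e7 s per year) ~= 1142 years.
 */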

#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include "bs_tracing.h"
#include "bs_oswrap.h"
#include "nsi_hw_scheduler.h"
#include "nsi_tasks.h"
#include "nsi_hws_models_if.h"
#include "NHW_common_types.h"
#include "NHW_config.h"
#include "NHW_peri_types.h"
#include "NHW_xPPI.h"
#include "NHW_CLOCK.h"
#include "irq_ctrl.h"
#include "NHW_RTC.h"

#define RTC_COUNTER_MASK 0xFFFFFF /*24 bits*/
#define RTC_TRIGGER_OVERFLOW_COUNTER_VALUE 0xFFFFF0

#define SUB_US_BITS 9 // Bits representing sub-microsecond units

#define LF_CLOCK_PERIOD_subus 15625 /*in a fixed point format with 9 bits per us, the LF clock period*/
#define LF_CLOCK_PERIOD_us 31 /* the LF clock period in us ceil(1e6/32768) = ceil(30.517578125us) */
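
/*
 * Illustrative sanity check of the fixed point constant above (assumes a C11 compiler
 * for _Static_assert): one LF tick is 1e6/32768 us = 30.517578125 us, which in the
 * 9-fractional-bit format is 30.517578125 * 2**9 = 15625 exactly.
 */
_Static_assert(LF_CLOCK_PERIOD_subus == (1000000ULL << SUB_US_BITS) / 32768,
               "LF clock period in sub-us units must be exactly 1e6/32768 us in the Q.9 format");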

struct rtc_status {
  NRF_RTC_Type *NRF_RTC_regs;

  int n_CCs;  //Number of compare/capture registers in this rtc instance

  bs_time_t *cc_timers;      //[n_CCs] When each CC will match (in microseconds)
  bs_time_t overflow_timer;  // When the timer will overflow (in microseconds)
  uint64_t overflow_timer_sub_us; // When the timer will overflow (in sub-microsecond units)

  uint64_t counter_startT_sub_us; //Time when the counter was "started" (really the time that would correspond to COUNTER = 0)
  uint64_t counter_startT_negative_sub_us;

  uint32_t counter_at_stop; //Internal counter value when the counter was *stopped*

  uint32_t INTEN;
  uint32_t PRESC;
  bool running; // Is this RTC running/started

#if (NHW_HAS_DPPI)
  uint dppi_map; //To which DPPI instance this RTC's subscribe & publish ports are connected
  struct nhw_subsc_mem* subscribed_CAPTURE; //[n_CCs]
  struct nhw_subsc_mem subscribed_START;
  struct nhw_subsc_mem subscribed_STOP;
  struct nhw_subsc_mem subscribed_CLEAR;
  struct nhw_subsc_mem subscribed_TRIGOVRFLW;
#endif
};

static uint64_t first_lf_tick_time_sub_us;

static bs_time_t Timer_RTC = TIME_NEVER;
static struct rtc_status nhw_rtc_st[NHW_RTC_TOTAL_INST];
NRF_RTC_Type NRF_RTC_regs[NHW_RTC_TOTAL_INST];

static bs_time_t sub_us_time_to_us_time(uint64_t sub_us_time);
static uint64_t us_time_to_sub_us_time(bs_time_t us_time);
static void nhw_rtc_TASKS_CLEAR(uint rtc);
static void nhw_rtc_signal_OVERFLOW(uint rtc);
static void nhw_rtc_signal_COMPARE(uint rtc, uint cc);

static void nhw_rtc_init(void) {
#if (NHW_HAS_DPPI)
  /* Mapping of peripheral instance to DPPI instance */
  uint nhw_rtc_dppi_map[NHW_RTC_TOTAL_INST] = NHW_RTC_DPPI_MAP;
#endif
  int RTC_n_CCs[NHW_RTC_TOTAL_INST] = NHW_RTC_N_CC;

  memset(NRF_RTC_regs, 0, sizeof(NRF_RTC_regs));

  for (int i = 0; i < NHW_RTC_TOTAL_INST ; i++) {
    struct rtc_status *rtc_st = &nhw_rtc_st[i];

    rtc_st->NRF_RTC_regs = &NRF_RTC_regs[i];
    rtc_st->n_CCs = RTC_n_CCs[i];

    rtc_st->counter_startT_sub_us = TIME_NEVER;

    rtc_st->cc_timers = (bs_time_t *)bs_malloc(sizeof(bs_time_t)*RTC_n_CCs[i]);

    for (int j = 0 ; j < rtc_st->n_CCs ; j++) {
      rtc_st->cc_timers[j] = TIME_NEVER;
    }
    rtc_st->overflow_timer = TIME_NEVER;
    rtc_st->overflow_timer_sub_us = TIME_NEVER;

#if (NHW_HAS_DPPI)
    rtc_st->dppi_map = nhw_rtc_dppi_map[i];
    rtc_st->subscribed_CAPTURE = (struct nhw_subsc_mem*)bs_calloc(RTC_n_CCs[i], sizeof(struct nhw_subsc_mem));
#endif
  }
  Timer_RTC = TIME_NEVER;
}

NSI_TASK(nhw_rtc_init, HW_INIT, 100);

/*
 * Free all RTC instances' resources before program exit
 */
static void nhw_rtc_free(void)
{
  for (int t = 0; t < NHW_RTC_TOTAL_INST; t++) {
    struct rtc_status *rtc_st = &nhw_rtc_st[t];

    free(rtc_st->cc_timers);
    rtc_st->cc_timers = NULL;

#if (NHW_HAS_DPPI)
    free(rtc_st->subscribed_CAPTURE);
    rtc_st->subscribed_CAPTURE = NULL;
#endif /* (NHW_HAS_DPPI) */
  }
}

NSI_TASK(nhw_rtc_free, ON_EXIT_PRE, 100);

/**
 * Convert a time delta in sub-microsecond units to the equivalent time in microseconds.
 * The value is always rounded UP, because otherwise events would be registered
 * at a time in microseconds before the event actually occurred. That would lead to many
 * imprecise event timings, for example when the timing of an event is calculated based
 * on the last LF clock tick (which happens for example when triggering the CLEAR or
 * TRIGGER_OVERFLOW tasks)
 */
static bs_time_t sub_us_time_to_us_time(uint64_t sub_us_time)
{
  bs_time_t us_time = sub_us_time >> SUB_US_BITS;

  if(sub_us_time % (1U << SUB_US_BITS) != 0) //rounding up
  {
    us_time += 1;
  }

  return us_time;
}
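
/*
 * For example (illustrative only): one LF tick, LF_CLOCK_PERIOD_subus = 15625 sub-us,
 * converts to ceil(15625 / 512) = ceil(30.517...) = 31 us, matching LF_CLOCK_PERIOD_us.
 */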

/**
 * Convert a time delta in microseconds to the equivalent time in sub-microsecond units
 */
static uint64_t us_time_to_sub_us_time(bs_time_t us_time)
{
  return us_time << SUB_US_BITS;
}

static uint64_t get_time_in_sub_us(void)
{
  bs_time_t now = nsi_hws_get_time();

  if (now >= sub_us_time_to_us_time(TIME_NEVER)) {
    bs_trace_error_time_line("Bummer, the RTC model only supports running for 1142 years\n");
    /*If you really need this, generalize the calculation to more than 64 bits*/
  }

  return us_time_to_sub_us_time(now);
}

static uint64_t get_last_lf_tick_time_sub_us(void) {
  uint64_t now_sub_us = get_time_in_sub_us();

  uint64_t n_lf_ticks = (now_sub_us - first_lf_tick_time_sub_us) / LF_CLOCK_PERIOD_subus; //floor()
  uint64_t last_tick_time_sub_us = n_lf_ticks * LF_CLOCK_PERIOD_subus;
  last_tick_time_sub_us += first_lf_tick_time_sub_us;

  return last_tick_time_sub_us;
}

/**
 * Convert a time delta in sub-microsecond units to the equivalent count accounting for the PRESCALER
 * Note that the number is rounded down [floor()]
 */
static uint64_t time_sub_us_to_counter(uint rtc, uint64_t delta_sub_us) {
  uint64_t ticks;

  ticks = delta_sub_us / ((uint64_t)LF_CLOCK_PERIOD_subus * (nhw_rtc_st[rtc].PRESC + 1));
  return ticks;
}

/**
 * Convert a counter delta to sub-microsecond units accounting for the PRESCALER
 */
static uint64_t counter_to_time_sub_us(uint rtc, uint64_t counter) {
  uint64_t Elapsed;

  Elapsed = counter * (uint64_t)LF_CLOCK_PERIOD_subus * (nhw_rtc_st[rtc].PRESC + 1);

  return Elapsed;
}

/**
 * Return the time in sub-microsecond units it takes for the COUNTER to do 1 wrap
 */
static uint64_t time_of_1_counter_wrap_sub_us(uint rtc) {
  return counter_to_time_sub_us(rtc, (uint64_t)RTC_COUNTER_MASK + 1);
}
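
/*
 * Worked examples of the conversions above (for illustration only):
 *  * With PRESCALER = 0 (PRESC + 1 = 1), one counter increment corresponds to
 *    15625 sub-us = 30.517578125 us, and one full 24 bit counter wrap takes
 *    2**24 * 15625 = 262144000000 sub-us, i.e. 512000000 us = 512 s.
 *  * With PRESCALER = 4095 (PRESC + 1 = 4096), one counter increment corresponds to
 *    4096 * 15625 = 64000000 sub-us = 125000 us = 125 ms exactly.
 */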

/*
 * Return the *next* time (in us) when the RTC counter will reach <counter_match>
 */
static bs_time_t get_counter_match_time(uint rtc, uint64_t counter_match, uint64_t* next_match_sub_us)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];

  bs_time_t next_match_us = TIME_NEVER;
  *next_match_sub_us = TIME_NEVER;

  if (this->running == true) {
    uint64_t now_sub_us = get_time_in_sub_us();
    uint64_t counter_match_sub_us = counter_to_time_sub_us(rtc, counter_match);

    if(this->counter_startT_sub_us > 0)
    {
      *next_match_sub_us = this->counter_startT_sub_us
                           + counter_match_sub_us;
    }
    else if (counter_match_sub_us > this->counter_startT_negative_sub_us)
    {
      *next_match_sub_us = counter_match_sub_us - this->counter_startT_negative_sub_us;
    }
    else
    {
      *next_match_sub_us = time_of_1_counter_wrap_sub_us(rtc)
                           + counter_match_sub_us - this->counter_startT_negative_sub_us;
    }

    while(*next_match_sub_us <= now_sub_us)
    {
      *next_match_sub_us += time_of_1_counter_wrap_sub_us(rtc);
    }

    next_match_us = sub_us_time_to_us_time(*next_match_sub_us);
  }

  return next_match_us;
}

static void nhw_rtc_update_master_timer(void) {
  Timer_RTC = TIME_NEVER;
  for (int rtc = 0; rtc < NHW_RTC_TOTAL_INST ; rtc++) {
    struct rtc_status *this = &nhw_rtc_st[rtc];

    if (this->running == false) {
      continue;
    }
    for (int cc = 0 ; cc < this->n_CCs ; cc++) {
      if (this->cc_timers[cc] < Timer_RTC) {
        Timer_RTC = this->cc_timers[cc];
      }
    }

    if (this->overflow_timer < Timer_RTC) {
      Timer_RTC = this->overflow_timer;
    }
  }
  nsi_hws_find_next_event();
}

/**
 * Save in cc_timers[cc] the *next* time when this RTC will match the
 * CC[cc] register
 */
static void update_cc_timer(uint rtc, uint cc) {
  uint64_t match_sub_us; // Only to comply with the interface
  nhw_rtc_st[rtc].cc_timers[cc] = get_counter_match_time(rtc, NRF_RTC_regs[rtc].CC[cc], &match_sub_us);
}

/*
 * Update all cc_timers[*] for an RTC instance
 * to the *next* time when they will match
 */
static void update_all_cc_timers(uint rtc) {
  for (int cc = 0 ; cc < nhw_rtc_st[rtc].n_CCs; cc++) {
    update_cc_timer(rtc, cc);
  }
}

static void update_overflow_timer(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];
  this->overflow_timer = get_counter_match_time(rtc, RTC_COUNTER_MASK + 1, &this->overflow_timer_sub_us);
}

static void update_timers(int rtc)
{
  update_all_cc_timers(rtc);
  update_overflow_timer(rtc);
  nhw_rtc_update_master_timer();
}

/**
 * Set the internal state of the counter as if the counter had just been set to the specified value.
 * This is done by setting the "counter start time"
 * (counter_startT*) to an appropriate value, so that the time elapsed from the counter start
 * corresponds to the given counter value. Such virtual "counter start time" can be negative.
 */
static void nhw_rtc_set_counter(uint rtc, uint64_t counter_val)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];

  counter_val &= RTC_COUNTER_MASK;
  uint64_t counter_val_sub_us = counter_to_time_sub_us(rtc, counter_val);

  // All the functions which use this reload <PRESC>, so it is as if the counter was set
  // on the last LF clock tick
  uint64_t last_lf_tick_sub_us = get_last_lf_tick_time_sub_us();

  if(last_lf_tick_sub_us >= counter_val_sub_us)
  {
    this->counter_startT_sub_us = last_lf_tick_sub_us - counter_val_sub_us;
    this->counter_startT_negative_sub_us = 0;
  }
  else
  {
    this->counter_startT_sub_us = 0;
    this->counter_startT_negative_sub_us = counter_val_sub_us - last_lf_tick_sub_us;
  }

  NRF_RTC_regs[rtc].COUNTER = counter_val;

  update_timers(rtc);
}
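
/*
 * For example (illustrative only): if TASK_TRIGOVRFLW is triggered shortly after boot,
 * the counter is set to 0xFFFFF0, and the time corresponding to COUNTER = 0 would then
 * lie before the simulation start. In that case it is kept as a positive offset in
 * counter_startT_negative_sub_us rather than in counter_startT_sub_us.
 */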

static void handle_overflow_event(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];

  // The real time (in sub-microsecond units, not in microseconds)
  // at which the current overflow event occurs.
  // update_overflow_timer() will overwrite overflow_timer_sub_us
  uint64_t current_overflow_event_sub_us = this->overflow_timer_sub_us;

  update_overflow_timer(rtc); //Next time it will overflow

  bs_trace_raw_time(8, "RTC%i: Timer overflow\n", rtc);

  this->counter_startT_sub_us = current_overflow_event_sub_us;
  this->counter_startT_negative_sub_us = 0;

  nhw_rtc_signal_OVERFLOW(rtc);
}

static void nhw_rtc_timer_triggered(void) {
  for (int rtc = 0; rtc < NHW_RTC_TOTAL_INST ; rtc++) {
    struct rtc_status *rtc_el = &nhw_rtc_st[rtc];
    if (rtc_el->running == false) {
      continue;
    }

    for (int cc = 0 ; cc < rtc_el->n_CCs ; cc++) {
      if (rtc_el->cc_timers[cc] == Timer_RTC) { //This CC is matching now
        update_cc_timer(rtc, cc); //Next time it will match
        nhw_rtc_signal_COMPARE(rtc, cc);
      }
    }

    if (rtc_el->overflow_timer == Timer_RTC) { //Overflow occurred now
      handle_overflow_event(rtc); // this must always be the last event, as it might update counter_startT_sub_us
    }

  }
  nhw_rtc_update_master_timer();
}

NSI_HW_EVENT(Timer_RTC, nhw_rtc_timer_triggered, 50);

/**
 * Check if an EVTEN or INTEN mask has the TICK event set
 */
static void check_not_supported_TICK(uint32_t i) {
  if (i & RTC_EVTEN_TICK_Msk) {
    bs_trace_warning_line_time("RTC: The TICK functionality is not modelled\n");
  }
}

void nhw_rtc_notify_first_lf_tick(void) {
  first_lf_tick_time_sub_us = get_time_in_sub_us();
  bs_trace_raw_time(9, "RTC: First lf tick\n");
}

/*
 * Update the COUNTER register so it can be read by SW
 */
void nhw_rtc_update_COUNTER(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (this->running == true) {
    uint64_t count;
    count = time_sub_us_to_counter(rtc,
                                   get_time_in_sub_us() - this->counter_startT_sub_us
                                   + this->counter_startT_negative_sub_us);
    RTC_regs->COUNTER = count & RTC_COUNTER_MASK;
  } else {
    RTC_regs->COUNTER = this->counter_at_stop & RTC_COUNTER_MASK;
  }
}

/**
 * TASK_START triggered handler
 */
static void nhw_rtc_TASKS_START(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];

  if (this->running == true) {
    return;
  }
  bs_trace_raw_time(5, "RTC%i: TASK_START\n", rtc);
  this->running = true;

  /* Pre-scaler value is latched to an internal register on tasks START, CLEAR, and TRIGOVRFLW */
  this->PRESC = NRF_RTC_regs[rtc].PRESCALER;

  //If the counter is not zero at start, it is as if the counter had been started earlier
  nhw_rtc_set_counter(rtc, this->counter_at_stop);
}

/**
 * TASK_STOP triggered handler
 */
static void nhw_rtc_TASKS_STOP(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];

  if (this->running == false) {
    return;
  }
  bs_trace_raw_time(5, "RTC%i: TASK_STOP\n", rtc);
  this->running = false;
  this->counter_at_stop = time_sub_us_to_counter(rtc,
                                                 get_time_in_sub_us() - this->counter_startT_sub_us
                                                 + this->counter_startT_negative_sub_us); //we save the value when the counter was stopped in case it is started again without clearing it
  this->counter_at_stop &= RTC_COUNTER_MASK;
  NRF_RTC_regs[rtc].COUNTER = this->counter_at_stop;
  for (int cc = 0 ; cc < this->n_CCs ; cc++) {
    this->cc_timers[cc] = TIME_NEVER;
  }
  this->overflow_timer = TIME_NEVER;
  nhw_rtc_update_master_timer();
}

/**
 * TASK_CLEAR triggered handler
 */
static void nhw_rtc_TASKS_CLEAR(uint rtc) {
  bs_trace_raw_time(5, "RTC%i: TASK_CLEAR\n", rtc);

  /* Pre-scaler value is latched to an internal register on tasks START, CLEAR, and TRIGOVRFLW */
  nhw_rtc_st[rtc].PRESC = NRF_RTC_regs[rtc].PRESCALER;
  nhw_rtc_st[rtc].counter_at_stop = 0;
  nhw_rtc_set_counter(rtc, 0);
}

/**
 * TASK_TRIGGER_OVERFLOW triggered handler
 */
static void nhw_rtc_TASKS_TRIGOVRFLW(uint rtc) {

  bs_trace_raw_time(5, "RTC%i: TASK_TRIGGER_OVERFLOW\n", rtc);

  /* Pre-scaler value is latched to an internal register on tasks START, CLEAR, and TRIGOVRFLW */
  nhw_rtc_st[rtc].PRESC = NRF_RTC_regs[rtc].PRESCALER;
  nhw_rtc_set_counter(rtc, RTC_TRIGGER_OVERFLOW_COUNTER_VALUE);
}

#if (NHW_RTC_HAS_CAPTURE)
static void nhw_rtc_TASKS_CAPTURE(uint rtc, uint cc_n) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  nhw_rtc_update_COUNTER(rtc);
  RTC_regs->CC[cc_n] = RTC_regs->COUNTER;

  nhw_rtc_regw_sideeffects_CC(rtc, cc_n);
}
#endif /* NHW_RTC_HAS_CAPTURE */

static void nhw_rtc_eval_interrupts(uint rtc) {
  /* Mapping of peripheral instance to {int controller instance, int number} */
  static struct nhw_irq_mapping nhw_rtc_irq_map[NHW_RTC_TOTAL_INST] = NHW_RTC_INT_MAP;
  static bool RTC_int_line[NHW_RTC_TOTAL_INST]; /* Is the RTC currently driving its interrupt line high */

  struct rtc_status *this = &nhw_rtc_st[rtc];
  bool new_int_line = false;

  for (int cc = 0; cc < this->n_CCs; cc++) {
    int mask = this->INTEN & (RTC_INTENSET_COMPARE0_Msk << cc);
    if (NRF_RTC_regs[rtc].EVENTS_COMPARE[cc] && mask) {
      new_int_line = true;
      break; /* No need to check more */
    }
  }

  if (NRF_RTC_regs[rtc].EVENTS_TICK && (this->INTEN & RTC_INTENSET_TICK_Msk)) {
    new_int_line = true;
  }
  if (NRF_RTC_regs[rtc].EVENTS_OVRFLW && (this->INTEN & RTC_INTENSET_OVRFLW_Msk)) {
    new_int_line = true;
  }

  if (RTC_int_line[rtc] == false && new_int_line == true) {
    RTC_int_line[rtc] = true;
    hw_irq_ctrl_raise_level_irq_line(nhw_rtc_irq_map[rtc].cntl_inst,
                                     nhw_rtc_irq_map[rtc].int_nbr);
  } else if (RTC_int_line[rtc] == true && new_int_line == false) {
    RTC_int_line[rtc] = false;

    hw_irq_ctrl_lower_level_irq_line(nhw_rtc_irq_map[rtc].cntl_inst,
                                     nhw_rtc_irq_map[rtc].int_nbr);
  }
}

static void nhw_rtc_signal_COMPARE(uint rtc, uint cc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

#if (NHW_RTC_HAS_SHORT_COMP_CLEAR)
  if (RTC_regs->SHORTS & (RTC_SHORTS_COMPARE0_CLEAR_Msk << cc)) {
    nhw_rtc_TASKS_CLEAR(rtc);
    bs_trace_warning_line_time("RTC: COMPARE->CLEAR short used, but CLEAR is instantaneous. "
                               "If you are using this to generate a periodic interrupt, the period "
                               "will be 1 count too short\n");
  }
#endif /* NHW_RTC_HAS_SHORT_COMP_CLEAR */

  uint32_t mask = RTC_EVTEN_COMPARE0_Msk << cc;

  if (!((RTC_regs->EVTEN | this->INTEN) & mask)) {
    return;
  }

  RTC_regs->EVENTS_COMPARE[cc] = 1;

  if (RTC_regs->EVTEN & mask) {
#if (NHW_HAS_PPI)
    ppi_event_types_t event = RTC0_EVENTS_COMPARE_0;
    switch (rtc){
    case 0:
      event = RTC0_EVENTS_COMPARE_0;
      break;
    case 1:
      event = RTC1_EVENTS_COMPARE_0;
      break;
    case 2:
      event = RTC2_EVENTS_COMPARE_0;
      break;
    }
    event += cc;
    nrf_ppi_event(event);
#elif (NHW_HAS_DPPI)
    nhw_dppi_event_signal_if(this->dppi_map,
                             RTC_regs->PUBLISH_COMPARE[cc]);
#endif
  }

  nhw_rtc_eval_interrupts(rtc);
}

static void nhw_rtc_signal_OVERFLOW(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (!((RTC_regs->EVTEN | this->INTEN) & RTC_EVTEN_OVRFLW_Msk)) {
    return;
  }

  RTC_regs->EVENTS_OVRFLW = 1;

  if (RTC_regs->EVTEN & RTC_EVTEN_OVRFLW_Msk) {
#if (NHW_HAS_PPI)
    ppi_event_types_t event = RTC0_EVENTS_OVRFLW;
    switch (rtc){
    case 0:
      event = RTC0_EVENTS_OVRFLW;
      break;
    case 1:
      event = RTC1_EVENTS_OVRFLW;
      break;
    case 2:
      event = RTC2_EVENTS_OVRFLW;
      break;
    }
    nrf_ppi_event(event);
#elif (NHW_HAS_DPPI)
    nhw_dppi_event_signal_if(this->dppi_map,
                             RTC_regs->PUBLISH_OVRFLW);
#endif
  }

  nhw_rtc_eval_interrupts(rtc);
}


/*static*/ void nhw_rtc_signal_TICK(uint rtc) /* Not yet used, as the TICK functionality is not yet implemented */
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (!((RTC_regs->EVTEN | this->INTEN) & RTC_EVTEN_TICK_Msk)) {
    return;
  }

  RTC_regs->EVENTS_TICK = 1;

  if (RTC_regs->EVTEN & RTC_EVTEN_TICK_Msk) {
#if (NHW_HAS_PPI)
    ppi_event_types_t event = RTC0_EVENTS_TICK;
    switch (rtc){
    case 0:
      event = RTC0_EVENTS_TICK;
      break;
    case 1:
      event = RTC1_EVENTS_TICK;
      break;
    case 2:
      event = RTC2_EVENTS_TICK;
      break;
    }
    nrf_ppi_event(event);
#elif (NHW_HAS_DPPI)
    nhw_dppi_event_signal_if(this->dppi_map,
                             RTC_regs->PUBLISH_TICK);
#endif
  }

  nhw_rtc_eval_interrupts(rtc);
}

void nhw_rtc_regw_sideeffect_TASKS_START(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_START) {
    RTC_regs->TASKS_START = 0;
    nhw_rtc_TASKS_START(i);
  }
}

void nhw_rtc_regw_sideeffect_TASKS_STOP(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_STOP) {
    RTC_regs->TASKS_STOP = 0;
    nhw_rtc_TASKS_STOP(i);
  }
}

void nhw_rtc_regw_sideeffect_TASKS_CLEAR(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_CLEAR) {
    RTC_regs->TASKS_CLEAR = 0;
    nhw_rtc_TASKS_CLEAR(i);
  }
}

void nhw_rtc_regw_sideeffect_TASKS_TRIGOVRFLW(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_TRIGOVRFLW) {
    RTC_regs->TASKS_TRIGOVRFLW = 0;
    nhw_rtc_TASKS_TRIGOVRFLW(i);
  }
}

#if (NHW_RTC_HAS_CAPTURE)
void nhw_rtc_regw_sideeffect_TASKS_CAPTURE(uint i, uint cc) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_CAPTURE[cc]) {
    RTC_regs->TASKS_CAPTURE[cc] = 0;
    nhw_rtc_TASKS_CAPTURE(i, cc);
  }
}
#endif /* NHW_RTC_HAS_CAPTURE */

#if (NHW_HAS_DPPI)

static void nhw_rtc_taskcapture_wrap(void* param) {
  unsigned int inst = (uintptr_t)param >> 16;
  uint cc_n = (uintptr_t)param & 0xFFFF;
  nhw_rtc_TASKS_CAPTURE(inst, cc_n);
}

void nhw_rtc_regw_sideeffects_SUBSCRIBE_CAPTURE(uint inst, uint cc_n) {
  struct rtc_status *this = &nhw_rtc_st[inst];

  nhw_dppi_common_subscribe_sideeffect(this->dppi_map,
                                       this->NRF_RTC_regs->SUBSCRIBE_CAPTURE[cc_n],
                                       &this->subscribed_CAPTURE[cc_n],
                                       nhw_rtc_taskcapture_wrap,
                                       (void*)((inst << 16) + cc_n));
}
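
/*
 * Note on the callback parameter packing above (for illustration only): the instance
 * number and CC index are packed into the void* parameter, e.g. inst = 1 and cc_n = 2
 * become (1 << 16) + 2 = 0x10002, which nhw_rtc_taskcapture_wrap() splits back apart.
 */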

#define NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(TASK_N)                             \
  static void nhw_rtc_task##TASK_N##_wrap(void* param)                       \
  {                                                                           \
    nhw_rtc_TASKS_##TASK_N((int) param);                                      \
  }                                                                           \
                                                                              \
  void nhw_rtc_regw_sideeffects_SUBSCRIBE_##TASK_N(uint inst)                 \
  {                                                                           \
    struct rtc_status *this = &nhw_rtc_st[inst];                              \
                                                                              \
    nhw_dppi_common_subscribe_sideeffect(this->dppi_map,                      \
                                         this->NRF_RTC_regs->SUBSCRIBE_##TASK_N, \
                                         &this->subscribed_##TASK_N,          \
                                         nhw_rtc_task##TASK_N##_wrap,         \
                                         (void*) inst);                       \
  }

NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(START)
NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(STOP)
NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(CLEAR)
NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(TRIGOVRFLW)

#endif /* NHW_HAS_DPPI */

void nhw_rtc_regw_sideeffect_INTENSET(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if ( RTC_regs->INTENSET ){
    this->INTEN |= RTC_regs->INTENSET;
    RTC_regs->INTENSET = this->INTEN;

    check_not_supported_TICK(this->INTEN);
    nhw_rtc_eval_interrupts(rtc);
  }
}

void nhw_rtc_regw_sideeffect_INTENCLR(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if ( RTC_regs->INTENCLR ){
    this->INTEN &= ~RTC_regs->INTENCLR;
    RTC_regs->INTENSET = this->INTEN;
    RTC_regs->INTENCLR = 0;

    nhw_rtc_eval_interrupts(rtc);
  }
}

void nhw_rtc_regw_sideeffect_EVTENSET(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if ( RTC_regs->EVTENSET ){
    RTC_regs->EVTEN |= RTC_regs->EVTENSET;
    RTC_regs->EVTENSET = RTC_regs->EVTEN;
    check_not_supported_TICK(RTC_regs->EVTEN);
  }
}

void nhw_rtc_regw_sideeffect_EVTENCLR(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if ( RTC_regs->EVTENCLR ){
    RTC_regs->EVTEN &= ~RTC_regs->EVTENCLR;
    RTC_regs->EVTENSET = RTC_regs->EVTEN;
    RTC_regs->EVTENCLR = 0;
  }
}

void nhw_rtc_regw_sideeffects_EVENTS_all(uint rtc) {
  nhw_rtc_eval_interrupts(rtc);
}

void nhw_rtc_regw_sideeffects_CC(uint rtc, uint cc_n) {
  struct rtc_status *this = &nhw_rtc_st[rtc];

  if (this->running == true) {
    update_cc_timer(rtc, cc_n);
    nhw_rtc_update_master_timer();
  }
}

#if (NHW_HAS_PPI)
void nhw_rtc0_TASKS_START(void) { nhw_rtc_TASKS_START(0); }
void nhw_rtc0_TASKS_STOP(void) { nhw_rtc_TASKS_STOP(0); }
void nhw_rtc0_TASKS_CLEAR(void) { nhw_rtc_TASKS_CLEAR(0); }
void nhw_rtc0_TASKS_TRIGOVRFLW(void) { nhw_rtc_TASKS_TRIGOVRFLW(0); }
void nhw_rtc1_TASKS_START(void) { nhw_rtc_TASKS_START(1); }
void nhw_rtc1_TASKS_STOP(void) { nhw_rtc_TASKS_STOP(1); }
void nhw_rtc1_TASKS_CLEAR(void) { nhw_rtc_TASKS_CLEAR(1); }
void nhw_rtc1_TASKS_TRIGOVRFLW(void) { nhw_rtc_TASKS_TRIGOVRFLW(1); }
void nhw_rtc2_TASKS_START(void) { nhw_rtc_TASKS_START(2); }
void nhw_rtc2_TASKS_STOP(void) { nhw_rtc_TASKS_STOP(2); }
void nhw_rtc2_TASKS_CLEAR(void) { nhw_rtc_TASKS_CLEAR(2); }
void nhw_rtc2_TASKS_TRIGOVRFLW(void) { nhw_rtc_TASKS_TRIGOVRFLW(2); }
#endif /* NHW_HAS_PPI */
