/*
 * Copyright (c) 2017 Oticon A/S
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * RTC - Real-time counter
 *
 * https://infocenter.nordicsemi.com/topic/ps_nrf52833/rtc.html?cp=5_1_0_5_19
 * https://infocenter.nordicsemi.com/topic/ps_nrf5340/rtc.html?cp=4_0_0_6_27
 */

/*
 * This file provides the implementation of the RTC peripherals,
 * and instantiates N of them, as described in the configuration (NHW_config.h)
 *
 * Notes:
 *
 * * TICK events are NOT modeled
 *
 * * The COUNTER register is only updated when read with the proper HAL function
 *
 * * Unlike the real HW, there is no jitter or (variable) delay in the tasks/events,
 *   operations, or synchronization of configuration:
 *   * Triggering a task (through the PPI or a register write) starts the operation
 *     immediately (in HW it takes between 1/2 and 1+1/2 LFCLKs:
 *     "CLEAR and STOP and TRIGOVRFLW [..] will be delayed as long as it
 *      takes for the peripheral to clock a falling edge and a rising edge of the LFCLK.")
 *   * Events and interrupts are raised immediately once produced (in real HW
 *     they can be raised relatively at +-1/2 LFCLK or +-1/2 PCLK16M of each other)
 *     (In real HW this is due to the 32K-16M clock domain crossing synchronization)
 *   * A STOP task stops the counter immediately.
 *
 * * As the CLEAR task does not have a delay, a SHORT of COMPARE<n>_CLEAR will cause the
 *   CLEAR to be instantaneous.
 *   In the real HW, as per the spec:
 *   "If the COMPARE[i]_CLEAR short is enabled, the COUNTER will be cleared
 *    *one LFClk after* the COMPARE event"
 *
 * * Unlike in real HW, reading the COUNTER register is instantaneous (in real HW
 *   it takes up to 6 PCLK16M cycles, during which the CPU is stalled).
 *
 * * Writing to the PRESCALER register while the RTC is running is not prevented
 *   (unlike in real HW), but doing so can have unintended consequences.
 *   Note that the PRESCALER is shadowed into an internal register on the tasks
 *   START, CLEAR, and TRIGOVRFLW, as per the spec.
 *
 * * Note that, for nrf52 devices, the spec seems to confusingly state that the LFCLK
 *   must be ready before the RTC can be used, and yet that a TRIGOVRFLW task
 *   will get the RTC to request the LFCLK.
 *   But in real life (and in the model) it seems no task will automatically
 *   request the clock.
 *   For nrf5340 devices, TRIGOVRFLW does not seem to request the LFCLK either,
 *   but START seems to, as per the spec.
 *
 * * Note this model does not yet automatically request the LFCLK (for a nRF5340)
 *   with task START
 *
 * * This model assumes that once the LFCLK is started it is never stopped.
 *
 * * Note that, just like for the real RTC, the event generation logic is slightly
 *   different from that of other peripherals
 *
 * Implementation notes:
 *   In a naive (but very simple) implementation, the RTC model could be called
 *   every LFCLK tick. But this would be really slow (as we would need to call it
 *   32768 times per second).
 *   Instead, this model checks ahead for when the next compare or overflow event
 *   would trigger, and sets a timer for its callback.
 *   There is one common timer exposed to the HW scheduler, and a set of internal timers
 *   per RTC instance (one per CC register, and one for the counter overflow).
 *
 *   The RTC keeps track of the time internally with sub-microsecond resolution
 *   (9 fractional bits) to have an exact representation of the LF clock ticks,
 *   and to avoid accumulating rounding errors.
 *   With the current implementation, the maximum runtime of the RTC is limited to
 *   2**64/2**9 microseconds from boot (~1142 years).
 *
 *
 * Pending to implement:
 *  * TICK events
 *
 *  * Delay of TASKs CLEAR, STOP and TRIGOVRFLW
 *
 *  * For nrf5340: with task START, the RTC should automatically request the LFCLK source
 *    with the RC oscillator if the LFCLK is not already running.
 */

#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include "bs_tracing.h"
#include "bs_oswrap.h"
#include "nsi_hw_scheduler.h"
#include "nsi_tasks.h"
#include "nsi_hws_models_if.h"
#include "NHW_common_types.h"
#include "NHW_config.h"
#include "NHW_peri_types.h"
#include "NHW_xPPI.h"
#include "NHW_CLOCK.h"
#include "irq_ctrl.h"
#include "NHW_RTC.h"

#define RTC_COUNTER_MASK 0xFFFFFF /*24 bits*/
#define RTC_TRIGGER_OVERFLOW_COUNTER_VALUE 0xFFFFF0

#define SUB_US_BITS 9 // Bits representing sub-microsecond units

#define LF_CLOCK_PERIOD_subus 15625 /*in a fixed point format with 9 bits per us, the LF clock period*/
#define LF_CLOCK_PERIOD_us 31 /* the LF clock period in us ceil(1e6/32768) = ceil(30.517578125us) */
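
/*
 * For reference, a worked derivation of the constants above:
 *   1 LF clock tick = 1e6 us / 32768 = 30.517578125 us
 *   In the 9-bit fixed point format: 30.517578125 us * 2**9 = 15625 exactly
 *   (LF_CLOCK_PERIOD_subus), so the LF clock period is represented without any
 *   rounding error, which is what allows the model to avoid accumulating drift.
 */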

struct rtc_status {
  NRF_RTC_Type *NRF_RTC_regs;

  int n_CCs; //Number of compare/capture registers in this rtc instance

  bs_time_t *cc_timers;     //[n_CCs] When each CC will match (in microseconds)
  bs_time_t overflow_timer; //When the timer will overflow (in microseconds)
  uint64_t overflow_timer_sub_us; //When the timer will overflow (in sub-microsecond units)

  uint64_t counter_startT_sub_us; //Time when the counter was "started" (really the time that would correspond to COUNTER = 0)
  uint64_t counter_startT_negative_sub_us;

  uint32_t counter_at_stop; //Internal counter value when the counter was *stopped*

  uint32_t INTEN;
  uint32_t PRESC;
  bool running; //Is this RTC running/started

#if (NHW_HAS_DPPI)
  uint dppi_map; //To which DPPI instance this RTC's subscribe & publish ports are connected
  struct nhw_subsc_mem* subscribed_CAPTURE; //[n_CCs]
  struct nhw_subsc_mem subscribed_START;
  struct nhw_subsc_mem subscribed_STOP;
  struct nhw_subsc_mem subscribed_CLEAR;
  struct nhw_subsc_mem subscribed_TRIGOVRFLW;
#endif
};

static uint64_t first_lf_tick_time_sub_us;

static bs_time_t Timer_RTC = TIME_NEVER;
static struct rtc_status nhw_rtc_st[NHW_RTC_TOTAL_INST];
NRF_RTC_Type NRF_RTC_regs[NHW_RTC_TOTAL_INST];

static bs_time_t sub_us_time_to_us_time(uint64_t sub_us_time);
static uint64_t us_time_to_sub_us_time(bs_time_t us_time);
static void nhw_rtc_TASKS_CLEAR(uint rtc);
static void nhw_rtc_signal_OVERFLOW(uint rtc);
static void nhw_rtc_signal_COMPARE(uint rtc, uint cc);

static void nhw_rtc_init(void) {
#if (NHW_HAS_DPPI)
  /* Mapping of peripheral instance to DPPI instance */
  uint nhw_rtc_dppi_map[NHW_RTC_TOTAL_INST] = NHW_RTC_DPPI_MAP;
#endif
  int RTC_n_CCs[NHW_RTC_TOTAL_INST] = NHW_RTC_N_CC;

  memset(NRF_RTC_regs, 0, sizeof(NRF_RTC_regs));

  for (int i = 0; i < NHW_RTC_TOTAL_INST ; i++) {
    struct rtc_status *rtc_st = &nhw_rtc_st[i];

    rtc_st->NRF_RTC_regs = &NRF_RTC_regs[i];
    rtc_st->n_CCs = RTC_n_CCs[i];

    rtc_st->counter_startT_sub_us = TIME_NEVER;

    rtc_st->cc_timers = (bs_time_t *)bs_malloc(sizeof(bs_time_t)*RTC_n_CCs[i]);

    for (int j = 0 ; j < rtc_st->n_CCs ; j++) {
      rtc_st->cc_timers[j] = TIME_NEVER;
    }
    rtc_st->overflow_timer = TIME_NEVER;
    rtc_st->overflow_timer_sub_us = TIME_NEVER;

#if (NHW_HAS_DPPI)
    rtc_st->dppi_map = nhw_rtc_dppi_map[i];
    rtc_st->subscribed_CAPTURE = (struct nhw_subsc_mem*)bs_calloc(RTC_n_CCs[i], sizeof(struct nhw_subsc_mem));
#endif
  }
  Timer_RTC = TIME_NEVER;
}

NSI_TASK(nhw_rtc_init, HW_INIT, 100);

/*
 * Free all RTC instances' resources before program exit
 */
static void nhw_rtc_free(void)
{
  for (int t = 0; t < NHW_RTC_TOTAL_INST; t++) {
    struct rtc_status *rtc_st = &nhw_rtc_st[t];

    free(rtc_st->cc_timers);
    rtc_st->cc_timers = NULL;

#if (NHW_HAS_DPPI)
    free(rtc_st->subscribed_CAPTURE);
    rtc_st->subscribed_CAPTURE = NULL;
#endif /* (NHW_HAS_DPPI) */
  }
}

NSI_TASK(nhw_rtc_free, ON_EXIT_PRE, 100);

/**
 * Convert a time delta in sub-microsecond units to the equivalent time in microseconds.
 * The value is always rounded UP. Otherwise events would be registered at a time
 * (in microseconds) before the event actually occurred, which would lead to many
 * imprecise event timings, for example when the timing of an event is calculated
 * based on the last LF clock tick (which happens, for example, when triggering the
 * CLEAR or TRIGGER_OVERFLOW tasks).
 */
static bs_time_t sub_us_time_to_us_time(uint64_t sub_us_time)
{
  bs_time_t us_time = sub_us_time >> SUB_US_BITS;

  if (sub_us_time % (1U << SUB_US_BITS) != 0) { //rounding up
    us_time += 1;
  }

  return us_time;
}

/**
 * Convert a time delta in microseconds to the equivalent time in sub-microsecond units
 */
static uint64_t us_time_to_sub_us_time(bs_time_t us_time)
{
  return us_time << SUB_US_BITS;
}
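
/*
 * For illustration, a couple of worked conversions with SUB_US_BITS = 9:
 *   sub_us_time_to_us_time(15625)   -> 15625 >> 9 = 30, remainder != 0, so 31 us (rounded up)
 *   sub_us_time_to_us_time(2*15625) -> 31250 >> 9 = 61, remainder != 0, so 62 us
 *   us_time_to_sub_us_time(31)      -> 31 << 9 = 15872 (note: NOT 15625; the microsecond
 *                                      representation is coarser than the LF tick itself)
 */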

static uint64_t get_time_in_sub_us(void)
{
  bs_time_t now = nsi_hws_get_time();

  if (now >= sub_us_time_to_us_time(TIME_NEVER)) {
    bs_trace_error_time_line("Bummer, the RTC model only supports running for 1142 years\n");
    /*If you really need this, generalize the calculation to more than 64 bits*/
  }

  return us_time_to_sub_us_time(now);
}

static uint64_t get_last_lf_tick_time_sub_us(void) {
  uint64_t now_sub_us = get_time_in_sub_us();

  uint64_t n_lf_ticks = (now_sub_us - first_lf_tick_time_sub_us) / LF_CLOCK_PERIOD_subus; //floor()
  uint64_t last_tick_time_sub_us = n_lf_ticks * LF_CLOCK_PERIOD_subus;
  last_tick_time_sub_us += first_lf_tick_time_sub_us;

  return last_tick_time_sub_us;
}

/**
 * Convert a time delta in sub-microsecond units to the equivalent count accounting for the PRESCALER
 * Note that the number is rounded down [floor()]
 */
static uint64_t time_sub_us_to_counter(uint rtc, uint64_t delta_sub_us) {
  uint64_t ticks;

  ticks = delta_sub_us / ((uint64_t)LF_CLOCK_PERIOD_subus * (nhw_rtc_st[rtc].PRESC + 1));
  return ticks;
}

/**
 * Convert a counter delta to sub-microsecond units accounting for the PRESCALER
 */
static uint64_t counter_to_time_sub_us(uint rtc, uint64_t counter) {
  uint64_t Elapsed;

  Elapsed = counter * (uint64_t)LF_CLOCK_PERIOD_subus * (nhw_rtc_st[rtc].PRESC + 1);

  return Elapsed;
}

/**
 * Return the time in sub-microsecond units it takes for the COUNTER to do 1 wrap
 */
static uint64_t time_of_1_counter_wrap_sub_us(uint rtc) {
  return counter_to_time_sub_us(rtc, (uint64_t)RTC_COUNTER_MASK + 1);
}
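
/*
 * Example of the relation above: with PRESCALER = 0, one counter wrap takes
 * (RTC_COUNTER_MASK + 1) * LF_CLOCK_PERIOD_subus = 2**24 * 15625 sub-us
 * = 2**24 * 15625 / 2**9 us = 512,000,000 us, i.e. the 24-bit COUNTER overflows
 * every 512 seconds, matching the real RTC running directly off the 32768 Hz LFCLK.
 */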

/*
 * Return the *next* time (in us) when the RTC counter will reach <counter_match>
 */
static bs_time_t get_counter_match_time(uint rtc, uint64_t counter_match, uint64_t* next_match_sub_us)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];

  bs_time_t next_match_us = TIME_NEVER;
  *next_match_sub_us = TIME_NEVER;

  if (this->running == true) {
    uint64_t now_sub_us = get_time_in_sub_us();
    uint64_t counter_match_sub_us = counter_to_time_sub_us(rtc, counter_match);

    if (this->counter_startT_sub_us > 0) {
      *next_match_sub_us = this->counter_startT_sub_us
                           + counter_match_sub_us;
    } else if (counter_match_sub_us > this->counter_startT_negative_sub_us) {
      *next_match_sub_us = counter_match_sub_us - this->counter_startT_negative_sub_us;
    } else {
      *next_match_sub_us = time_of_1_counter_wrap_sub_us(rtc)
                           + counter_match_sub_us - this->counter_startT_negative_sub_us;
    }

    while (*next_match_sub_us <= now_sub_us) {
      *next_match_sub_us += time_of_1_counter_wrap_sub_us(rtc);
    }

    next_match_us = sub_us_time_to_us_time(*next_match_sub_us);
  }

  return next_match_us;
}

static void nhw_rtc_update_master_timer(void) {
  Timer_RTC = TIME_NEVER;
  for (int rtc = 0; rtc < NHW_RTC_TOTAL_INST ; rtc++) {
    struct rtc_status *this = &nhw_rtc_st[rtc];

    if (this->running == false) {
      continue;
    }
    for (int cc = 0 ; cc < this->n_CCs ; cc++) {
      if (this->cc_timers[cc] < Timer_RTC) {
        Timer_RTC = this->cc_timers[cc];
      }
    }

    if (this->overflow_timer < Timer_RTC) {
      Timer_RTC = this->overflow_timer;
    }
  }
  nsi_hws_find_next_event();
}

/**
 * Save in cc_timers[cc] the *next* time when this RTC will match the
 * CC[cc] register
 */
static void update_cc_timer(uint rtc, uint cc) {
  uint64_t match_sub_us; // Only to comply with the interface
  nhw_rtc_st[rtc].cc_timers[cc] = get_counter_match_time(rtc, NRF_RTC_regs[rtc].CC[cc], &match_sub_us);
}

/*
 * Update all cc_timers[*] for a RTC instance
 * to the *next* time when they will match
 */
static void update_all_cc_timers(uint rtc) {
  for (int cc = 0 ; cc < nhw_rtc_st[rtc].n_CCs; cc++) {
    update_cc_timer(rtc, cc);
  }
}

static void update_overflow_timer(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];
  this->overflow_timer = get_counter_match_time(rtc, RTC_COUNTER_MASK + 1, &this->overflow_timer_sub_us);
}

static void update_timers(int rtc)
{
  update_all_cc_timers(rtc);
  update_overflow_timer(rtc);
  nhw_rtc_update_master_timer();
}

/**
 * Set the internal state of the counter as if the counter had just been set to the specified value.
 * This is done by setting the "counter start time" (counter_startT*) to an appropriate value,
 * so that the time elapsed since the counter start corresponds to the given counter value.
 * Such a virtual "counter start time" can be negative.
 */
static void nhw_rtc_set_counter(uint rtc, uint64_t counter_val)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];

  counter_val &= RTC_COUNTER_MASK;
  uint64_t counter_val_sub_us = counter_to_time_sub_us(rtc, counter_val);

  // All the functions which use this reset the <PRESC>, so it is as if the counter was set
  // on the last LF clock tick
  uint64_t last_lf_tick_sub_us = get_last_lf_tick_time_sub_us();

  if (last_lf_tick_sub_us >= counter_val_sub_us) {
    this->counter_startT_sub_us = last_lf_tick_sub_us - counter_val_sub_us;
    this->counter_startT_negative_sub_us = 0;
  } else {
    this->counter_startT_sub_us = 0;
    this->counter_startT_negative_sub_us = counter_val_sub_us - last_lf_tick_sub_us;
  }

  NRF_RTC_regs[rtc].COUNTER = counter_val;

  update_timers(rtc);
}
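
/*
 * A short, hypothetical example of the "negative start time" bookkeeping above:
 * suppose, for illustration, the last LF tick was at 100 us from boot and the counter
 * is set to 5 (with PRESCALER = 0). COUNTER = 0 would then correspond to 5 LF ticks
 * *before* that tick, i.e. ~52.6 us before boot. As the start time variables are
 * unsigned, this is stored as counter_startT_sub_us = 0 and
 * counter_startT_negative_sub_us = 5*15625 - 100*2**9 = 26925 sub-us, and
 * get_counter_match_time()/nhw_rtc_update_COUNTER() add the negative part back in.
 */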

static void handle_overflow_event(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];

  // The real time (in sub-microsecond units, not in microseconds)
  // at which the current overflow event occurs.
  // (update_overflow_timer() will overwrite this->overflow_timer_sub_us)
  uint64_t current_overflow_event_sub_us = this->overflow_timer_sub_us;

  update_overflow_timer(rtc); //Next time it will overflow

  bs_trace_raw_time(8, "RTC%i: Timer overflow\n", rtc);

  this->counter_startT_sub_us = current_overflow_event_sub_us;
  this->counter_startT_negative_sub_us = 0;

  nhw_rtc_signal_OVERFLOW(rtc);
}

static void nhw_rtc_timer_triggered(void) {
  for (int rtc = 0; rtc < NHW_RTC_TOTAL_INST ; rtc++) {
    struct rtc_status *rtc_el = &nhw_rtc_st[rtc];
    if (rtc_el->running == false) {
      continue;
    }

    for (int cc = 0 ; cc < rtc_el->n_CCs ; cc++) {
      if (rtc_el->cc_timers[cc] == Timer_RTC) { //This CC is matching now
        update_cc_timer(rtc, cc); //Next time it will match
        nhw_rtc_signal_COMPARE(rtc, cc);
      }
    }

    if (rtc_el->overflow_timer == Timer_RTC) { //Overflow occurred now
      handle_overflow_event(rtc); //This must always be the last event, as it might update counter_startT_sub_us
    }

  }
  nhw_rtc_update_master_timer();
}

NSI_HW_EVENT(Timer_RTC, nhw_rtc_timer_triggered, 50);

/**
 * Check if an EVTEN or INTEN value has the TICK event set
 */
static void check_not_supported_TICK(uint32_t i) {
  if (i & RTC_EVTEN_TICK_Msk) {
    bs_trace_warning_line_time("RTC: The TICK functionality is not modelled\n");
  }
}

void nhw_rtc_notify_first_lf_tick(void) {
  first_lf_tick_time_sub_us = get_time_in_sub_us();
  bs_trace_raw_time(9, "RTC: First lf tick\n");
}

/*
 * Update the COUNTER register so it can be read by SW
 */
void nhw_rtc_update_COUNTER(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (this->running == true) {
    uint64_t count;
    count = time_sub_us_to_counter(rtc,
                                   get_time_in_sub_us() - this->counter_startT_sub_us
                                   + this->counter_startT_negative_sub_us);
    RTC_regs->COUNTER = count & RTC_COUNTER_MASK;
  } else {
    RTC_regs->COUNTER = this->counter_at_stop & RTC_COUNTER_MASK;
  }
}
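
/*
 * For context, a minimal sketch of the expected read path from the embedded SW side
 * (the HAL register-read wrapper name below is illustrative, not part of this model):
 *
 *   uint32_t hal_rtc_read_counter(uint rtc_inst) {
 *     nhw_rtc_update_COUNTER(rtc_inst);        // bring the register up to date first
 *     return NRF_RTC_regs[rtc_inst].COUNTER;   // then read it as SW would
 *   }
 *
 * Reading NRF_RTC_regs[].COUNTER without calling nhw_rtc_update_COUNTER() first returns
 * a stale value, as noted in the header of this file.
 */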

/**
 * TASK_START triggered handler
 */
static void nhw_rtc_TASKS_START(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];

  if (this->running == true) {
    return;
  }
  bs_trace_raw_time(5, "RTC%i: TASK_START\n", rtc);
  this->running = true;

  /* Pre-scaler value is latched to an internal register on tasks START, CLEAR, and TRIGOVRFLW */
  this->PRESC = NRF_RTC_regs[rtc].PRESCALER;

  //If the counter is not zero at start, it is as if the counter had been started earlier
  nhw_rtc_set_counter(rtc, this->counter_at_stop);
}

/**
 * TASK_STOP triggered handler
 */
static void nhw_rtc_TASKS_STOP(uint rtc) {
  struct rtc_status *this = &nhw_rtc_st[rtc];

  if (this->running == false) {
    return;
  }
  bs_trace_raw_time(5, "RTC%i: TASK_STOP\n", rtc);
  this->running = false;
  //We save the counter value at the time it was stopped, in case it is started again without clearing it
  this->counter_at_stop = time_sub_us_to_counter(rtc,
                                                 get_time_in_sub_us() - this->counter_startT_sub_us
                                                 + this->counter_startT_negative_sub_us);
  this->counter_at_stop &= RTC_COUNTER_MASK;
  NRF_RTC_regs[rtc].COUNTER = this->counter_at_stop;
  for (int cc = 0 ; cc < this->n_CCs ; cc++) {
    this->cc_timers[cc] = TIME_NEVER;
  }
  this->overflow_timer = TIME_NEVER;
  nhw_rtc_update_master_timer();
}

/**
 * TASK_CLEAR triggered handler
 */
static void nhw_rtc_TASKS_CLEAR(uint rtc) {
  bs_trace_raw_time(5, "RTC%i: TASK_CLEAR\n", rtc);

  /* Pre-scaler value is latched to an internal register on tasks START, CLEAR, and TRIGOVRFLW */
  nhw_rtc_st[rtc].PRESC = NRF_RTC_regs[rtc].PRESCALER;
  nhw_rtc_set_counter(rtc, 0);
}

/**
 * TASK_TRIGGER_OVERFLOW triggered handler
 */
static void nhw_rtc_TASKS_TRIGOVRFLW(uint rtc) {

  bs_trace_raw_time(5, "RTC%i: TASK_TRIGGER_OVERFLOW\n", rtc);

  /* Pre-scaler value is latched to an internal register on tasks START, CLEAR, and TRIGOVRFLW */
  nhw_rtc_st[rtc].PRESC = NRF_RTC_regs[rtc].PRESCALER;
  nhw_rtc_set_counter(rtc, RTC_TRIGGER_OVERFLOW_COUNTER_VALUE);
}

#if (NHW_RTC_HAS_CAPTURE)
static void nhw_rtc_TASKS_CAPTURE(uint rtc, uint cc_n) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  nhw_rtc_update_COUNTER(rtc);
  RTC_regs->CC[cc_n] = RTC_regs->COUNTER;

  nhw_rtc_regw_sideeffects_CC(rtc, cc_n);
}
#endif /* NHW_RTC_HAS_CAPTURE */

static void nhw_rtc_eval_interrupts(uint rtc) {
  /* Mapping of peripheral instance to {int controller instance, int number} */
  static struct nhw_irq_mapping nhw_rtc_irq_map[NHW_RTC_TOTAL_INST] = NHW_RTC_INT_MAP;
  static bool RTC_int_line[NHW_RTC_TOTAL_INST]; /* Is the RTC currently driving its interrupt line high */

  struct rtc_status *this = &nhw_rtc_st[rtc];
  bool new_int_line = false;

  for (int cc = 0; cc < this->n_CCs; cc++) {
    int mask = this->INTEN & (RTC_INTENSET_COMPARE0_Msk << cc);
    if (NRF_RTC_regs[rtc].EVENTS_COMPARE[cc] && mask) {
      new_int_line = true;
      break; /* No need to check more */
    }
  }

  if (NRF_RTC_regs[rtc].EVENTS_TICK && (this->INTEN & RTC_INTENSET_TICK_Msk)) {
    new_int_line = true;
  }
  if (NRF_RTC_regs[rtc].EVENTS_OVRFLW && (this->INTEN & RTC_INTENSET_OVRFLW_Msk)) {
    new_int_line = true;
  }

  if (RTC_int_line[rtc] == false && new_int_line == true) {
    RTC_int_line[rtc] = true;
    hw_irq_ctrl_raise_level_irq_line(nhw_rtc_irq_map[rtc].cntl_inst,
                                     nhw_rtc_irq_map[rtc].int_nbr);
  } else if (RTC_int_line[rtc] == true && new_int_line == false) {
    RTC_int_line[rtc] = false;
    hw_irq_ctrl_lower_level_irq_line(nhw_rtc_irq_map[rtc].cntl_inst,
                                     nhw_rtc_irq_map[rtc].int_nbr);
  }
}

static void nhw_rtc_signal_COMPARE(uint rtc, uint cc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

#if (NHW_RTC_HAS_SHORT_COMP_CLEAR)
  if (RTC_regs->SHORTS & (RTC_SHORTS_COMPARE0_CLEAR_Msk << cc)) {
    nhw_rtc_TASKS_CLEAR(rtc);
    bs_trace_warning_line_time("RTC: COMPARE->CLEAR short used, but CLEAR is instantaneous. "
                               "If you are using this to generate a periodic interrupt, the period "
                               "will be 1 count too short\n");
  }
#endif /* NHW_RTC_HAS_SHORT_COMP_CLEAR */

  uint32_t mask = RTC_EVTEN_COMPARE0_Msk << cc;

  if (!((RTC_regs->EVTEN | this->INTEN) & mask)) {
    return;
  }

  RTC_regs->EVENTS_COMPARE[cc] = 1;

  if (RTC_regs->EVTEN & mask) {
#if (NHW_HAS_PPI)
    ppi_event_types_t event = RTC0_EVENTS_COMPARE_0;
    switch (rtc) {
      case 0:
        event = RTC0_EVENTS_COMPARE_0;
        break;
      case 1:
        event = RTC1_EVENTS_COMPARE_0;
        break;
      case 2:
        event = RTC2_EVENTS_COMPARE_0;
        break;
    }
    event += cc;
    nrf_ppi_event(event);
#elif (NHW_HAS_DPPI)
    nhw_dppi_event_signal_if(this->dppi_map,
                             RTC_regs->PUBLISH_COMPARE[cc]);
#endif
  }

  nhw_rtc_eval_interrupts(rtc);
}

static void nhw_rtc_signal_OVERFLOW(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (!((RTC_regs->EVTEN | this->INTEN) & RTC_EVTEN_OVRFLW_Msk)) {
    return;
  }

  RTC_regs->EVENTS_OVRFLW = 1;

  if (RTC_regs->EVTEN & RTC_EVTEN_OVRFLW_Msk) {
#if (NHW_HAS_PPI)
    ppi_event_types_t event = RTC0_EVENTS_OVRFLW;
    switch (rtc) {
      case 0:
        event = RTC0_EVENTS_OVRFLW;
        break;
      case 1:
        event = RTC1_EVENTS_OVRFLW;
        break;
      case 2:
        event = RTC2_EVENTS_OVRFLW;
        break;
    }
    nrf_ppi_event(event);
#elif (NHW_HAS_DPPI)
    nhw_dppi_event_signal_if(this->dppi_map,
                             RTC_regs->PUBLISH_OVRFLW);
#endif
  }

  nhw_rtc_eval_interrupts(rtc);
}


/*static*/ void nhw_rtc_signal_TICK(uint rtc) /* Not yet used, as the TICK functionality is not yet implemented */
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (!((RTC_regs->EVTEN | this->INTEN) & RTC_EVTEN_TICK_Msk)) {
    return;
  }

  RTC_regs->EVENTS_TICK = 1;

  if (RTC_regs->EVTEN & RTC_EVTEN_TICK_Msk) {
#if (NHW_HAS_PPI)
    ppi_event_types_t event = RTC0_EVENTS_TICK;
    switch (rtc) {
      case 0:
        event = RTC0_EVENTS_TICK;
        break;
      case 1:
        event = RTC1_EVENTS_TICK;
        break;
      case 2:
        event = RTC2_EVENTS_TICK;
        break;
    }
    nrf_ppi_event(event);
#elif (NHW_HAS_DPPI)
    nhw_dppi_event_signal_if(this->dppi_map,
                             RTC_regs->PUBLISH_TICK);
#endif
  }

  nhw_rtc_eval_interrupts(rtc);
}

void nhw_rtc_regw_sideeffect_TASKS_START(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_START) {
    RTC_regs->TASKS_START = 0;
    nhw_rtc_TASKS_START(i);
  }
}

void nhw_rtc_regw_sideeffect_TASKS_STOP(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_STOP) {
    RTC_regs->TASKS_STOP = 0;
    nhw_rtc_TASKS_STOP(i);
  }
}

void nhw_rtc_regw_sideeffect_TASKS_CLEAR(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_CLEAR) {
    RTC_regs->TASKS_CLEAR = 0;
    nhw_rtc_TASKS_CLEAR(i);
  }
}

void nhw_rtc_regw_sideeffect_TASKS_TRIGOVRFLW(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_TRIGOVRFLW) {
    RTC_regs->TASKS_TRIGOVRFLW = 0;
    nhw_rtc_TASKS_TRIGOVRFLW(i);
  }
}

#if (NHW_RTC_HAS_CAPTURE)
void nhw_rtc_regw_sideeffect_TASKS_CAPTURE(uint i, uint cc) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->TASKS_CAPTURE[cc]) {
    RTC_regs->TASKS_CAPTURE[cc] = 0;
    nhw_rtc_TASKS_CAPTURE(i, cc);
  }
}
#endif /* NHW_RTC_HAS_CAPTURE */

#if (NHW_HAS_DPPI)

static void nhw_rtc_taskcapture_wrap(void* param) {
  unsigned int inst = (uintptr_t)param >> 16;
  uint cc_n = (uintptr_t)param & 0xFFFF;
  nhw_rtc_TASKS_CAPTURE(inst, cc_n);
}

void nhw_rtc_regw_sideeffects_SUBSCRIBE_CAPTURE(uint inst, uint cc_n) {
  struct rtc_status *this = &nhw_rtc_st[inst];

  nhw_dppi_common_subscribe_sideeffect(this->dppi_map,
                                       this->NRF_RTC_regs->SUBSCRIBE_CAPTURE[cc_n],
                                       &this->subscribed_CAPTURE[cc_n],
                                       nhw_rtc_taskcapture_wrap,
                                       (void*)((inst << 16) + cc_n));
}
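
/*
 * The capture subscription callback gets both the instance and the CC index packed
 * into the single void* parameter: for example, RTC instance 1, CC 2 is passed as
 * (void *)((1 << 16) + 2) = (void *)0x10002, and nhw_rtc_taskcapture_wrap() above
 * unpacks it again with ">> 16" and "& 0xFFFF".
 */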

#define NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(TASK_N)                                \
  static void nhw_rtc_task##TASK_N##_wrap(void* param)                          \
  {                                                                              \
    nhw_rtc_TASKS_##TASK_N((int) param);                                         \
  }                                                                              \
                                                                                 \
  void nhw_rtc_regw_sideeffects_SUBSCRIBE_##TASK_N(uint inst)                    \
  {                                                                              \
    struct rtc_status *this = &nhw_rtc_st[inst];                                 \
                                                                                 \
    nhw_dppi_common_subscribe_sideeffect(this->dppi_map,                         \
                                         this->NRF_RTC_regs->SUBSCRIBE_##TASK_N, \
                                         &this->subscribed_##TASK_N,             \
                                         nhw_rtc_task##TASK_N##_wrap,            \
                                         (void*) inst);                          \
  }

NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(START)
NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(STOP)
NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(CLEAR)
NHW_RTC_REGW_SIDEFFECTS_SUBSCRIBE(TRIGOVRFLW)

#endif /* NHW_HAS_DPPI */

void nhw_rtc_regw_sideeffect_INTENSET(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (RTC_regs->INTENSET) {
    this->INTEN |= RTC_regs->INTENSET;
    RTC_regs->INTENSET = this->INTEN;

    check_not_supported_TICK(this->INTEN);
    nhw_rtc_eval_interrupts(rtc);
  }
}

void nhw_rtc_regw_sideeffect_INTENCLR(uint rtc)
{
  struct rtc_status *this = &nhw_rtc_st[rtc];
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[rtc];

  if (RTC_regs->INTENCLR) {
    this->INTEN &= ~RTC_regs->INTENCLR;
    RTC_regs->INTENSET = this->INTEN;
    RTC_regs->INTENCLR = 0;

    nhw_rtc_eval_interrupts(rtc);
  }
}

void nhw_rtc_regw_sideeffect_EVTENSET(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->EVTENSET) {
    RTC_regs->EVTEN |= RTC_regs->EVTENSET;
    RTC_regs->EVTENSET = RTC_regs->EVTEN;
    check_not_supported_TICK(RTC_regs->EVTEN);
  }
}

void nhw_rtc_regw_sideeffect_EVTENCLR(uint i) {
  NRF_RTC_Type *RTC_regs = &NRF_RTC_regs[i];

  if (RTC_regs->EVTENCLR) {
    RTC_regs->EVTEN &= ~RTC_regs->EVTENCLR;
    RTC_regs->EVTENSET = RTC_regs->EVTEN;
    RTC_regs->EVTENCLR = 0;
  }
}

void nhw_rtc_regw_sideeffects_EVENTS_all(uint rtc) {
  nhw_rtc_eval_interrupts(rtc);
}

void nhw_rtc_regw_sideeffects_CC(uint rtc, uint cc_n) {
  struct rtc_status *this = &nhw_rtc_st[rtc];

  if (this->running == true) {
    update_cc_timer(rtc, cc_n);
    nhw_rtc_update_master_timer();
  }
}

#if (NHW_HAS_PPI)
void nhw_rtc0_TASKS_START(void) { nhw_rtc_TASKS_START(0); }
void nhw_rtc0_TASKS_STOP(void) { nhw_rtc_TASKS_STOP(0); }
void nhw_rtc0_TASKS_CLEAR(void) { nhw_rtc_TASKS_CLEAR(0); }
void nhw_rtc0_TASKS_TRIGOVRFLW(void) { nhw_rtc_TASKS_TRIGOVRFLW(0); }
void nhw_rtc1_TASKS_START(void) { nhw_rtc_TASKS_START(1); }
void nhw_rtc1_TASKS_STOP(void) { nhw_rtc_TASKS_STOP(1); }
void nhw_rtc1_TASKS_CLEAR(void) { nhw_rtc_TASKS_CLEAR(1); }
void nhw_rtc1_TASKS_TRIGOVRFLW(void) { nhw_rtc_TASKS_TRIGOVRFLW(1); }
void nhw_rtc2_TASKS_START(void) { nhw_rtc_TASKS_START(2); }
void nhw_rtc2_TASKS_STOP(void) { nhw_rtc_TASKS_STOP(2); }
void nhw_rtc2_TASKS_CLEAR(void) { nhw_rtc_TASKS_CLEAR(2); }
void nhw_rtc2_TASKS_TRIGOVRFLW(void) { nhw_rtc_TASKS_TRIGOVRFLW(2); }
#endif /* NHW_HAS_PPI */
