1 /*
2 * Copyright (c) 2020, Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <zephyr/ztest.h>
7 #include <zephyr/drivers/timer/nrf_rtc_timer.h>
8 #include <hal/nrf_rtc.h>
9 #include <hal/nrf_timer.h>
10 #include <zephyr/irq.h>
11
/* Parameters and result slot shared between a test body and its
 * timeout_handler callback (passed through the user_data pointer).
 */
struct test_data {
	uint64_t target_time;	/* Absolute RTC tick value the event was set for. */
	uint32_t window;	/* Allowed lateness of expire_time past target_time, in ticks. */
	uint32_t delay;		/* Max allowed distance (ticks) between expiry and handler run. */
	int err;		/* -EINVAL until the handler validates the expiry, then 0. */
};

/* Counts timeout_handler invocations; reset by tests that check it. */
static int timeout_handler_cnt;
20
/* Direct (optionally zero-latency) ISR for TIMER0. Clears the COMPARE0
 * event and then deliberately stalls the CPU so the stress test runs
 * with an interrupt source that disturbs RTC compare scheduling.
 */
ISR_DIRECT_DECLARE(timer0_isr_wrapper)
{
	nrf_timer_event_clear(NRF_TIMER0, NRF_TIMER_EVENT_COMPARE0);

	/* Stall for 60 us inside the ISR to perturb RTC timing. */
	k_busy_wait(60);

	/* 0: no rescheduling request from this direct ISR. */
	return 0;
}
29
/* Configure TIMER0 as a 1 MHz, 32-bit timer that interrupts on COMPARE0
 * every 100 us (auto-clearing via the COMPARE0->CLEAR shortcut) and
 * connect its direct ISR. The IRQ is flagged zero-latency when
 * CONFIG_ZERO_LATENCY_IRQS is enabled, so it can preempt regular IRQ
 * handling during the stress test.
 */
static void init_zli_timer0(void)
{
	nrf_timer_mode_set(NRF_TIMER0, NRF_TIMER_MODE_TIMER);
	nrf_timer_bit_width_set(NRF_TIMER0, NRF_TIMER_BIT_WIDTH_32);
	nrf_timer_prescaler_set(NRF_TIMER0, NRF_TIMER_FREQ_1MHz);
	nrf_timer_cc_set(NRF_TIMER0, 0, 100);
	nrf_timer_int_enable(NRF_TIMER0, NRF_TIMER_INT_COMPARE0_MASK);
	nrf_timer_shorts_enable(NRF_TIMER0,
				NRF_TIMER_SHORT_COMPARE0_CLEAR_MASK);

	IRQ_DIRECT_CONNECT(TIMER0_IRQn, 0,
			   timer0_isr_wrapper,
			   IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS) ?
				IRQ_ZERO_LATENCY : 0);
	irq_enable(TIMER0_IRQn);
}
46
/* Start the disturbing TIMER0 interrupt source (see init_zli_timer0). */
static void start_zli_timer0(void)
{
	nrf_timer_task_trigger(NRF_TIMER0, NRF_TIMER_TASK_START);
}
51
/* Stop the disturbing TIMER0 interrupt source. */
static void stop_zli_timer0(void)
{
	nrf_timer_task_trigger(NRF_TIMER0, NRF_TIMER_TASK_STOP);
}
56
inject_overflow(void)57 static void inject_overflow(void)
58 {
59 /* Bump overflow counter by 100. */
60 uint32_t overflow_count = 100;
61
62 while (overflow_count--) {
63 nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_TRIGGER_OVERFLOW);
64 /* Wait for RTC counter to reach overflow from 0xFFFFF0 and
65 * get handled.
66 */
67 k_busy_wait(1000);
68 }
69 }
70
/* Generic RTC compare callback used by most tests.
 *
 * Asserts that the handler runs no more than data->delay ticks after
 * the reported expiry time, and records success (data->err = 0) when
 * expire_time falls within [target_time, target_time + window].
 * Also bumps the global invocation counter.
 */
static void timeout_handler(int32_t id, uint64_t expire_time, void *user_data)
{
	struct test_data *data = user_data;
	uint64_t now = z_nrf_rtc_timer_read();
	uint64_t diff = (now - expire_time);

	zassert_true(diff <= data->delay,
		     "Handler called in wrong time (%llu), set time: %llu, "
		     "got time: %llu",
		     now, data->target_time, expire_time);

	if ((expire_time >= data->target_time) &&
	    (expire_time <= (data->target_time + data->window))) {
		data->err = 0;
	}
	timeout_handler_cnt++;
}
88
/* Convert kernel timeout @p t to an RTC compare value, schedule it on
 * @p chan and verify via timeout_handler that it fires within the
 * expected window.
 *
 * @param chan	     Allocated RTC channel to use.
 * @param t	     Kernel timeout (relative or absolute) to convert.
 * @param ext_window Use extended tolerances (for stressed or already
 *		     expired timeouts).
 */
static void test_timeout(int32_t chan, k_timeout_t t, bool ext_window)
{
	int64_t ticks = z_nrf_rtc_timer_get_ticks(t);

	/* The conversion fails with -EINVAL for timeouts outside the
	 * schedulable span; catch that here instead of silently
	 * programming a bogus (sign-extended) compare value.
	 */
	zassert_true(ticks >= 0, "Invalid ticks value: %lld", ticks);

	struct test_data test_data = {
		.target_time = ticks,
		.window = ext_window ? 100 : (Z_TICK_ABS(t.ticks) ? 0 : 32),
		.delay = ext_window ? 100 : 2,
		.err = -EINVAL
	};

	z_nrf_rtc_timer_set(chan, (uint64_t)ticks, timeout_handler, &test_data);

	/* wait additional arbitrary time. */
	k_busy_wait(1000);
	k_sleep(t);

	zassert_equal(test_data.err, 0, "Unexpected err: %d", test_data.err);
}
107
108
/* Basic sanity: an absolute timeout ~1 ms ahead, relative 4 ms and
 * 100 ms timeouts, and an absolute timeout already in the past
 * (expected to expire immediately, checked with extended tolerances).
 */
ZTEST(nrf_rtc_timer, test_basic)
{
	int32_t chan = z_nrf_rtc_timer_chan_alloc();

	zassert_true(chan >= 0, "Failed to allocate RTC channel (%d).", chan);

	/* Absolute timeout 1 ms ahead of the current kernel tick. */
	k_timeout_t t0 =
		Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() + K_MSEC(1).ticks));

	test_timeout(chan, t0, false);

	k_timeout_t t1 = K_MSEC(4);

	test_timeout(chan, t1, false);

	k_timeout_t t2 = K_MSEC(100);

	test_timeout(chan, t2, false);

	/* value in the past should expire immediately (2 ticks from now)*/
	k_timeout_t t3 =
		Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() - K_MSEC(1).ticks));

	test_timeout(chan, t3, true);

	z_nrf_rtc_timer_chan_free(chan);
}
137
/* Verify that the reported COMPARE event address for channel 0 matches
 * the RTC1 peripheral's EVENTS_COMPARE[0] register address.
 */
ZTEST(nrf_rtc_timer, test_z_nrf_rtc_timer_compare_evt_address_get)
{
	uint32_t evt_addr;

	evt_addr = z_nrf_rtc_timer_compare_evt_address_get(0);
	zassert_equal(evt_addr, (uint32_t)&NRF_RTC1->EVENTS_COMPARE[0],
		      "Unexpected event addr:%x", evt_addr);
}
146
/* Verify that locking a channel's compare interrupt defers the handler:
 * while locked, the timeout must not fire even after its expiry time
 * has passed, and it must fire promptly once unlocked.
 */
ZTEST(nrf_rtc_timer, test_int_disable_enabled)
{
	uint64_t now = z_nrf_rtc_timer_read();
	uint64_t t = 1000;	/* Expiry offset in RTC ticks. */
	struct test_data data = {
		.target_time = now + t,
		.window = 1000,
		.delay = 2000,
		.err = -EINVAL
	};
	bool key;
	int32_t chan;

	chan = z_nrf_rtc_timer_chan_alloc();
	zassert_true(chan >= 0, "Failed to allocate RTC channel.");

	z_nrf_rtc_timer_set(chan, data.target_time, timeout_handler, &data);

	/* Handler must not have run yet. */
	zassert_equal(data.err, -EINVAL, "Unexpected err: %d", data.err);
	key = z_nrf_rtc_timer_compare_int_lock(chan);

	/* Sleep past the expiry time with the compare interrupt locked. */
	k_sleep(Z_TIMEOUT_TICKS(t + 100));
	/* No event yet. */
	zassert_equal(data.err, -EINVAL, "Unexpected err: %d", data.err);

	z_nrf_rtc_timer_compare_int_unlock(chan, key);
	k_busy_wait(100);
	/* Pending timeout must fire right after unlocking. */
	zassert_equal(data.err, 0, "Unexpected err: %d", data.err);

	z_nrf_rtc_timer_chan_free(chan);
}
178
ZTEST(nrf_rtc_timer,test_get_ticks)179 ZTEST(nrf_rtc_timer, test_get_ticks)
180 {
181 k_timeout_t t = K_MSEC(1);
182 uint64_t exp_ticks = z_nrf_rtc_timer_read() + t.ticks;
183 int ticks;
184
185 /* Relative 1ms from now timeout converted to RTC */
186 ticks = z_nrf_rtc_timer_get_ticks(t);
187 zassert_true((ticks >= exp_ticks) && (ticks <= (exp_ticks + 1)),
188 "Unexpected result %d (expected: %d)", ticks, exp_ticks);
189
190 /* Absolute timeout 1ms in the past */
191 t = Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() - K_MSEC(1).ticks));
192 exp_ticks = z_nrf_rtc_timer_read() - K_MSEC(1).ticks;
193 ticks = z_nrf_rtc_timer_get_ticks(t);
194 zassert_true((ticks >= exp_ticks - 1) && (ticks <= exp_ticks),
195 "Unexpected result %d (expected: %d)", ticks, exp_ticks);
196
197 /* Absolute timeout 10ms in the future */
198 t = Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() + K_MSEC(10).ticks));
199 exp_ticks = z_nrf_rtc_timer_read() + K_MSEC(10).ticks;
200 ticks = z_nrf_rtc_timer_get_ticks(t);
201 zassert_true((ticks >= exp_ticks - 1) && (ticks <= exp_ticks),
202 "Unexpected result %d (expected: %d)", ticks, exp_ticks);
203
204 /* too far in the future */
205 t = Z_TIMEOUT_TICKS(sys_clock_tick_get() + 0x01000001);
206 ticks = z_nrf_rtc_timer_get_ticks(t);
207 zassert_equal(ticks, -EINVAL, "Unexpected ticks: %d", ticks);
208 }
209
210
/* Compare handler for test_absolute_scheduling: computes the kernel
 * uptime (in microseconds) at which the event actually fired and stores
 * it in *user_data.
 *
 * NOTE(review): rtc_ticks_now is a plain int while the conversion
 * returns a 64-bit value - presumably the current RTC time fits in
 * 31 bits for the duration of this test; confirm.
 */
static void sched_handler(int32_t id, uint64_t expire_time, void *user_data)
{
	int64_t now = sys_clock_tick_get();
	int rtc_ticks_now =
		z_nrf_rtc_timer_get_ticks(Z_TIMEOUT_TICKS(Z_TICK_ABS(now)));
	uint64_t *evt_uptime_us = user_data;

	*evt_uptime_us =
		k_ticks_to_us_floor64(now - (rtc_ticks_now - expire_time));
}
221
ZTEST(nrf_rtc_timer,test_absolute_scheduling)222 ZTEST(nrf_rtc_timer, test_absolute_scheduling)
223 {
224 k_timeout_t t;
225 int64_t now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
226 uint64_t target_us = now_us + 5678;
227 uint64_t evt_uptime_us;
228 uint64_t rtc_ticks;
229 int32_t chan;
230
231 chan = z_nrf_rtc_timer_chan_alloc();
232 zassert_true(chan >= 0, "Failed to allocate RTC channel.");
233
234 /* schedule event in 5678us from now */
235 t = Z_TIMEOUT_TICKS(Z_TICK_ABS(K_USEC(target_us).ticks));
236 rtc_ticks = (uint64_t)z_nrf_rtc_timer_get_ticks(t);
237
238 z_nrf_rtc_timer_set(chan, rtc_ticks, sched_handler, &evt_uptime_us);
239
240 k_busy_wait(5678);
241
242 PRINT("RTC event scheduled at %dus for %dus,"
243 "event occured at %dus (uptime)\n",
244 (uint32_t)now_us, (uint32_t)target_us, (uint32_t)evt_uptime_us);
245
246 /* schedule event now. */
247 now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
248 t = Z_TIMEOUT_TICKS(Z_TICK_ABS(K_USEC(now_us).ticks));
249 rtc_ticks = (uint64_t)z_nrf_rtc_timer_get_ticks(t);
250
251 z_nrf_rtc_timer_set(chan, rtc_ticks, sched_handler, &evt_uptime_us);
252
253 k_busy_wait(200);
254
255 PRINT("RTC event scheduled now, at %dus,"
256 "event occured at %dus (uptime)\n",
257 (uint32_t)now_us, (uint32_t)evt_uptime_us);
258
259 z_nrf_rtc_timer_chan_free(chan);
260 }
261
ZTEST(nrf_rtc_timer,test_alloc_free)262 ZTEST(nrf_rtc_timer, test_alloc_free)
263 {
264 int32_t chan[CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT];
265 int32_t inv_ch;
266
267 for (int i = 0; i < CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT; i++) {
268 chan[i] = z_nrf_rtc_timer_chan_alloc();
269 zassert_true(chan[i] >= 0, "Failed to allocate RTC channel.");
270 }
271
272 inv_ch = z_nrf_rtc_timer_chan_alloc();
273 zassert_equal(inv_ch, -ENOMEM, "Unexpected return value %d", inv_ch);
274
275 for (int i = 0; i < CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT; i++) {
276 z_nrf_rtc_timer_chan_free(chan[i]);
277 }
278 }
279
/* Stress RTC scheduling for ~5 s while the (zero-latency, if enabled)
 * TIMER0 interrupt periodically stalls the CPU for 60 us, using short
 * timeouts whose offset varies on every iteration.
 */
ZTEST(nrf_rtc_timer, test_stress)
{
	int x = 0;	/* Varying microsecond offset added to the timeout. */
	uint32_t start = k_uptime_get_32();
	uint32_t test_time = 5000;	/* Total test duration in ms. */
	int32_t chan = z_nrf_rtc_timer_chan_alloc();

	zassert_true(chan >= 0, "Failed to allocate RTC channel.");
	start_zli_timer0();

	do {
		k_timeout_t t = K_USEC(40 + x);

		test_timeout(chan, t, true);
		/* On every iteration modify timeout to randomize it a bit
		 * against fixed zli interrupt pattern.
		 */
		x += 30;
		if (x > 200) {
			x = 0;
		}
	} while ((k_uptime_get_32() - start) < test_time);

	stop_zli_timer0();
	z_nrf_rtc_timer_chan_free(chan);
}
306
/* Repeatedly program a compare value and then, after a variable
 * sub-tick delay, re-program the same channel to a later value. Only
 * the final setting may fire, so the handler invocation count must
 * match the number of loop iterations exactly.
 */
ZTEST(nrf_rtc_timer, test_resetting_cc)
{
	uint32_t start = k_uptime_get_32();
	uint32_t test_time = 1000;	/* Test duration in ms. */
	int32_t chan = z_nrf_rtc_timer_chan_alloc();
	int i = 0;	/* Variable busy-wait length, cycles 0..19 us. */
	int cnt = 0;	/* Number of loop iterations (expected handler runs). */

	zassert_true(chan >= 0, "Failed to allocate RTC channel.");

	timeout_handler_cnt = 0;

	do {
		uint64_t now = z_nrf_rtc_timer_read();
		struct test_data test_data = {
			.target_time = now + 5,
			.window = 0,
			.delay = 0,
			.err = -EINVAL
		};

		/* Set timer but expect that it will never expire because
		 * it will be later on reset.
		 */
		z_nrf_rtc_timer_set(chan, now + 2, timeout_handler, &test_data);

		/* Arbitrary variable delay to reset CC before expiring first
		 * request but very close.
		 */
		k_busy_wait(i);
		i = (i + 1) % 20;

		z_nrf_rtc_timer_set(chan, now + 5, timeout_handler, &test_data);
		/* Wait past the expiry (~31 us per RTC tick). */
		k_busy_wait((5 + 1)*31);
		cnt++;
	} while ((k_uptime_get_32() - start) < test_time);

	zassert_equal(timeout_handler_cnt, cnt,
		      "Unexpected timeout count %d (exp: %d)",
		      timeout_handler_cnt, cnt);
	z_nrf_rtc_timer_chan_free(chan);
}
349
/* Records, via user_data, how many RTC ticks after the programmed
 * expiry time the handler actually ran.
 */
static void overflow_sched_handler(int32_t id, uint64_t expire_time,
				   void *user_data)
{
	uint64_t *lateness = user_data;

	*lateness = z_nrf_rtc_timer_read() - expire_time;
}
358
/* This test is to be executed as the last, due to interference in overflow
 * counter, resulting in nRF RTC timer ticks and kernel ticks desynchronization.
 */
ZTEST(nrf_rtc_timer, test_overflow)
{
	PRINT("RTC ticks before overflow injection: %u\r\n",
	      (uint32_t)z_nrf_rtc_timer_read());

	inject_overflow();

	PRINT("RTC ticks after overflow injection: %u\r\n",
	      (uint32_t)z_nrf_rtc_timer_read());

	uint64_t now;
	uint64_t target_time;
	uint64_t evt_uptime;	/* Lateness (ticks) written by the handler. */
	int32_t chan;

	chan = z_nrf_rtc_timer_chan_alloc();
	zassert_true(chan >= 0, "Failed to allocate RTC channel.");

	/* Schedule event in 5 ticks from now. */
	evt_uptime = UINT64_MAX;	/* Sentinel: handler not run yet. */
	now = z_nrf_rtc_timer_read();
	target_time = now + 5;
	z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
			    &evt_uptime);

	/* Busy-wait slightly longer than the scheduled delay. */
	k_busy_wait(k_ticks_to_us_floor64(5 + 1));

	PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
	      "event occurred at %llu ticks (uptime)\n",
	      now, target_time, evt_uptime);
	zassert_not_equal(UINT64_MAX, evt_uptime,
			  "Expired timer shall overwrite evt_uptime");

	/* Schedule event now. */
	evt_uptime = UINT64_MAX;
	now = z_nrf_rtc_timer_read();
	target_time = now;

	z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
			    &evt_uptime);

	k_busy_wait(200);

	zassert_not_equal(UINT64_MAX, evt_uptime,
			  "Expired timer shall overwrite evt_uptime");
	PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
	      "event occurred at %llu ticks (uptime)\n",
	      now, target_time, evt_uptime);

	/* Schedule event far in the past. */
	evt_uptime = UINT64_MAX;
	now = z_nrf_rtc_timer_read();
	target_time = now - 2 * NRF_RTC_TIMER_MAX_SCHEDULE_SPAN;

	z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
			    &evt_uptime);

	k_busy_wait(200);

	zassert_not_equal(UINT64_MAX, evt_uptime,
			  "Expired timer shall overwrite evt_uptime");
	PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
	      "event occurred at %llu ticks (uptime)\n",
	      now, target_time, evt_uptime);

	z_nrf_rtc_timer_chan_free(chan);
}
429
/* Handler that re-arms itself for the very next RTC cycle until
 * *timeouts_left reaches zero. Before each re-arm it busy-waits a
 * delay that grows by 1 us per invocation (static across calls), so
 * the rescheduling happens at varying phases relative to the RTC tick.
 */
static void next_cycle_timeout_handler(int32_t chan,
				       uint64_t expire_time,
				       void *user_data)
{
	static uint32_t delay;
	uint32_t *timeouts_left = (uint32_t *)user_data;

	if (--*timeouts_left) {
		k_busy_wait(delay);
		++delay;

		z_nrf_rtc_timer_set(chan, z_nrf_rtc_timer_read() + 1,
				    next_cycle_timeout_handler, user_data);
	}
}
445
ZTEST(nrf_rtc_timer,test_next_cycle_timeouts)446 ZTEST(nrf_rtc_timer, test_next_cycle_timeouts)
447 {
448 enum {
449 MAX_TIMEOUTS = 60,
450 /* Allow 5 cycles per each expected timeout. */
451 CYCLES_TO_WAIT = 5 * MAX_TIMEOUTS,
452 };
453 volatile uint32_t timeouts_left = MAX_TIMEOUTS;
454 int32_t chan;
455 uint32_t start;
456
457 chan = z_nrf_rtc_timer_chan_alloc();
458 zassert_true(chan > 0, "Failed to allocate RTC channel.");
459
460 /* First timeout is scheduled here, all further ones are scheduled
461 * from the timeout handler, always on the next cycle of the system
462 * timer but after a delay that increases 1 microsecond each time.
463 */
464 z_nrf_rtc_timer_set(chan, z_nrf_rtc_timer_read() + 1,
465 next_cycle_timeout_handler, (void *)&timeouts_left);
466
467 start = k_cycle_get_32();
468 while (timeouts_left) {
469 if ((k_cycle_get_32() - start) > CYCLES_TO_WAIT) {
470 break;
471 }
472 Z_SPIN_DELAY(10);
473 }
474
475 zassert_equal(0, timeouts_left,
476 "Failed to get %u timeouts.", timeouts_left);
477
478 z_nrf_rtc_timer_chan_free(chan);
479 }
480
/* Signals, through the optional bool pointed to by user_data, that the
 * (re)scheduled alarm has fired.
 */
static void tight_rescheduling_handler(int32_t chan,
				       uint64_t expire_time,
				       void *user_data)
{
	bool *fired = (bool *)user_data;

	if (fired != NULL) {
		*fired = true;
	}
}
489
ZTEST(nrf_rtc_timer,test_tight_rescheduling)490 ZTEST(nrf_rtc_timer, test_tight_rescheduling)
491 {
492 int32_t chan;
493 volatile bool expired;
494 /* This test tries to schedule an alarm to CYCLE_DIFF cycles from
495 * the current moment and then, after a delay that is changed in
496 * each iteration, tries to reschedule this alarm to one cycle later.
497 * It does not matter if the first alarm actually occurs, the key
498 * thing is to always get the second one.
499 */
500 enum {
501 CYCLE_DIFF = 5,
502 /* One RTC cycle is ~30.5 us. Check a range of delays from
503 * more than one cycle before the moment on which the first
504 * alarm is scheduled to a few microseconds after that alarm
505 * (when it is actually too late for rescheduling).
506 */
507 DELAY_MIN = 30 * CYCLE_DIFF - 40,
508 DELAY_MAX = 30 * CYCLE_DIFF + 10,
509 };
510
511 chan = z_nrf_rtc_timer_chan_alloc();
512 zassert_true(chan > 0, "Failed to allocate RTC channel.");
513
514 /* Repeat the whole test a couple of times to get also (presumably)
515 * various micro delays resulting from execution of the test routine
516 * itself asynchronously to the RTC.
517 */
518 for (uint32_t i = 0; i < 20; ++i) {
519 for (uint32_t delay = DELAY_MIN; delay <= DELAY_MAX; ++delay) {
520 uint64_t start = z_nrf_rtc_timer_read();
521
522 z_nrf_rtc_timer_set(chan, start + CYCLE_DIFF,
523 tight_rescheduling_handler, NULL);
524
525 k_busy_wait(delay);
526
527 expired = false;
528 z_nrf_rtc_timer_set(chan, start + CYCLE_DIFF + 1,
529 tight_rescheduling_handler, (void *)&expired);
530
531 while (!expired &&
532 (z_nrf_rtc_timer_read() - start) <
533 CYCLE_DIFF + 10) {
534 Z_SPIN_DELAY(10);
535 }
536 zassert_true(expired,
537 "Timeout expiration missed (d: %u us, i: %u)",
538 delay, i);
539 }
540 }
541
542 z_nrf_rtc_timer_chan_free(chan);
543 }
544
/* Suite setup: configure (but do not start) the disturbing TIMER0 used
 * by the stress test. Returns no per-suite fixture.
 */
static void *rtc_timer_setup(void)
{
	init_zli_timer0();

	return NULL;
}

ZTEST_SUITE(nrf_rtc_timer, NULL, rtc_timer_setup, NULL, NULL, NULL);
553