1 /*
2  * Copyright (c) 2020, Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/ztest.h>
7 #include <zephyr/drivers/timer/nrf_rtc_timer.h>
8 #include <hal/nrf_rtc.h>
9 #include <hal/nrf_timer.h>
10 #include <zephyr/irq.h>
11 
/* Context passed to timeout_handler() via user_data; filled in by each
 * test case and inspected after the timeout is expected to have fired.
 */
struct test_data {
	uint64_t target_time; /* Absolute RTC tick value the event was set for. */
	uint32_t window;      /* Accepted tolerance (RTC ticks) past target_time. */
	uint32_t delay;       /* Max allowed handler latency past expire_time. */
	int err;              /* Cleared to 0 by the handler on in-window expiry. */
};
18 
/* Number of timeout_handler() invocations; reset by tests that check it. */
static int timeout_handler_cnt;
20 
/* Direct ISR for TIMER0. When CONFIG_ZERO_LATENCY_IRQS is enabled it is
 * connected as a zero-latency interrupt (see init_zli_timer0()) and is
 * used to interfere with RTC timer processing during the stress test.
 */
ISR_DIRECT_DECLARE(timer0_isr_wrapper)
{
	nrf_timer_event_clear(NRF_TIMER0, NRF_TIMER_EVENT_COMPARE0);

	/* Deliberately linger in the ISR to maximize interference. */
	k_busy_wait(60);

	/* Return 0: no rescheduling request on interrupt exit. */
	return 0;
}
29 
/* Configure TIMER0 as a periodic 1 MHz interrupt source and connect it
 * as a direct - optionally zero-latency - IRQ. The timer is not started
 * here; see start_zli_timer0()/stop_zli_timer0().
 */
static void init_zli_timer0(void)
{
	nrf_timer_mode_set(NRF_TIMER0, NRF_TIMER_MODE_TIMER);
	nrf_timer_bit_width_set(NRF_TIMER0, NRF_TIMER_BIT_WIDTH_32);
	nrf_timer_prescaler_set(NRF_TIMER0, NRF_TIMER_FREQ_1MHz);
	/* CC0 = 100 -> COMPARE0 fires every 100 us at 1 MHz. */
	nrf_timer_cc_set(NRF_TIMER0, 0, 100);
	nrf_timer_int_enable(NRF_TIMER0, NRF_TIMER_INT_COMPARE0_MASK);
	/* Shortcut clears the counter on COMPARE0, making the IRQ periodic. */
	nrf_timer_shorts_enable(NRF_TIMER0,
				NRF_TIMER_SHORT_COMPARE0_CLEAR_MASK);

	IRQ_DIRECT_CONNECT(TIMER0_IRQn, 0,
			   timer0_isr_wrapper,
			   IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS) ?
			   IRQ_ZERO_LATENCY : 0);
	irq_enable(TIMER0_IRQn);
}
46 
/* Start the interfering TIMER0 interrupt configured in init_zli_timer0(). */
static void start_zli_timer0(void)
{
	nrf_timer_task_trigger(NRF_TIMER0, NRF_TIMER_TASK_START);
}
51 
/* Stop the interfering TIMER0 interrupt. */
static void stop_zli_timer0(void)
{
	nrf_timer_task_trigger(NRF_TIMER0, NRF_TIMER_TASK_STOP);
}
56 
inject_overflow(void)57 static void inject_overflow(void)
58 {
59 	/* Bump overflow counter by 100. */
60 	uint32_t overflow_count = 100;
61 
62 	while (overflow_count--) {
63 		nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_TRIGGER_OVERFLOW);
64 		/* Wait for RTC counter to reach overflow from 0xFFFFF0 and
65 		 * get handled.
66 		 */
67 		k_busy_wait(1000);
68 	}
69 }
70 
/* Common timeout callback: asserts the handler ran no later than
 * data->delay ticks after expire_time and records success (err = 0)
 * when expire_time lies within [target_time, target_time + window].
 */
static void timeout_handler(int32_t id, uint64_t expire_time, void *user_data)
{
	struct test_data *data = user_data;
	uint64_t now = z_nrf_rtc_timer_read();
	uint64_t diff = (now - expire_time);

	/* Latency bound: handler must not lag the expiration excessively. */
	zassert_true(diff <= data->delay,
		"Handler called in wrong time (%llu), set time: %llu, "
		"got time: %llu",
		now, data->target_time, expire_time);

	/* Accept only expiration times inside the requested window. */
	if ((expire_time >= data->target_time) &&
	    (expire_time <= (data->target_time + data->window))) {
		data->err = 0;
	}
	timeout_handler_cnt++;
}
88 
/* Convert t to RTC ticks, schedule it on chan and verify - via
 * timeout_handler() - that it expired within the expected window.
 * ext_window widens the tolerance (stress and immediate-expiry cases).
 */
static void test_timeout(int32_t chan, k_timeout_t t, bool ext_window)
{
	int64_t ticks = z_nrf_rtc_timer_get_ticks(t);
	struct test_data test_data = {
		.target_time = ticks,
		/* Absolute timeouts get a 0-tick window, relative ones 32,
		 * unless the caller asked for the extended 100-tick window.
		 */
		.window = ext_window ? 100 : (Z_TICK_ABS(t.ticks) ? 0 : 32),
		.delay = ext_window ? 100 : 2,
		.err = -EINVAL
	};

	z_nrf_rtc_timer_set(chan, (uint64_t)ticks, timeout_handler, &test_data);

	/* wait additional arbitrary time. */
	k_busy_wait(1000);
	k_sleep(t);

	/* The handler must have run and reported an in-window expiry. */
	zassert_equal(test_data.err, 0, "Unexpected err: %d", test_data.err);
}
107 
108 
/* Basic scheduling: an absolute timeout ~1 ms ahead, two relative
 * timeouts, and an absolute timeout that already lies in the past.
 */
ZTEST(nrf_rtc_timer, test_basic)
{
	int32_t chan = z_nrf_rtc_timer_chan_alloc();

	zassert_true(chan >= 0, "Failed to allocate RTC channel (%d).", chan);

	/* Absolute timeout 1 ms in the future. */
	k_timeout_t t0 =
		Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() + K_MSEC(1).ticks));

	test_timeout(chan, t0, false);

	k_timeout_t t1 = K_MSEC(4);

	test_timeout(chan, t1, false);


	k_timeout_t t2 = K_MSEC(100);

	test_timeout(chan, t2, false);

	/* A value in the past should expire immediately (2 ticks from now). */
	k_timeout_t t3 =
		Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() - K_MSEC(1).ticks));

	test_timeout(chan, t3, true);

	z_nrf_rtc_timer_chan_free(chan);
}
137 
ZTEST(nrf_rtc_timer,test_z_nrf_rtc_timer_compare_evt_address_get)138 ZTEST(nrf_rtc_timer, test_z_nrf_rtc_timer_compare_evt_address_get)
139 {
140 	uint32_t evt_addr;
141 
142 	evt_addr = z_nrf_rtc_timer_compare_evt_address_get(0);
143 	zassert_equal(evt_addr, (uint32_t)&NRF_RTC1->EVENTS_COMPARE[0],
144 			"Unexpected event addr:%x", evt_addr);
145 }
146 
/* Verify that locking the channel's compare interrupt defers the
 * handler: no event fires while locked, and the handler runs promptly
 * once the interrupt is unlocked.
 */
ZTEST(nrf_rtc_timer, test_int_disable_enabled)
{
	uint64_t now = z_nrf_rtc_timer_read();
	uint64_t t = 1000;
	struct test_data data = {
		.target_time = now + t,
		.window = 1000,
		/* Generous latency bound: expiry is reported after unlock. */
		.delay = 2000,
		.err = -EINVAL
	};
	bool key;
	int32_t chan;

	chan = z_nrf_rtc_timer_chan_alloc();
	zassert_true(chan >= 0, "Failed to allocate RTC channel.");

	z_nrf_rtc_timer_set(chan, data.target_time, timeout_handler, &data);

	/* Not expired yet. */
	zassert_equal(data.err, -EINVAL, "Unexpected err: %d", data.err);
	key = z_nrf_rtc_timer_compare_int_lock(chan);

	/* Sleep past the expiration point with the interrupt locked. */
	k_sleep(Z_TIMEOUT_TICKS(t + 100));
	/* No event yet. */
	zassert_equal(data.err, -EINVAL, "Unexpected err: %d", data.err);

	z_nrf_rtc_timer_compare_int_unlock(chan, key);
	k_busy_wait(100);
	/* Handler must have run shortly after unlocking. */
	zassert_equal(data.err, 0, "Unexpected err: %d", data.err);

	z_nrf_rtc_timer_chan_free(chan);
}
178 
/* Check k_timeout_t -> RTC tick conversion for relative timeouts,
 * absolute timeouts in the past and future, and an out-of-range value.
 */
ZTEST(nrf_rtc_timer, test_get_ticks)
{
	k_timeout_t t = K_MSEC(1);
	uint64_t exp_ticks = z_nrf_rtc_timer_read() + t.ticks;
	/* NOTE(review): `int` is compared against uint64_t below; values
	 * stay small and non-negative in these cases, but a wider signed
	 * type would be safer - confirm against the driver's return range.
	 */
	int ticks;

	/* Relative 1ms from now timeout converted to RTC */
	ticks = z_nrf_rtc_timer_get_ticks(t);
	zassert_true((ticks >= exp_ticks) && (ticks <= (exp_ticks + 1)),
		     "Unexpected result %d (expected: %d)", ticks, (int)exp_ticks);

	/* Absolute timeout 1ms in the past */
	t = Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() - K_MSEC(1).ticks));
	exp_ticks = z_nrf_rtc_timer_read() - K_MSEC(1).ticks;
	ticks = z_nrf_rtc_timer_get_ticks(t);
	zassert_true((ticks >= exp_ticks - 1) && (ticks <= exp_ticks),
		     "Unexpected result %d (expected: %d)", ticks, (int)exp_ticks);

	/* Absolute timeout 10ms in the future */
	t = Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() + K_MSEC(10).ticks));
	exp_ticks = z_nrf_rtc_timer_read() + K_MSEC(10).ticks;
	ticks = z_nrf_rtc_timer_get_ticks(t);
	zassert_true((ticks >= exp_ticks - 1) && (ticks <= exp_ticks),
		     "Unexpected result %d (expected: %d)", ticks, (int)exp_ticks);

	/* too far in the future */
	t = Z_TIMEOUT_TICKS(sys_clock_tick_get() + 0x01000001);
	ticks = z_nrf_rtc_timer_get_ticks(t);
	zassert_equal(ticks, -EINVAL, "Unexpected ticks: %d", ticks);
}
209 
210 
/* Timeout callback: derives the event's uptime in microseconds from the
 * current kernel tick count, the matching RTC tick count and the
 * reported expiration time, and stores it into *user_data.
 */
static void sched_handler(int32_t id, uint64_t expire_time, void *user_data)
{
	int64_t now = sys_clock_tick_get();
	/* RTC tick count corresponding to the current kernel tick. */
	int rtc_ticks_now =
	     z_nrf_rtc_timer_get_ticks(Z_TIMEOUT_TICKS(Z_TICK_ABS(now)));
	uint64_t *evt_uptime_us = user_data;

	*evt_uptime_us =
	    k_ticks_to_us_floor64(now - (rtc_ticks_now - expire_time));
}
221 
/* Schedule events at absolute RTC ticks derived from kernel uptime and
 * print the expiry uptime measured by sched_handler(). Results are
 * informational (printed), not asserted.
 */
ZTEST(nrf_rtc_timer, test_absolute_scheduling)
{
	k_timeout_t t;
	int64_t now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
	uint64_t target_us = now_us + 5678;
	uint64_t evt_uptime_us;
	uint64_t rtc_ticks;
	int32_t chan;

	chan = z_nrf_rtc_timer_chan_alloc();
	zassert_true(chan >= 0, "Failed to allocate RTC channel.");

	/* schedule event in 5678us from now */
	t = Z_TIMEOUT_TICKS(Z_TICK_ABS(K_USEC(target_us).ticks));
	rtc_ticks = (uint64_t)z_nrf_rtc_timer_get_ticks(t);

	z_nrf_rtc_timer_set(chan, rtc_ticks, sched_handler, &evt_uptime_us);

	/* Busy-wait past the scheduled moment so the handler has run. */
	k_busy_wait(5678);

	PRINT("RTC event scheduled at %dus for %dus,"
	      "event occured at %dus (uptime)\n",
		(uint32_t)now_us, (uint32_t)target_us, (uint32_t)evt_uptime_us);

	/* schedule event now. */
	now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
	t = Z_TIMEOUT_TICKS(Z_TICK_ABS(K_USEC(now_us).ticks));
	rtc_ticks = (uint64_t)z_nrf_rtc_timer_get_ticks(t);

	z_nrf_rtc_timer_set(chan, rtc_ticks, sched_handler, &evt_uptime_us);

	k_busy_wait(200);

	PRINT("RTC event scheduled now, at %dus,"
	      "event occured at %dus (uptime)\n",
		(uint32_t)now_us, (uint32_t)evt_uptime_us);

	z_nrf_rtc_timer_chan_free(chan);
}
261 
ZTEST(nrf_rtc_timer,test_alloc_free)262 ZTEST(nrf_rtc_timer, test_alloc_free)
263 {
264 	int32_t chan[CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT];
265 	int32_t inv_ch;
266 
267 	for (int i = 0; i < CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT; i++) {
268 		chan[i] = z_nrf_rtc_timer_chan_alloc();
269 		zassert_true(chan[i] >= 0, "Failed to allocate RTC channel.");
270 	}
271 
272 	inv_ch = z_nrf_rtc_timer_chan_alloc();
273 	zassert_equal(inv_ch, -ENOMEM, "Unexpected return value %d", inv_ch);
274 
275 	for (int i = 0; i < CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT; i++) {
276 		z_nrf_rtc_timer_chan_free(chan[i]);
277 	}
278 }
279 
/* Repeatedly schedule short timeouts for 5 seconds while the TIMER0
 * (optionally zero-latency) interrupt interferes, varying the timeout
 * length to shift phase against the fixed interrupt pattern.
 */
ZTEST(nrf_rtc_timer, test_stress)
{
	int x = 0;
	uint32_t start = k_uptime_get_32();
	uint32_t test_time = 5000;
	int32_t chan = z_nrf_rtc_timer_chan_alloc();

	zassert_true(chan >= 0, "Failed to allocate RTC channel.");
	start_zli_timer0();

	do {
		k_timeout_t t = K_USEC(40 + x);

		test_timeout(chan, t, true);
		/* On every iteration modify timeout to randomize it a bit
		 * against fixed zli interrupt pattern.
		 */
		x += 30;
		if (x > 200) {
			x = 0;
		}
	} while ((k_uptime_get_32() - start) < test_time);

	stop_zli_timer0();
	z_nrf_rtc_timer_chan_free(chan);
}
306 
/* Repeatedly set a timeout and overwrite it with a later one just
 * before it expires. Only the rescheduled timeout may fire - exactly
 * once per iteration - proving the first CC value never triggers.
 */
ZTEST(nrf_rtc_timer, test_resetting_cc)
{
	uint32_t start = k_uptime_get_32();
	uint32_t test_time = 1000;
	int32_t chan = z_nrf_rtc_timer_chan_alloc();
	int i = 0;
	int cnt = 0;

	zassert_true(chan >= 0, "Failed to allocate RTC channel.");

	timeout_handler_cnt = 0;

	do {
		uint64_t now = z_nrf_rtc_timer_read();
		struct test_data test_data = {
			.target_time = now + 5,
			.window = 0,
			/* For lower bit width, target_time may be equal to maximum counter value.
			 * In such case, due to PPI connection clearing the timer, counter value
			 * read in the handler may be slightly off the set counter value.
			 */
			.delay = (CONFIG_NRF_RTC_COUNTER_BIT_WIDTH < 24) ? 2 : 0,
			.err = -EINVAL
		};

		/* Set timer but expect that it will never expire because
		 * it will be later on reset.
		 */
		z_nrf_rtc_timer_set(chan, now + 2, timeout_handler, &test_data);

		/* Arbitrary variable delay to reset CC before expiring first
		 * request but very close.
		 */
		k_busy_wait(i);
		i = (i + 1) % 20;

		z_nrf_rtc_timer_set(chan, now + 5, timeout_handler, &test_data);
		/* Busy-wait (us) long enough for the now+5 timeout to expire. */
		k_busy_wait((5 + 1)*31);
		cnt++;
	} while ((k_uptime_get_32() - start) < test_time);

	/* Exactly one handler call per iteration. */
	zassert_equal(timeout_handler_cnt, cnt,
		      "Unexpected timeout count %d (exp: %d)",
		      timeout_handler_cnt, cnt);
	z_nrf_rtc_timer_chan_free(chan);
}
353 
/* Timeout callback which stores the handler latency - current RTC time
 * minus the reported expiration time - into the user-supplied location.
 */
static void overflow_sched_handler(int32_t id, uint64_t expire_time,
				   void *user_data)
{
	uint64_t *latency = user_data;

	*latency = z_nrf_rtc_timer_read() - expire_time;
}
362 
/* This test is to be executed as the last, due to interference in overflow
 * counter, resulting in nRF RTC timer ticks and kernel ticks desynchronization.
 */
ZTEST(nrf_rtc_timer, test_overflow)
{
	/* For bit width lower than default 24, overflow injection is not possible. */
	if (CONFIG_NRF_RTC_COUNTER_BIT_WIDTH < 24) {
		ztest_test_skip();
	}

	PRINT("RTC ticks before overflow injection: %u\r\n",
	      (uint32_t)z_nrf_rtc_timer_read());

	inject_overflow();

	PRINT("RTC ticks after overflow injection: %u\r\n",
	      (uint32_t)z_nrf_rtc_timer_read());

	uint64_t now;
	uint64_t target_time;
	uint64_t evt_uptime;
	int32_t chan;

	chan = z_nrf_rtc_timer_chan_alloc();
	zassert_true(chan >= 0, "Failed to allocate RTC channel.");

	/* Schedule event in 5 ticks from now. */
	/* UINT64_MAX is a sentinel: the handler must overwrite it. */
	evt_uptime = UINT64_MAX;
	now = z_nrf_rtc_timer_read();
	target_time = now + 5;
	z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
			    &evt_uptime);

	/* Busy-wait one tick past the target so the handler has run. */
	k_busy_wait(k_ticks_to_us_floor64(5 + 1));

	PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
	      "event occurred at %llu ticks (uptime)\n",
	      now, target_time, evt_uptime);
	zassert_not_equal(UINT64_MAX, evt_uptime,
			  "Expired timer shall overwrite evt_uptime");

	/* Schedule event now. */
	evt_uptime = UINT64_MAX;
	now = z_nrf_rtc_timer_read();
	target_time = now;

	z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
			    &evt_uptime);

	k_busy_wait(200);

	zassert_not_equal(UINT64_MAX, evt_uptime,
			  "Expired timer shall overwrite evt_uptime");
	PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
	      "event occurred at %llu ticks (uptime)\n",
	      now, target_time, evt_uptime);

	/* Schedule event far in the past. */
	evt_uptime = UINT64_MAX;
	now = z_nrf_rtc_timer_read();
	target_time = now - 2 * NRF_RTC_TIMER_MAX_SCHEDULE_SPAN;

	z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
			    &evt_uptime);

	k_busy_wait(200);

	/* A past target must still expire (immediately). */
	zassert_not_equal(UINT64_MAX, evt_uptime,
			  "Expired timer shall overwrite evt_uptime");
	PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
	      "event occurred at %llu ticks (uptime)\n",
	      now, target_time, evt_uptime);

	z_nrf_rtc_timer_chan_free(chan);
}
438 
/* Self-rescheduling timeout callback: until *timeouts_left reaches
 * zero, it busy-waits an ever-growing amount and re-arms the channel
 * for the very next RTC cycle.
 */
static void next_cycle_timeout_handler(int32_t chan,
				       uint64_t expire_time,
				       void *user_data)
{
	/* Persists across invocations; grows by 1 us per timeout. */
	static uint32_t delay;
	uint32_t *timeouts_left = (uint32_t *)user_data;

	if (--*timeouts_left) {
		k_busy_wait(delay);
		++delay;

		z_nrf_rtc_timer_set(chan, z_nrf_rtc_timer_read() + 1,
			next_cycle_timeout_handler, user_data);
	}
}
454 
ZTEST(nrf_rtc_timer,test_next_cycle_timeouts)455 ZTEST(nrf_rtc_timer, test_next_cycle_timeouts)
456 {
457 	enum {
458 		MAX_TIMEOUTS = 60,
459 		/* Allow 5 cycles per each expected timeout. */
460 		CYCLES_TO_WAIT = 5 * MAX_TIMEOUTS,
461 	};
462 	volatile uint32_t timeouts_left = MAX_TIMEOUTS;
463 	int32_t chan;
464 	uint32_t start;
465 
466 	chan = z_nrf_rtc_timer_chan_alloc();
467 	zassert_true(chan > 0, "Failed to allocate RTC channel.");
468 
469 	/* First timeout is scheduled here, all further ones are scheduled
470 	 * from the timeout handler, always on the next cycle of the system
471 	 * timer but after a delay that increases 1 microsecond each time.
472 	 */
473 	z_nrf_rtc_timer_set(chan, z_nrf_rtc_timer_read() + 1,
474 		next_cycle_timeout_handler, (void *)&timeouts_left);
475 
476 	start = k_cycle_get_32();
477 	while (timeouts_left) {
478 		if ((k_cycle_get_32() - start) > CYCLES_TO_WAIT) {
479 			break;
480 		}
481 		Z_SPIN_DELAY(10);
482 	}
483 
484 	zassert_equal(0, timeouts_left,
485 		"Failed to get %u timeouts.", timeouts_left);
486 
487 	z_nrf_rtc_timer_chan_free(chan);
488 }
489 
/* Timeout callback: raises the flag pointed to by user_data (if any)
 * to signal that the alarm expired.
 */
static void tight_rescheduling_handler(int32_t chan,
				       uint64_t expire_time,
				       void *user_data)
{
	bool *expired_flag = (bool *)user_data;

	if (expired_flag == NULL) {
		return;
	}
	*expired_flag = true;
}
498 
ZTEST(nrf_rtc_timer,test_tight_rescheduling)499 ZTEST(nrf_rtc_timer, test_tight_rescheduling)
500 {
501 	int32_t chan;
502 	volatile bool expired;
503 	/* This test tries to schedule an alarm to CYCLE_DIFF cycles from
504 	 * the current moment and then, after a delay that is changed in
505 	 * each iteration, tries to reschedule this alarm to one cycle later.
506 	 * It does not matter if the first alarm actually occurs, the key
507 	 * thing is to always get the second one.
508 	 */
509 	enum {
510 		CYCLE_DIFF = 5,
511 		/* One RTC cycle is ~30.5 us. Check a range of delays from
512 		 * more than one cycle before the moment on which the first
513 		 * alarm is scheduled to a few microseconds after that alarm
514 		 * (when it is actually too late for rescheduling).
515 		 */
516 		DELAY_MIN = 30 * CYCLE_DIFF - 40,
517 		DELAY_MAX = 30 * CYCLE_DIFF + 10,
518 	};
519 
520 	chan = z_nrf_rtc_timer_chan_alloc();
521 	zassert_true(chan > 0, "Failed to allocate RTC channel.");
522 
523 	/* Repeat the whole test a couple of times to get also (presumably)
524 	 * various micro delays resulting from execution of the test routine
525 	 * itself asynchronously to the RTC.
526 	 */
527 	for (uint32_t i = 0; i < 20; ++i) {
528 		for (uint32_t delay = DELAY_MIN; delay <= DELAY_MAX; ++delay) {
529 			uint64_t start = z_nrf_rtc_timer_read();
530 
531 			z_nrf_rtc_timer_set(chan, start + CYCLE_DIFF,
532 				tight_rescheduling_handler, NULL);
533 
534 			k_busy_wait(delay);
535 
536 			expired = false;
537 			z_nrf_rtc_timer_set(chan, start + CYCLE_DIFF + 1,
538 				tight_rescheduling_handler, (void *)&expired);
539 
540 			while (!expired &&
541 				(z_nrf_rtc_timer_read() - start) <
542 					CYCLE_DIFF + 10) {
543 				Z_SPIN_DELAY(10);
544 			}
545 			zassert_true(expired,
546 				"Timeout expiration missed (d: %u us, i: %u)",
547 				delay, i);
548 		}
549 	}
550 
551 	z_nrf_rtc_timer_chan_free(chan);
552 }
553 
/* Suite setup: runs once before the tests and prepares the interfering
 * TIMER0 interrupt (started/stopped only by test_stress).
 */
static void *rtc_timer_setup(void)
{
	init_zli_timer0();

	return NULL;
}
560 
/* Register the suite; rtc_timer_setup() runs once before the tests. */
ZTEST_SUITE(nrf_rtc_timer, NULL, rtc_timer_setup, NULL, NULL, NULL);
562