1 /*
2  * Copyright (c) 2022, Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/ztest.h>
7 #include <zephyr/drivers/timer/nrf_grtc_timer.h>
8 #include <zephyr/drivers/counter.h>
9 #include <zephyr/drivers/timer/system_timer.h>
10 #include <zephyr/random/random.h>
11 #include <zephyr/logging/log.h>
12 #include <zephyr/busy_sim.h>
13 #include <zephyr/debug/cpu_load.h>
14 #include <nrfx_grtc.h>
15 #include <hal/nrf_grtc.h>
16 LOG_MODULE_REGISTER(test, 1);
17 
18 #define GRTC_SLEW_TICKS 10
19 #define NUMBER_OF_TRIES 2000
20 #define CYC_PER_TICK                                                                               \
21 	((uint64_t)sys_clock_hw_cycles_per_sec() / (uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC)
22 #define TIMER_COUNT_TIME_MS 10
23 #define WAIT_FOR_TIMER_EVENT_TIME_MS TIMER_COUNT_TIME_MS + 5
24 
25 static volatile uint8_t compare_isr_call_counter;
26 
/* GRTC timer compare interrupt handler.
 *
 * Counts invocations in compare_isr_call_counter and logs the user data
 * string passed at z_nrf_grtc_timer_set() time. Runs in interrupt context.
 */
static void timer_compare_interrupt_handler(int32_t id, uint64_t expire_time, void *user_data)
{
	compare_isr_call_counter++;
	TC_PRINT("Compare value reached, user data: '%s'\n", (char *)user_data);
	TC_PRINT("Call counter: %d\n", compare_isr_call_counter);
}
34 
ZTEST(nrf_grtc_timer,test_get_ticks)35 ZTEST(nrf_grtc_timer, test_get_ticks)
36 {
37 	k_timeout_t t = K_MSEC(1);
38 
39 	uint64_t grtc_start_value = z_nrf_grtc_timer_startup_value_get();
40 	uint64_t exp_ticks = z_nrf_grtc_timer_read() + t.ticks * CYC_PER_TICK;
41 	int64_t ticks;
42 
43 	/* Relative 1ms from now timeout converted to GRTC */
44 	ticks = z_nrf_grtc_timer_get_ticks(t);
45 	zassert_true((ticks >= exp_ticks) && (ticks <= (exp_ticks + GRTC_SLEW_TICKS)),
46 		     "Unexpected result %" PRId64 " (expected: %" PRId64 ")", ticks, exp_ticks);
47 
48 	k_msleep(1);
49 
50 	for (uint32_t i = 0; i < NUMBER_OF_TRIES; i++) {
51 		/* Absolute timeout 1ms in the past */
52 		uint64_t curr_tick;
53 		uint64_t curr_grtc_tick;
54 		uint64_t curr_tick2;
55 
56 		do {
57 			/* GRTC and system tick must be read during single system tick. */
58 			curr_tick = sys_clock_tick_get();
59 			curr_grtc_tick = z_nrf_grtc_timer_read();
60 			curr_tick2 = sys_clock_tick_get();
61 		} while (curr_tick != curr_tick2);
62 
63 		curr_tick += (grtc_start_value / CYC_PER_TICK);
64 		t = Z_TIMEOUT_TICKS(Z_TICK_ABS(curr_tick - K_MSEC(1).ticks));
65 
66 		exp_ticks = curr_grtc_tick - K_MSEC(1).ticks * CYC_PER_TICK;
67 		ticks = z_nrf_grtc_timer_get_ticks(t);
68 
69 		zassert_true((ticks >= (exp_ticks - CYC_PER_TICK + 1)) &&
70 				     (ticks <= (exp_ticks + GRTC_SLEW_TICKS)),
71 			     "Unexpected result %" PRId64 " (expected: %" PRId64 ")", ticks,
72 			     exp_ticks);
73 
74 		/* Absolute timeout 10ms in the future */
75 		do {
76 			/* GRTC and system tick must be read during single system tick. */
77 			curr_tick = sys_clock_tick_get();
78 			curr_grtc_tick = z_nrf_grtc_timer_read();
79 			curr_tick2 = sys_clock_tick_get();
80 		} while (curr_tick != curr_tick2);
81 
82 		curr_tick += (grtc_start_value / CYC_PER_TICK);
83 		t = Z_TIMEOUT_TICKS(Z_TICK_ABS(curr_tick + K_MSEC(10).ticks));
84 		exp_ticks = curr_grtc_tick + K_MSEC(10).ticks * CYC_PER_TICK;
85 		ticks = z_nrf_grtc_timer_get_ticks(t);
86 		zassert_true((ticks >= (exp_ticks - CYC_PER_TICK + 1)) &&
87 				     (ticks <= (exp_ticks + GRTC_SLEW_TICKS)),
88 			     "Unexpected result %" PRId64 " (expected: %" PRId64 ")", ticks,
89 			     exp_ticks);
90 	}
91 }
92 
ZTEST(nrf_grtc_timer,test_timer_count_in_compare_mode)93 ZTEST(nrf_grtc_timer, test_timer_count_in_compare_mode)
94 {
95 	int err;
96 	uint64_t test_ticks = 0;
97 	uint64_t compare_value = 0;
98 	char user_data[] = "test_timer_count_in_compare_mode\n";
99 	int32_t channel = z_nrf_grtc_timer_chan_alloc();
100 
101 	TC_PRINT("Allocated GRTC channel %d\n", channel);
102 	if (channel < 0) {
103 		TC_PRINT("Failed to allocate GRTC channel, chan=%d\n", channel);
104 		ztest_test_fail();
105 	}
106 
107 	compare_isr_call_counter = 0;
108 	test_ticks = z_nrf_grtc_timer_get_ticks(K_MSEC(TIMER_COUNT_TIME_MS));
109 	err = z_nrf_grtc_timer_set(channel, test_ticks, timer_compare_interrupt_handler,
110 				   (void *)user_data);
111 
112 	zassert_equal(err, 0, "z_nrf_grtc_timer_set raised an error: %d", err);
113 
114 	z_nrf_grtc_timer_compare_read(channel, &compare_value);
115 	zassert_true(K_TIMEOUT_EQ(K_TICKS(compare_value), K_TICKS(test_ticks)),
116 		     "Compare register set failed");
117 	zassert_equal(err, 0, "Unexpected error raised when setting timer, err: %d", err);
118 
119 	k_sleep(K_MSEC(WAIT_FOR_TIMER_EVENT_TIME_MS));
120 
121 	TC_PRINT("Compare event generated ?: %d\n", z_nrf_grtc_timer_compare_evt_check(channel));
122 	TC_PRINT("Compare event register address: %X\n",
123 		 z_nrf_grtc_timer_compare_evt_address_get(channel));
124 
125 	zassert_equal(compare_isr_call_counter, 1, "Compare isr call counter: %d",
126 		      compare_isr_call_counter);
127 	z_nrf_grtc_timer_chan_free(channel);
128 }
129 
ZTEST(nrf_grtc_timer,test_timer_abort_in_compare_mode)130 ZTEST(nrf_grtc_timer, test_timer_abort_in_compare_mode)
131 {
132 	int err;
133 	uint64_t test_ticks = 0;
134 	uint64_t compare_value = 0;
135 	char user_data[] = "test_timer_abort_in_compare_mode\n";
136 	int32_t channel = z_nrf_grtc_timer_chan_alloc();
137 
138 	TC_PRINT("Allocated GRTC channel %d\n", channel);
139 	if (channel < 0) {
140 		TC_PRINT("Failed to allocate GRTC channel, chan=%d\n", channel);
141 		ztest_test_fail();
142 	}
143 
144 	compare_isr_call_counter = 0;
145 	test_ticks = z_nrf_grtc_timer_get_ticks(K_MSEC(TIMER_COUNT_TIME_MS));
146 	err = z_nrf_grtc_timer_set(channel, test_ticks, timer_compare_interrupt_handler,
147 				   (void *)user_data);
148 	zassert_equal(err, 0, "z_nrf_grtc_timer_set raised an error: %d", err);
149 
150 	z_nrf_grtc_timer_abort(channel);
151 
152 	z_nrf_grtc_timer_compare_read(channel, &compare_value);
153 	zassert_true(K_TIMEOUT_EQ(K_TICKS(compare_value), K_TICKS(test_ticks)),
154 		     "Compare register set failed");
155 
156 	zassert_equal(err, 0, "Unexpected error raised when setting timer, err: %d", err);
157 
158 	k_sleep(K_MSEC(WAIT_FOR_TIMER_EVENT_TIME_MS));
159 	zassert_equal(compare_isr_call_counter, 0, "Compare isr call counter: %d",
160 		      compare_isr_call_counter);
161 	z_nrf_grtc_timer_chan_free(channel);
162 }
163 
/* Lifecycle state of a stress-test timer. TIMER_PREPARE marks a timer that
 * is being (re)configured and must not be acted upon by another context.
 */
enum test_timer_state {
	TIMER_IDLE,
	TIMER_PREPARE,
	TIMER_ACTIVE
};
169 
/* Execution contexts from which stress-test actions are triggered.
 * Values index ctx_cnt[] and ctx_name[].
 */
enum test_ctx {
	TEST_HIGH_PRI,
	TEST_TIMER_CB,
	TEST_THREAD
};
175 
/* Per-timer bookkeeping for the stress test. */
struct test_grtc_timer {
	struct k_timer timer;	/* Kernel timer under test. */
	uint32_t ticks;		/* Last requested timeout in system ticks. */
	uint32_t expire;	/* NOTE(review): not written anywhere visible here. */
	uint32_t start_cnt;	/* Number of k_timer_start() calls. */
	uint32_t expire_cnt;	/* Number of expirations (timer_cb runs). */
	uint32_t abort_cnt;	/* Number of k_timer_stop() calls. */
	uint32_t exp_expire;	/* Expected expiration time in HW cycles. */
	int max_late;		/* Worst-case lateness seen (cycles). */
	int min_late;		/* Best-case lateness seen (cycles). */
	int avg_late;		/* Running average lateness (cycles). */
	uint32_t early_cnt;	/* NOTE(review): reported but never incremented here. */
	enum test_timer_state state;
};
190 
/* Number of timers currently running; shared across ISR/callback/thread. */
static atomic_t test_active_cnt;
static struct test_grtc_timer timers[8];
/* HW cycle count at which the stress test stops. */
static uint32_t test_end;
/* Test thread id, woken from stress_test_actions(). */
static k_tid_t test_tid;
/* Cleared to stop ISR/callback contexts from scheduling further actions. */
static volatile bool test_run;
/* Per-context action counters, indexed by enum test_ctx. */
static uint32_t ctx_cnt[3];
static const char *const ctx_name[] = { "HIGH PRIO ISR", "TIMER CALLBACK", "THREAD" };
198 
/* Perform one random action (start or stop) on timer @p id from context
 * @p ctx.
 *
 * Returns false only when a timer was stopped and no other timer remained
 * active; the caller is expected to retry so that at least one timer keeps
 * running.
 */
static bool stress_test_action(int ctx, int id)
{
	struct test_grtc_timer *timer = &timers[id];

	ctx_cnt[ctx]++;
	if (timer->state == TIMER_ACTIVE) {
		/* Aborting soon to expire timers from higher interrupt priority may lead
		 * to test failures.
		 */
		if (ctx == 0 && (k_timer_remaining_get(&timer->timer) < 5)) {
			return true;
		}

		/* Keep the abort rate below half of the expiration rate. */
		if (timer->abort_cnt < timer->expire_cnt / 2) {
			bool any_active;

			/* TIMER_PREPARE shields the timer from concurrent actions
			 * while it is being stopped.
			 */
			timer->state = TIMER_PREPARE;
			k_timer_stop(&timer->timer);
			timer->abort_cnt++;
			any_active = atomic_dec(&test_active_cnt) > 1;
			timer->state = TIMER_IDLE;

			return any_active;
		}
	} else if (timer->state == TIMER_IDLE) {
		/* Random timeout in the 10..73 system tick range. */
		int ticks = 10 + (sys_rand32_get() & 0x3F);
		k_timeout_t t = K_TICKS(ticks);

		timer->state = TIMER_PREPARE;
		/* Expected expiry in HW cycles, consumed by timer_cb to
		 * compute lateness.
		 */
		timer->exp_expire =  k_ticks_to_cyc_floor32(sys_clock_tick_get_32() + ticks);
		timer->ticks = ticks;
		k_timer_start(&timer->timer, t, K_NO_WAIT);
		atomic_inc(&test_active_cnt);
		timer->start_cnt++;
		timer->state = TIMER_ACTIVE;
	}

	return true;
}
238 
stress_test_actions(int ctx)239 static void stress_test_actions(int ctx)
240 {
241 	uint32_t r = sys_rand32_get();
242 	int action_cnt = max(r & 0x3, 1);
243 	int tmr_id = (r >> 8) % ARRAY_SIZE(timers);
244 
245 	/* Occasionally wake thread context from which timer actions are also executed. */
246 	if ((((r >> 2) & 0x3) == 0) || test_active_cnt < 2) {
247 		LOG_DBG("ctx:%d thread wakeup", ctx);
248 		k_wakeup(test_tid);
249 	}
250 
251 	for (int i = 0; i < action_cnt; i++) {
252 		if (stress_test_action(ctx, tmr_id) == false) {
253 			stress_test_action(ctx, tmr_id);
254 		}
255 	}
256 }
257 
timer_cb(struct k_timer * timer)258 static void timer_cb(struct k_timer *timer)
259 {
260 	struct test_grtc_timer *test_timer = CONTAINER_OF(timer, struct test_grtc_timer, timer);
261 	uint32_t now = k_cycle_get_32();
262 	int diff = now - test_timer->exp_expire;
263 
264 	atomic_dec(&test_active_cnt);
265 	zassert_true(diff >= 0);
266 	test_timer->max_late = MAX(diff, test_timer->max_late);
267 	test_timer->min_late = MIN(diff, test_timer->min_late);
268 
269 	if (test_timer->expire_cnt == 0) {
270 		test_timer->avg_late = diff;
271 	} else {
272 		test_timer->avg_late = (test_timer->avg_late * test_timer->expire_cnt + diff) /
273 				(test_timer->expire_cnt + 1);
274 	}
275 
276 	test_timer->expire_cnt++;
277 	test_timer->state = TIMER_IDLE;
278 
279 	if (test_run) {
280 		stress_test_actions(TEST_TIMER_CB);
281 	}
282 }
283 
counter_set(const struct device * dev,struct counter_alarm_cfg * cfg)284 static void counter_set(const struct device *dev, struct counter_alarm_cfg *cfg)
285 {
286 	int err;
287 	uint32_t us = 150 + (sys_rand32_get() & 0x3F);
288 
289 	cfg->ticks = counter_us_to_ticks(dev, us);
290 	err = counter_set_channel_alarm(dev, 0, cfg);
291 	zassert_equal(err, 0);
292 }
293 
counter_cb(const struct device * dev,uint8_t chan_id,uint32_t ticks,void * user_data)294 static void counter_cb(const struct device *dev, uint8_t chan_id, uint32_t ticks, void *user_data)
295 {
296 	struct counter_alarm_cfg *config = user_data;
297 
298 	if (test_run) {
299 		stress_test_actions(TEST_HIGH_PRI);
300 		counter_set(dev, config);
301 	}
302 }
303 
/* Print test progress in 10% steps between @p start and @p end (ms uptime).
 * State is kept in statics, so this supports a single test run.
 */
static void report_progress(uint32_t start, uint32_t end)
{
	static uint32_t next_report;
	static uint32_t step;
	static uint32_t progress;

	/* First call: derive the 10% reporting step. */
	if (next_report == 0) {
		step = (end - start) / 10;
		next_report = start + step;
	}

	if (k_uptime_get_32() <= next_report) {
		return;
	}

	next_report += step;
	progress += 10;
	printk("%d%%\r", progress);
}
321 
/* Run the 5 s timer stress test.
 *
 * Timer start/stop actions are executed concurrently from up to three
 * contexts: a high-priority counter alarm ISR (when the test_timer
 * devicetree node exists), k_timer callbacks, and this thread. When
 * @p busy_sim_en is set and CONFIG_TEST_BUSY_SIM is enabled, the busy
 * simulator adds random interrupt load on top. At the end all timers must
 * be idle and per-timer statistics are printed.
 */
static void grtc_stress_test(bool busy_sim_en)
{
	static struct counter_alarm_cfg alarm_cfg;
#if DT_NODE_EXISTS(DT_NODELABEL(test_timer)) && DT_NODE_HAS_STATUS(DT_NODELABEL(test_timer), okay)
	const struct device *const counter_dev = DEVICE_DT_GET(DT_NODELABEL(test_timer));
#else
	const struct device *const counter_dev = NULL;
#endif
	uint32_t test_ms = 5000;
	uint32_t test_start = k_uptime_get_32();
	uint32_t load;

	test_end = k_cycle_get_32() + k_ms_to_cyc_floor32(test_ms);
	test_tid = k_current_get();

	for (size_t i = 0; i < ARRAY_SIZE(timers); i++) {
		k_timer_init(&timers[i].timer, timer_cb, NULL);
	}

	/* Reset CPU load measurement so it covers only the test period. */
	if (IS_ENABLED(CONFIG_CPU_LOAD)) {
		(void)cpu_load_get(true);
	}

	if (counter_dev) {
		counter_start(counter_dev);
	}

	alarm_cfg.callback = counter_cb;
	/* The config is handed back to counter_cb so it can re-arm itself. */
	alarm_cfg.user_data = &alarm_cfg;
	test_run = true;

	if (counter_dev) {
		counter_set(counter_dev, &alarm_cfg);
	}

	if (busy_sim_en) {
#ifdef CONFIG_TEST_BUSY_SIM
		busy_sim_start(500, 200, 1000, 400, NULL);
#endif
	}

	LOG_DBG("Starting test, will end at %d", test_end);
	/* The long sleep is normally cut short by k_wakeup() calls issued
	 * from stress_test_actions() in ISR/callback context.
	 */
	while (k_cycle_get_32() < test_end) {
		report_progress(test_start, test_start + test_ms);
		stress_test_actions(TEST_THREAD);
		k_sleep(K_MSEC(test_ms));
	}

	load = IS_ENABLED(CONFIG_CPU_LOAD) ? cpu_load_get(true) : 0;

	/* Stop scheduling new actions, then let in-flight timers expire. */
	test_run = false;
	k_msleep(50);

	for (size_t i = 0; i < ARRAY_SIZE(timers); i++) {
		zassert_equal(timers[i].state, TIMER_IDLE, "Unexpected timer %d state:%d",
				i, timers[i].state);
		TC_PRINT("Timer%d (%p)\r\n\tstart_cnt:%d abort_cnt:%d expire_cnt:%d\n",
			i, &timers[i], timers[i].start_cnt, timers[i].abort_cnt,
			timers[i].expire_cnt);
		TC_PRINT("\tavarage late:%d ticks, max late:%d, min late:%d early:%d\n",
				timers[i].avg_late, timers[i].max_late, timers[i].min_late,
				timers[i].early_cnt);
	}

	for (size_t i = 0; i < ARRAY_SIZE(ctx_cnt); i++) {
		TC_PRINT("Context: %s executed %d times\n", ctx_name[i], ctx_cnt[i]);
	}
	TC_PRINT("CPU load during test:%d.%d\n", load / 10, load % 10);

	if (busy_sim_en) {
#ifdef CONFIG_TEST_BUSY_SIM
		busy_sim_stop();
#endif
	}

	if (counter_dev) {
		counter_stop(counter_dev);
	}

#ifdef CONFIG_COVERAGE
	/* Wait a few seconds before exit, giving the test the
	 * opportunity to dump some output before coverage data gets emitted
	 */
	k_sleep(K_MSEC(5000));
#endif
}
408 
/* Stress test without the busy simulator generating extra interrupt load. */
ZTEST(nrf_grtc_timer, test_stress)
{
	grtc_stress_test(false);
}
413 
/* Test suite registration; no predicate or setup/teardown hooks needed. */
ZTEST_SUITE(nrf_grtc_timer, NULL, NULL, NULL, NULL, NULL);
415