1 /*
2  * Copyright (c) 2016 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/ztest.h>
8 
9 #include <zephyr/app_memory/app_memdomain.h>
10 #ifdef CONFIG_USERSPACE
11 #include <zephyr/sys/libc-hooks.h>
12 #endif
13 #include <zephyr/logging/log_ctrl.h>
14 #include <zephyr/sys/reboot.h>
15 
16 #include <zephyr/llext/symbol.h>
17 
18 #include <zephyr/sys/barrier.h>
19 
20 #ifdef KERNEL
21 static struct k_thread ztest_thread;
22 #endif
23 static bool failed_expectation;
24 
25 #ifdef CONFIG_ZTEST_SHELL
26 #include <zephyr/shell/shell.h>
27 #endif
28 
29 #ifdef CONFIG_ZTEST_SHUFFLE
30 #include <stdlib.h>
31 #include <time.h>
32 #include <zephyr/random/random.h>
33 
34 #define NUM_ITER_PER_SUITE CONFIG_ZTEST_SHUFFLE_SUITE_REPEAT_COUNT
35 #define NUM_ITER_PER_TEST  CONFIG_ZTEST_SHUFFLE_TEST_REPEAT_COUNT
36 #else
37 #define NUM_ITER_PER_SUITE 1
38 #define NUM_ITER_PER_TEST  1
39 #endif
40 
41 #ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
42 #include <coverage.h>
43 #endif
44 
45 /* ZTEST_DMEM and ZTEST_BMEM are used for the application shared memory test  */
46 
47 /**
48  * @brief The current status of the test binary
49  */
enum ztest_status {
	ZTEST_STATUS_OK,             /* No failure recorded so far */
	ZTEST_STATUS_HAS_FAILURE,    /* At least one test reported TC_FAIL */
	ZTEST_STATUS_CRITICAL_ERROR  /* Framework-level error; abort the whole run */
};
55 
56 /**
57  * @brief Tracks the current phase that ztest is operating in.
58  */
59 ZTEST_DMEM enum ztest_phase cur_phase = TEST_PHASE_FRAMEWORK;
60 
61 static ZTEST_BMEM enum ztest_status test_status = ZTEST_STATUS_OK;
62 
63 extern ZTEST_DMEM const struct ztest_arch_api ztest_api;
64 
65 static void __ztest_show_suite_summary(void);
66 
end_report(void)67 static void end_report(void)
68 {
69 	__ztest_show_suite_summary();
70 	if (test_status) {
71 		TC_END_REPORT(TC_FAIL);
72 	} else {
73 		TC_END_REPORT(TC_PASS);
74 	}
75 }
76 
cleanup_test(struct ztest_unit_test * test)77 static int cleanup_test(struct ztest_unit_test *test)
78 {
79 	int ret = TC_PASS;
80 	int mock_status;
81 
82 	mock_status = z_cleanup_mock();
83 
84 #ifdef KERNEL
85 	/* we need to remove the ztest_thread information from the timeout_q.
86 	 * Because we reuse the same k_thread structure this would
87 	 * causes some problems.
88 	 */
89 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
90 		k_thread_abort(&ztest_thread);
91 	}
92 #endif
93 
94 	if (!ret && mock_status == 1) {
95 		PRINT("Test %s failed: Unused mock parameter values\n", test->name);
96 		ret = TC_FAIL;
97 	} else if (!ret && mock_status == 2) {
98 		PRINT("Test %s failed: Unused mock return values\n", test->name);
99 		ret = TC_FAIL;
100 	} else {
101 		;
102 	}
103 
104 	return ret;
105 }
106 
107 #ifdef KERNEL
108 
109 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
110 #define MAX_NUM_CPUHOLD (CONFIG_MP_MAX_NUM_CPUS - 1)
111 #define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
112 
struct cpuhold_pool_item {
	struct k_thread  thread; /* holder thread object for one CPU */
	bool             used;   /* slot owns a live (unreaped) thread */
};
117 
118 static struct cpuhold_pool_item cpuhold_pool_items[MAX_NUM_CPUHOLD + 1];
119 
120 K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD + 1, CPUHOLD_STACK_SZ);
121 
122 static struct k_sem cpuhold_sem;
123 
124 volatile int cpuhold_active;
125 volatile bool cpuhold_spawned;
126 
find_unused_thread(void)127 static int find_unused_thread(void)
128 {
129 	for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
130 		if (!cpuhold_pool_items[i].used) {
131 			return i;
132 		}
133 	}
134 
135 	return -1;
136 }
137 
mark_thread_unused(struct k_thread * thread)138 static void mark_thread_unused(struct k_thread *thread)
139 {
140 	for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
141 		if (&cpuhold_pool_items[i].thread == thread) {
142 			cpuhold_pool_items[i].used = false;
143 		}
144 	}
145 }
146 
/* Spin (with local interrupts masked) until @thread has been switched out
 * on its CPU, i.e. until the scheduler has published a non-NULL
 * switch_handle for it. Callers use this to guarantee the thread is no
 * longer executing before reusing its resources.
 */
static inline void wait_for_thread_to_switch_out(struct k_thread *thread)
{
	unsigned int key = arch_irq_lock();
	/* volatile view of switch_handle: it is written by another CPU's
	 * context-switch path, so re-read it on every loop iteration.
	 */
	volatile void **shp = (void *)&thread->switch_handle;

	while (*shp == NULL) {
		arch_spin_relax();
	}
	/* Read barrier: don't allow any subsequent loads in the
	 * calling code to reorder before we saw switch_handle go
	 * non-null.
	 */
	barrier_dmem_fence_full();

	arch_irq_unlock(key);
}
163 
164 /* "Holds" a CPU for use with the "1cpu" test cases.  Note that we
165  * can't use tools like the cpumask feature because we have tests that
166  * may need to control that configuration themselves.  We do this at
167  * the lowest level, but locking interrupts directly and spinning.
168  */
/* Holder-thread entry point. arg1 is the previous holder thread to wait
 * on (NULL for the initial spawn from z_test_1cpu_start()), arg2 is the
 * hold slot index used for naming, arg3 is unused.
 */
static void cpu_hold(void *arg1, void *arg2, void *arg3)
{
	struct k_thread *thread = arg1;
	unsigned int idx = (unsigned int)(uintptr_t)arg2;
	char tname[CONFIG_THREAD_MAX_NAME_LEN];

	ARG_UNUSED(arg3);

	/* We were scheduled onto CPU0, which must stay free for the test
	 * thread itself: spawn a replacement holder (handing it our own
	 * k_thread so it can wait for us to switch out) and return.
	 */
	if (arch_proc_id() == 0) {
		int i;

		i = find_unused_thread();

		__ASSERT_NO_MSG(i != -1);

		cpuhold_spawned = false;

		cpuhold_pool_items[i].used = true;
		k_thread_create(&cpuhold_pool_items[i].thread,
				cpuhold_stacks[i], CPUHOLD_STACK_SZ,
				cpu_hold, k_current_get(),
				(void *)(uintptr_t)idx, NULL,
				K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);

		/*
		 * Busy-wait until we know the spawned thread is running to
		 * ensure it does not spawn on CPU0.
		 */

		while (!cpuhold_spawned) {
			k_busy_wait(1000);
		}

		return;
	}

	/* We replaced an earlier holder that landed on CPU0: wait until it
	 * is switched out, then return its pool slot.
	 */
	if (thread != NULL) {
		cpuhold_spawned = true;

		/* Busywait until a new thread is scheduled in on CPU0 */

		wait_for_thread_to_switch_out(thread);

		mark_thread_unused(thread);
	}

	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", idx);
		k_thread_name_set(k_current_get(), tname);
	}


	uint32_t dt, start_ms = k_uptime_get_32();
	unsigned int key = arch_irq_lock();

	/* Signal z_test_1cpu_start() that this CPU is now held. */
	k_sem_give(&cpuhold_sem);

#if (defined(CONFIG_ARM64) || defined(CONFIG_RISCV)) && defined(CONFIG_FPU_SHARING)
	/*
	 * We'll be spinning with IRQs disabled. The flush-your-FPU request
	 * IPI will never be serviced during that time. Therefore we flush
	 * the FPU preemptively here to prevent any other CPU waiting after
	 * this CPU forever and deadlock the system.
	 */
	k_float_disable(_current_cpu->arch.fpu_owner);
#endif

	/* Spin with interrupts masked until z_test_1cpu_stop() clears the flag. */
	while (cpuhold_active) {
		k_busy_wait(1000);
	}

	/* Holding the CPU via spinning is expensive, and abusing this
	 * for long-running test cases tends to overload the CI system
	 * (qemu runs separate CPUs in different threads, but the CI
	 * logic views it as one "job") and cause other test failures.
	 */
	dt = k_uptime_get_32() - start_ms;
	zassert_true(dt < CONFIG_ZTEST_CPU_HOLD_TIME_MS,
		     "1cpu test took too long (%d ms)", dt);
	arch_irq_unlock(key);
}
250 #endif /* CONFIG_SMP && (CONFIG_MP_MAX_NUM_CPUS > 1) */
251 
/* Begin a "1cpu" section: park every CPU except the one running the test
 * by spawning one spinning holder thread per extra CPU.
 */
void z_impl_z_test_1cpu_start(void)
{
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	unsigned int num_cpus = arch_num_cpus();

	cpuhold_active = 1;
	k_sem_init(&cpuhold_sem, 0, 999);

	/* Spawn N-1 threads to "hold" the other CPUs, waiting for
	 * each to signal us that it's locked and spinning.
	 */
	for (int i = 0; i < num_cpus - 1; i++) {
		int slot = find_unused_thread();

		__ASSERT_NO_MSG(slot != -1);

		cpuhold_pool_items[slot].used = true;
		k_thread_create(&cpuhold_pool_items[slot].thread,
				cpuhold_stacks[slot], CPUHOLD_STACK_SZ,
				cpu_hold, NULL, (void *)(uintptr_t)i, NULL,
				K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
		k_sem_take(&cpuhold_sem, K_FOREVER);
	}
#endif
}
279 
/* End a "1cpu" section: release the spinning holders and reap every pool
 * thread that is still marked busy.
 */
void z_impl_z_test_1cpu_stop(void)
{
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	cpuhold_active = 0;

	for (int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
		struct cpuhold_pool_item *item = &cpuhold_pool_items[i];

		if (!item->used) {
			continue;
		}
		k_thread_abort(&item->thread);
		item->used = false;
	}
#endif
}
293 
#ifdef CONFIG_USERSPACE
/* Syscall verification handlers: user threads may start/stop the 1cpu hold
 * unconditionally, so the verify stubs just forward to the implementations.
 * The *_mrsh.c includes pull in the generated marshalling code.
 */
void z_vrfy_z_test_1cpu_start(void) { z_impl_z_test_1cpu_start(); }
#include <zephyr/syscalls/z_test_1cpu_start_mrsh.c>

void z_vrfy_z_test_1cpu_stop(void) { z_impl_z_test_1cpu_stop(); }
#include <zephyr/syscalls/z_test_1cpu_stop_mrsh.c>
#endif /* CONFIG_USERSPACE */
301 #endif
302 
run_test_rules(bool is_before,struct ztest_unit_test * test,void * data)303 __maybe_unused static void run_test_rules(bool is_before, struct ztest_unit_test *test, void *data)
304 {
305 	for (struct ztest_test_rule *rule = _ztest_test_rule_list_start;
306 	     rule < _ztest_test_rule_list_end; ++rule) {
307 		if (is_before && rule->before_each) {
308 			rule->before_each(test, data);
309 		} else if (!is_before && rule->after_each) {
310 			rule->after_each(test, data);
311 		}
312 	}
313 }
314 
run_test_functions(struct ztest_suite_node * suite,struct ztest_unit_test * test,void * data)315 static void run_test_functions(struct ztest_suite_node *suite, struct ztest_unit_test *test,
316 			       void *data)
317 {
318 	__ztest_set_test_phase(TEST_PHASE_TEST);
319 	test->test(data);
320 }
321 
322 COND_CODE_1(KERNEL, (ZTEST_BMEM), ()) static enum ztest_result test_result;
323 
get_final_test_result(const struct ztest_unit_test * test,int ret)324 static int get_final_test_result(const struct ztest_unit_test *test, int ret)
325 {
326 	enum ztest_expected_result expected_result = -1;
327 
328 	for (struct ztest_expected_result_entry *expectation =
329 		     _ztest_expected_result_entry_list_start;
330 	     expectation < _ztest_expected_result_entry_list_end; ++expectation) {
331 		if (strcmp(expectation->test_name, test->name) == 0 &&
332 		    strcmp(expectation->test_suite_name, test->test_suite_name) == 0) {
333 			expected_result = expectation->expected_result;
334 			break;
335 		}
336 	}
337 
338 	if (expected_result == ZTEST_EXPECTED_RESULT_FAIL) {
339 		/* Expected a failure:
340 		 * - If we got a failure, return TC_PASS
341 		 * - Otherwise force a failure
342 		 */
343 		return (ret == TC_FAIL) ? TC_PASS : TC_FAIL;
344 	}
345 	if (expected_result == ZTEST_EXPECTED_RESULT_SKIP) {
346 		/* Expected a skip:
347 		 * - If we got a skip, return TC_PASS
348 		 * - Otherwise force a failure
349 		 */
350 		return (ret == TC_SKIP) ? TC_PASS : TC_FAIL;
351 	}
352 	/* No expectation was made, no change is needed. */
353 	return ret;
354 }
355 
356 /**
357  * @brief Get a friendly name string for a given test phrase.
358  *
359  * @param phase an enum ztest_phase value describing the desired test phase
360  * @returns a string name for `phase`
361  */
get_friendly_phase_name(enum ztest_phase phase)362 static inline const char *get_friendly_phase_name(enum ztest_phase phase)
363 {
364 	switch (phase) {
365 	case TEST_PHASE_SETUP:
366 		return "setup";
367 	case TEST_PHASE_BEFORE:
368 		return "before";
369 	case TEST_PHASE_TEST:
370 		return "test";
371 	case TEST_PHASE_AFTER:
372 		return "after";
373 	case TEST_PHASE_TEARDOWN:
374 		return "teardown";
375 	case TEST_PHASE_FRAMEWORK:
376 		return "framework";
377 	default:
378 		return "(unknown)";
379 	}
380 }
381 
382 static bool current_test_failed_assumption;
ztest_skip_failed_assumption(void)383 void ztest_skip_failed_assumption(void)
384 {
385 	if (IS_ENABLED(CONFIG_ZTEST_FAIL_ON_ASSUME)) {
386 		current_test_failed_assumption = true;
387 	}
388 	ztest_test_skip();
389 }
390 
391 #ifndef KERNEL
392 
393 /* Static code analysis tool can raise a violation that the standard header
394  * <setjmp.h> shall not be used.
395  *
396  * setjmp is using in a test code, not in a runtime code, it is acceptable.
397  * It is a deliberate deviation.
398  */
399 #include <setjmp.h> /* parasoft-suppress MISRAC2012-RULE_21_4-a MISRAC2012-RULE_21_4-b*/
400 #include <signal.h>
401 #include <stdlib.h>
402 #include <string.h>
403 
404 #define FAIL_FAST 0
405 
406 static jmp_buf test_fail;
407 static jmp_buf test_pass;
408 static jmp_buf test_skip;
409 static jmp_buf stack_fail;
410 static jmp_buf test_suite_fail;
411 
/* Abort the current test with a failure (non-kernel build). Control
 * transfers via longjmp to the setjmp site armed for the current phase,
 * so this function does not return for any valid phase.
 */
void ztest_test_fail(void)
{
	switch (cur_phase) {
	case TEST_PHASE_SETUP:
		/* A failure in suite setup() fails the entire suite. */
		PRINT(" at %s function\n", get_friendly_phase_name(cur_phase));
		longjmp(test_suite_fail, 1);
	case TEST_PHASE_BEFORE:
	case TEST_PHASE_TEST:
		/* A failure in before() or the test body fails just this test. */
		PRINT(" at %s function\n", get_friendly_phase_name(cur_phase));
		longjmp(test_fail, 1);
	case TEST_PHASE_AFTER:
	case TEST_PHASE_TEARDOWN:
	case TEST_PHASE_FRAMEWORK:
		/* Failing while unwinding is unrecoverable: bail out hard. */
		PRINT(" ERROR: cannot fail in test phase '%s()', bailing\n",
		      get_friendly_phase_name(cur_phase));
		longjmp(stack_fail, 1);
	}
}
EXPORT_SYMBOL(ztest_test_fail);
431 
ztest_test_pass(void)432 void ztest_test_pass(void)
433 {
434 	if (cur_phase == TEST_PHASE_TEST) {
435 		longjmp(test_pass, 1);
436 	}
437 	PRINT(" ERROR: cannot pass in test phase '%s()', bailing\n",
438 	      get_friendly_phase_name(cur_phase));
439 	longjmp(stack_fail, 1);
440 }
441 EXPORT_SYMBOL(ztest_test_pass);
442 
ztest_test_skip(void)443 void ztest_test_skip(void)
444 {
445 	switch (cur_phase) {
446 	case TEST_PHASE_SETUP:
447 	case TEST_PHASE_BEFORE:
448 	case TEST_PHASE_TEST:
449 		longjmp(test_skip, 1);
450 	default:
451 		PRINT(" ERROR: cannot skip in test phase '%s()', bailing\n",
452 		      get_friendly_phase_name(cur_phase));
453 		longjmp(stack_fail, 1);
454 	}
455 }
456 EXPORT_SYMBOL(ztest_test_skip);
457 
ztest_test_expect_fail(void)458 void ztest_test_expect_fail(void)
459 {
460 	failed_expectation = true;
461 
462 	switch (cur_phase) {
463 	case TEST_PHASE_SETUP:
464 		PRINT(" at %s function\n", get_friendly_phase_name(cur_phase));
465 		break;
466 	case TEST_PHASE_BEFORE:
467 	case TEST_PHASE_TEST:
468 		PRINT(" at %s function\n", get_friendly_phase_name(cur_phase));
469 		break;
470 	case TEST_PHASE_AFTER:
471 	case TEST_PHASE_TEARDOWN:
472 	case TEST_PHASE_FRAMEWORK:
473 		PRINT(" ERROR: cannot fail in test phase '%s()', bailing\n",
474 		      get_friendly_phase_name(cur_phase));
475 		longjmp(stack_fail, 1);
476 	}
477 }
478 
/**
 * @brief Run a single unit test (non-kernel build).
 *
 * Uses setjmp/longjmp for control flow: ztest_test_fail/pass/skip() jump
 * back to the buffers armed below, so the code after "out:" runs exactly
 * once per test no matter how the test terminated.
 *
 * @return TC_PASS, TC_FAIL or TC_SKIP (after applying expectations)
 */
static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
{
	int ret = TC_PASS;

	TC_START(test->name);
	__ztest_set_test_phase(TEST_PHASE_BEFORE);

	/* Suite setup() already failed: fail this test without running it. */
	if (test_result == ZTEST_RESULT_SUITE_FAIL) {
		ret = TC_FAIL;
		goto out;
	}

	/* Arm the longjmp targets used by ztest_test_fail/pass/skip(). */
	if (setjmp(test_fail)) {
		ret = TC_FAIL;
		goto out;
	}

	if (setjmp(test_pass)) {
		ret = TC_PASS;
		goto out;
	}

	if (setjmp(test_skip)) {
		ret = TC_SKIP;
		goto out;
	}

	run_test_rules(/*is_before=*/true, test, data);
	if (suite->before) {
		suite->before(data);
	}
	run_test_functions(suite, test, data);
out:
	/* An unconsumed ztest_test_expect_fail() turns an otherwise-passing
	 * test into a failure.
	 */
	if (failed_expectation) {
		failed_expectation = false;
		ret = TC_FAIL;
	}

	__ztest_set_test_phase(TEST_PHASE_AFTER);
	if (test_result != ZTEST_RESULT_SUITE_FAIL) {
		if (suite->after != NULL) {
			suite->after(data);
		}
		run_test_rules(/*is_before=*/false, test, data);
	}
	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);
	ret |= cleanup_test(test);

	ret = get_final_test_result(test, ret);
	Z_TC_END_RESULT(ret, test->name);
	/* A skip caused by a failed zassume still fails the binary overall
	 * when CONFIG_ZTEST_FAIL_ON_ASSUME is enabled.
	 */
	if (ret == TC_SKIP && current_test_failed_assumption) {
		test_status = 1;
	}

	return ret;
}
535 
536 #else /* KERNEL */
537 
538 /* Zephyr's probably going to cause all tests to fail if one test fails, so
539  * skip the rest of tests if one of them fails
540  */
541 #ifdef CONFIG_ZTEST_FAIL_FAST
542 #define FAIL_FAST 1
543 #else
544 #define FAIL_FAST 0
545 #endif
546 
547 K_THREAD_STACK_DEFINE(ztest_thread_stack, CONFIG_ZTEST_STACK_SIZE + CONFIG_TEST_EXTRA_STACK_SIZE);
548 
test_finalize(void)549 static void test_finalize(void)
550 {
551 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
552 		k_thread_abort(&ztest_thread);
553 		if (k_is_in_isr()) {
554 			return;
555 		}
556 
557 		k_thread_abort(k_current_get());
558 		CODE_UNREACHABLE;
559 	}
560 }
561 
ztest_test_fail(void)562 void ztest_test_fail(void)
563 {
564 	switch (cur_phase) {
565 	case TEST_PHASE_SETUP:
566 		__ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
567 		break;
568 	case TEST_PHASE_BEFORE:
569 	case TEST_PHASE_TEST:
570 		__ztest_set_test_result(ZTEST_RESULT_FAIL);
571 		test_finalize();
572 		break;
573 	default:
574 		PRINT(" ERROR: cannot fail in test phase '%s()', bailing\n",
575 		      get_friendly_phase_name(cur_phase));
576 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
577 		break;
578 	}
579 }
580 EXPORT_SYMBOL(ztest_test_fail);
581 
ztest_test_pass(void)582 void ztest_test_pass(void)
583 {
584 	switch (cur_phase) {
585 	case TEST_PHASE_TEST:
586 		__ztest_set_test_result(ZTEST_RESULT_PASS);
587 		test_finalize();
588 		break;
589 	default:
590 		PRINT(" ERROR: cannot pass in test phase '%s()', bailing\n",
591 		      get_friendly_phase_name(cur_phase));
592 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
593 		if (cur_phase == TEST_PHASE_BEFORE) {
594 			test_finalize();
595 		}
596 	}
597 }
598 EXPORT_SYMBOL(ztest_test_pass);
599 
ztest_test_skip(void)600 void ztest_test_skip(void)
601 {
602 	switch (cur_phase) {
603 	case TEST_PHASE_SETUP:
604 		__ztest_set_test_result(ZTEST_RESULT_SUITE_SKIP);
605 		break;
606 	case TEST_PHASE_BEFORE:
607 	case TEST_PHASE_TEST:
608 		__ztest_set_test_result(ZTEST_RESULT_SKIP);
609 		test_finalize();
610 		break;
611 	default:
612 		PRINT(" ERROR: cannot skip in test phase '%s()', bailing\n",
613 		      get_friendly_phase_name(cur_phase));
614 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
615 		break;
616 	}
617 }
618 EXPORT_SYMBOL(ztest_test_skip);
619 
/* Record that the current test expects a failure (kernel build); the flag
 * is consumed and converted to a result in run_test().
 */
void ztest_test_expect_fail(void)
{
	failed_expectation = true;
}
624 
/* Convenience before-hook: enter a 1cpu section for the duration of each
 * test in the suite.
 */
void ztest_simple_1cpu_before(void *data)
{
	ARG_UNUSED(data);

	z_test_1cpu_start();
}
630 
/* Convenience after-hook: leave the 1cpu section entered by
 * ztest_simple_1cpu_before().
 */
void ztest_simple_1cpu_after(void *data)
{
	ARG_UNUSED(data);

	z_test_1cpu_stop();
}
636 
/* Test-thread entry point: a = suite node, b = unit test, c = suite data.
 * Runs before-hooks in supervisor mode, then (when the test requests user
 * mode) re-enters itself via k_thread_user_mode_enter(); the user-mode
 * pass skips the hook branch because k_is_user_context() is then true.
 */
static void test_cb(void *a, void *b, void *c)
{
	struct ztest_suite_node *suite = a;
	struct ztest_unit_test *test = b;
	const bool config_user_mode = FIELD_GET(K_USER, test->thread_options) != 0;

	if (!IS_ENABLED(CONFIG_USERSPACE) || !k_is_user_context()) {
		__ztest_set_test_result(ZTEST_RESULT_PENDING);
		run_test_rules(/*is_before=*/true, test, /*data=*/c);
		if (suite->before) {
			suite->before(/*data=*/c);
		}
		if (IS_ENABLED(CONFIG_USERSPACE) && config_user_mode) {
			/* Re-enter this function in user mode; does not return. */
			k_thread_user_mode_enter(test_cb, a, b, c);
		}
	}
	run_test_functions(suite, test, c);
	/* Reaching here means the test body returned normally. */
	__ztest_set_test_result(ZTEST_RESULT_PASS);
}
656 
/**
 * @brief Run a single unit test (kernel build).
 *
 * With multithreading, the test executes in a dedicated thread
 * (ztest_thread) so it can be aborted by fail/skip and can optionally
 * drop to user mode; otherwise the hooks and test body run inline.
 * The thread reports its outcome through the shared test_result variable.
 *
 * @return TC_PASS, TC_FAIL or TC_SKIP (after applying expectations)
 */
static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
{
	int ret = TC_PASS;

#if CONFIG_ZTEST_TEST_DELAY_MS > 0
	k_busy_wait(CONFIG_ZTEST_TEST_DELAY_MS * USEC_PER_MSEC);
#endif
	TC_START(test->name);

	__ztest_set_test_phase(TEST_PHASE_BEFORE);

	/* If the suite's setup function marked us as skipped, don't bother
	 * running the tests.
	 */
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		get_start_time_cyc();
		/* Created K_FOREVER so it can be named and granted access
		 * before it actually starts.
		 */
		k_thread_create(&ztest_thread, ztest_thread_stack,
				K_THREAD_STACK_SIZEOF(ztest_thread_stack),
				test_cb, suite, test, data,
				CONFIG_ZTEST_THREAD_PRIORITY,
				K_INHERIT_PERMS, K_FOREVER);

		k_thread_access_grant(&ztest_thread, suite, test, suite->stats);
		if (test->name != NULL) {
			k_thread_name_set(&ztest_thread, test->name);
		}
		/* Only start the thread if we're not skipping the suite */
		if (test_result != ZTEST_RESULT_SUITE_SKIP &&
		    test_result != ZTEST_RESULT_SUITE_FAIL) {
			k_thread_start(&ztest_thread);
			k_thread_join(&ztest_thread, K_FOREVER);
		}
	} else if (test_result != ZTEST_RESULT_SUITE_SKIP &&
		   test_result != ZTEST_RESULT_SUITE_FAIL) {
		/* Single-threaded: run rules, before-hook and test inline. */
		__ztest_set_test_result(ZTEST_RESULT_PENDING);
		get_start_time_cyc();
		run_test_rules(/*is_before=*/true, test, data);
		if (suite->before) {
			suite->before(data);
		}
		run_test_functions(suite, test, data);
	}

	__ztest_set_test_phase(TEST_PHASE_AFTER);
	if (suite->after != NULL) {
		suite->after(data);
	}
	run_test_rules(/*is_before=*/false, test, data);

	/* Track the worst-case duration across repeated runs. */
	get_test_duration_ms();
	if (tc_spend_time > test->stats->duration_worst_ms) {
		test->stats->duration_worst_ms = tc_spend_time;
	}

	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);

	/* Flush all logs in case deferred mode and default logging thread are used. */
	while (IS_ENABLED(CONFIG_TEST_LOGGING_FLUSH_AFTER_TEST) &&
	       IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && log_data_pending()) {
		k_msleep(100);
	}

	/* Fold the thread-reported result (and any unmet failure
	 * expectation) into the TC_* return code.
	 */
	if (test_result == ZTEST_RESULT_FAIL || test_result == ZTEST_RESULT_SUITE_FAIL ||
	    failed_expectation) {
		ret = TC_FAIL;
		failed_expectation = false;
	} else if (test_result == ZTEST_RESULT_SKIP || test_result == ZTEST_RESULT_SUITE_SKIP) {
		ret = TC_SKIP;
	}

	if (test_result == ZTEST_RESULT_PASS || !FAIL_FAST) {
		ret |= cleanup_test(test);
	}

	ret = get_final_test_result(test, ret);
	Z_TC_END_RESULT(ret, test->name);
	/* A skip caused by a failed zassume still fails the binary overall
	 * when CONFIG_ZTEST_FAIL_ON_ASSUME is enabled.
	 */
	if (ret == TC_SKIP && current_test_failed_assumption) {
		test_status = 1;
	}

	return ret;
}
739 
740 #endif /* !KERNEL */
741 
ztest_find_test_suite(const char * name)742 static struct ztest_suite_node *ztest_find_test_suite(const char *name)
743 {
744 	struct ztest_suite_node *node;
745 
746 	for (node = _ztest_suite_node_list_start; node < _ztest_suite_node_list_end; ++node) {
747 		if (strcmp(name, node->name) == 0) {
748 			return node;
749 		}
750 	}
751 
752 	return NULL;
753 }
754 
z_ztest_get_next_test(const char * suite,struct ztest_unit_test * prev)755 struct ztest_unit_test *z_ztest_get_next_test(const char *suite, struct ztest_unit_test *prev)
756 {
757 	struct ztest_unit_test *test = (prev == NULL) ? _ztest_unit_test_list_start : prev + 1;
758 
759 	for (; test < _ztest_unit_test_list_end; ++test) {
760 		if (strcmp(suite, test->test_suite_name) == 0) {
761 			return test;
762 		}
763 	}
764 	return NULL;
765 }
766 
767 #if CONFIG_ZTEST_SHUFFLE
/**
 * @brief Build (and optionally shuffle) an array of element pointers.
 *
 * Fills @a dest with pointers to the @a num_items elements of the array
 * starting at @a start (each @a element_size bytes apart), then applies a
 * Fisher-Yates shuffle when @a shuffle is true.
 *
 * @param shuffle      when false, dest keeps declaration order
 * @param dest         output array of @a num_items pointers
 * @param start        address of the first element
 * @param num_items    number of elements (may be zero)
 * @param element_size size of one element in bytes
 */
static void z_ztest_shuffle(bool shuffle, void *dest[], intptr_t start,
			    size_t num_items, size_t element_size)
{
	/* Guard the empty case: with num_items == 0 the shuffle loop below
	 * would start at (size_t)-1 and index far out of bounds.
	 */
	if (num_items == 0) {
		return;
	}

	/* Initialize dest array */
	for (size_t i = 0; i < num_items; ++i) {
		dest[i] = (void *)(start + (i * element_size));
	}

	/* Fisher-Yates shuffle of the dest array */
	if (shuffle) {
		for (size_t i = num_items - 1; i > 0; i--) {
			size_t j = sys_rand32_get() % (i + 1);

			if (i != j) {
				void *tmp = dest[j];

				dest[j] = dest[i];
				dest[i] = tmp;
			}
		}
	}
}
790 #endif
791 
/**
 * @brief Run all matching tests of one suite, repeating the case loop
 *        @a case_iter times (suite_iter repetition happens in the caller).
 *
 * Runs setup() once, then each selected test, then teardown() once.
 *
 * @return the number of failed test executions, or -1 / test_status on a
 *         framework error
 */
static int z_ztest_run_test_suite_ptr(struct ztest_suite_node *suite,
				      bool shuffle, int suite_iter, int case_iter)
{
	struct ztest_unit_test *test = NULL;
	void *data = NULL;
	int fail = 0;
	int tc_result = TC_PASS;

	/* With fail-fast, don't even start a suite once something failed. */
	if (FAIL_FAST && test_status != ZTEST_STATUS_OK) {
		return test_status;
	}

	if (suite == NULL) {
		test_status = ZTEST_STATUS_CRITICAL_ERROR;
		return -1;
	}

#ifndef KERNEL
	/* Catch-all target for unrecoverable errors signalled through
	 * longjmp(stack_fail): report and terminate the process.
	 */
	if (setjmp(stack_fail)) {
		PRINT("TESTSUITE crashed.\n");
		test_status = ZTEST_STATUS_CRITICAL_ERROR;
		end_report();
		exit(1);
	}
#else
	k_object_access_all_grant(&ztest_thread);
#endif

	TC_SUITE_START(suite->name);
	current_test_failed_assumption = false;
	__ztest_set_test_result(ZTEST_RESULT_PENDING);
	__ztest_set_test_phase(TEST_PHASE_SETUP);
#ifndef KERNEL
	/* ztest_test_fail() during setup() longjmps here, failing the suite. */
	if (setjmp(test_suite_fail)) {
		__ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
	}
#endif
	if (test_result != ZTEST_RESULT_SUITE_FAIL && suite->setup != NULL) {
		data = suite->setup();
	}

	for (int i = 0; i < case_iter; i++) {
#ifdef CONFIG_ZTEST_SHUFFLE
		/* Build a (possibly shuffled) view over ALL registered tests;
		 * tests from other suites are filtered out below.
		 */
		struct ztest_unit_test *tests_to_run[ZTEST_TEST_COUNT];

		memset(tests_to_run, 0, ZTEST_TEST_COUNT * sizeof(struct ztest_unit_test *));
		z_ztest_shuffle(shuffle, (void **)tests_to_run,
				(intptr_t)_ztest_unit_test_list_start,
				ZTEST_TEST_COUNT, sizeof(struct ztest_unit_test));
		for (size_t j = 0; j < ZTEST_TEST_COUNT; ++j) {
			test = tests_to_run[j];
			/* Make sure that the test belongs to this suite */
			if (strcmp(suite->name, test->test_suite_name) != 0) {
				continue;
			}
			if (ztest_api.should_test_run(suite->name, test->name)) {
				test->stats->run_count++;
				tc_result = run_test(suite, test, data);
				if (tc_result == TC_PASS) {
					test->stats->pass_count++;
				} else if (tc_result == TC_SKIP) {
					test->stats->skip_count++;
				} else if (tc_result == TC_FAIL) {
					test->stats->fail_count++;
				}
				if (tc_result == TC_FAIL) {
					fail++;
				}
			}

			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
				break;
			}
		}
#else
		/* Iterate this suite's tests in declaration order. */
		while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
			if (ztest_api.should_test_run(suite->name, test->name)) {
				test->stats->run_count++;
				tc_result = run_test(suite, test, data);
				if (tc_result == TC_PASS) {
					test->stats->pass_count++;
				} else if (tc_result == TC_SKIP) {
					test->stats->skip_count++;
				} else if (tc_result == TC_FAIL) {
					test->stats->fail_count++;
				}

				if (tc_result == TC_FAIL) {
					fail++;
				}
			}

			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
				break;
			}
		}
#endif
		if (test_status == ZTEST_STATUS_OK && fail != 0) {
			test_status = ZTEST_STATUS_HAS_FAILURE;
		}
	}

	TC_SUITE_END(suite->name, (fail > 0 ? TC_FAIL : TC_PASS));
	__ztest_set_test_phase(TEST_PHASE_TEARDOWN);
	if (suite->teardown != NULL) {
		suite->teardown(data);
	}

	return fail;
}
902 
/* Run a suite selected by name; delegates to z_ztest_run_test_suite_ptr()
 * (which handles a NULL lookup result as a critical error).
 */
int z_ztest_run_test_suite(const char *name, bool shuffle, int suite_iter, int case_iter)
{
	struct ztest_suite_node *suite = ztest_find_test_suite(name);

	return z_ztest_run_test_suite_ptr(suite, shuffle, suite_iter, case_iter);
}
908 
909 #ifdef CONFIG_USERSPACE
910 K_APPMEM_PARTITION_DEFINE(ztest_mem_partition);
911 #endif
912 
__ztest_init_unit_test_result_for_suite(struct ztest_suite_node * suite)913 static void __ztest_init_unit_test_result_for_suite(struct ztest_suite_node *suite)
914 {
915 	struct ztest_unit_test *test = NULL;
916 
917 	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
918 		test->stats->run_count = 0;
919 		test->stats->skip_count = 0;
920 		test->stats->fail_count = 0;
921 		test->stats->pass_count = 0;
922 		test->stats->duration_worst_ms = 0;
923 	}
924 }
925 
flush_log(void)926 static void flush_log(void)
927 {
928 	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
929 		while (log_data_pending()) {
930 			k_sleep(K_MSEC(10));
931 		}
932 		k_sleep(K_MSEC(10));
933 	} else {
934 		while (LOG_PROCESS()) {
935 		}
936 	}
937 }
938 
939 /* Show one line summary for a test suite.
940  */
__ztest_show_suite_summary_oneline(struct ztest_suite_node * suite)941 static void __ztest_show_suite_summary_oneline(struct ztest_suite_node *suite)
942 {
943 	int distinct_pass = 0, distinct_fail = 0, distinct_skip = 0, distinct_total = 0;
944 	int effective_total = 0;
945 	int expanded_pass = 0, expanded_passrate = 0;
946 	int passrate_major = 0, passrate_minor = 0, passrate_tail = 0;
947 	int suite_result = TC_PASS;
948 
949 	struct ztest_unit_test *test = NULL;
950 	unsigned int suite_duration_worst_ms = 0;
951 
952 	/** summary of disctinct run  */
953 	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
954 		distinct_total++;
955 		suite_duration_worst_ms += test->stats->duration_worst_ms;
956 		if (test->stats->skip_count == test->stats->run_count) {
957 			distinct_skip++;
958 		} else if (test->stats->pass_count == test->stats->run_count) {
959 			distinct_pass++;
960 		} else {
961 			distinct_fail++;
962 		}
963 	}
964 
965 	if (distinct_skip == distinct_total) {
966 		suite_result = TC_SKIP;
967 		passrate_major = passrate_minor = 0;
968 	} else {
969 		suite_result = (distinct_fail > 0) ? TC_FAIL : TC_PASS;
970 		effective_total = distinct_total - distinct_skip;
971 		expanded_pass = distinct_pass * 100000;
972 		expanded_passrate = expanded_pass / effective_total;
973 		passrate_major = expanded_passrate / 1000;
974 		passrate_minor = (expanded_passrate - passrate_major * 1000) / 10;
975 		passrate_tail = expanded_passrate - passrate_major * 1000 - passrate_minor * 10;
976 		if (passrate_tail >= 5) { /* rounding */
977 			passrate_minor++;
978 		}
979 	}
980 
981 	TC_SUMMARY_PRINT("SUITE %s - %3d.%02d%% [%s]: pass = %d, fail = %d, "
982 				"skip = %d, total = %d duration = %u.%03u seconds\n",
983 				TC_RESULT_TO_STR(suite_result),
984 				passrate_major, passrate_minor,
985 				suite->name, distinct_pass, distinct_fail,
986 				distinct_skip, distinct_total,
987 				suite_duration_worst_ms / 1000, suite_duration_worst_ms % 1000);
988 	flush_log();
989 }
990 
/* Print one line per test of @suite when CONFIG_ZTEST_VERBOSE_SUMMARY is
 * enabled: verdict (pass/skip/fail, or "flaky" when repeated runs
 * disagreed), name and worst-case duration.
 */
static void __ztest_show_suite_summary_verbose(struct ztest_suite_node *suite)
{
	struct ztest_unit_test *test = NULL;
	int tc_result = TC_PASS;
	int flush_frequency = 0;

	if (IS_ENABLED(CONFIG_ZTEST_VERBOSE_SUMMARY) == 0) {
		return;
	}

	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
		/* Classify across all repeated runs of this test. */
		if (test->stats->skip_count == test->stats->run_count) {
			tc_result = TC_SKIP;
		} else if (test->stats->pass_count == test->stats->run_count) {
			tc_result = TC_PASS;
		} else if (test->stats->pass_count == 0) {
			tc_result = TC_FAIL;
		} else {
			/* Some runs passed, some failed. */
			tc_result = TC_FLAKY;
		}

		if (tc_result == TC_FLAKY) {
			TC_SUMMARY_PRINT(" - %s - [%s.%s] - (Failed %d of %d attempts)"
					 " - duration = %u.%03u seconds\n",
					TC_RESULT_TO_STR(tc_result),
					test->test_suite_name, test->name,
					test->stats->run_count - test->stats->pass_count,
					test->stats->run_count,
					test->stats->duration_worst_ms / 1000,
					test->stats->duration_worst_ms % 1000);
		} else {
			TC_SUMMARY_PRINT(" - %s - [%s.%s] duration = %u.%03u seconds\n",
					TC_RESULT_TO_STR(tc_result),
					test->test_suite_name, test->name,
					test->stats->duration_worst_ms / 1000,
					test->stats->duration_worst_ms % 1000);
		}

		if (flush_frequency % 3 == 0) {
			/** Reduce the flush frequency a bit to speed up the output */
			flush_log();
		}
		flush_frequency++;
	}
	TC_SUMMARY_PRINT("\n");
	flush_log();
}
1038 
__ztest_show_suite_summary(void)1039 static void __ztest_show_suite_summary(void)
1040 {
1041 	if (IS_ENABLED(CONFIG_ZTEST_SUMMARY) == 0) {
1042 		return;
1043 	}
1044 	/* Flush the log a lot to ensure that no summary content
1045 	 * is dropped if it goes through the logging subsystem.
1046 	 */
1047 	flush_log();
1048 	TC_SUMMARY_PRINT("\n------ TESTSUITE SUMMARY START ------\n\n");
1049 	flush_log();
1050 	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1051 	     ptr < _ztest_suite_node_list_end; ++ptr) {
1052 
1053 		__ztest_show_suite_summary_oneline(ptr);
1054 		__ztest_show_suite_summary_verbose(ptr);
1055 	}
1056 	TC_SUMMARY_PRINT("------ TESTSUITE SUMMARY END ------\n\n");
1057 	flush_log();
1058 }
1059 
/**
 * @brief Run one test suite @a suite_iter times, updating its stats.
 *
 * Each iteration first consults ztest_api.should_suite_run(); skipped
 * iterations bump skip_count, executed ones bump run_count (and
 * fail_count when the suite reports failures).
 *
 * @return Number of iterations that actually executed the suite.
 */
static int __ztest_run_test_suite(struct ztest_suite_node *ptr,
			const void *state, bool shuffle, int suite_iter, int case_iter)
{
	struct ztest_suite_stats *stats = ptr->stats;
	int executed = 0;

	for (int iter = 0; iter < suite_iter; iter++) {
		if (!ztest_api.should_suite_run(state, ptr)) {
			stats->skip_count++;
			continue;
		}

		int fail = z_ztest_run_test_suite_ptr(ptr, shuffle, suite_iter, case_iter);

		executed++;
		stats->run_count++;
		if (fail != 0) {
			stats->fail_count++;
		}
	}

	return executed;
}
1080 
/**
 * @brief Run all registered test suites, optionally shuffled and repeated.
 *
 * @param state Application state handed to ztest_api.should_suite_run() to
 *              filter which suites execute.
 * @param shuffle When true (with CONFIG_ZTEST_SHUFFLE), randomize the order.
 * @param suite_iter Repeat count for each suite.
 * @param case_iter Repeat count for each test case within a suite.
 *
 * @return Total number of suite executions performed.
 */
int z_impl_ztest_run_test_suites(const void *state, bool shuffle, int suite_iter, int case_iter)
{
	int count = 0;

	/* After a critical error further results are meaningless; run nothing. */
	if (test_status == ZTEST_STATUS_CRITICAL_ERROR) {
		return count;
	}

#ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
	/* Drop coverage accumulated during boot/framework setup. */
	gcov_reset_all_counts();
#endif

#ifdef CONFIG_ZTEST_SHUFFLE
	/* Build a (possibly shuffled) array of suite pointers, then run them
	 * in that order.
	 */
	struct ztest_suite_node *suites_to_run[ZTEST_SUITE_COUNT];

	memset(suites_to_run, 0, ZTEST_SUITE_COUNT * sizeof(struct ztest_suite_node *));
	z_ztest_shuffle(shuffle, (void **)suites_to_run, (intptr_t)_ztest_suite_node_list_start,
			ZTEST_SUITE_COUNT, sizeof(struct ztest_suite_node));
	for (size_t i = 0; i < ZTEST_SUITE_COUNT; ++i) {
		__ztest_init_unit_test_result_for_suite(suites_to_run[i]);
	}
	for (size_t i = 0; i < ZTEST_SUITE_COUNT; ++i) {
		count += __ztest_run_test_suite(suites_to_run[i],
				state, shuffle, suite_iter, case_iter);
		/* Stop running tests if we have a critical error or if we have a failure and
		 * FAIL_FAST was set
		 */
		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
				(test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
			break;
		}
	}
#else
	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
			ptr < _ztest_suite_node_list_end; ++ptr) {
		__ztest_init_unit_test_result_for_suite(ptr);
		count += __ztest_run_test_suite(ptr, state, shuffle, suite_iter, case_iter);
		/* Stop running tests if we have a critical error or if we have a failure and
		 * FAIL_FAST was set
		 */
		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
				(test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
			break;
		}
	}
#endif

	return count;
}
1130 
/* Syscall implementation: record the result of the currently running test. */
void z_impl___ztest_set_test_result(enum ztest_result new_result)
{
	test_result = new_result;
}
1135 
/* Syscall implementation: track which ztest phase (setup/test/teardown/...)
 * is currently executing.
 */
void z_impl___ztest_set_test_phase(enum ztest_phase new_phase)
{
	cur_phase = new_phase;
}
1140 
1141 #ifdef CONFIG_USERSPACE
/* Userspace syscall verifier: no validation needed, forward to the impl. */
void z_vrfy___ztest_set_test_result(enum ztest_result new_result)
{
	z_impl___ztest_set_test_result(new_result);
}
1146 #include <zephyr/syscalls/__ztest_set_test_result_mrsh.c>
1147 
/* Userspace syscall verifier: no validation needed, forward to the impl. */
void z_vrfy___ztest_set_test_phase(enum ztest_phase new_phase)
{
	z_impl___ztest_set_test_phase(new_phase);
}
1152 #include <zephyr/syscalls/__ztest_set_test_phase_mrsh.c>
1153 #endif /* CONFIG_USERSPACE */
1154 
ztest_verify_all_test_suites_ran(void)1155 void ztest_verify_all_test_suites_ran(void)
1156 {
1157 	bool all_tests_run = true;
1158 	struct ztest_suite_node *suite;
1159 	struct ztest_unit_test *test;
1160 
1161 	if (IS_ENABLED(CONFIG_ZTEST_VERIFY_RUN_ALL)) {
1162 		for (suite = _ztest_suite_node_list_start; suite < _ztest_suite_node_list_end;
1163 		     ++suite) {
1164 			if (suite->stats->run_count < 1) {
1165 				PRINT("ERROR: Test suite '%s' did not run.\n", suite->name);
1166 				all_tests_run = false;
1167 			}
1168 		}
1169 
1170 		for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1171 			suite = ztest_find_test_suite(test->test_suite_name);
1172 			if (suite == NULL) {
1173 				PRINT("ERROR: Test '%s' assigned to test suite '%s' which doesn't "
1174 				      "exist\n",
1175 				      test->name, test->test_suite_name);
1176 				all_tests_run = false;
1177 			}
1178 		}
1179 
1180 		if (!all_tests_run) {
1181 			test_status = ZTEST_STATUS_HAS_FAILURE;
1182 		}
1183 	}
1184 
1185 	for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1186 		if (test->stats->fail_count + test->stats->pass_count + test->stats->skip_count !=
1187 		    test->stats->run_count) {
1188 			PRINT("Bad stats for %s.%s\n", test->test_suite_name, test->name);
1189 			test_status = 1;
1190 		}
1191 	}
1192 }
1193 
/* Dispatch a full test run through the arch/backend-specific runner. */
void ztest_run_all(const void *state, bool shuffle, int suite_iter, int case_iter)
{
	ztest_api.run_all(state, shuffle, suite_iter, case_iter);
}
1198 
test_main(void)1199 void __weak test_main(void)
1200 {
1201 #if CONFIG_ZTEST_SHUFFLE
1202 	ztest_run_all(NULL, true, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1203 #else
1204 	ztest_run_all(NULL, false, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1205 #endif
1206 	ztest_verify_all_test_suites_ran();
1207 }
1208 
1209 #ifndef KERNEL
/* Host (non-kernel) entry point: initialize mocks, run the tests, and
 * report. Returns the accumulated test status as the process exit code.
 */
int main(void)
{
	z_init_mock();
	test_main();
	end_report();
#ifdef CONFIG_ZTEST_NO_YIELD
	/*
	 * Rather than yielding to idle thread, keep the part awake so debugger can
	 * still access it, since some SOCs cannot be debugged in low power states.
	 */
	uint32_t key = irq_lock();

	while (1) {
		; /* Spin */
	}
	/* Unreachable; kept for symmetry with irq_lock() above. */
	irq_unlock(key);
#endif
	return test_status;
}
1229 #else
1230 
1231 /* Shell */
1232 
1233 #ifdef CONFIG_ZTEST_SHELL
/* Shell handler: print the name of every registered test suite. */
static int cmd_list_suites(const struct shell *sh, size_t argc, char **argv)
{
	for (struct ztest_suite_node *node = _ztest_suite_node_list_start;
	     node < _ztest_suite_node_list_end; node++) {
		shell_print(sh, "%s", node->name);
	}

	return 0;
}
1243 
/* Shell handler: print every test case as "suite::test".
 * (Removed the `test_count` local — it was incremented but never read.)
 */
static int cmd_list_cases(const struct shell *sh, size_t argc, char **argv)
{
	struct ztest_suite_node *ptr;
	struct ztest_unit_test *test;

	for (ptr = _ztest_suite_node_list_start; ptr < _ztest_suite_node_list_end; ++ptr) {
		test = NULL;
		while ((test = z_ztest_get_next_test(ptr->name, test)) != NULL) {
			shell_print(sh, "%s::%s", test->test_suite_name, test->name);
		}
	}
	return 0;
}
1259 extern void ztest_set_test_args(char *argv);
1260 extern void ztest_reset_test_args(void);
1261 
/* Shell handler: clear any test-name filter, run every suite once, and
 * print the final report.
 */
static int cmd_runall(const struct shell *sh, size_t argc, char **argv)
{
	ztest_reset_test_args();
	ztest_run_all(NULL, false, 1, 1);
	end_report();
	return 0;
}
1269 
1270 #ifdef CONFIG_ZTEST_SHUFFLE
/**
 * @brief Shell handler: run all tests in shuffled order.
 *
 * Options: -s/--suite_iter N and -c/--case_iter N set the repeat counts
 * (both must be >= 1; default 1).
 *
 * Fix: error messages said "interations" — corrected to "iterations".
 */
static int cmd_shuffle(const struct shell *sh, size_t argc, char **argv)
{

	struct getopt_state *state;
	int opt;
	static struct option long_options[] = {{"suite_iter", required_argument, 0, 's'},
		{"case_iter", required_argument, 0, 'c'},
		{0, 0, 0, 0}};
	int opt_index = 0;
	int val;
	int opt_num = 0;

	int suite_iter = 1;
	int case_iter = 1;

	while ((opt = getopt_long(argc, argv, "s:c:", long_options, &opt_index)) != -1) {
		state = getopt_state_get();
		switch (opt) {
		case 's':
			val = atoi(state->optarg);
			if (val < 1) {
				shell_fprintf(sh, SHELL_ERROR,
					"Invalid number of suite iterations\n");
				return -ENOEXEC;
			}
			suite_iter = val;
			opt_num++;
			break;
		case 'c':
			val = atoi(state->optarg);
			if (val < 1) {
				shell_fprintf(sh, SHELL_ERROR,
					"Invalid number of case iterations\n");
				return -ENOEXEC;
			}
			case_iter = val;
			opt_num++;
			break;
		default:
			shell_fprintf(sh, SHELL_ERROR,
				"Invalid option or option usage: %s\n", argv[opt_index + 1]);
			return -ENOEXEC;
		}
	}
	ztest_reset_test_args();
	ztest_run_all(NULL, true, suite_iter, case_iter);
	end_report();
	return 0;
}
1320 #endif
1321 
/**
 * @brief Shell handler: run the suite (or test) named in argv[1].
 *
 * Sets the test-args filter so __ztest_run_test_suite only executes
 * matching suites/cases. Stops early on a critical error, or on any
 * failure when FAIL_FAST is enabled.
 *
 * Fix: removed the `count` accumulator — it was summed but never read.
 */
static int cmd_run_suite(const struct shell *sh, size_t argc, char **argv)
{
	bool shuffle = false;

	ztest_set_test_args(argv[1]);

	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
			ptr < _ztest_suite_node_list_end; ++ptr) {
		__ztest_init_unit_test_result_for_suite(ptr);
		__ztest_run_test_suite(ptr, NULL, shuffle, 1, 1);
		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
				(test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
			break;
		}
	}
	return 0;
}
1340 
1341 static void testsuite_list_get(size_t idx, struct shell_static_entry *entry);
1342 
1343 SHELL_DYNAMIC_CMD_CREATE(testsuite_names, testsuite_list_get);
1344 
/* Return the start of the suite linker section via @a suites and the
 * number of registered suites as the return value.
 */
static size_t testsuite_get_all_static(struct ztest_suite_node const **suites)
{
	*suites = _ztest_suite_node_list_start;
	return _ztest_suite_node_list_end - _ztest_suite_node_list_start;
}
1350 
suite_lookup(size_t idx,const char * prefix)1351 static const struct ztest_suite_node *suite_lookup(size_t idx, const char *prefix)
1352 {
1353 	size_t match_idx = 0;
1354 	const struct ztest_suite_node *suite;
1355 	size_t len = testsuite_get_all_static(&suite);
1356 	const struct ztest_suite_node *suite_end = suite + len;
1357 
1358 	while (suite < suite_end) {
1359 		if ((suite->name != NULL) && (strlen(suite->name) != 0) &&
1360 				((prefix == NULL) ||
1361 				(strncmp(prefix, suite->name, strlen(prefix)) == 0))) {
1362 			if (match_idx == idx) {
1363 				return suite;
1364 			}
1365 			++match_idx;
1366 		}
1367 		++suite;
1368 	}
1369 
1370 	return NULL;
1371 }
1372 
/* Dynamic shell completion callback: fill @a entry with the idx-th suite
 * name, or a NULL syntax to terminate the candidate list.
 */
static void testsuite_list_get(size_t idx, struct shell_static_entry *entry)
{
	const struct ztest_suite_node *match = suite_lookup(idx, "");

	entry->handler = NULL;
	entry->help = NULL;
	entry->subcmd = NULL;
	entry->syntax = (match == NULL) ? NULL : match->name;
}
1382 
	/* Sub-command table for the `ztest` shell command: listing and
	 * running suites/test cases (shuffle only with CONFIG_ZTEST_SHUFFLE).
	 */
	SHELL_STATIC_SUBCMD_SET_CREATE(
		sub_ztest_cmds,
		SHELL_CMD_ARG(run-all, NULL, "Run all tests", cmd_runall, 0, 0),
#ifdef CONFIG_ZTEST_SHUFFLE
		SHELL_COND_CMD_ARG(CONFIG_ZTEST_SHUFFLE, shuffle, NULL,
			"Shuffle tests", cmd_shuffle, 0, 2),
#endif
		SHELL_CMD_ARG(list-testsuites, NULL,
			"List all test suites", cmd_list_suites, 0, 0),
		SHELL_CMD_ARG(list-testcases, NULL,
			"List all test cases", cmd_list_cases, 0, 0),
		SHELL_CMD_ARG(run-testsuite, &testsuite_names,
			"Run test suite", cmd_run_suite, 2, 0),
		SHELL_CMD_ARG(run-testcase, NULL, "Run testcase", cmd_run_suite, 2, 0),
		SHELL_SUBCMD_SET_END /* Array terminated. */
	);
1399 
1400 SHELL_CMD_REGISTER(ztest, &sub_ztest_cmds, "Ztest commands", NULL);
1401 #endif /* CONFIG_ZTEST_SHELL */
1402 
/* Kernel entry point: set up userspace memory partitions, init mocks, and
 * (unless the interactive shell drives the run) execute all tests and report.
 */
int main(void)
{
#ifdef CONFIG_USERSPACE
	/* Partition containing globals tagged with ZTEST_DMEM and ZTEST_BMEM
	 * macros. Any variables that user code may reference need to be
	 * placed in this partition if no other memory domain configuration
	 * is made.
	 */
	k_mem_domain_add_partition(&k_mem_domain_default, &ztest_mem_partition);
#ifdef Z_MALLOC_PARTITION_EXISTS
	/* Allow access to malloc() memory */
	k_mem_domain_add_partition(&k_mem_domain_default, &z_malloc_partition);
#endif
#endif /* CONFIG_USERSPACE */

	z_init_mock();
#ifndef CONFIG_ZTEST_SHELL
	test_main();
	end_report();
	flush_log();
	LOG_PANIC();
	if (IS_ENABLED(CONFIG_ZTEST_RETEST_IF_PASSED)) {
		/* Boot counter preserved across warm reboots (__noinit) so a
		 * passing binary can be re-run repeatedly until it fails.
		 */
		static __noinit struct {
			uint32_t magic;
			uint32_t boots;
		} state;
		const uint32_t magic = 0x152ac523;

		/* First boot (or corrupted noinit memory): reset the counter. */
		if (state.magic != magic) {
			state.magic = magic;
			state.boots = 0;
		}
		state.boots += 1;
		if (test_status == 0) {
			PRINT("Reset board #%u to test again\n", state.boots);
			k_msleep(10);
			sys_reboot(SYS_REBOOT_COLD);
		} else {
			PRINT("Failed after %u attempts\n", state.boots);
			state.boots = 0;
		}
	}
#ifdef CONFIG_ZTEST_NO_YIELD
	/*
	 * Rather than yielding to idle thread, keep the part awake so debugger can
	 * still access it, since some SOCs cannot be debugged in low power states.
	 */
	uint32_t key = irq_lock();

	while (1) {
		; /* Spin */
	}
	/* Unreachable; kept for symmetry with irq_lock() above. */
	irq_unlock(key);
#endif /* CONFIG_ZTEST_NO_YIELD */
#endif /* CONFIG_ZTEST_SHELL */
	return 0;
}
1460 #endif
1461