1 /*
2  * Copyright (c) 2016 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdlib.h>
8 #include <zephyr/ztest.h>
9 
10 #include <zephyr/app_memory/app_memdomain.h>
11 #ifdef CONFIG_USERSPACE
12 #include <zephyr/sys/libc-hooks.h>
13 #endif
14 #include <zephyr/logging/log_ctrl.h>
15 #include <zephyr/sys/reboot.h>
16 
17 #include <zephyr/llext/symbol.h>
18 
19 #include <zephyr/sys/barrier.h>
20 
21 #ifdef KERNEL
22 static struct k_thread ztest_thread;
23 #endif
24 static bool failed_expectation;
25 
26 #ifdef CONFIG_ZTEST_SHELL
27 #include <zephyr/shell/shell.h>
28 #endif
29 
30 #ifdef CONFIG_ZTEST_SHUFFLE
31 #include <time.h>
32 #include <zephyr/random/random.h>
33 #ifndef CONFIG_ZTEST_REPEAT
34 #define NUM_ITER_PER_SUITE CONFIG_ZTEST_SHUFFLE_SUITE_REPEAT_COUNT
35 #define NUM_ITER_PER_TEST  CONFIG_ZTEST_SHUFFLE_TEST_REPEAT_COUNT
36 #endif
37 #endif /* CONFIG_ZTEST_SHUFFLE */
38 
39 #ifdef CONFIG_ZTEST_REPEAT
40 #define NUM_ITER_PER_SUITE CONFIG_ZTEST_SUITE_REPEAT_COUNT
41 #define NUM_ITER_PER_TEST  CONFIG_ZTEST_TEST_REPEAT_COUNT
42 #else
43 #ifndef CONFIG_ZTEST_SHUFFLE
44 #define NUM_ITER_PER_SUITE 1
45 #define NUM_ITER_PER_TEST  1
46 #endif
47 #endif
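/*
 * Iteration counts resolve in this order: when CONFIG_ZTEST_REPEAT is set its
 * explicit suite/test counts are used; otherwise CONFIG_ZTEST_SHUFFLE supplies
 * its own repeat counts; with neither enabled each suite and test runs once.
 */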
48 
49 #ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
50 #include <coverage.h>
51 #endif
52 
53 /* ZTEST_DMEM and ZTEST_BMEM are used for the application shared memory test  */
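/*
 * Illustrative only (not part of this file): with CONFIG_USERSPACE enabled,
 * tagging a global places it in ztest_mem_partition so user-mode test threads
 * can access it, e.g.
 *
 *     ZTEST_DMEM int shared_counter = 5;     initialized data
 *     ZTEST_BMEM char shared_buf[32];        zero-initialized data
 *
 * Without CONFIG_USERSPACE the tags typically expand to nothing.
 */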
54 
55 /**
56  * @brief The current status of the test binary
57  */
58 enum ztest_status {
59 	ZTEST_STATUS_OK,
60 	ZTEST_STATUS_HAS_FAILURE,
61 	ZTEST_STATUS_CRITICAL_ERROR
62 };
63 
64 /**
65  * @brief Tracks the current phase that ztest is operating in.
66  */
67 ZTEST_DMEM enum ztest_phase cur_phase = TEST_PHASE_FRAMEWORK;
68 
69 static ZTEST_BMEM enum ztest_status test_status = ZTEST_STATUS_OK;
70 
71 extern ZTEST_DMEM const struct ztest_arch_api ztest_api;
72 
73 static void __ztest_show_suite_summary(void);
74 
75 static void end_report(void)
76 {
77 	__ztest_show_suite_summary();
78 	if (test_status) {
79 		TC_END_REPORT(TC_FAIL);
80 	} else {
81 		TC_END_REPORT(TC_PASS);
82 	}
83 }
84 
85 static int cleanup_test(struct ztest_unit_test *test)
86 {
87 	int ret = TC_PASS;
88 	int mock_status;
89 
90 	mock_status = z_cleanup_mock();
91 
92 #ifdef KERNEL
93 	/* We need to remove the ztest_thread information from the timeout_q
94 	 * because we reuse the same k_thread structure; leaving a stale entry
95 	 * there would cause problems.
96 	 */
97 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
98 		k_thread_abort(&ztest_thread);
99 	}
100 #endif
101 
102 	if (!ret && mock_status == 1) {
103 		PRINT_DATA("Test %s failed: Unused mock parameter values\n", test->name);
104 		ret = TC_FAIL;
105 	} else if (!ret && mock_status == 2) {
106 		PRINT_DATA("Test %s failed: Unused mock return values\n", test->name);
107 		ret = TC_FAIL;
108 	} else {
109 		;
110 	}
111 
112 	return ret;
113 }
114 
115 #ifdef KERNEL
116 
117 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
118 #define MAX_NUM_CPUHOLD  (CONFIG_MP_MAX_NUM_CPUS - 1)
119 #define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
120 
121 struct cpuhold_pool_item {
122 	struct k_thread thread;
123 	bool used;
124 };
125 
126 static struct cpuhold_pool_item cpuhold_pool_items[MAX_NUM_CPUHOLD + 1];
127 
128 K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD + 1, CPUHOLD_STACK_SZ);
129 
130 static struct k_sem cpuhold_sem;
131 
132 volatile int cpuhold_active;
133 volatile bool cpuhold_spawned;
134 
135 static int find_unused_thread(void)
136 {
137 	for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
138 		if (!cpuhold_pool_items[i].used) {
139 			return i;
140 		}
141 	}
142 
143 	return -1;
144 }
145 
146 static void mark_thread_unused(struct k_thread *thread)
147 {
148 	for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
149 		if (&cpuhold_pool_items[i].thread == thread) {
150 			cpuhold_pool_items[i].used = false;
151 		}
152 	}
153 }
154 
155 static inline void wait_for_thread_to_switch_out(struct k_thread *thread)
156 {
157 	unsigned int key = arch_irq_lock();
158 	volatile void **shp = (void *)&thread->switch_handle;
159 
160 	while (*shp == NULL) {
161 		arch_spin_relax();
162 	}
163 	/* Read barrier: don't allow any subsequent loads in the
164 	 * calling code to reorder before we saw switch_handle go
165 	 * non-null.
166 	 */
167 	barrier_dmem_fence_full();
168 
169 	arch_irq_unlock(key);
170 }
171 
172 /* "Holds" a CPU for use with the "1cpu" test cases.  Note that we
173  * can't use tools like the cpumask feature because we have tests that
174  * may need to control that configuration themselves.  We do this at
175  * the lowest level, by locking interrupts directly and spinning.
176  */
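/*
 * Flow note for cpu_hold() below: if a holder thread happens to start on
 * CPU 0 (the CPU the test itself needs), it spawns a replacement holder at
 * K_HIGHEST_THREAD_PRIO and busy-waits until that replacement reports in via
 * cpuhold_spawned, which guarantees the replacement landed on another CPU.
 * The replacement then waits for its parent to be switched out before marking
 * the parent's pool slot unused.
 */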
177 static void cpu_hold(void *arg1, void *arg2, void *arg3)
178 {
179 	struct k_thread *thread = arg1;
180 	unsigned int idx = (unsigned int)(uintptr_t)arg2;
181 	char tname[CONFIG_THREAD_MAX_NAME_LEN];
182 
183 	ARG_UNUSED(arg3);
184 
185 	if (arch_proc_id() == 0) {
186 		int i;
187 
188 		i = find_unused_thread();
189 
190 		__ASSERT_NO_MSG(i != -1);
191 
192 		cpuhold_spawned = false;
193 
194 		cpuhold_pool_items[i].used = true;
195 		k_thread_create(&cpuhold_pool_items[i].thread, cpuhold_stacks[i], CPUHOLD_STACK_SZ,
196 				cpu_hold, k_current_get(), (void *)(uintptr_t)idx, NULL,
197 				K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
198 
199 		/*
200 		 * Busy-wait until we know the spawned thread is running to
201 		 * ensure it does not spawn on CPU0.
202 		 */
203 
204 		while (!cpuhold_spawned) {
205 			k_busy_wait(1000);
206 		}
207 
208 		return;
209 	}
210 
211 	if (thread != NULL) {
212 		cpuhold_spawned = true;
213 
214 		/* Busywait until a new thread is scheduled in on CPU0 */
215 
216 		wait_for_thread_to_switch_out(thread);
217 
218 		mark_thread_unused(thread);
219 	}
220 
221 	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
222 		snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", idx);
223 		k_thread_name_set(k_current_get(), tname);
224 	}
225 
226 	uint32_t dt, start_ms = k_uptime_get_32();
227 	unsigned int key = arch_irq_lock();
228 
229 	k_sem_give(&cpuhold_sem);
230 
231 #if (defined(CONFIG_ARM64) || defined(CONFIG_RISCV)) && defined(CONFIG_FPU_SHARING)
232 	/*
233 	 * We'll be spinning with IRQs disabled. The flush-your-FPU request
234 	 * IPI will never be serviced during that time. Therefore we flush
235 	 * the FPU preemptively here to prevent any other CPU waiting after
236 	 * this CPU forever and deadlock the system.
237 	 */
238 	k_float_disable(_current_cpu->arch.fpu_owner);
239 #endif
240 
241 	while (cpuhold_active) {
242 		k_busy_wait(1000);
243 	}
244 
245 	/* Holding the CPU via spinning is expensive, and abusing this
246 	 * for long-running test cases tends to overload the CI system
247 	 * (qemu runs separate CPUs in different threads, but the CI
248 	 * logic views it as one "job") and cause other test failures.
249 	 */
250 	dt = k_uptime_get_32() - start_ms;
251 	zassert_true(dt < CONFIG_ZTEST_CPU_HOLD_TIME_MS, "1cpu test took too long (%d ms)", dt);
252 	arch_irq_unlock(key);
253 }
254 #endif /* CONFIG_SMP && (CONFIG_MP_MAX_NUM_CPUS > 1) */
255 
256 void z_impl_z_test_1cpu_start(void)
257 {
258 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
259 	unsigned int num_cpus = arch_num_cpus();
260 	int j;
261 
262 	cpuhold_active = 1;
263 
264 	k_sem_init(&cpuhold_sem, 0, 999);
265 
266 	/* Spawn N-1 threads to "hold" the other CPUs, waiting for
267 	 * each to signal us that it's locked and spinning.
268 	 */
269 	for (int i = 0; i < num_cpus - 1; i++) {
270 		j = find_unused_thread();
271 
272 		__ASSERT_NO_MSG(j != -1);
273 
274 		cpuhold_pool_items[j].used = true;
275 		k_thread_create(&cpuhold_pool_items[j].thread, cpuhold_stacks[j], CPUHOLD_STACK_SZ,
276 				cpu_hold, NULL, (void *)(uintptr_t)i, NULL, K_HIGHEST_THREAD_PRIO,
277 				0, K_NO_WAIT);
278 		k_sem_take(&cpuhold_sem, K_FOREVER);
279 	}
280 #endif
281 }
282 
283 void z_impl_z_test_1cpu_stop(void)
284 {
285 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
286 	cpuhold_active = 0;
287 
288 	for (int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
289 		if (cpuhold_pool_items[i].used) {
290 			k_thread_abort(&cpuhold_pool_items[i].thread);
291 			cpuhold_pool_items[i].used = false;
292 		}
293 	}
294 #endif
295 }
296 
297 #ifdef CONFIG_USERSPACE
298 void z_vrfy_z_test_1cpu_start(void)
299 {
300 	z_impl_z_test_1cpu_start();
301 }
302 #include <zephyr/syscalls/z_test_1cpu_start_mrsh.c>
303 
304 void z_vrfy_z_test_1cpu_stop(void)
305 {
306 	z_impl_z_test_1cpu_stop();
307 }
308 #include <zephyr/syscalls/z_test_1cpu_stop_mrsh.c>
309 #endif /* CONFIG_USERSPACE */
310 #endif
311 
312 __maybe_unused static void run_test_rules(bool is_before, struct ztest_unit_test *test, void *data)
313 {
314 	for (struct ztest_test_rule *rule = _ztest_test_rule_list_start;
315 	     rule < _ztest_test_rule_list_end; ++rule) {
316 		if (is_before && rule->before_each) {
317 			rule->before_each(test, data);
318 		} else if (!is_before && rule->after_each) {
319 			rule->after_each(test, data);
320 		}
321 	}
322 }
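/*
 * Illustrative only: rules are registered elsewhere with
 *     ZTEST_RULE(my_rule, my_before_each_fn, my_after_each_fn);
 * and land in the _ztest_test_rule_list section iterated above, so every rule
 * wraps every unit test in the binary.
 */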
323 
324 static void run_test_functions(struct ztest_suite_node *suite, struct ztest_unit_test *test,
325 			       void *data)
326 {
327 	__ztest_set_test_phase(TEST_PHASE_TEST);
328 	test->test(data);
329 }
330 
331 COND_CODE_1(KERNEL, (ZTEST_BMEM), ()) static enum ztest_result test_result;
332 
333 static int get_final_test_result(const struct ztest_unit_test *test, int ret)
334 {
335 	enum ztest_expected_result expected_result = -1;
336 
337 	for (struct ztest_expected_result_entry *expectation =
338 		     _ztest_expected_result_entry_list_start;
339 	     expectation < _ztest_expected_result_entry_list_end; ++expectation) {
340 		if (strcmp(expectation->test_name, test->name) == 0 &&
341 		    strcmp(expectation->test_suite_name, test->test_suite_name) == 0) {
342 			expected_result = expectation->expected_result;
343 			break;
344 		}
345 	}
346 
347 	if (expected_result == ZTEST_EXPECTED_RESULT_FAIL) {
348 		/* Expected a failure:
349 		 * - If we got a failure, return TC_PASS
350 		 * - Otherwise force a failure
351 		 */
352 		return (ret == TC_FAIL) ? TC_PASS : TC_FAIL;
353 	}
354 	if (expected_result == ZTEST_EXPECTED_RESULT_SKIP) {
355 		/* Expected a skip:
356 		 * - If we got a skip, return TC_PASS
357 		 * - Otherwise force a failure
358 		 */
359 		return (ret == TC_SKIP) ? TC_PASS : TC_FAIL;
360 	}
361 	/* No expectation was made, no change is needed. */
362 	return ret;
363 }
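/*
 * Illustrative only: the expectations consumed above are declared at build
 * time, e.g.
 *     ZTEST_EXPECT_FAIL(my_suite, test_that_should_fail);
 *     ZTEST_EXPECT_SKIP(my_suite, test_that_should_skip);
 * which populate _ztest_expected_result_entry_list.
 */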
364 
365 /**
366  * @brief Get a friendly name string for a given test phase.
367  *
368  * @param phase an enum ztest_phase value describing the desired test phase
369  * @returns a string name for `phase`
370  */
371 static inline const char *get_friendly_phase_name(enum ztest_phase phase)
372 {
373 	switch (phase) {
374 	case TEST_PHASE_SETUP:
375 		return "setup";
376 	case TEST_PHASE_BEFORE:
377 		return "before";
378 	case TEST_PHASE_TEST:
379 		return "test";
380 	case TEST_PHASE_AFTER:
381 		return "after";
382 	case TEST_PHASE_TEARDOWN:
383 		return "teardown";
384 	case TEST_PHASE_FRAMEWORK:
385 		return "framework";
386 	default:
387 		return "(unknown)";
388 	}
389 }
390 
391 static bool current_test_failed_assumption;
392 void ztest_skip_failed_assumption(void)
393 {
394 	if (IS_ENABLED(CONFIG_ZTEST_FAIL_ON_ASSUME)) {
395 		current_test_failed_assumption = true;
396 	}
397 	ztest_test_skip();
398 }
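/*
 * zassume*() failures route through ztest_skip_failed_assumption(); with
 * CONFIG_ZTEST_FAIL_ON_ASSUME the resulting skip is later promoted to an
 * overall failure in run_test() via current_test_failed_assumption.
 */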
399 
400 #ifndef KERNEL
401 
402 /* Static code analysis tools can raise a violation because the standard
403  * header <setjmp.h> shall not be used.
404  *
405  * setjmp is used in test code, not in runtime code, so this is acceptable.
406  * It is a deliberate deviation.
407  */
408 #include <setjmp.h> /* parasoft-suppress MISRAC2012-RULE_21_4-a MISRAC2012-RULE_21_4-b*/
409 #include <signal.h>
410 #include <stdlib.h>
411 #include <string.h>
412 
413 #define FAIL_FAST 0
414 
415 static jmp_buf test_fail;
416 static jmp_buf test_pass;
417 static jmp_buf test_skip;
418 static jmp_buf stack_fail;
419 static jmp_buf test_suite_fail;
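/*
 * In the non-kernel (unit testing) build there is no separate test thread to
 * abort, so aborting a test is implemented with setjmp()/longjmp():
 * run_test() and z_ztest_run_test_suite_ptr() arm these jump buffers, and
 * ztest_test_fail()/pass()/skip() long-jump back with the outcome.
 */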
420 
421 void ztest_test_fail(void)
422 {
423 	switch (cur_phase) {
424 	case TEST_PHASE_SETUP:
425 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
426 		longjmp(test_suite_fail, 1);
427 	case TEST_PHASE_BEFORE:
428 	case TEST_PHASE_TEST:
429 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
430 		longjmp(test_fail, 1);
431 	case TEST_PHASE_AFTER:
432 	case TEST_PHASE_TEARDOWN:
433 	case TEST_PHASE_FRAMEWORK:
434 		PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
435 			   get_friendly_phase_name(cur_phase));
436 		longjmp(stack_fail, 1);
437 	}
438 }
439 EXPORT_SYMBOL(ztest_test_fail);
440 
441 void ztest_test_pass(void)
442 {
443 	if (cur_phase == TEST_PHASE_TEST) {
444 		longjmp(test_pass, 1);
445 	}
446 	PRINT_DATA(" ERROR: cannot pass in test phase '%s()', bailing\n",
447 		   get_friendly_phase_name(cur_phase));
448 	longjmp(stack_fail, 1);
449 }
450 EXPORT_SYMBOL(ztest_test_pass);
451 
452 void ztest_test_skip(void)
453 {
454 	switch (cur_phase) {
455 	case TEST_PHASE_SETUP:
456 	case TEST_PHASE_BEFORE:
457 	case TEST_PHASE_TEST:
458 		longjmp(test_skip, 1);
459 	default:
460 		PRINT_DATA(" ERROR: cannot skip in test phase '%s()', bailing\n",
461 			   get_friendly_phase_name(cur_phase));
462 		longjmp(stack_fail, 1);
463 	}
464 }
465 EXPORT_SYMBOL(ztest_test_skip);
466 
467 void ztest_test_expect_fail(void)
468 {
469 	failed_expectation = true;
470 
471 	switch (cur_phase) {
472 	case TEST_PHASE_SETUP:
473 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
474 		break;
475 	case TEST_PHASE_BEFORE:
476 	case TEST_PHASE_TEST:
477 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
478 		break;
479 	case TEST_PHASE_AFTER:
480 	case TEST_PHASE_TEARDOWN:
481 	case TEST_PHASE_FRAMEWORK:
482 		PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
483 			   get_friendly_phase_name(cur_phase));
484 		longjmp(stack_fail, 1);
485 	}
486 }
487 
488 static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
489 {
490 	int ret = TC_PASS;
491 
492 	TC_START(test->name);
493 	__ztest_set_test_phase(TEST_PHASE_BEFORE);
494 
495 	if (test_result == ZTEST_RESULT_SUITE_FAIL) {
496 		ret = TC_FAIL;
497 		goto out;
498 	}
499 
500 	if (setjmp(test_fail)) {
501 		ret = TC_FAIL;
502 		goto out;
503 	}
504 
505 	if (setjmp(test_pass)) {
506 		ret = TC_PASS;
507 		goto out;
508 	}
509 
510 	if (setjmp(test_skip)) {
511 		ret = TC_SKIP;
512 		goto out;
513 	}
514 
515 	run_test_rules(/*is_before=*/true, test, data);
516 	if (suite->before) {
517 		suite->before(data);
518 	}
519 	run_test_functions(suite, test, data);
520 out:
521 	if (failed_expectation) {
522 		failed_expectation = false;
523 		ret = TC_FAIL;
524 	}
525 
526 	__ztest_set_test_phase(TEST_PHASE_AFTER);
527 	if (test_result != ZTEST_RESULT_SUITE_FAIL) {
528 		if (suite->after != NULL) {
529 			suite->after(data);
530 		}
531 		run_test_rules(/*is_before=*/false, test, data);
532 	}
533 	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);
534 	ret |= cleanup_test(test);
535 
536 	ret = get_final_test_result(test, ret);
537 	Z_TC_END_RESULT(ret, test->name);
538 	if (ret == TC_SKIP && current_test_failed_assumption) {
539 		test_status = 1;
540 	}
541 
542 	return ret;
543 }
544 
545 #else /* KERNEL */
546 
547 /* Zephyr's probably going to cause all tests to fail if one test fails, so
548  * skip the rest of the tests if one of them fails.
549  */
550 #ifdef CONFIG_ZTEST_FAIL_FAST
551 #define FAIL_FAST 1
552 #else
553 #define FAIL_FAST 0
554 #endif
555 
556 K_THREAD_STACK_DEFINE(ztest_thread_stack, CONFIG_ZTEST_STACK_SIZE + CONFIG_TEST_EXTRA_STACK_SIZE);
557 
558 static void test_finalize(void)
559 {
560 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
561 		k_thread_abort(&ztest_thread);
562 		if (k_is_in_isr()) {
563 			return;
564 		}
565 
566 		k_thread_abort(k_current_get());
567 		CODE_UNREACHABLE;
568 	}
569 }
570 
571 void ztest_test_fail(void)
572 {
573 	switch (cur_phase) {
574 	case TEST_PHASE_SETUP:
575 		__ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
576 		break;
577 	case TEST_PHASE_BEFORE:
578 	case TEST_PHASE_TEST:
579 		__ztest_set_test_result(ZTEST_RESULT_FAIL);
580 		test_finalize();
581 		break;
582 	default:
583 		PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
584 			   get_friendly_phase_name(cur_phase));
585 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
586 		break;
587 	}
588 }
589 EXPORT_SYMBOL(ztest_test_fail);
590 
591 void ztest_test_pass(void)
592 {
593 	switch (cur_phase) {
594 	case TEST_PHASE_TEST:
595 		__ztest_set_test_result(ZTEST_RESULT_PASS);
596 		test_finalize();
597 		break;
598 	default:
599 		PRINT_DATA(" ERROR: cannot pass in test phase '%s()', bailing\n",
600 			   get_friendly_phase_name(cur_phase));
601 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
602 		if (cur_phase == TEST_PHASE_BEFORE) {
603 			test_finalize();
604 		}
605 	}
606 }
607 EXPORT_SYMBOL(ztest_test_pass);
608 
609 void ztest_test_skip(void)
610 {
611 	switch (cur_phase) {
612 	case TEST_PHASE_SETUP:
613 		__ztest_set_test_result(ZTEST_RESULT_SUITE_SKIP);
614 		break;
615 	case TEST_PHASE_BEFORE:
616 	case TEST_PHASE_TEST:
617 		__ztest_set_test_result(ZTEST_RESULT_SKIP);
618 		test_finalize();
619 		break;
620 	default:
621 		PRINT_DATA(" ERROR: cannot skip in test phase '%s()', bailing\n",
622 			   get_friendly_phase_name(cur_phase));
623 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
624 		break;
625 	}
626 }
627 EXPORT_SYMBOL(ztest_test_skip);
628 
629 void ztest_test_expect_fail(void)
630 {
631 	failed_expectation = true;
632 }
633 
634 void ztest_simple_1cpu_before(void *data)
635 {
636 	ARG_UNUSED(data);
637 	z_test_1cpu_start();
638 }
639 
640 void ztest_simple_1cpu_after(void *data)
641 {
642 	ARG_UNUSED(data);
643 	z_test_1cpu_stop();
644 }
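/*
 * Illustrative only: suites that want the "1cpu" behaviour around every test
 * can plug these helpers into their registration, e.g.
 *     ZTEST_SUITE(my_suite, NULL, NULL, ztest_simple_1cpu_before,
 *                 ztest_simple_1cpu_after, NULL);
 */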
645 
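/* Thread entry for a single unit test. When the test is tagged K_USER, the
 * before hooks run first in supervisor mode, then the thread drops to user
 * mode by re-entering itself via k_thread_user_mode_enter(), so only the test
 * body executes in user mode.
 */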
646 static void test_cb(void *a, void *b, void *c)
647 {
648 	struct ztest_suite_node *suite = a;
649 	struct ztest_unit_test *test = b;
650 	const bool config_user_mode = FIELD_GET(K_USER, test->thread_options) != 0;
651 
652 	if (!IS_ENABLED(CONFIG_USERSPACE) || !k_is_user_context()) {
653 		__ztest_set_test_result(ZTEST_RESULT_PENDING);
654 		run_test_rules(/*is_before=*/true, test, /*data=*/c);
655 		if (suite->before) {
656 			suite->before(/*data=*/c);
657 		}
658 		if (IS_ENABLED(CONFIG_USERSPACE) && config_user_mode) {
659 			k_thread_user_mode_enter(test_cb, a, b, c);
660 		}
661 	}
662 	run_test_functions(suite, test, c);
663 	__ztest_set_test_result(ZTEST_RESULT_PASS);
664 }
665 
666 static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
667 {
668 	int ret = TC_PASS;
669 
670 #if CONFIG_ZTEST_TEST_DELAY_MS > 0
671 	k_busy_wait(CONFIG_ZTEST_TEST_DELAY_MS * USEC_PER_MSEC);
672 #endif
673 	TC_START(test->name);
674 
675 	__ztest_set_test_phase(TEST_PHASE_BEFORE);
676 
677 	/* If the suite's setup function marked us as skipped, don't bother
678 	 * running the tests.
679 	 */
680 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
681 		get_start_time_cyc();
682 		k_thread_create(&ztest_thread, ztest_thread_stack,
683 				K_THREAD_STACK_SIZEOF(ztest_thread_stack), test_cb, suite, test,
684 				data, CONFIG_ZTEST_THREAD_PRIORITY, K_INHERIT_PERMS, K_FOREVER);
685 
686 		k_thread_access_grant(&ztest_thread, suite, test, suite->stats);
687 		if (test->name != NULL) {
688 			k_thread_name_set(&ztest_thread, test->name);
689 		}
690 		/* Only start the thread if we're not skipping the suite */
691 		if (test_result != ZTEST_RESULT_SUITE_SKIP &&
692 		    test_result != ZTEST_RESULT_SUITE_FAIL) {
693 			k_thread_start(&ztest_thread);
694 			k_thread_join(&ztest_thread, K_FOREVER);
695 		}
696 	} else if (test_result != ZTEST_RESULT_SUITE_SKIP &&
697 		   test_result != ZTEST_RESULT_SUITE_FAIL) {
698 		__ztest_set_test_result(ZTEST_RESULT_PENDING);
699 		get_start_time_cyc();
700 		run_test_rules(/*is_before=*/true, test, data);
701 		if (suite->before) {
702 			suite->before(data);
703 		}
704 		run_test_functions(suite, test, data);
705 	}
706 
707 	__ztest_set_test_phase(TEST_PHASE_AFTER);
708 	if (suite->after != NULL) {
709 		suite->after(data);
710 	}
711 	run_test_rules(/*is_before=*/false, test, data);
712 
713 	get_test_duration_ms();
714 	if (tc_spend_time > test->stats->duration_worst_ms) {
715 		test->stats->duration_worst_ms = tc_spend_time;
716 	}
717 
718 	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);
719 
720 	/* Flush all logs in case deferred mode and default logging thread are used. */
721 	while (IS_ENABLED(CONFIG_TEST_LOGGING_FLUSH_AFTER_TEST) &&
722 	       IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && log_data_pending()) {
723 		k_msleep(100);
724 	}
725 
726 	if (test_result == ZTEST_RESULT_FAIL || test_result == ZTEST_RESULT_SUITE_FAIL ||
727 	    failed_expectation) {
728 		ret = TC_FAIL;
729 		failed_expectation = false;
730 	} else if (test_result == ZTEST_RESULT_SKIP || test_result == ZTEST_RESULT_SUITE_SKIP) {
731 		ret = TC_SKIP;
732 	}
733 
734 	if (test_result == ZTEST_RESULT_PASS || !FAIL_FAST) {
735 		ret |= cleanup_test(test);
736 	}
737 
738 	ret = get_final_test_result(test, ret);
739 	Z_TC_END_RESULT(ret, test->name);
740 	if (ret == TC_SKIP && current_test_failed_assumption) {
741 		test_status = 1;
742 	}
743 
744 	return ret;
745 }
746 
747 #endif /* !KERNEL */
748 
749 static struct ztest_suite_node *ztest_find_test_suite(const char *name)
750 {
751 	struct ztest_suite_node *node;
752 
753 	for (node = _ztest_suite_node_list_start; node < _ztest_suite_node_list_end; ++node) {
754 		if (strcmp(name, node->name) == 0) {
755 			return node;
756 		}
757 	}
758 
759 	return NULL;
760 }
761 
762 struct ztest_unit_test *z_ztest_get_next_test(const char *suite, struct ztest_unit_test *prev)
763 {
764 	struct ztest_unit_test *test = (prev == NULL) ? _ztest_unit_test_list_start : prev + 1;
765 
766 	for (; test < _ztest_unit_test_list_end; ++test) {
767 		if (strcmp(suite, test->test_suite_name) == 0) {
768 			return test;
769 		}
770 	}
771 	return NULL;
772 }
773 
774 #if CONFIG_ZTEST_SHUFFLE
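/*
 * Fill dest[] with pointers to the num_items elements of the array beginning
 * at 'start', then (when 'shuffle' is true) permute them with a Fisher-Yates
 * shuffle driven by sys_rand32_get().
 */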
775 static void z_ztest_shuffle(bool shuffle, void *dest[], intptr_t start, size_t num_items,
776 			    size_t element_size)
777 {
778 	/* Initialize dest array */
779 	for (size_t i = 0; i < num_items; ++i) {
780 		dest[i] = (void *)(start + (i * element_size));
781 	}
782 	void *tmp;
783 
784 	/* Shuffle dest array */
785 	if (shuffle) {
786 		for (size_t i = num_items - 1; i > 0; i--) {
787 			int j = sys_rand32_get() % (i + 1);
788 
789 			if (i != j) {
790 				tmp = dest[j];
791 				dest[j] = dest[i];
792 				dest[i] = tmp;
793 			}
794 		}
795 	}
796 }
797 #endif
798 
799 static int z_ztest_run_test_suite_ptr(struct ztest_suite_node *suite, bool shuffle, int suite_iter,
800 				      int case_iter, void *param)
801 {
802 	struct ztest_unit_test *test = NULL;
803 	void *data = NULL;
804 	int fail = 0;
805 	int tc_result = TC_PASS;
806 
807 	if (FAIL_FAST && test_status != ZTEST_STATUS_OK) {
808 		return test_status;
809 	}
810 
811 	if (suite == NULL) {
812 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
813 		return -1;
814 	}
815 
816 #ifndef KERNEL
817 	if (setjmp(stack_fail)) {
818 		PRINT_DATA("TESTSUITE crashed.\n");
819 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
820 		end_report();
821 		exit(1);
822 	}
823 #else
824 	k_object_access_all_grant(&ztest_thread);
825 #endif
826 
827 	TC_SUITE_START(suite->name);
828 	current_test_failed_assumption = false;
829 	__ztest_set_test_result(ZTEST_RESULT_PENDING);
830 	__ztest_set_test_phase(TEST_PHASE_SETUP);
831 #ifndef KERNEL
832 	if (setjmp(test_suite_fail)) {
833 		__ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
834 	}
835 #endif
836 	if (test_result != ZTEST_RESULT_SUITE_FAIL && suite->setup != NULL) {
837 		data = suite->setup();
838 	}
839 	if (param != NULL) {
840 		data = param;
841 	}
842 
843 	for (int i = 0; i < case_iter; i++) {
844 #ifdef CONFIG_ZTEST_SHUFFLE
845 		struct ztest_unit_test *tests_to_run[ZTEST_TEST_COUNT];
846 
847 		memset(tests_to_run, 0, ZTEST_TEST_COUNT * sizeof(struct ztest_unit_test *));
848 		z_ztest_shuffle(shuffle, (void **)tests_to_run,
849 				(intptr_t)_ztest_unit_test_list_start, ZTEST_TEST_COUNT,
850 				sizeof(struct ztest_unit_test));
851 		for (size_t j = 0; j < ZTEST_TEST_COUNT; ++j) {
852 			test = tests_to_run[j];
853 			/* Make sure that the test belongs to this suite */
854 			if (strcmp(suite->name, test->test_suite_name) != 0) {
855 				continue;
856 			}
857 			if (ztest_api.should_test_run(suite->name, test->name)) {
858 				test->stats->run_count++;
859 				tc_result = run_test(suite, test, data);
860 				if (tc_result == TC_PASS) {
861 					test->stats->pass_count++;
862 				} else if (tc_result == TC_SKIP) {
863 					test->stats->skip_count++;
864 				} else if (tc_result == TC_FAIL) {
865 					test->stats->fail_count++;
866 				}
867 				if (tc_result == TC_FAIL) {
868 					fail++;
869 				}
870 			}
871 
872 			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
873 				break;
874 			}
875 		}
876 #else
877 		while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
878 			if (ztest_api.should_test_run(suite->name, test->name)) {
879 				test->stats->run_count++;
880 				tc_result = run_test(suite, test, data);
881 				if (tc_result == TC_PASS) {
882 					test->stats->pass_count++;
883 				} else if (tc_result == TC_SKIP) {
884 					test->stats->skip_count++;
885 				} else if (tc_result == TC_FAIL) {
886 					test->stats->fail_count++;
887 				}
888 
889 				if (tc_result == TC_FAIL) {
890 					fail++;
891 				}
892 			}
893 
894 			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
895 				break;
896 			}
897 		}
898 #endif
899 		if (test_status == ZTEST_STATUS_OK && fail != 0) {
900 			test_status = ZTEST_STATUS_HAS_FAILURE;
901 		}
902 	}
903 
904 	TC_SUITE_END(suite->name, (fail > 0 ? TC_FAIL : TC_PASS));
905 	__ztest_set_test_phase(TEST_PHASE_TEARDOWN);
906 	if (suite->teardown != NULL) {
907 		suite->teardown(data);
908 	}
909 
910 	return fail;
911 }
912 
913 int z_ztest_run_test_suite(const char *name, bool shuffle,
914 	int suite_iter, int case_iter, void *param)
915 {
916 	return z_ztest_run_test_suite_ptr(ztest_find_test_suite(name), shuffle, suite_iter,
917 					  case_iter, param);
918 }
919 
920 #ifdef CONFIG_USERSPACE
921 K_APPMEM_PARTITION_DEFINE(ztest_mem_partition);
922 #endif
923 
924 static void __ztest_init_unit_test_result_for_suite(struct ztest_suite_node *suite)
925 {
926 	struct ztest_unit_test *test = NULL;
927 
928 	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
929 		test->stats->run_count = 0;
930 		test->stats->skip_count = 0;
931 		test->stats->fail_count = 0;
932 		test->stats->pass_count = 0;
933 		test->stats->duration_worst_ms = 0;
934 	}
935 }
936 
937 static void flush_log(void)
938 {
939 	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
940 		while (log_data_pending()) {
941 			k_sleep(K_MSEC(10));
942 		}
943 		k_sleep(K_MSEC(10));
944 	} else {
945 		while (LOG_PROCESS()) {
946 		}
947 	}
948 }
949 
950 /* Show one line summary for a test suite.
951  */
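/* The pass rate below is computed in integer fixed point: the pass count is
 * scaled by 100000 before dividing, giving the percentage times 1000; the
 * quotient is then split into a whole-percent part, two fractional digits,
 * and a final digit used only to round half up.
 */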
952 static void __ztest_show_suite_summary_oneline(struct ztest_suite_node *suite)
953 {
954 	int distinct_pass = 0, distinct_fail = 0, distinct_skip = 0, distinct_total = 0;
955 	int effective_total = 0;
956 	int expanded_pass = 0, expanded_passrate = 0;
957 	int passrate_major = 0, passrate_minor = 0, passrate_tail = 0;
958 	int suite_result = TC_PASS;
959 
960 	struct ztest_unit_test *test = NULL;
961 	unsigned int suite_duration_worst_ms = 0;
962 
963 	/* Summary of each test's distinct runs */
964 	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
965 		distinct_total++;
966 		suite_duration_worst_ms += test->stats->duration_worst_ms;
967 		if (test->stats->skip_count == test->stats->run_count) {
968 			distinct_skip++;
969 		} else if (test->stats->pass_count == test->stats->run_count) {
970 			distinct_pass++;
971 		} else {
972 			distinct_fail++;
973 		}
974 	}
975 
976 	if (distinct_skip == distinct_total) {
977 		suite_result = TC_SKIP;
978 		passrate_major = passrate_minor = 0;
979 	} else {
980 		suite_result = (distinct_fail > 0) ? TC_FAIL : TC_PASS;
981 		effective_total = distinct_total - distinct_skip;
982 		expanded_pass = distinct_pass * 100000;
983 		expanded_passrate = expanded_pass / effective_total;
984 		passrate_major = expanded_passrate / 1000;
985 		passrate_minor = (expanded_passrate - passrate_major * 1000) / 10;
986 		passrate_tail = expanded_passrate - passrate_major * 1000 - passrate_minor * 10;
987 		if (passrate_tail >= 5) { /* rounding */
988 			passrate_minor++;
989 		}
990 	}
991 
992 	TC_SUMMARY_PRINT("SUITE %s - %3d.%02d%% [%s]: pass = %d, fail = %d, "
993 			 "skip = %d, total = %d duration = %u.%03u seconds\n",
994 			 TC_RESULT_TO_STR(suite_result), passrate_major, passrate_minor,
995 			 suite->name, distinct_pass, distinct_fail, distinct_skip, distinct_total,
996 			 suite_duration_worst_ms / 1000, suite_duration_worst_ms % 1000);
997 	flush_log();
998 }
999 
1000 static void __ztest_show_suite_summary_verbose(struct ztest_suite_node *suite)
1001 {
1002 	struct ztest_unit_test *test = NULL;
1003 	int tc_result = TC_PASS;
1004 	int flush_frequency = 0;
1005 
1006 	if (IS_ENABLED(CONFIG_ZTEST_VERBOSE_SUMMARY) == 0) {
1007 		return;
1008 	}
1009 
1010 	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
1011 		if (test->stats->skip_count == test->stats->run_count) {
1012 			tc_result = TC_SKIP;
1013 		} else if (test->stats->pass_count == test->stats->run_count) {
1014 			tc_result = TC_PASS;
1015 		} else if (test->stats->pass_count == 0) {
1016 			tc_result = TC_FAIL;
1017 		} else {
1018 			tc_result = TC_FLAKY;
1019 		}
1020 
1021 		if (tc_result == TC_FLAKY) {
1022 			TC_SUMMARY_PRINT(
1023 				" - %s - [%s.%s] - (Failed %d of %d attempts)"
1024 				" - duration = %u.%03u seconds\n",
1025 				TC_RESULT_TO_STR(tc_result), test->test_suite_name, test->name,
1026 				test->stats->run_count - test->stats->pass_count,
1027 				test->stats->run_count, test->stats->duration_worst_ms / 1000,
1028 				test->stats->duration_worst_ms % 1000);
1029 		} else {
1030 			TC_SUMMARY_PRINT(" - %s - [%s.%s] duration = %u.%03u seconds\n",
1031 					 TC_RESULT_TO_STR(tc_result), test->test_suite_name,
1032 					 test->name, test->stats->duration_worst_ms / 1000,
1033 					 test->stats->duration_worst_ms % 1000);
1034 		}
1035 
1036 		if (flush_frequency % 3 == 0) {
1037 			/** Reduce the flush frequency a bit to speed up the output */
1038 			flush_log();
1039 		}
1040 		flush_frequency++;
1041 	}
1042 	TC_SUMMARY_PRINT("\n");
1043 	flush_log();
1044 }
1045 
1046 static void __ztest_show_suite_summary(void)
1047 {
1048 	if (IS_ENABLED(CONFIG_ZTEST_SUMMARY) == 0) {
1049 		return;
1050 	}
1051 	/* Flush the log a lot to ensure that no summary content
1052 	 * is dropped if it goes through the logging subsystem.
1053 	 */
1054 	flush_log();
1055 	TC_SUMMARY_PRINT("\n------ TESTSUITE SUMMARY START ------\n\n");
1056 	flush_log();
1057 	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1058 	     ptr < _ztest_suite_node_list_end; ++ptr) {
1059 
1060 		__ztest_show_suite_summary_oneline(ptr);
1061 		__ztest_show_suite_summary_verbose(ptr);
1062 	}
1063 	TC_SUMMARY_PRINT("------ TESTSUITE SUMMARY END ------\n\n");
1064 	flush_log();
1065 }
1066 
1067 static int __ztest_run_test_suite(struct ztest_suite_node *ptr, const void *state, bool shuffle,
1068 				  int suite_iter, int case_iter, void *param)
1069 {
1070 	struct ztest_suite_stats *stats = ptr->stats;
1071 	int count = 0;
1072 
1073 	for (int i = 0; i < suite_iter; i++) {
1074 		if (ztest_api.should_suite_run(state, ptr)) {
1075 			int fail = z_ztest_run_test_suite_ptr(ptr, shuffle,
1076 							suite_iter, case_iter, param);
1077 
1078 			count++;
1079 			stats->run_count++;
1080 			stats->fail_count += (fail != 0) ? 1 : 0;
1081 		} else {
1082 			stats->skip_count++;
1083 		}
1084 	}
1085 
1086 	return count;
1087 }
1088 
1089 int z_impl_ztest_run_test_suites(const void *state, bool shuffle, int suite_iter, int case_iter)
1090 {
1091 	int count = 0;
1092 	void *param = NULL;
1093 	if (test_status == ZTEST_STATUS_CRITICAL_ERROR) {
1094 		return count;
1095 	}
1096 
1097 #ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
1098 	gcov_reset_all_counts();
1099 #endif
1100 
1101 #ifdef CONFIG_ZTEST_SHUFFLE
1102 	struct ztest_suite_node *suites_to_run[ZTEST_SUITE_COUNT];
1103 
1104 	memset(suites_to_run, 0, ZTEST_SUITE_COUNT * sizeof(struct ztest_suite_node *));
1105 	z_ztest_shuffle(shuffle, (void **)suites_to_run, (intptr_t)_ztest_suite_node_list_start,
1106 			ZTEST_SUITE_COUNT, sizeof(struct ztest_suite_node));
1107 	for (size_t i = 0; i < ZTEST_SUITE_COUNT; ++i) {
1108 		__ztest_init_unit_test_result_for_suite(suites_to_run[i]);
1109 	}
1110 	for (size_t i = 0; i < ZTEST_SUITE_COUNT; ++i) {
1111 		count += __ztest_run_test_suite(suites_to_run[i], state, shuffle, suite_iter,
1112 						case_iter, param);
1113 		/* Stop running tests if we have a critical error or if we have a failure and
1114 		 * FAIL_FAST was set
1115 		 */
1116 		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
1117 		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
1118 			break;
1119 		}
1120 	}
1121 #else
1122 	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1123 	     ptr < _ztest_suite_node_list_end; ++ptr) {
1124 		__ztest_init_unit_test_result_for_suite(ptr);
1125 		count += __ztest_run_test_suite(ptr, state, shuffle, suite_iter, case_iter, param);
1126 		/* Stop running tests if we have a critical error or if we have a failure and
1127 		 * FAIL_FAST was set
1128 		 */
1129 		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
1130 		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
1131 			break;
1132 		}
1133 	}
1134 #endif
1135 
1136 	return count;
1137 }
1138 
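/*
 * The result/phase setters are exposed as system calls so they can be invoked
 * from user-mode test threads as well as from kernel threads.
 */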
1139 void z_impl___ztest_set_test_result(enum ztest_result new_result)
1140 {
1141 	test_result = new_result;
1142 }
1143 
1144 void z_impl___ztest_set_test_phase(enum ztest_phase new_phase)
1145 {
1146 	cur_phase = new_phase;
1147 }
1148 
1149 #ifdef CONFIG_USERSPACE
1150 void z_vrfy___ztest_set_test_result(enum ztest_result new_result)
1151 {
1152 	z_impl___ztest_set_test_result(new_result);
1153 }
1154 #include <zephyr/syscalls/__ztest_set_test_result_mrsh.c>
1155 
1156 void z_vrfy___ztest_set_test_phase(enum ztest_phase new_phase)
1157 {
1158 	z_impl___ztest_set_test_phase(new_phase);
1159 }
1160 #include <zephyr/syscalls/__ztest_set_test_phase_mrsh.c>
1161 #endif /* CONFIG_USERSPACE */
1162 
1163 void ztest_verify_all_test_suites_ran(void)
1164 {
1165 	bool all_tests_run = true;
1166 	struct ztest_suite_node *suite;
1167 	struct ztest_unit_test *test;
1168 
1169 	if (IS_ENABLED(CONFIG_ZTEST_VERIFY_RUN_ALL)) {
1170 		for (suite = _ztest_suite_node_list_start; suite < _ztest_suite_node_list_end;
1171 		     ++suite) {
1172 			if (suite->stats->run_count < 1) {
1173 				PRINT_DATA("ERROR: Test suite '%s' did not run.\n", suite->name);
1174 				all_tests_run = false;
1175 			}
1176 		}
1177 
1178 		for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1179 			suite = ztest_find_test_suite(test->test_suite_name);
1180 			if (suite == NULL) {
1181 				PRINT_DATA("ERROR: Test '%s' assigned to test suite '%s' which "
1182 					   "doesn't "
1183 					   "exist\n",
1184 					   test->name, test->test_suite_name);
1185 				all_tests_run = false;
1186 			}
1187 		}
1188 
1189 		if (!all_tests_run) {
1190 			test_status = ZTEST_STATUS_HAS_FAILURE;
1191 		}
1192 	}
1193 
1194 	for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1195 		if (test->stats->fail_count + test->stats->pass_count + test->stats->skip_count !=
1196 		    test->stats->run_count) {
1197 			PRINT_DATA("Bad stats for %s.%s\n", test->test_suite_name, test->name);
1198 			test_status = 1;
1199 		}
1200 	}
1201 }
1202 
1203 void ztest_run_all(const void *state, bool shuffle, int suite_iter, int case_iter)
1204 {
1205 	ztest_api.run_all(state, shuffle, suite_iter, case_iter);
1206 }
1207 
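/*
 * Weak so that an application can provide its own test_main(), e.g. to run a
 * subset of suites or to pass custom state to ztest_run_all().
 */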
1208 void __weak test_main(void)
1209 {
1210 #if CONFIG_ZTEST_SHUFFLE
1211 	ztest_run_all(NULL, true, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1212 #else
1213 	ztest_run_all(NULL, false, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1214 #endif
1215 	ztest_verify_all_test_suites_ran();
1216 }
1217 
1218 #ifndef KERNEL
1219 int main(void)
1220 {
1221 	z_init_mock();
1222 	test_main();
1223 	end_report();
1224 #ifdef CONFIG_ZTEST_NO_YIELD
1225 	/*
1226 	 * Rather than yielding to idle thread, keep the part awake so debugger can
1227 	 * still access it, since some SOCs cannot be debugged in low power states.
1228 	 */
1229 	uint32_t key = irq_lock();
1230 
1231 	while (1) {
1232 		; /* Spin */
1233 	}
1234 	irq_unlock(key);
1235 #endif
1236 	return test_status;
1237 }
1238 #else
1239 
1240 /* Shell */
1241 
1242 #ifdef CONFIG_ZTEST_SHELL
1243 static int cmd_list_suites(const struct shell *sh, size_t argc, char **argv)
1244 {
1245 	struct ztest_suite_node *suite;
1246 
1247 	for (suite = _ztest_suite_node_list_start; suite < _ztest_suite_node_list_end; ++suite) {
1248 		shell_print(sh, "%s", suite->name);
1249 	}
1250 	return 0;
1251 }
1252 
1253 static int cmd_list_cases(const struct shell *sh, size_t argc, char **argv)
1254 {
1255 	struct ztest_suite_node *ptr;
1256 	struct ztest_unit_test *test = NULL;
1257 	int test_count = 0;
1258 
1259 	for (ptr = _ztest_suite_node_list_start; ptr < _ztest_suite_node_list_end; ++ptr) {
1260 		test = NULL;
1261 		while ((test = z_ztest_get_next_test(ptr->name, test)) != NULL) {
1262 			shell_print(sh, "%s::%s", test->test_suite_name, test->name);
1263 			test_count++;
1264 		}
1265 	}
1266 	return 0;
1267 }
1268 extern void ztest_set_test_args(char *argv);
1269 extern void ztest_reset_test_args(void);
1270 
1271 static int cmd_runall(const struct shell *sh, size_t argc, char **argv)
1272 {
1273 	ztest_reset_test_args();
1274 	ztest_run_all(NULL, false, 1, 1);
1275 	end_report();
1276 	return 0;
1277 }
1278 
1279 #ifdef CONFIG_ZTEST_SHUFFLE
1280 static int cmd_shuffle(const struct shell *sh, size_t argc, char **argv)
1281 {
1282 
1283 	struct getopt_state *state;
1284 	int opt;
1285 	static struct option long_options[] = {{"suite_iter", required_argument, 0, 's'},
1286 					       {"case_iter", required_argument, 0, 'c'},
1287 					       {0, 0, 0, 0}};
1288 	int opt_index = 0;
1289 	int val;
1290 	int opt_num = 0;
1291 
1292 	int suite_iter = 1;
1293 	int case_iter = 1;
1294 
1295 	while ((opt = getopt_long(argc, argv, "s:c:", long_options, &opt_index)) != -1) {
1296 		state = getopt_state_get();
1297 		switch (opt) {
1298 		case 's':
1299 			val = atoi(state->optarg);
1300 			if (val < 1) {
1301 				shell_error(sh, "Invalid number of suite iterations");
1302 				return -ENOEXEC;
1303 			}
1304 			suite_iter = val;
1305 			opt_num++;
1306 			break;
1307 		case 'c':
1308 			val = atoi(state->optarg);
1309 			if (val < 1) {
1310 				shell_error(sh, "Invalid number of case iterations");
1311 				return -ENOEXEC;
1312 			}
1313 			case_iter = val;
1314 			opt_num++;
1315 			break;
1316 		default:
1317 			shell_error(sh, "Invalid option or option usage: %s",
1318 				    argv[opt_index + 1]);
1319 			return -ENOEXEC;
1320 		}
1321 	}
1322 	ztest_reset_test_args();
1323 	ztest_run_all(NULL, true, suite_iter, case_iter);
1324 	end_report();
1325 	return 0;
1326 }
1327 #endif
1328 
1329 static int cmd_run_suite(const struct shell *sh, size_t argc, char **argv)
1330 {
1331 	struct getopt_state *state;
1332 	int opt;
1333 	static struct option long_options[] = {{"repeat_iter", required_argument, NULL, 'r'},
1334 		{NULL, 0, NULL, 0}};
1335 	int opt_index = 0;
1336 	int val;
1337 	int opt_num = 0;
1338 	void *param = NULL;
1339 	int repeat_iter = 1;
1340 
1341 	while ((opt = getopt_long(argc, argv, "r:p:", long_options, &opt_index)) != -1) {
1342 		state = getopt_state_get();
1343 		switch (opt) {
1344 		case 'r':
1345 			val = atoi(state->optarg);
1346 			if (val < 1) {
1347 				shell_fprintf(sh, SHELL_ERROR,
1348 					"Invalid number of suite iterations\n");
1349 				return -ENOEXEC;
1350 			}
1351 			repeat_iter = val;
1352 			opt_num++;
1353 			break;
1354 		case 'p':
1355 			param = state->optarg;
1356 			opt_num++;
1357 			break;
1358 		default:
1359 			shell_fprintf(sh, SHELL_ERROR,
1360 				"Invalid option or option usage: %s\n", argv[opt_index + 1]);
1361 			return -ENOEXEC;
1362 		}
1363 	}
1364 	int count = 0;
1365 	bool shuffle = false;
1366 	const char *shell_command = argv[0];
1367 
1368 	/*
1369 	 * This if statement determines which argv contains the test name.
1370 	 * If the optional argument is used, the test name is in the third
1371 	 * argv instead of the first.
1372 	 */
1373 	if (opt_num == 1) {
1374 		ztest_set_test_args(argv[3]);
1375 	} else {
1376 		ztest_set_test_args(argv[1]);
1377 	}
1378 
1379 	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1380 	     ptr < _ztest_suite_node_list_end; ++ptr) {
1381 		__ztest_init_unit_test_result_for_suite(ptr);
1382 		if (strcmp(shell_command, "run-testcase") == 0) {
1383 			count += __ztest_run_test_suite(ptr, NULL, shuffle, 1, repeat_iter, param);
1384 		} else if (strcmp(shell_command, "run-testsuite") == 0) {
1385 			count += __ztest_run_test_suite(ptr, NULL, shuffle, repeat_iter, 1, NULL);
1386 		}
1387 		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
1388 		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
1389 			break;
1390 		}
1391 	}
1392 	return 0;
1393 }
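/*
 * Illustrative only: these handlers are reached from the shell as, e.g.
 *     ztest run-all
 *     ztest run-testsuite <suite_name> -r 2
 * The suite/test selector argument is handed to ztest_set_test_args()
 * unmodified.
 */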
1394 
1395 static void testsuite_list_get(size_t idx, struct shell_static_entry *entry);
1396 
1397 SHELL_DYNAMIC_CMD_CREATE(testsuite_names, testsuite_list_get);
1398 
1399 static size_t testsuite_get_all_static(struct ztest_suite_node const **suites)
1400 {
1401 	*suites = _ztest_suite_node_list_start;
1402 	return _ztest_suite_node_list_end - _ztest_suite_node_list_start;
1403 }
1404 
1405 static const struct ztest_suite_node *suite_lookup(size_t idx, const char *prefix)
1406 {
1407 	size_t match_idx = 0;
1408 	const struct ztest_suite_node *suite;
1409 	size_t len = testsuite_get_all_static(&suite);
1410 	const struct ztest_suite_node *suite_end = suite + len;
1411 
1412 	while (suite < suite_end) {
1413 		if ((suite->name != NULL) && (strlen(suite->name) != 0) &&
1414 		    ((prefix == NULL) || (strncmp(prefix, suite->name, strlen(prefix)) == 0))) {
1415 			if (match_idx == idx) {
1416 				return suite;
1417 			}
1418 			++match_idx;
1419 		}
1420 		++suite;
1421 	}
1422 
1423 	return NULL;
1424 }
1425 
1426 static void testsuite_list_get(size_t idx, struct shell_static_entry *entry)
1427 {
1428 	const struct ztest_suite_node *suite = suite_lookup(idx, "");
1429 
1430 	entry->syntax = (suite != NULL) ? suite->name : NULL;
1431 	entry->handler = NULL;
1432 	entry->help = NULL;
1433 	entry->subcmd = NULL;
1434 }
1435 
1436 /* clang-format off */
1437 	SHELL_STATIC_SUBCMD_SET_CREATE(
1438 		sub_ztest_cmds,
1439 		SHELL_CMD_ARG(run-all, NULL, "Run all tests", cmd_runall, 0, 0),
1440 #ifdef CONFIG_ZTEST_SHUFFLE
1441 		SHELL_COND_CMD_ARG(CONFIG_ZTEST_SHUFFLE, shuffle, NULL,
1442 			"Shuffle tests", cmd_shuffle, 0, 2),
1443 #endif
1444 		SHELL_CMD_ARG(list-testsuites, NULL,
1445 			"List all test suites", cmd_list_suites, 0, 0),
1446 		SHELL_CMD_ARG(list-testcases, NULL,
1447 			"List all test cases", cmd_list_cases, 0, 0),
1448 		SHELL_CMD_ARG(run-testsuite, &testsuite_names,
1449 			"Run test suite", cmd_run_suite, 2, 2),
1450 		SHELL_CMD_ARG(run-testcase, NULL, "Run testcase", cmd_run_suite, 2, 2),
1451 		SHELL_SUBCMD_SET_END /* Array terminated. */
1452 	);
1453 /* clang-format on */
1454 
1455 SHELL_CMD_REGISTER(ztest, &sub_ztest_cmds, "Ztest commands", NULL);
1456 #endif /* CONFIG_ZTEST_SHELL */
1457 
1458 int main(void)
1459 {
1460 #ifdef CONFIG_USERSPACE
1461 	/* Partition containing globals tagged with ZTEST_DMEM and ZTEST_BMEM
1462 	 * macros. Any variables that user code may reference need to be
1463 	 * placed in this partition if no other memory domain configuration
1464 	 * is made.
1465 	 */
1466 	k_mem_domain_add_partition(&k_mem_domain_default, &ztest_mem_partition);
1467 #ifdef Z_MALLOC_PARTITION_EXISTS
1468 	/* Allow access to malloc() memory */
1469 	k_mem_domain_add_partition(&k_mem_domain_default, &z_malloc_partition);
1470 #endif
1471 #endif /* CONFIG_USERSPACE */
1472 
1473 	z_init_mock();
1474 #ifndef CONFIG_ZTEST_SHELL
1475 	test_main();
1476 	end_report();
1477 	flush_log();
1478 	LOG_PANIC();
1479 	if (IS_ENABLED(CONFIG_ZTEST_RETEST_IF_PASSED)) {
1480 		static __noinit struct {
1481 			uint32_t magic;
1482 			uint32_t boots;
1483 		} state;
1484 		const uint32_t magic = 0x152ac523;
1485 
1486 		if (state.magic != magic) {
1487 			state.magic = magic;
1488 			state.boots = 0;
1489 		}
1490 		state.boots += 1;
1491 		if (test_status == 0) {
1492 			PRINT_DATA("Reset board #%u to test again\n", state.boots);
1493 			k_msleep(10);
1494 			sys_reboot(SYS_REBOOT_COLD);
1495 		} else {
1496 			PRINT_DATA("Failed after %u attempts\n", state.boots);
1497 			state.boots = 0;
1498 		}
1499 	}
1500 #ifdef CONFIG_ZTEST_NO_YIELD
1501 	/*
1502 	 * Rather than yielding to idle thread, keep the part awake so debugger can
1503 	 * still access it, since some SOCs cannot be debugged in low power states.
1504 	 */
1505 	uint32_t key = irq_lock();
1506 
1507 	while (1) {
1508 		; /* Spin */
1509 	}
1510 	irq_unlock(key);
1511 #endif /* CONFIG_ZTEST_NO_YIELD */
1512 #endif /* CONFIG_ZTEST_SHELL */
1513 	return 0;
1514 }
1515 #endif
1516