1 /*
2 * Copyright (c) 2016 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stdlib.h>
8 #include <zephyr/ztest.h>
9
10 #include <zephyr/app_memory/app_memdomain.h>
11 #ifdef CONFIG_USERSPACE
12 #include <zephyr/sys/libc-hooks.h>
13 #endif
14 #include <zephyr/logging/log_ctrl.h>
15 #include <zephyr/sys/reboot.h>
16
17 #include <zephyr/llext/symbol.h>
18
19 #include <zephyr/sys/barrier.h>
20
21 #ifdef KERNEL
22 static struct k_thread ztest_thread;
23 #endif
24 static bool failed_expectation;
25
26 #ifdef CONFIG_ZTEST_SHELL
27 #include <zephyr/shell/shell.h>
28 #endif
29
30 #ifdef CONFIG_ZTEST_SHUFFLE
31 #include <time.h>
32 #include <zephyr/random/random.h>
33 #ifndef CONFIG_ZTEST_REPEAT
34 #define NUM_ITER_PER_SUITE CONFIG_ZTEST_SHUFFLE_SUITE_REPEAT_COUNT
35 #define NUM_ITER_PER_TEST CONFIG_ZTEST_SHUFFLE_TEST_REPEAT_COUNT
36 #endif
37 #endif /* CONFIG_ZTEST_SHUFFLE */
38
39 #ifdef CONFIG_ZTEST_REPEAT
40 #define NUM_ITER_PER_SUITE CONFIG_ZTEST_SUITE_REPEAT_COUNT
41 #define NUM_ITER_PER_TEST CONFIG_ZTEST_TEST_REPEAT_COUNT
42 #else
43 #ifndef CONFIG_ZTEST_SHUFFLE
44 #define NUM_ITER_PER_SUITE 1
45 #define NUM_ITER_PER_TEST 1
46 #endif
47 #endif
48
49 #ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
50 #include <coverage.h>
51 #endif
52
53 /* ZTEST_DMEM and ZTEST_BMEM are used for the application shared memory test */
54
55 /**
56 * @brief The current status of the test binary
57 */
58 enum ztest_status {
59 ZTEST_STATUS_OK,
60 ZTEST_STATUS_HAS_FAILURE,
61 ZTEST_STATUS_CRITICAL_ERROR
62 };
63
64 /**
65 * @brief Tracks the current phase that ztest is operating in.
66 */
67 ZTEST_DMEM enum ztest_phase cur_phase = TEST_PHASE_FRAMEWORK;
68
69 static ZTEST_BMEM enum ztest_status test_status = ZTEST_STATUS_OK;
70
71 extern ZTEST_DMEM const struct ztest_arch_api ztest_api;
72
73 static void __ztest_show_suite_summary(void);
74
end_report(void)75 static void end_report(void)
76 {
77 __ztest_show_suite_summary();
78 if (test_status) {
79 TC_END_REPORT(TC_FAIL);
80 } else {
81 TC_END_REPORT(TC_PASS);
82 }
83 }
84
cleanup_test(struct ztest_unit_test * test)85 static int cleanup_test(struct ztest_unit_test *test)
86 {
87 int ret = TC_PASS;
88 int mock_status;
89
90 mock_status = z_cleanup_mock();
91
92 #ifdef KERNEL
93 /* we need to remove the ztest_thread information from the timeout_q.
94 * Because we reuse the same k_thread structure this would
95 * causes some problems.
96 */
97 if (IS_ENABLED(CONFIG_MULTITHREADING)) {
98 k_thread_abort(&ztest_thread);
99 }
100 #endif
101
102 if (!ret && mock_status == 1) {
103 PRINT_DATA("Test %s failed: Unused mock parameter values\n", test->name);
104 ret = TC_FAIL;
105 } else if (!ret && mock_status == 2) {
106 PRINT_DATA("Test %s failed: Unused mock return values\n", test->name);
107 ret = TC_FAIL;
108 } else {
109 ;
110 }
111
112 return ret;
113 }
114
115 #ifdef KERNEL
116
117 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
118 #define MAX_NUM_CPUHOLD (CONFIG_MP_MAX_NUM_CPUS - 1)
119 #define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
120
121 struct cpuhold_pool_item {
122 struct k_thread thread;
123 bool used;
124 };
125
126 static struct cpuhold_pool_item cpuhold_pool_items[MAX_NUM_CPUHOLD + 1];
127
128 K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD + 1, CPUHOLD_STACK_SZ);
129
130 static struct k_sem cpuhold_sem;
131
132 volatile int cpuhold_active;
133 volatile bool cpuhold_spawned;
134
find_unused_thread(void)135 static int find_unused_thread(void)
136 {
137 for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
138 if (!cpuhold_pool_items[i].used) {
139 return i;
140 }
141 }
142
143 return -1;
144 }
145
mark_thread_unused(struct k_thread * thread)146 static void mark_thread_unused(struct k_thread *thread)
147 {
148 for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
149 if (&cpuhold_pool_items[i].thread == thread) {
150 cpuhold_pool_items[i].used = false;
151 }
152 }
153 }
154
/* Spin, with local interrupts locked, until @thread has been switched
 * out on its CPU — observed here as its switch_handle becoming
 * non-NULL. NOTE(review): assumes a non-NULL switch_handle means the
 * thread's context save is complete — confirm against the arch layer.
 */
static inline void wait_for_thread_to_switch_out(struct k_thread *thread)
{
	unsigned int key = arch_irq_lock();
	/* volatile view of switch_handle so the polling load below is not
	 * hoisted out of the loop by the compiler.
	 */
	volatile void **shp = (void *)&thread->switch_handle;

	while (*shp == NULL) {
		arch_spin_relax();
	}
	/* Read barrier: don't allow any subsequent loads in the
	 * calling code to reorder before we saw switch_handle go
	 * non-null.
	 */
	barrier_dmem_fence_full();

	arch_irq_unlock(key);
}
171
/* "Holds" a CPU for use with the "1cpu" test cases. Note that we
 * can't use tools like the cpumask feature because we have tests that
 * may need to control that configuration themselves. We do this at
 * the lowest level, but locking interrupts directly and spinning.
 *
 * arg1: the spawning thread (NULL when started by z_test_1cpu_start),
 * arg2: numeric index used only for the thread name, arg3: unused.
 */
static void cpu_hold(void *arg1, void *arg2, void *arg3)
{
	struct k_thread *thread = arg1;
	unsigned int idx = (unsigned int)(uintptr_t)arg2;
	char tname[CONFIG_THREAD_MAX_NAME_LEN];

	ARG_UNUSED(arg3);

	/* If we were scheduled onto CPU0 (the CPU running the test), we
	 * must not hold it: spawn a replacement holder thread, hand it
	 * our identity, wait until it is running, then bow out.
	 */
	if (arch_proc_id() == 0) {
		int i;

		i = find_unused_thread();

		__ASSERT_NO_MSG(i != -1);

		cpuhold_spawned = false;

		cpuhold_pool_items[i].used = true;
		k_thread_create(&cpuhold_pool_items[i].thread, cpuhold_stacks[i], CPUHOLD_STACK_SZ,
				cpu_hold, k_current_get(), (void *)(uintptr_t)idx, NULL,
				K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);

		/*
		 * Busy-wait until we know the spawned thread is running to
		 * ensure it does not spawn on CPU0.
		 */

		while (!cpuhold_spawned) {
			k_busy_wait(1000);
		}

		return;
	}

	/* We replaced a holder that landed on CPU0: wait for it to leave
	 * the CPU, then recycle its pool slot.
	 */
	if (thread != NULL) {
		cpuhold_spawned = true;

		/* Busywait until a new thread is scheduled in on CPU0 */

		wait_for_thread_to_switch_out(thread);

		mark_thread_unused(thread);
	}

	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", idx);
		k_thread_name_set(k_current_get(), tname);
	}

	uint32_t dt, start_ms = k_uptime_get_32();
	unsigned int key = arch_irq_lock();

	/* Signal z_impl_z_test_1cpu_start() that this CPU is now held. */
	k_sem_give(&cpuhold_sem);

#if (defined(CONFIG_ARM64) || defined(CONFIG_RISCV)) && defined(CONFIG_FPU_SHARING)
	/*
	 * We'll be spinning with IRQs disabled. The flush-your-FPU request
	 * IPI will never be serviced during that time. Therefore we flush
	 * the FPU preemptively here to prevent any other CPU waiting after
	 * this CPU forever and deadlock the system.
	 */
	k_float_disable(_current_cpu->arch.fpu_owner);
#endif

	/* Spin until z_impl_z_test_1cpu_stop() clears cpuhold_active. */
	while (cpuhold_active) {
		k_busy_wait(1000);
	}

	/* Holding the CPU via spinning is expensive, and abusing this
	 * for long-running test cases tends to overload the CI system
	 * (qemu runs separate CPUs in different threads, but the CI
	 * logic views it as one "job") and cause other test failures.
	 */
	dt = k_uptime_get_32() - start_ms;
	zassert_true(dt < CONFIG_ZTEST_CPU_HOLD_TIME_MS, "1cpu test took too long (%d ms)", dt);
	arch_irq_unlock(key);
}
254 #endif /* CONFIG_SMP && (CONFIG_MP_MAX_NUM_CPUS > 1) */
255
/* Implementation of z_test_1cpu_start(): park every secondary CPU in a
 * busy-wait holder thread so the calling test effectively runs on a
 * single CPU. No-op on uniprocessor builds.
 */
void z_impl_z_test_1cpu_start(void)
{
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	unsigned int num_cpus = arch_num_cpus();
	int j;

	cpuhold_active = 1;

	/* Given once by each holder thread once it is spinning with IRQs
	 * locked; the count bound (999) is effectively "unlimited".
	 */
	k_sem_init(&cpuhold_sem, 0, 999);

	/* Spawn N-1 threads to "hold" the other CPUs, waiting for
	 * each to signal us that it's locked and spinning.
	 */
	for (int i = 0; i < num_cpus - 1; i++) {
		j = find_unused_thread();

		__ASSERT_NO_MSG(j != -1);

		cpuhold_pool_items[j].used = true;
		k_thread_create(&cpuhold_pool_items[j].thread, cpuhold_stacks[j], CPUHOLD_STACK_SZ,
				cpu_hold, NULL, (void *)(uintptr_t)i, NULL, K_HIGHEST_THREAD_PRIO,
				0, K_NO_WAIT);
		k_sem_take(&cpuhold_sem, K_FOREVER);
	}
#endif
}
282
/* Implementation of z_test_1cpu_stop(): release the spinning holder
 * threads and return their pool slots. No-op on uniprocessor builds.
 */
void z_impl_z_test_1cpu_stop(void)
{
#if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	cpuhold_active = 0;

	/* Reap every pool thread still marked as in use. */
	for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
		struct cpuhold_pool_item *item = &cpuhold_pool_items[i];

		if (!item->used) {
			continue;
		}
		k_thread_abort(&item->thread);
		item->used = false;
	}
#endif
}
296
297 #ifdef CONFIG_USERSPACE
/* Syscall verification handler: no arguments to validate, so forward
 * straight to the kernel-mode implementation.
 */
void z_vrfy_z_test_1cpu_start(void)
{
	z_impl_z_test_1cpu_start();
}
302 #include <zephyr/syscalls/z_test_1cpu_start_mrsh.c>
303
/* Syscall verification handler: no arguments to validate, so forward
 * straight to the kernel-mode implementation.
 */
void z_vrfy_z_test_1cpu_stop(void)
{
	z_impl_z_test_1cpu_stop();
}
308 #include <zephyr/syscalls/z_test_1cpu_stop_mrsh.c>
309 #endif /* CONFIG_USERSPACE */
310 #endif
311
run_test_rules(bool is_before,struct ztest_unit_test * test,void * data)312 __maybe_unused static void run_test_rules(bool is_before, struct ztest_unit_test *test, void *data)
313 {
314 for (struct ztest_test_rule *rule = _ztest_test_rule_list_start;
315 rule < _ztest_test_rule_list_end; ++rule) {
316 if (is_before && rule->before_each) {
317 rule->before_each(test, data);
318 } else if (!is_before && rule->after_each) {
319 rule->after_each(test, data);
320 }
321 }
322 }
323
/* Enter the TEST phase and call the unit test body itself.
 *
 * @param suite suite the test belongs to (not referenced here)
 * @param test  unit test whose function is invoked
 * @param data  fixture pointer passed through to the test function
 */
static void run_test_functions(struct ztest_suite_node *suite, struct ztest_unit_test *test,
			       void *data)
{
	__ztest_set_test_phase(TEST_PHASE_TEST);
	test->test(data);
}
330
331 COND_CODE_1(KERNEL, (ZTEST_BMEM), ()) static enum ztest_result test_result;
332
get_final_test_result(const struct ztest_unit_test * test,int ret)333 static int get_final_test_result(const struct ztest_unit_test *test, int ret)
334 {
335 enum ztest_expected_result expected_result = -1;
336
337 for (struct ztest_expected_result_entry *expectation =
338 _ztest_expected_result_entry_list_start;
339 expectation < _ztest_expected_result_entry_list_end; ++expectation) {
340 if (strcmp(expectation->test_name, test->name) == 0 &&
341 strcmp(expectation->test_suite_name, test->test_suite_name) == 0) {
342 expected_result = expectation->expected_result;
343 break;
344 }
345 }
346
347 if (expected_result == ZTEST_EXPECTED_RESULT_FAIL) {
348 /* Expected a failure:
349 * - If we got a failure, return TC_PASS
350 * - Otherwise force a failure
351 */
352 return (ret == TC_FAIL) ? TC_PASS : TC_FAIL;
353 }
354 if (expected_result == ZTEST_EXPECTED_RESULT_SKIP) {
355 /* Expected a skip:
356 * - If we got a skip, return TC_PASS
357 * - Otherwise force a failure
358 */
359 return (ret == TC_SKIP) ? TC_PASS : TC_FAIL;
360 }
361 /* No expectation was made, no change is needed. */
362 return ret;
363 }
364
365 /**
366 * @brief Get a friendly name string for a given test phrase.
367 *
368 * @param phase an enum ztest_phase value describing the desired test phase
369 * @returns a string name for `phase`
370 */
get_friendly_phase_name(enum ztest_phase phase)371 static inline const char *get_friendly_phase_name(enum ztest_phase phase)
372 {
373 switch (phase) {
374 case TEST_PHASE_SETUP:
375 return "setup";
376 case TEST_PHASE_BEFORE:
377 return "before";
378 case TEST_PHASE_TEST:
379 return "test";
380 case TEST_PHASE_AFTER:
381 return "after";
382 case TEST_PHASE_TEARDOWN:
383 return "teardown";
384 case TEST_PHASE_FRAMEWORK:
385 return "framework";
386 default:
387 return "(unknown)";
388 }
389 }
390
391 static bool current_test_failed_assumption;
/* Skip the current test because an assumption failed. When
 * CONFIG_ZTEST_FAIL_ON_ASSUME is enabled the skip is additionally
 * recorded so it can later be reported as a failure.
 */
void ztest_skip_failed_assumption(void)
{
	if (IS_ENABLED(CONFIG_ZTEST_FAIL_ON_ASSUME)) {
		current_test_failed_assumption = true;
	}
	ztest_test_skip();
}
399
400 #ifndef KERNEL
401
402 /* Static code analysis tool can raise a violation that the standard header
403 * <setjmp.h> shall not be used.
404 *
 * setjmp is used in test code, not in runtime code, so this is acceptable.
406 * It is a deliberate deviation.
407 */
408 #include <setjmp.h> /* parasoft-suppress MISRAC2012-RULE_21_4-a MISRAC2012-RULE_21_4-b*/
409 #include <signal.h>
410 #include <stdlib.h>
411 #include <string.h>
412
413 #define FAIL_FAST 0
414
415 static jmp_buf test_fail;
416 static jmp_buf test_pass;
417 static jmp_buf test_skip;
418 static jmp_buf stack_fail;
419 static jmp_buf test_suite_fail;
420
/* Abort the current test with a failure (POSIX/native build). Exits
 * via longjmp() to the target appropriate for the current phase; never
 * returns for any of the listed phases.
 */
void ztest_test_fail(void)
{
	switch (cur_phase) {
	case TEST_PHASE_SETUP:
		/* Suite setup failed: abort the whole suite. */
		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
		longjmp(test_suite_fail, 1);
	case TEST_PHASE_BEFORE:
	case TEST_PHASE_TEST:
		/* Failure in the test body or its before hook: fail the test. */
		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
		longjmp(test_fail, 1);
	case TEST_PHASE_AFTER:
	case TEST_PHASE_TEARDOWN:
	case TEST_PHASE_FRAMEWORK:
		/* Too late to attribute the failure to a test: bail out. */
		PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
			   get_friendly_phase_name(cur_phase));
		longjmp(stack_fail, 1);
	}
}
EXPORT_SYMBOL(ztest_test_fail);
440
ztest_test_pass(void)441 void ztest_test_pass(void)
442 {
443 if (cur_phase == TEST_PHASE_TEST) {
444 longjmp(test_pass, 1);
445 }
446 PRINT_DATA(" ERROR: cannot pass in test phase '%s()', bailing\n",
447 get_friendly_phase_name(cur_phase));
448 longjmp(stack_fail, 1);
449 }
450 EXPORT_SYMBOL(ztest_test_pass);
451
ztest_test_skip(void)452 void ztest_test_skip(void)
453 {
454 switch (cur_phase) {
455 case TEST_PHASE_SETUP:
456 case TEST_PHASE_BEFORE:
457 case TEST_PHASE_TEST:
458 longjmp(test_skip, 1);
459 default:
460 PRINT_DATA(" ERROR: cannot skip in test phase '%s()', bailing\n",
461 get_friendly_phase_name(cur_phase));
462 longjmp(stack_fail, 1);
463 }
464 }
465 EXPORT_SYMBOL(ztest_test_skip);
466
ztest_test_expect_fail(void)467 void ztest_test_expect_fail(void)
468 {
469 failed_expectation = true;
470
471 switch (cur_phase) {
472 case TEST_PHASE_SETUP:
473 PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
474 break;
475 case TEST_PHASE_BEFORE:
476 case TEST_PHASE_TEST:
477 PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
478 break;
479 case TEST_PHASE_AFTER:
480 case TEST_PHASE_TEARDOWN:
481 case TEST_PHASE_FRAMEWORK:
482 PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
483 get_friendly_phase_name(cur_phase));
484 longjmp(stack_fail, 1);
485 }
486 }
487
/* Run one unit test in-process (POSIX/native build), using the
 * setjmp/longjmp targets as the fail/pass/skip exits taken by
 * ztest_test_fail/pass/skip().
 *
 * @param suite suite the test belongs to (for before/after hooks)
 * @param test  unit test to run
 * @param data  fixture pointer from the suite setup (or override param)
 * @return final TC_PASS, TC_FAIL or TC_SKIP after expectations
 */
static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
{
	int ret = TC_PASS;

	TC_START(test->name);
	__ztest_set_test_phase(TEST_PHASE_BEFORE);

	/* Suite setup already failed: fail without running the body. */
	if (test_result == ZTEST_RESULT_SUITE_FAIL) {
		ret = TC_FAIL;
		goto out;
	}

	/* Arm the longjmp targets before calling any user code. */
	if (setjmp(test_fail)) {
		ret = TC_FAIL;
		goto out;
	}

	if (setjmp(test_pass)) {
		ret = TC_PASS;
		goto out;
	}

	if (setjmp(test_skip)) {
		ret = TC_SKIP;
		goto out;
	}

	run_test_rules(/*is_before=*/true, test, data);
	if (suite->before) {
		suite->before(data);
	}
	run_test_functions(suite, test, data);
out:
	/* A failed expectation recorded via ztest_test_expect_fail()
	 * forces a TC_FAIL, which get_final_test_result() reconciles
	 * against any registered expectation below.
	 */
	if (failed_expectation) {
		failed_expectation = false;
		ret = TC_FAIL;
	}

	__ztest_set_test_phase(TEST_PHASE_AFTER);
	if (test_result != ZTEST_RESULT_SUITE_FAIL) {
		if (suite->after != NULL) {
			suite->after(data);
		}
		run_test_rules(/*is_before=*/false, test, data);
	}
	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);
	ret |= cleanup_test(test);

	ret = get_final_test_result(test, ret);
	Z_TC_END_RESULT(ret, test->name);
	/* A skip caused by a failed assumption still fails the run
	 * (1 == ZTEST_STATUS_HAS_FAILURE).
	 */
	if (ret == TC_SKIP && current_test_failed_assumption) {
		test_status = 1;
	}

	return ret;
}
544
545 #else /* KERNEL */
546
547 /* Zephyr's probably going to cause all tests to fail if one test fails, so
548 * skip the rest of tests if one of them fails
549 */
550 #ifdef CONFIG_ZTEST_FAIL_FAST
551 #define FAIL_FAST 1
552 #else
553 #define FAIL_FAST 0
554 #endif
555
556 K_THREAD_STACK_DEFINE(ztest_thread_stack, CONFIG_ZTEST_STACK_SIZE + CONFIG_TEST_EXTRA_STACK_SIZE);
557
test_finalize(void)558 static void test_finalize(void)
559 {
560 if (IS_ENABLED(CONFIG_MULTITHREADING)) {
561 k_thread_abort(&ztest_thread);
562 if (k_is_in_isr()) {
563 return;
564 }
565
566 k_thread_abort(k_current_get());
567 CODE_UNREACHABLE;
568 }
569 }
570
ztest_test_fail(void)571 void ztest_test_fail(void)
572 {
573 switch (cur_phase) {
574 case TEST_PHASE_SETUP:
575 __ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
576 break;
577 case TEST_PHASE_BEFORE:
578 case TEST_PHASE_TEST:
579 __ztest_set_test_result(ZTEST_RESULT_FAIL);
580 test_finalize();
581 break;
582 default:
583 PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
584 get_friendly_phase_name(cur_phase));
585 test_status = ZTEST_STATUS_CRITICAL_ERROR;
586 break;
587 }
588 }
589 EXPORT_SYMBOL(ztest_test_fail);
590
ztest_test_pass(void)591 void ztest_test_pass(void)
592 {
593 switch (cur_phase) {
594 case TEST_PHASE_TEST:
595 __ztest_set_test_result(ZTEST_RESULT_PASS);
596 test_finalize();
597 break;
598 default:
599 PRINT_DATA(" ERROR: cannot pass in test phase '%s()', bailing\n",
600 get_friendly_phase_name(cur_phase));
601 test_status = ZTEST_STATUS_CRITICAL_ERROR;
602 if (cur_phase == TEST_PHASE_BEFORE) {
603 test_finalize();
604 }
605 }
606 }
607 EXPORT_SYMBOL(ztest_test_pass);
608
ztest_test_skip(void)609 void ztest_test_skip(void)
610 {
611 switch (cur_phase) {
612 case TEST_PHASE_SETUP:
613 __ztest_set_test_result(ZTEST_RESULT_SUITE_SKIP);
614 break;
615 case TEST_PHASE_BEFORE:
616 case TEST_PHASE_TEST:
617 __ztest_set_test_result(ZTEST_RESULT_SKIP);
618 test_finalize();
619 break;
620 default:
621 PRINT_DATA(" ERROR: cannot skip in test phase '%s()', bailing\n",
622 get_friendly_phase_name(cur_phase));
623 test_status = ZTEST_STATUS_CRITICAL_ERROR;
624 break;
625 }
626 }
627 EXPORT_SYMBOL(ztest_test_skip);
628
/* Record that the current test expects a failure (kernel build).
 * run_test() maps this flag to TC_FAIL, which get_final_test_result()
 * then reconciles against any registered expectation.
 */
void ztest_test_expect_fail(void)
{
	failed_expectation = true;
}
633
/* Convenience before-hook for suites that want every test pinned to a
 * single CPU: just enters 1cpu mode.
 */
void ztest_simple_1cpu_before(void *data)
{
	ARG_UNUSED(data);
	z_test_1cpu_start();
}
639
/* Convenience after-hook paired with ztest_simple_1cpu_before():
 * leaves 1cpu mode.
 */
void ztest_simple_1cpu_after(void *data)
{
	ARG_UNUSED(data);
	z_test_1cpu_stop();
}
645
/* Test thread entry point: runs the rules/before hooks in kernel mode,
 * then executes the test body, possibly after dropping to user mode.
 *
 * a: struct ztest_suite_node *, b: struct ztest_unit_test *, c: fixture.
 */
static void test_cb(void *a, void *b, void *c)
{
	struct ztest_suite_node *suite = a;
	struct ztest_unit_test *test = b;
	const bool config_user_mode = FIELD_GET(K_USER, test->thread_options) != 0;

	/* First (kernel-mode) pass: run hooks, then optionally re-enter
	 * this same function in user mode for user-mode tests.
	 */
	if (!IS_ENABLED(CONFIG_USERSPACE) || !k_is_user_context()) {
		__ztest_set_test_result(ZTEST_RESULT_PENDING);
		run_test_rules(/*is_before=*/true, test, /*data=*/c);
		if (suite->before) {
			suite->before(/*data=*/c);
		}
		if (IS_ENABLED(CONFIG_USERSPACE) && config_user_mode) {
			/* NOTE(review): assumes k_thread_user_mode_enter()
			 * does not return — confirm against kernel docs.
			 */
			k_thread_user_mode_enter(test_cb, a, b, c);
		}
	}
	run_test_functions(suite, test, c);
	/* Reaching this line means the body neither failed nor skipped. */
	__ztest_set_test_result(ZTEST_RESULT_PASS);
}
665
/* Run one unit test (kernel build), normally in a dedicated thread so
 * aborts and stack usage are isolated from the runner.
 *
 * @param suite suite the test belongs to (for before/after hooks)
 * @param test  unit test to run
 * @param data  fixture pointer from the suite setup (or override param)
 * @return final TC_PASS, TC_FAIL or TC_SKIP after expectations
 */
static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
{
	int ret = TC_PASS;

#if CONFIG_ZTEST_TEST_DELAY_MS > 0
	k_busy_wait(CONFIG_ZTEST_TEST_DELAY_MS * USEC_PER_MSEC);
#endif
	TC_START(test->name);

	__ztest_set_test_phase(TEST_PHASE_BEFORE);

	/* If the suite's setup function marked us as skipped, don't bother
	 * running the tests.
	 */
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		get_start_time_cyc();
		/* Created suspended (K_FOREVER) so it can be configured
		 * before it runs; test_cb executes hooks and the body.
		 */
		k_thread_create(&ztest_thread, ztest_thread_stack,
				K_THREAD_STACK_SIZEOF(ztest_thread_stack), test_cb, suite, test,
				data, CONFIG_ZTEST_THREAD_PRIORITY, K_INHERIT_PERMS, K_FOREVER);

		k_thread_access_grant(&ztest_thread, suite, test, suite->stats);
		if (test->name != NULL) {
			k_thread_name_set(&ztest_thread, test->name);
		}
		/* Only start the thread if we're not skipping the suite */
		if (test_result != ZTEST_RESULT_SUITE_SKIP &&
		    test_result != ZTEST_RESULT_SUITE_FAIL) {
			k_thread_start(&ztest_thread);
			k_thread_join(&ztest_thread, K_FOREVER);
		}
	} else if (test_result != ZTEST_RESULT_SUITE_SKIP &&
		   test_result != ZTEST_RESULT_SUITE_FAIL) {
		/* Single-threaded build: run hooks and body inline. */
		__ztest_set_test_result(ZTEST_RESULT_PENDING);
		get_start_time_cyc();
		run_test_rules(/*is_before=*/true, test, data);
		if (suite->before) {
			suite->before(data);
		}
		run_test_functions(suite, test, data);
	}

	__ztest_set_test_phase(TEST_PHASE_AFTER);
	if (suite->after != NULL) {
		suite->after(data);
	}
	run_test_rules(/*is_before=*/false, test, data);

	/* Track the worst-case duration for the summary output. */
	get_test_duration_ms();
	if (tc_spend_time > test->stats->duration_worst_ms) {
		test->stats->duration_worst_ms = tc_spend_time;
	}

	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);

	/* Flush all logs in case deferred mode and default logging thread are used. */
	while (IS_ENABLED(CONFIG_TEST_LOGGING_FLUSH_AFTER_TEST) &&
	       IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && log_data_pending()) {
		k_msleep(100);
	}

	/* Map the result reported by the test thread onto a TC_* code. */
	if (test_result == ZTEST_RESULT_FAIL || test_result == ZTEST_RESULT_SUITE_FAIL ||
	    failed_expectation) {
		ret = TC_FAIL;
		failed_expectation = false;
	} else if (test_result == ZTEST_RESULT_SKIP || test_result == ZTEST_RESULT_SUITE_SKIP) {
		ret = TC_SKIP;
	}

	if (test_result == ZTEST_RESULT_PASS || !FAIL_FAST) {
		ret |= cleanup_test(test);
	}

	ret = get_final_test_result(test, ret);
	Z_TC_END_RESULT(ret, test->name);
	/* A skip caused by a failed assumption still fails the run
	 * (1 == ZTEST_STATUS_HAS_FAILURE).
	 */
	if (ret == TC_SKIP && current_test_failed_assumption) {
		test_status = 1;
	}

	return ret;
}
746
747 #endif /* !KERNEL */
748
ztest_find_test_suite(const char * name)749 static struct ztest_suite_node *ztest_find_test_suite(const char *name)
750 {
751 struct ztest_suite_node *node;
752
753 for (node = _ztest_suite_node_list_start; node < _ztest_suite_node_list_end; ++node) {
754 if (strcmp(name, node->name) == 0) {
755 return node;
756 }
757 }
758
759 return NULL;
760 }
761
z_ztest_get_next_test(const char * suite,struct ztest_unit_test * prev)762 struct ztest_unit_test *z_ztest_get_next_test(const char *suite, struct ztest_unit_test *prev)
763 {
764 struct ztest_unit_test *test = (prev == NULL) ? _ztest_unit_test_list_start : prev + 1;
765
766 for (; test < _ztest_unit_test_list_end; ++test) {
767 if (strcmp(suite, test->test_suite_name) == 0) {
768 return test;
769 }
770 }
771 return NULL;
772 }
773
774 #if CONFIG_ZTEST_SHUFFLE
/* Fill @dest with pointers to @num_items items laid out contiguously
 * from @start with stride @element_size, optionally shuffling them.
 *
 * @param shuffle      when false, leave the natural order untouched
 * @param dest         output array of @num_items pointers
 * @param start        address of the first item
 * @param num_items    number of items (0 is handled safely)
 * @param element_size byte stride between consecutive items
 */
static void z_ztest_shuffle(bool shuffle, void *dest[], intptr_t start, size_t num_items,
			    size_t element_size)
{
	/* Initialize dest array with the items in natural order. */
	for (size_t i = 0; i < num_items; ++i) {
		dest[i] = (void *)(start + (i * element_size));
	}

	/* Nothing to shuffle with fewer than two items; this guard also
	 * prevents the size_t underflow of (num_items - 1) below when
	 * num_items is 0, which would index far out of bounds.
	 */
	if (!shuffle || num_items < 2) {
		return;
	}

	/* Fisher-Yates shuffle of the pointer array. */
	for (size_t i = num_items - 1; i > 0; i--) {
		size_t j = sys_rand32_get() % (i + 1);

		if (i != j) {
			void *tmp = dest[j];

			dest[j] = dest[i];
			dest[i] = tmp;
		}
	}
}
797 #endif
798
/* Run every matching unit test in @suite, repeating the whole case
 * list @case_iter times (test order optionally shuffled per pass).
 *
 * @param suite      suite to run; NULL raises a critical error
 * @param shuffle    randomize test order (CONFIG_ZTEST_SHUFFLE builds)
 * @param suite_iter suite repeat count (passed through, unused here)
 * @param case_iter  number of passes over the suite's test list
 * @param param      when non-NULL, overrides the setup() fixture
 * @return number of failed test runs; -1 on NULL suite; test_status
 *         when fail-fast already tripped
 */
static int z_ztest_run_test_suite_ptr(struct ztest_suite_node *suite, bool shuffle, int suite_iter,
				      int case_iter, void *param)
{
	struct ztest_unit_test *test = NULL;
	void *data = NULL;
	int fail = 0;
	int tc_result = TC_PASS;

	/* Fail-fast: a previous suite already failed, don't start more. */
	if (FAIL_FAST && test_status != ZTEST_STATUS_OK) {
		return test_status;
	}

	if (suite == NULL) {
		test_status = ZTEST_STATUS_CRITICAL_ERROR;
		return -1;
	}

#ifndef KERNEL
	/* Catch-all longjmp target for unrecoverable framework errors. */
	if (setjmp(stack_fail)) {
		PRINT_DATA("TESTSUITE crashed.\n");
		test_status = ZTEST_STATUS_CRITICAL_ERROR;
		end_report();
		exit(1);
	}
#else
	k_object_access_all_grant(&ztest_thread);
#endif

	TC_SUITE_START(suite->name);
	current_test_failed_assumption = false;
	__ztest_set_test_result(ZTEST_RESULT_PENDING);
	__ztest_set_test_phase(TEST_PHASE_SETUP);
#ifndef KERNEL
	/* ztest_test_fail() during setup lands here. */
	if (setjmp(test_suite_fail)) {
		__ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
	}
#endif
	if (test_result != ZTEST_RESULT_SUITE_FAIL && suite->setup != NULL) {
		data = suite->setup();
	}
	/* An explicit parameter overrides the setup() fixture. */
	if (param != NULL) {
		data = param;
	}

	for (int i = 0; i < case_iter; i++) {
#ifdef CONFIG_ZTEST_SHUFFLE
		/* Build a (possibly shuffled) list over ALL registered
		 * tests; tests from other suites are filtered out below.
		 */
		struct ztest_unit_test *tests_to_run[ZTEST_TEST_COUNT];

		memset(tests_to_run, 0, ZTEST_TEST_COUNT * sizeof(struct ztest_unit_test *));
		z_ztest_shuffle(shuffle, (void **)tests_to_run,
				(intptr_t)_ztest_unit_test_list_start, ZTEST_TEST_COUNT,
				sizeof(struct ztest_unit_test));
		for (size_t j = 0; j < ZTEST_TEST_COUNT; ++j) {
			test = tests_to_run[j];
			/* Make sure that the test belongs to this suite */
			if (strcmp(suite->name, test->test_suite_name) != 0) {
				continue;
			}
			if (ztest_api.should_test_run(suite->name, test->name)) {
				test->stats->run_count++;
				tc_result = run_test(suite, test, data);
				if (tc_result == TC_PASS) {
					test->stats->pass_count++;
				} else if (tc_result == TC_SKIP) {
					test->stats->skip_count++;
				} else if (tc_result == TC_FAIL) {
					test->stats->fail_count++;
				}
				if (tc_result == TC_FAIL) {
					fail++;
				}
			}

			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
				break;
			}
		}
#else
		while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
			if (ztest_api.should_test_run(suite->name, test->name)) {
				test->stats->run_count++;
				tc_result = run_test(suite, test, data);
				if (tc_result == TC_PASS) {
					test->stats->pass_count++;
				} else if (tc_result == TC_SKIP) {
					test->stats->skip_count++;
				} else if (tc_result == TC_FAIL) {
					test->stats->fail_count++;
				}

				if (tc_result == TC_FAIL) {
					fail++;
				}
			}

			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
				break;
			}
		}
#endif
		if (test_status == ZTEST_STATUS_OK && fail != 0) {
			test_status = ZTEST_STATUS_HAS_FAILURE;
		}
	}

	TC_SUITE_END(suite->name, (fail > 0 ? TC_FAIL : TC_PASS));
	__ztest_set_test_phase(TEST_PHASE_TEARDOWN);
	if (suite->teardown != NULL) {
		suite->teardown(data);
	}

	return fail;
}
912
/* Run the suite registered under @name; see z_ztest_run_test_suite_ptr()
 * for the remaining arguments. An unknown name yields a NULL suite,
 * which that function reports as a critical error.
 */
int z_ztest_run_test_suite(const char *name, bool shuffle,
			   int suite_iter, int case_iter, void *param)
{
	return z_ztest_run_test_suite_ptr(ztest_find_test_suite(name), shuffle, suite_iter,
					  case_iter, param);
}
919
920 #ifdef CONFIG_USERSPACE
921 K_APPMEM_PARTITION_DEFINE(ztest_mem_partition);
922 #endif
923
924 /* Show one line summary for a test suite.
925 */
__ztest_show_suite_summary_oneline(struct ztest_suite_node * suite)926 static void __ztest_show_suite_summary_oneline(struct ztest_suite_node *suite)
927 {
928 int distinct_pass = 0, distinct_fail = 0, distinct_skip = 0, distinct_total = 0;
929 int effective_total = 0;
930 int expanded_pass = 0, expanded_passrate = 0;
931 int passrate_major = 0, passrate_minor = 0, passrate_tail = 0;
932 int suite_result = TC_PASS;
933
934 struct ztest_unit_test *test = NULL;
935 unsigned int suite_duration_worst_ms = 0;
936
937 /** summary of distinct run */
938 while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
939 distinct_total++;
940 suite_duration_worst_ms += test->stats->duration_worst_ms;
941 if (test->stats->skip_count == test->stats->run_count) {
942 distinct_skip++;
943 } else if (test->stats->pass_count == test->stats->run_count) {
944 distinct_pass++;
945 } else {
946 distinct_fail++;
947 }
948 }
949
950 if (distinct_skip == distinct_total) {
951 suite_result = TC_SKIP;
952 passrate_major = passrate_minor = 0;
953 } else {
954 suite_result = (distinct_fail > 0) ? TC_FAIL : TC_PASS;
955 effective_total = distinct_total - distinct_skip;
956 expanded_pass = distinct_pass * 100000;
957 expanded_passrate = expanded_pass / effective_total;
958 passrate_major = expanded_passrate / 1000;
959 passrate_minor = (expanded_passrate - passrate_major * 1000) / 10;
960 passrate_tail = expanded_passrate - passrate_major * 1000 - passrate_minor * 10;
961 if (passrate_tail >= 5) { /* rounding */
962 passrate_minor++;
963 }
964 }
965
966 TC_SUMMARY_PRINT("SUITE %s - %3d.%02d%% [%s]: pass = %d, fail = %d, "
967 "skip = %d, total = %d duration = %u.%03u seconds\n",
968 TC_RESULT_TO_STR(suite_result), passrate_major, passrate_minor,
969 suite->name, distinct_pass, distinct_fail, distinct_skip, distinct_total,
970 suite_duration_worst_ms / 1000, suite_duration_worst_ms % 1000);
971 log_flush();
972 }
973
__ztest_show_suite_summary_verbose(struct ztest_suite_node * suite)974 static void __ztest_show_suite_summary_verbose(struct ztest_suite_node *suite)
975 {
976 struct ztest_unit_test *test = NULL;
977 int tc_result = TC_PASS;
978 int flush_frequency = 0;
979
980 if (IS_ENABLED(CONFIG_ZTEST_VERBOSE_SUMMARY) == 0) {
981 return;
982 }
983
984 while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
985 if (test->stats->skip_count == test->stats->run_count) {
986 tc_result = TC_SKIP;
987 } else if (test->stats->pass_count == test->stats->run_count) {
988 tc_result = TC_PASS;
989 } else if (test->stats->pass_count == 0) {
990 tc_result = TC_FAIL;
991 } else {
992 tc_result = TC_FLAKY;
993 }
994
995 if (tc_result == TC_FLAKY) {
996 TC_SUMMARY_PRINT(
997 " - %s - [%s.%s] - (Failed %d of %d attempts)"
998 " - duration = %u.%03u seconds\n",
999 TC_RESULT_TO_STR(tc_result), test->test_suite_name, test->name,
1000 test->stats->run_count - test->stats->pass_count,
1001 test->stats->run_count, test->stats->duration_worst_ms / 1000,
1002 test->stats->duration_worst_ms % 1000);
1003 } else {
1004 TC_SUMMARY_PRINT(" - %s - [%s.%s] duration = %u.%03u seconds\n",
1005 TC_RESULT_TO_STR(tc_result), test->test_suite_name,
1006 test->name, test->stats->duration_worst_ms / 1000,
1007 test->stats->duration_worst_ms % 1000);
1008 }
1009
1010 if (flush_frequency % 3 == 0) {
1011 /** Reduce the flush frequency a bit to speed up the output */
1012 log_flush();
1013 }
1014 flush_frequency++;
1015 }
1016 TC_SUMMARY_PRINT("\n");
1017 log_flush();
1018 }
1019
__ztest_show_suite_summary(void)1020 static void __ztest_show_suite_summary(void)
1021 {
1022 if (IS_ENABLED(CONFIG_ZTEST_SUMMARY) == 0) {
1023 return;
1024 }
1025 /* Flush the log a lot to ensure that no summary content
1026 * is dropped if it goes through the logging subsystem.
1027 */
1028 log_flush();
1029 TC_SUMMARY_PRINT("\n------ TESTSUITE SUMMARY START ------\n\n");
1030 log_flush();
1031 for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1032 ptr < _ztest_suite_node_list_end; ++ptr) {
1033
1034 __ztest_show_suite_summary_oneline(ptr);
1035 __ztest_show_suite_summary_verbose(ptr);
1036 }
1037 TC_SUMMARY_PRINT("------ TESTSUITE SUMMARY END ------\n\n");
1038 log_flush();
1039 }
1040
/* Run @ptr up to @suite_iter times, consulting the filter callback
 * before each repetition and updating the suite's run/fail/skip stats.
 *
 * @return the number of times the suite was actually executed
 */
static int __ztest_run_test_suite(struct ztest_suite_node *ptr, const void *state, bool shuffle,
				  int suite_iter, int case_iter, void *param)
{
	struct ztest_suite_stats *stats = ptr->stats;
	int runs = 0;

	for (int iter = 0; iter < suite_iter; iter++) {
		if (!ztest_api.should_suite_run(state, ptr)) {
			stats->skip_count++;
			continue;
		}

		int fail = z_ztest_run_test_suite_ptr(ptr, shuffle, suite_iter, case_iter, param);

		runs++;
		stats->run_count++;
		if (fail != 0) {
			stats->fail_count++;
		}
	}

	return runs;
}
1062
/* Run all registered suites that pass the state filter, in registration
 * or shuffled order, honoring fail-fast and critical-error aborts.
 *
 * @param state      opaque state handed to should_suite_run()
 * @param shuffle    randomize suite order (CONFIG_ZTEST_SHUFFLE builds)
 * @param suite_iter per-suite repetition count
 * @param case_iter  per-test-list repetition count
 * @return total number of suite executions performed
 */
int z_impl_ztest_run_test_suites(const void *state, bool shuffle, int suite_iter, int case_iter)
{
	int count = 0;
	void *param = NULL;
	/* A prior critical error means nothing more should run. */
	if (test_status == ZTEST_STATUS_CRITICAL_ERROR) {
		return count;
	}

#ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
	gcov_reset_all_counts();
#endif

#ifdef CONFIG_ZTEST_SHUFFLE
	struct ztest_suite_node *suites_to_run[ZTEST_SUITE_COUNT];

	memset(suites_to_run, 0, ZTEST_SUITE_COUNT * sizeof(struct ztest_suite_node *));
	z_ztest_shuffle(shuffle, (void **)suites_to_run, (intptr_t)_ztest_suite_node_list_start,
			ZTEST_SUITE_COUNT, sizeof(struct ztest_suite_node));
	for (size_t i = 0; i < ZTEST_SUITE_COUNT; ++i) {
		count += __ztest_run_test_suite(suites_to_run[i], state, shuffle, suite_iter,
						case_iter, param);
		/* Stop running tests if we have a critical error or if we have a failure and
		 * FAIL_FAST was set
		 */
		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
			break;
		}
	}
#else
	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
	     ptr < _ztest_suite_node_list_end; ++ptr) {
		count += __ztest_run_test_suite(ptr, state, shuffle, suite_iter, case_iter, param);
		/* Stop running tests if we have a critical error or if we have a failure and
		 * FAIL_FAST was set
		 */
		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
			break;
		}
	}
#endif

	return count;
}
1108
/* Record the outcome of the currently running test (syscall implementation). */
void z_impl___ztest_set_test_result(enum ztest_result new_result)
{
	test_result = new_result;
}
1113
/* Update the framework's current execution phase (syscall implementation). */
void z_impl___ztest_set_test_phase(enum ztest_phase new_phase)
{
	cur_phase = new_phase;
}
1118
1119 #ifdef CONFIG_USERSPACE
/* Userspace syscall verification handler; forwards to the kernel impl.
 * The generated marshaller below must follow this definition.
 */
void z_vrfy___ztest_set_test_result(enum ztest_result new_result)
{
	z_impl___ztest_set_test_result(new_result);
}
#include <zephyr/syscalls/__ztest_set_test_result_mrsh.c>
1125
/* Userspace syscall verification handler; forwards to the kernel impl.
 * The generated marshaller below must follow this definition.
 */
void z_vrfy___ztest_set_test_phase(enum ztest_phase new_phase)
{
	z_impl___ztest_set_test_phase(new_phase);
}
#include <zephyr/syscalls/__ztest_set_test_phase_mrsh.c>
1131 #endif /* CONFIG_USERSPACE */
1132
ztest_verify_all_test_suites_ran(void)1133 void ztest_verify_all_test_suites_ran(void)
1134 {
1135 bool all_tests_run = true;
1136 struct ztest_suite_node *suite;
1137 struct ztest_unit_test *test;
1138
1139 if (IS_ENABLED(CONFIG_ZTEST_VERIFY_RUN_ALL)) {
1140 for (suite = _ztest_suite_node_list_start; suite < _ztest_suite_node_list_end;
1141 ++suite) {
1142 if (suite->stats->run_count < 1) {
1143 PRINT_DATA("ERROR: Test suite '%s' did not run.\n", suite->name);
1144 all_tests_run = false;
1145 }
1146 }
1147
1148 for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1149 suite = ztest_find_test_suite(test->test_suite_name);
1150 if (suite == NULL) {
1151 PRINT_DATA("ERROR: Test '%s' assigned to test suite '%s' which "
1152 "doesn't "
1153 "exist\n",
1154 test->name, test->test_suite_name);
1155 all_tests_run = false;
1156 }
1157 }
1158
1159 if (!all_tests_run) {
1160 test_status = ZTEST_STATUS_HAS_FAILURE;
1161 }
1162 }
1163
1164 for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1165 if (test->stats->fail_count + test->stats->pass_count + test->stats->skip_count !=
1166 test->stats->run_count) {
1167 PRINT_DATA("Bad stats for %s.%s\n", test->test_suite_name, test->name);
1168 test_status = 1;
1169 }
1170 }
1171 }
1172
/* Delegate a full test run to the architecture/backend-specific runner. */
void ztest_run_all(const void *state, bool shuffle, int suite_iter, int case_iter)
{
	ztest_api.run_all(state, shuffle, suite_iter, case_iter);
}
1177
test_main(void)1178 void __weak test_main(void)
1179 {
1180 #if CONFIG_ZTEST_SHUFFLE
1181 ztest_run_all(NULL, true, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1182 #else
1183 ztest_run_all(NULL, false, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1184 #endif
1185 ztest_verify_all_test_suites_ran();
1186 }
1187
1188 #ifndef KERNEL
/* Host (non-kernel) entry point: run the tests and return the overall status. */
int main(void)
{
	z_init_mock();
	test_main();
	end_report();
#ifdef CONFIG_ZTEST_NO_YIELD
	/*
	 * Rather than yielding to idle thread, keep the part awake so debugger can
	 * still access it, since some SOCs cannot be debugged in low power states.
	 */
	uint32_t key = irq_lock();

	while (1) {
		; /* Spin */
	}
	irq_unlock(key);
#endif
	return test_status;
}
1208 #else
1209
1210 /* Shell */
1211
1212 #ifdef CONFIG_ZTEST_SHELL
cmd_list_suites(const struct shell * sh,size_t argc,char ** argv)1213 static int cmd_list_suites(const struct shell *sh, size_t argc, char **argv)
1214 {
1215 struct ztest_suite_node *suite;
1216
1217 for (suite = _ztest_suite_node_list_start; suite < _ztest_suite_node_list_end; ++suite) {
1218 shell_print(sh, "%s", suite->name);
1219 }
1220 return 0;
1221 }
1222
cmd_list_cases(const struct shell * sh,size_t argc,char ** argv)1223 static int cmd_list_cases(const struct shell *sh, size_t argc, char **argv)
1224 {
1225 struct ztest_suite_node *ptr;
1226 struct ztest_unit_test *test = NULL;
1227 int test_count = 0;
1228
1229 for (ptr = _ztest_suite_node_list_start; ptr < _ztest_suite_node_list_end; ++ptr) {
1230 test = NULL;
1231 while ((test = z_ztest_get_next_test(ptr->name, test)) != NULL) {
1232 shell_print(sh, "%s::%s", test->test_suite_name, test->name);
1233 test_count++;
1234 }
1235 }
1236 return 0;
1237 }
1238 extern void ztest_set_test_args(char *argv);
1239 extern void ztest_reset_test_args(void);
1240
cmd_runall(const struct shell * sh,size_t argc,char ** argv)1241 static int cmd_runall(const struct shell *sh, size_t argc, char **argv)
1242 {
1243 ztest_reset_test_args();
1244 ztest_run_all(NULL, false, 1, 1);
1245 end_report();
1246 return 0;
1247 }
1248
1249 #ifdef CONFIG_ZTEST_SHUFFLE
cmd_shuffle(const struct shell * sh,size_t argc,char ** argv)1250 static int cmd_shuffle(const struct shell *sh, size_t argc, char **argv)
1251 {
1252
1253 struct getopt_state *state;
1254 int opt;
1255 static struct option long_options[] = {{"suite_iter", required_argument, 0, 's'},
1256 {"case_iter", required_argument, 0, 'c'},
1257 {0, 0, 0, 0}};
1258 int opt_index = 0;
1259 int val;
1260 int opt_num = 0;
1261
1262 int suite_iter = 1;
1263 int case_iter = 1;
1264
1265 while ((opt = getopt_long(argc, argv, "s:c:", long_options, &opt_index)) != -1) {
1266 state = getopt_state_get();
1267 switch (opt) {
1268 case 's':
1269 val = atoi(state->optarg);
1270 if (val < 1) {
1271 shell_error(sh, "Invalid number of suite iterations");
1272 return -ENOEXEC;
1273 }
1274 suite_iter = val;
1275 opt_num++;
1276 break;
1277 case 'c':
1278 val = atoi(state->optarg);
1279 if (val < 1) {
1280 shell_error(sh, "Invalid number of case iterations");
1281 return -ENOEXEC;
1282 }
1283 case_iter = val;
1284 opt_num++;
1285 break;
1286 default:
1287 shell_error(sh, "Invalid option or option usage: %s",
1288 argv[opt_index + 1]);
1289 return -ENOEXEC;
1290 }
1291 }
1292 ztest_reset_test_args();
1293 ztest_run_all(NULL, true, suite_iter, case_iter);
1294 end_report();
1295 return 0;
1296 }
1297 #endif
1298
cmd_run_suite(const struct shell * sh,size_t argc,char ** argv)1299 static int cmd_run_suite(const struct shell *sh, size_t argc, char **argv)
1300 {
1301 struct getopt_state *state;
1302 int opt;
1303 static struct option long_options[] = {{"repeat_iter", required_argument, NULL, 'r'},
1304 {NULL, 0, NULL, 0}};
1305 int opt_index = 0;
1306 int val;
1307 int opt_num = 0;
1308 void *param = NULL;
1309 int repeat_iter = 1;
1310
1311 while ((opt = getopt_long(argc, argv, "r:p:", long_options, &opt_index)) != -1) {
1312 state = getopt_state_get();
1313 switch (opt) {
1314 case 'r':
1315 val = atoi(state->optarg);
1316 if (val < 1) {
1317 shell_fprintf(sh, SHELL_ERROR,
1318 "Invalid number of suite interations\n");
1319 return -ENOEXEC;
1320 }
1321 repeat_iter = val;
1322 opt_num++;
1323 break;
1324 case 'p':
1325 param = state->optarg;
1326 opt_num++;
1327 break;
1328 default:
1329 shell_fprintf(sh, SHELL_ERROR,
1330 "Invalid option or option usage: %s\n", argv[opt_index + 1]);
1331 return -ENOEXEC;
1332 }
1333 }
1334 int count = 0;
1335 bool shuffle = false;
1336 const char *shell_command = argv[0];
1337
1338 /*
1339 * This if statement determines which argv contains the test name.
1340 * If the optional argument is used, the test name is in the third
1341 * argv instead of the first.
1342 */
1343 if (opt_num == 1) {
1344 ztest_set_test_args(argv[3]);
1345 } else {
1346 ztest_set_test_args(argv[1]);
1347 }
1348
1349 for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1350 ptr < _ztest_suite_node_list_end; ++ptr) {
1351 if (strcmp(shell_command, "run-testcase") == 0) {
1352 count += __ztest_run_test_suite(ptr, NULL, shuffle, 1, repeat_iter, param);
1353 } else if (strcmp(shell_command, "run-testsuite") == 0) {
1354 count += __ztest_run_test_suite(ptr, NULL, shuffle, repeat_iter, 1, NULL);
1355 }
1356 if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
1357 (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
1358 break;
1359 }
1360 }
1361 return 0;
1362 }
1363
static void testsuite_list_get(size_t idx, struct shell_static_entry *entry);

/* Dynamic subcommand source that tab-completes registered suite names. */
SHELL_DYNAMIC_CMD_CREATE(testsuite_names, testsuite_list_get);
1367
/* Expose the static suite array via @a suites; the return value is its length. */
static size_t testsuite_get_all_static(struct ztest_suite_node const **suites)
{
	*suites = _ztest_suite_node_list_start;
	return _ztest_suite_node_list_end - _ztest_suite_node_list_start;
}
1373
suite_lookup(size_t idx,const char * prefix)1374 static const struct ztest_suite_node *suite_lookup(size_t idx, const char *prefix)
1375 {
1376 size_t match_idx = 0;
1377 const struct ztest_suite_node *suite;
1378 size_t len = testsuite_get_all_static(&suite);
1379 const struct ztest_suite_node *suite_end = suite + len;
1380
1381 while (suite < suite_end) {
1382 if ((suite->name != NULL) && (strlen(suite->name) != 0) &&
1383 ((prefix == NULL) || (strncmp(prefix, suite->name, strlen(prefix)) == 0))) {
1384 if (match_idx == idx) {
1385 return suite;
1386 }
1387 ++match_idx;
1388 }
1389 ++suite;
1390 }
1391
1392 return NULL;
1393 }
1394
testsuite_list_get(size_t idx,struct shell_static_entry * entry)1395 static void testsuite_list_get(size_t idx, struct shell_static_entry *entry)
1396 {
1397 const struct ztest_suite_node *suite = suite_lookup(idx, "");
1398
1399 entry->syntax = (suite != NULL) ? suite->name : NULL;
1400 entry->handler = NULL;
1401 entry->help = NULL;
1402 entry->subcmd = NULL;
1403 }
1404
/* clang-format off */
/* "ztest" shell command tree: list, run, and (optionally) shuffle tests
 * from the shell prompt.
 */
SHELL_STATIC_SUBCMD_SET_CREATE(
	sub_ztest_cmds,
	SHELL_CMD_ARG(run-all, NULL, "Run all tests", cmd_runall, 0, 0),
#ifdef CONFIG_ZTEST_SHUFFLE
	SHELL_COND_CMD_ARG(CONFIG_ZTEST_SHUFFLE, shuffle, NULL,
		   "Shuffle tests", cmd_shuffle, 0, 2),
#endif
	SHELL_CMD_ARG(list-testsuites, NULL,
		      "List all test suites", cmd_list_suites, 0, 0),
	SHELL_CMD_ARG(list-testcases, NULL,
		      "List all test cases", cmd_list_cases, 0, 0),
	SHELL_CMD_ARG(run-testsuite, &testsuite_names,
		      "Run test suite", cmd_run_suite, 2, 2),
	SHELL_CMD_ARG(run-testcase, NULL, "Run testcase", cmd_run_suite, 2, 2),
	SHELL_SUBCMD_SET_END /* Array terminated. */
);
/* clang-format on */

SHELL_CMD_REGISTER(ztest, &sub_ztest_cmds, "Ztest commands", NULL);
1425 #endif /* CONFIG_ZTEST_SHELL */
1426
/* Kernel-mode entry point: configure userspace memory access (if enabled),
 * run the tests (unless driven interactively via the ztest shell), and
 * optionally reboot to re-test or spin so a debugger can attach.
 */
int main(void)
{
#ifdef CONFIG_USERSPACE
	/* Partition containing globals tagged with ZTEST_DMEM and ZTEST_BMEM
	 * macros. Any variables that user code may reference need to be
	 * placed in this partition if no other memory domain configuration
	 * is made.
	 */
	k_mem_domain_add_partition(&k_mem_domain_default, &ztest_mem_partition);
#ifdef Z_MALLOC_PARTITION_EXISTS
	/* Allow access to malloc() memory */
	k_mem_domain_add_partition(&k_mem_domain_default, &z_malloc_partition);
#endif
#endif /* CONFIG_USERSPACE */

	z_init_mock();
#ifndef CONFIG_ZTEST_SHELL
	test_main();
	end_report();
	log_flush();
	LOG_PANIC();
	if (IS_ENABLED(CONFIG_ZTEST_RETEST_IF_PASSED)) {
		/* Boot counter kept in __noinit RAM so it survives warm resets;
		 * the magic value detects first boot / corrupted state.
		 */
		static __noinit struct {
			uint32_t magic;
			uint32_t boots;
		} state;
		const uint32_t magic = 0x152ac523;

		if (state.magic != magic) {
			state.magic = magic;
			state.boots = 0;
		}
		state.boots += 1;
		if (test_status == 0) {
			/* All tests passed: reboot so the run can be repeated. */
			PRINT_DATA("Reset board #%u to test again\n", state.boots);
			k_msleep(10);
			sys_reboot(SYS_REBOOT_COLD);
		} else {
			PRINT_DATA("Failed after %u attempts\n", state.boots);
			state.boots = 0;
		}
	}
#ifdef CONFIG_ZTEST_NO_YIELD
	/*
	 * Rather than yielding to idle thread, keep the part awake so debugger can
	 * still access it, since some SOCs cannot be debugged in low power states.
	 */
	uint32_t key = irq_lock();

	while (1) {
		; /* Spin */
	}
	irq_unlock(key);
#endif /* CONFIG_ZTEST_NO_YIELD */
#endif /* CONFIG_ZTEST_SHELL */
	return 0;
}
1484 #endif
1485