/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <ztest.h>
#include <stdio.h>
#include <app_memory/app_memdomain.h>
#ifdef CONFIG_USERSPACE
#include <sys/libc-hooks.h>
#endif
#include <sys/reboot.h>

#ifdef KERNEL
static struct k_thread ztest_thread;
#endif

#ifdef CONFIG_ARCH_POSIX
#include <unistd.h>
#endif

/* ZTEST_DMEM and ZTEST_BMEM are used for the application shared memory test */

ZTEST_DMEM enum {
	TEST_PHASE_SETUP,
	TEST_PHASE_TEST,
	TEST_PHASE_TEARDOWN,
	TEST_PHASE_FRAMEWORK
} phase = TEST_PHASE_FRAMEWORK;

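/* Aggregate suite status: 0 while everything has passed, 1 once any suite
 * has failed. A negative value makes z_ztest_run_test_suite() return without
 * running any further suites.
 */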
static ZTEST_BMEM int test_status;

/**
 * @brief Try to shorten a filename by removing the current directory
 *
 * This helps to reduce the very long filenames in assertion failures. It
 * removes the current directory from the filename and returns the rest.
 * This makes assertions a lot more readable, and sometimes they fit on one
 * line.
 *
 * @param file Filename to check
 * @returns Shortened filename, or @file if it could not be shortened
 */
const char *ztest_relative_filename(const char *file)
{
#ifdef CONFIG_ARCH_POSIX
	const char *cwd;
	char buf[200];

	cwd = getcwd(buf, sizeof(buf));
	if (cwd && strlen(file) > strlen(cwd) &&
	    !strncmp(file, cwd, strlen(cwd)))
		return file + strlen(cwd) + 1; /* move past the trailing '/' */
#endif
	return file;
}
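
/* Illustrative example (hypothetical paths): with a current directory of
 * "/home/user/zephyr", an assertion in "/home/user/zephyr/tests/foo/main.c"
 * is reported as "tests/foo/main.c"; on non-POSIX targets the full path is
 * returned unchanged.
 */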

static int cleanup_test(struct unit_test *test)
{
	int ret = TC_PASS;
	int mock_status;

	mock_status = z_cleanup_mock();

#ifdef KERNEL
	/* We need to remove the ztest_thread information from the timeout_q.
	 * Because we reuse the same k_thread structure, leaving it queued
	 * would cause problems for the next test.
	 */
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_thread_abort(&ztest_thread);
	}
#endif

	if (!ret && mock_status == 1) {
		PRINT("Test %s failed: Unused mock parameter values\n",
		      test->name);
		ret = TC_FAIL;
	} else if (!ret && mock_status == 2) {
		PRINT("Test %s failed: Unused mock return values\n",
		      test->name);
		ret = TC_FAIL;
	} else {
		;
	}

	return ret;
}

#ifdef KERNEL
#ifdef CONFIG_SMP
#define NUM_CPUHOLD (CONFIG_MP_NUM_CPUS - 1)
#else
#define NUM_CPUHOLD 0
#endif
#define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACKSIZE)

static struct k_thread cpuhold_threads[NUM_CPUHOLD];
K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, NUM_CPUHOLD, CPUHOLD_STACK_SZ);
static struct k_sem cpuhold_sem;
volatile int cpuhold_active;

/* "Holds" a CPU for use with the "1cpu" test cases.  Note that we
 * can't use tools like the cpumask feature because we have tests that
 * may need to control that configuration themselves.  We do this at
 * the lowest level, by locking interrupts directly and spinning.
 */
static void cpu_hold(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	unsigned int key = arch_irq_lock();
	uint32_t dt, start_ms = k_uptime_get_32();

	k_sem_give(&cpuhold_sem);

#if defined(CONFIG_ARM64) && defined(CONFIG_FPU_SHARING)
	/*
	 * We'll be spinning with IRQs disabled. The flush-your-FPU request
	 * IPI will never be serviced during that time. Therefore we flush
	 * the FPU preemptively here to prevent any other CPU waiting after
	 * this CPU forever and deadlock the system.
	 */
	extern void z_arm64_flush_local_fpu(void);
	z_arm64_flush_local_fpu();
#endif

	while (cpuhold_active) {
		k_busy_wait(1000);
	}

	/* Holding the CPU via spinning is expensive, and abusing this
	 * for long-running test cases tends to overload the CI system
	 * (qemu runs separate CPUs in different threads, but the CI
	 * logic views it as one "job") and cause other test failures.
	 */
	dt = k_uptime_get_32() - start_ms;
	zassert_true(dt < 3000,
		     "1cpu test took too long (%d ms)", dt);
	arch_irq_unlock(key);
}

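/* The start/stop pair below parks every other CPU in the busy-wait loop
 * above so the test case that follows effectively runs on one CPU. Test
 * code does not normally call these directly; the ztest_1cpu_unit_test()
 * style wrappers in the ztest headers are expected to bracket the test
 * body with z_test_1cpu_start()/z_test_1cpu_stop().
 */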
void z_impl_z_test_1cpu_start(void)
{
	cpuhold_active = 1;
#ifdef CONFIG_THREAD_NAME
	char tname[CONFIG_THREAD_MAX_NAME_LEN];
#endif
	k_sem_init(&cpuhold_sem, 0, 999);

	/* Spawn N-1 threads to "hold" the other CPUs, waiting for
	 * each to signal us that it's locked and spinning.
	 *
	 * Note that NUM_CPUHOLD can be a value that causes coverity
	 * to flag the following loop as DEADCODE so suppress the warning.
	 */
	/* coverity[DEADCODE] */
	for (int i = 0; i < NUM_CPUHOLD; i++)  {
		k_thread_create(&cpuhold_threads[i],
				cpuhold_stacks[i], CPUHOLD_STACK_SZ,
				(k_thread_entry_t) cpu_hold, NULL, NULL, NULL,
				K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
#ifdef CONFIG_THREAD_NAME
		snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", i);
		k_thread_name_set(&cpuhold_threads[i], tname);
#endif
		k_sem_take(&cpuhold_sem, K_FOREVER);
	}
}

void z_impl_z_test_1cpu_stop(void)
{
	cpuhold_active = 0;

	/* Note that NUM_CPUHOLD can be a value that causes coverity
	 * to flag the following loop as DEADCODE so suppress the warning.
	 */
	/* coverity[DEADCODE] */
	for (int i = 0; i < NUM_CPUHOLD; i++)  {
		k_thread_abort(&cpuhold_threads[i]);
	}
}

#ifdef CONFIG_USERSPACE
void z_vrfy_z_test_1cpu_start(void)
{
	z_impl_z_test_1cpu_start();
}
#include <syscalls/z_test_1cpu_start_mrsh.c>

void z_vrfy_z_test_1cpu_stop(void)
{
	z_impl_z_test_1cpu_stop();
}
#include <syscalls/z_test_1cpu_stop_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif

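/* Run the setup and test hooks of a single unit test, updating "phase" so
 * that failure handling can report which stage was executing.
 */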
static void run_test_functions(struct unit_test *test)
{
	phase = TEST_PHASE_SETUP;
	test->setup();
	phase = TEST_PHASE_TEST;
	test->test();
}

#ifndef KERNEL

/* Static code analysis tools can raise a violation that the standard header
 * <setjmp.h> shall not be used.
 *
 * setjmp is used in test code, not in runtime code, so it is acceptable.
 * It is a deliberate deviation.
 */
#include <setjmp.h> /* parasoft-suppress MISRAC2012-RULE_21_4-a MISRAC2012-RULE_21_4-b*/
#include <signal.h>
#include <string.h>
#include <stdlib.h>

#define FAIL_FAST 0

static jmp_buf test_fail;
static jmp_buf test_pass;
static jmp_buf stack_fail;
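
/* In native (non-kernel) builds there is no separate test thread: a failing
 * assertion raises SIGABRT and the signal handler longjmp()s back into
 * run_test() via test_fail. test_pass is the analogous early exit for
 * ztest_test_pass(), while stack_fail aborts the whole suite when a crash
 * happens outside any test phase.
 */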

void ztest_test_fail(void)
{
	raise(SIGABRT);
}

void ztest_test_pass(void)
{
	longjmp(test_pass, 1);
}

static void handle_signal(int sig)
{
	static const char *const phase_str[] = {
		"setup",
		"unit test",
		"teardown",
	};

	PRINT("    %s", strsignal(sig));
	switch (phase) {
	case TEST_PHASE_SETUP:
	case TEST_PHASE_TEST:
	case TEST_PHASE_TEARDOWN:
		PRINT(" at %s function\n", phase_str[phase]);
		longjmp(test_fail, 1);
	case TEST_PHASE_FRAMEWORK:
		PRINT("\n");
		longjmp(stack_fail, 1);
	}
}

static void init_testing(void)
{
	signal(SIGABRT, handle_signal);
	signal(SIGSEGV, handle_signal);

	if (setjmp(stack_fail)) {
		PRINT("Test suite crashed.");
		exit(1);
	}
}

static int run_test(struct unit_test *test)
{
	int ret = TC_PASS;

	TC_START(test->name);

	if (setjmp(test_fail)) {
		ret = TC_FAIL;
		goto out;
	}

	if (setjmp(test_pass)) {
		ret = TC_PASS;
		goto out;
	}

	run_test_functions(test);
out:
	ret |= cleanup_test(test);
	Z_TC_END_RESULT(ret, test->name);

	return ret;
}

#else /* KERNEL */

/* Zephyr is likely to leave all subsequent tests failing once one test
 * fails, so optionally skip the rest of the tests when one of them fails.
 */
#ifdef CONFIG_ZTEST_FAIL_FAST
#define FAIL_FAST 1
#else
#define FAIL_FAST 0
#endif

K_THREAD_STACK_DEFINE(ztest_thread_stack, CONFIG_ZTEST_STACKSIZE +
		      CONFIG_TEST_EXTRA_STACKSIZE);
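/* Per-test result: 1 while the test body is still running, 0 on pass,
 * -1 on fail (ztest_test_fail), -2 on skip (ztest_test_skip).
 */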
static ZTEST_BMEM int test_result;

static void test_finalize(void)
{
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_thread_abort(&ztest_thread);
		k_thread_abort(k_current_get());
	}
}

void ztest_test_fail(void)
{
	test_result = -1;
	test_finalize();
}

void ztest_test_pass(void)
{
	test_result = 0;
	test_finalize();
}

void ztest_test_skip(void)
{
	test_result = -2;
	test_finalize();
}

static void init_testing(void)
{
	k_object_access_all_grant(&ztest_thread);
}

static void test_cb(void *a, void *dummy2, void *dummy)
{
	struct unit_test *test = (struct unit_test *)a;

	ARG_UNUSED(dummy2);
	ARG_UNUSED(dummy);

	test_result = 1;
	run_test_functions(test);
	test_result = 0;
}

static int run_test(struct unit_test *test)
{
	int ret = TC_PASS;

	TC_START(test->name);

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_thread_create(&ztest_thread, ztest_thread_stack,
				K_THREAD_STACK_SIZEOF(ztest_thread_stack),
				(k_thread_entry_t) test_cb, (struct unit_test *)test,
				NULL, NULL, CONFIG_ZTEST_THREAD_PRIORITY,
				test->thread_options | K_INHERIT_PERMS,
				K_FOREVER);

		if (test->name != NULL) {
			k_thread_name_set(&ztest_thread, test->name);
		}
		k_thread_start(&ztest_thread);
		k_thread_join(&ztest_thread, K_FOREVER);
	} else {
		test_result = 1;
		run_test_functions(test);
	}

	phase = TEST_PHASE_TEARDOWN;
	test->teardown();
	phase = TEST_PHASE_FRAMEWORK;

	if (test_result == -1) {
		ret = TC_FAIL;
	}

	if (!test_result || !FAIL_FAST) {
		ret |= cleanup_test(test);
	}

	if (test_result == -2) {
		Z_TC_END_RESULT(TC_SKIP, test->name);
	} else {
		Z_TC_END_RESULT(ret, test->name);
	}

	return ret;
}

#endif /* !KERNEL */

void z_ztest_run_test_suite(const char *name, struct unit_test *suite)
{
	int fail = 0;

	if (test_status < 0) {
		return;
	}

	init_testing();

	TC_SUITE_START(name);
	while (suite->test) {
		fail += run_test(suite);
		suite++;

		if (fail && FAIL_FAST) {
			break;
		}
	}
	TC_SUITE_END(name, (fail > 0 ? TC_FAIL : TC_PASS));

	test_status = (test_status || fail) ? 1 : 0;
}
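
/* For reference, a suite is typically declared and run through the
 * ztest_test_suite()/ztest_run_test_suite() macros, which build the
 * struct unit_test array consumed above. A minimal sketch, assuming
 * hypothetical test functions test_foo() and test_bar():
 *
 *	void test_main(void)
 *	{
 *		ztest_test_suite(my_suite,
 *				 ztest_unit_test(test_foo),
 *				 ztest_1cpu_unit_test(test_bar));
 *		ztest_run_test_suite(my_suite);
 *	}
 */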

void end_report(void)
{
	if (test_status) {
		TC_END_REPORT(TC_FAIL);
	} else {
		TC_END_REPORT(TC_PASS);
	}
}

#ifdef CONFIG_USERSPACE
K_APPMEM_PARTITION_DEFINE(ztest_mem_partition);
#endif

#ifndef KERNEL
int main(void)
{
	z_init_mock();
	test_main();
	end_report();

	return test_status;
}
#else
void main(void)
{
#ifdef CONFIG_USERSPACE
	/* Partition containing globals tagged with ZTEST_DMEM and ZTEST_BMEM
	 * macros. Any variables that user code may reference need to be
	 * placed in this partition if no other memory domain configuration
	 * is made.
	 */
	k_mem_domain_add_partition(&k_mem_domain_default,
				   &ztest_mem_partition);
#ifdef Z_MALLOC_PARTITION_EXISTS
	/* Allow access to malloc() memory */
	k_mem_domain_add_partition(&k_mem_domain_default,
				   &z_malloc_partition);
#endif
#endif /* CONFIG_USERSPACE */

	z_init_mock();
	test_main();
	end_report();
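	/* When CONFIG_ZTEST_RETEST_IF_PASSED is enabled, a boot counter is
	 * kept in __noinit RAM (left untouched by the usual bss/data init)
	 * so it survives the reboot below; the image then reruns itself
	 * after every passing run until a failure occurs.
	 */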
	if (IS_ENABLED(CONFIG_ZTEST_RETEST_IF_PASSED)) {
		static __noinit struct {
			uint32_t magic;
			uint32_t boots;
		} state;
		const uint32_t magic = 0x152ac523;

		if (state.magic != magic) {
			state.magic = magic;
			state.boots = 0;
		}
		state.boots += 1;
		if (test_status == 0) {
			PRINT("Reset board #%u to test again\n",
				state.boots);
			k_msleep(10);
			sys_reboot(SYS_REBOOT_COLD);
		} else {
			PRINT("Failed after %u attempts\n", state.boots);
			state.boots = 0;
		}
	}
}
#endif