/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/tc_util.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/irq_offload.h>
#include <kswap.h>
#include <assert.h>

#if defined(CONFIG_USERSPACE)
#include <zephyr/kernel/mm.h>
#include <zephyr/internal/syscall_handler.h>
#include "test_syscalls.h"
#endif

#if defined(CONFIG_DEMAND_PAGING)
#include <zephyr/kernel/mm/demand_paging.h>
#endif

#if defined(CONFIG_X86) && defined(CONFIG_X86_MMU)
#define STACKSIZE (8192)
#else
#define STACKSIZE (2048 + CONFIG_TEST_EXTRA_STACK_SIZE)
#endif
#define MAIN_PRIORITY 7
#define PRIORITY 5

static K_THREAD_STACK_DEFINE(alt_stack, STACKSIZE);

#if defined(CONFIG_STACK_SENTINEL) && !defined(CONFIG_ARCH_POSIX)
#define OVERFLOW_STACKSIZE (STACKSIZE / 2)
static k_thread_stack_t *overflow_stack =
		alt_stack + (STACKSIZE - OVERFLOW_STACKSIZE);
#else
#if defined(CONFIG_USERSPACE) && defined(CONFIG_ARC)
/* On ARC, the privilege stack is merged into the defined stack. */
#define OVERFLOW_STACKSIZE (STACKSIZE + CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define OVERFLOW_STACKSIZE STACKSIZE
#endif
#endif

static struct k_thread alt_thread;
volatile int rv;

static ZTEST_DMEM volatile int expected_reason = -1;

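/*
 * Custom fatal error handler: a test arms expected_reason before an
 * intentional crash. An unexpected crash, a crash from a thread other
 * than alt_thread, or a mismatched reason fails the test and halts.
 */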
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	TC_PRINT("Caught system error -- reason %d\n", reason);

	if (expected_reason == -1) {
		printk("Was not expecting a crash\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}

	if (k_current_get() != &alt_thread) {
		printk("Wrong thread crashed\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}

	if (reason != expected_reason) {
		printk("Wrong crash type got %d expected %d\n", reason,
		       expected_reason);
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}

	expected_reason = -1;
}

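/*
 * The entry_* functions below each run in alt_thread and deliberately
 * trigger one specific class of fatal error; rv is only set to TC_FAIL
 * if execution unexpectedly continues past the crash.
 */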
void entry_cpu_exception(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_CPU_EXCEPTION;

#if defined(CONFIG_X86)
	__asm__ volatile ("ud2");
#elif defined(CONFIG_NIOS2)
	__asm__ volatile ("trap");
#elif defined(CONFIG_ARC)
	__asm__ volatile ("swi");
#elif defined(CONFIG_RISCV)
	/* Illegal instruction on RISC-V. */
	__asm__ volatile (".word 0x77777777");
#else
	/* Triggers a usage fault on ARM, an illegal instruction on
	 * Xtensa, and a TLB exception (instruction fetch) on MIPS.
	 */
	{
		volatile long illegal = 0;
		((void(*)(void))&illegal)();
	}
#endif
	rv = TC_FAIL;
}

void entry_cpu_exception_extend(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_CPU_EXCEPTION;

#if defined(CONFIG_ARM64)
	__asm__ volatile ("svc 0");
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
	__asm__ volatile ("udf #0");
#elif defined(CONFIG_CPU_CORTEX_M)
	__asm__ volatile ("udf #0");
#elif defined(CONFIG_NIOS2)
	__asm__ volatile ("trap");
#elif defined(CONFIG_RISCV)
	/* On RISC-V, an undefined instruction triggers an illegal
	 * instruction exception.
	 */
	__asm__ volatile ("unimp");
#elif defined(CONFIG_ARC)
	/* On ARC, the SWI instruction triggers a software interrupt. */
	__asm__ volatile ("swi");
#else
	/* Create a divide-by-zero error on x86 and MIPS. */
	volatile int error;
	volatile int zero = 0;

	error = 32;     /* avoid static checker uninitialized warnings */
	error = error / zero;
#endif
	rv = TC_FAIL;
}

void entry_oops(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_KERNEL_OOPS;

	k_oops();
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

void entry_panic(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_KERNEL_PANIC;

	k_panic();
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

void entry_zephyr_assert(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_KERNEL_PANIC;

	__ASSERT(0, "intentionally failed assertion");
	rv = TC_FAIL;
}

void entry_arbitrary_reason(void *p1, void *p2, void *p3)
{
	expected_reason = INT_MAX;

	z_except_reason(INT_MAX);
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

void entry_arbitrary_reason_negative(void *p1, void *p2, void *p3)
{
	expected_reason = -2;

	z_except_reason(-2);
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

#ifndef CONFIG_ARCH_POSIX
#ifdef CONFIG_STACK_SENTINEL
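/*
 * Stack sentinel variant: allocate an OVERFLOW_STACKSIZE buffer on the
 * (deliberately undersized) overflow stack and fill it with junk so the
 * sentinel check reports K_ERR_STACK_CHK_FAIL.
 */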
__no_optimization void blow_up_stack(void)
{
	char buf[OVERFLOW_STACKSIZE];

	expected_reason = K_ERR_STACK_CHK_FAIL;
	TC_PRINT("posting %zu bytes of junk to stack...\n", sizeof(buf));
	(void)memset(buf, 0xbb, sizeof(buf));
}
#else
/* The stack sentinel wouldn't catch this in time, before it trashes
 * the entire kernel.
 */

#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Winfinite-recursion"
#endif

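/*
 * Unbounded recursion: each call spawns two more, growing the stack
 * until the overflow is detected.
 */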
__no_optimization int stack_smasher(int val)
{
	return stack_smasher(val * 2) + stack_smasher(val * 3);
}

#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

void blow_up_stack(void)
{
	expected_reason = K_ERR_STACK_CHK_FAIL;

	stack_smasher(37);
}

#if defined(CONFIG_USERSPACE)

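/*
 * Expose blow_up_stack() as a system call so a user-mode thread can
 * overflow its privilege stack while executing in supervisor mode.
 */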
void z_impl_blow_up_priv_stack(void)
{
	blow_up_stack();
}

static inline void z_vrfy_blow_up_priv_stack(void)
{
	z_impl_blow_up_priv_stack();
}
#include <zephyr/syscalls/blow_up_priv_stack_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_STACK_SENTINEL */

void stack_sentinel_timer(void *p1, void *p2, void *p3)
{
	/* We need to guarantee that we receive an interrupt, so set a
	 * k_timer and spin until we die. Spinning alone won't work
	 * on a tickless kernel.
	 */
	static struct k_timer timer;

	blow_up_stack();
	k_timer_init(&timer, NULL, NULL);
	k_timer_start(&timer, K_MSEC(1), K_NO_WAIT);
	while (true) {
	}
}

void stack_sentinel_swap(void *p1, void *p2, void *p3)
{
	/* Test that stack overflow check due to swap works */
	blow_up_stack();
	TC_PRINT("swapping...\n");
	z_swap_unlocked();
	TC_ERROR("should never see this\n");
	rv = TC_FAIL;
}

void stack_hw_overflow(void *p1, void *p2, void *p3)
{
	/* Test that HW stack overflow check works */
	blow_up_stack();
	TC_ERROR("should never see this\n");
	rv = TC_FAIL;
}

#if defined(CONFIG_USERSPACE)
void user_priv_stack_hw_overflow(void *p1, void *p2, void *p3)
{
	/* Test that HW stack overflow check works
	 * on a user thread's privilege stack.
	 */
	blow_up_priv_stack();
	TC_ERROR("should never see this\n");
	rv = TC_FAIL;
}
#endif /* CONFIG_USERSPACE */

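/*
 * Helper: spawn alt_thread with the given overflow handler and thread
 * flags, then check that the overflowing thread was aborted before it
 * could run past the fault.
 */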
void check_stack_overflow(k_thread_entry_t handler, uint32_t flags)
{
#ifdef CONFIG_STACK_SENTINEL
	/* When testing the stack sentinel feature, the overflow stack is a
	 * smaller section near the end of alt_stack. That way, when
	 * blow_up_stack() overflows it, nothing else gets corrupted and the
	 * test case can still complete.
	 */
	k_thread_create(&alt_thread, overflow_stack, OVERFLOW_STACKSIZE,
#else
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
#endif /* CONFIG_STACK_SENTINEL */
			handler,
			NULL, NULL, NULL, K_PRIO_PREEMPT(PRIORITY), flags,
			K_NO_WAIT);

	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");
}
#endif /* !CONFIG_ARCH_POSIX */

/**
 * @brief Test that kernel fatal error handling works correctly
 * @details Manually trigger crashes in various ways and check that the
 * kernel handles them properly and that the reported crash reason
 * matches the expected one. Also exercise the stack sentinel feature by
 * overflowing the thread's stack and checking for the exception.
 *
 * @ingroup kernel_common_tests
 */
ZTEST(fatal_exception, test_fatal)
{
	rv = TC_PASS;

	/*
	 * The main thread (test_main) priority was 10, but the ztest thread
	 * runs at priority -1. To run the test smoothly, make both the main
	 * and ztest threads run at the same priority level.
	 */
	k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY));

#ifndef CONFIG_ARCH_POSIX
	TC_PRINT("test alt thread 1: generic CPU exception\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_cpu_exception,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

	TC_PRINT("test alt thread 1: generic CPU exception divide zero\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_cpu_exception_extend,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");
#else
	/*
	 * We want the native OS to handle segfaults so we can debug them
	 * with the normal Linux tools.
	 */
	TC_PRINT("test alt thread 1: skipped for POSIX ARCH\n");
#endif

	TC_PRINT("test alt thread 2: initiate kernel oops\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_oops,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

	TC_PRINT("test alt thread 3: initiate kernel panic\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_panic,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

#if defined(CONFIG_ASSERT)
	/* This test is skipped when ASSERT is off. */
	TC_PRINT("test alt thread 4: fail assertion\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_zephyr_assert,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");
#endif

	TC_PRINT("test alt thread 5: initiate arbitrary SW exception\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_arbitrary_reason,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

	TC_PRINT("test alt thread 6: initiate arbitrary SW exception negative\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_arbitrary_reason_negative,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

#ifndef CONFIG_ARCH_POSIX

#ifdef CONFIG_STACK_SENTINEL
	TC_PRINT("test stack sentinel overflow - timer irq\n");
	check_stack_overflow(stack_sentinel_timer, 0);

	TC_PRINT("test stack sentinel overflow - swap\n");
	check_stack_overflow(stack_sentinel_swap, 0);
#endif /* CONFIG_STACK_SENTINEL */

#ifdef CONFIG_HW_STACK_PROTECTION
	/* HW-based stack overflow detection.
	 * Do this twice to show that HW-based solutions work more than
	 * once.
	 */

	TC_PRINT("test stack HW-based overflow - supervisor 1\n");
	check_stack_overflow(stack_hw_overflow, 0);

	TC_PRINT("test stack HW-based overflow - supervisor 2\n");
	check_stack_overflow(stack_hw_overflow, 0);

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	TC_PRINT("test stack HW-based overflow (FPU thread) - supervisor 1\n");
	check_stack_overflow(stack_hw_overflow, K_FP_REGS);

	TC_PRINT("test stack HW-based overflow (FPU thread) - supervisor 2\n");
	check_stack_overflow(stack_hw_overflow, K_FP_REGS);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#ifdef CONFIG_USERSPACE

	TC_PRINT("test stack HW-based overflow - user 1\n");
	check_stack_overflow(stack_hw_overflow, K_USER);

	TC_PRINT("test stack HW-based overflow - user 2\n");
	check_stack_overflow(stack_hw_overflow, K_USER);

	TC_PRINT("test stack HW-based overflow - user priv stack 1\n");
	check_stack_overflow(user_priv_stack_hw_overflow, K_USER);

	TC_PRINT("test stack HW-based overflow - user priv stack 2\n");
	check_stack_overflow(user_priv_stack_hw_overflow, K_USER);

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	TC_PRINT("test stack HW-based overflow (FPU thread) - user 1\n");
	check_stack_overflow(stack_hw_overflow, K_USER | K_FP_REGS);

	TC_PRINT("test stack HW-based overflow (FPU thread) - user 2\n");
	check_stack_overflow(stack_hw_overflow, K_USER | K_FP_REGS);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#endif /* CONFIG_USERSPACE */

#endif /* CONFIG_HW_STACK_PROTECTION */

#endif /* !CONFIG_ARCH_POSIX */
}

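/*
 * Suite setup: with demand paging enabled and not all sections present
 * at boot, pin the stack objects and expected_reason so that exception
 * handling never page-faults on them.
 */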
static void *fatal_setup(void)
{
#if defined(CONFIG_DEMAND_PAGING) && \
	!defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
	uintptr_t pin_addr;
	size_t pin_size, obj_size;

	/* The whole stack object (including reserved space) needs to be
	 * pinned, or else double faults would occur: an exception being
	 * processed while the stack itself page-faults.
	 *
	 * The same applies to some variables needed during exception
	 * processing.
	 */
#if defined(CONFIG_STACK_SENTINEL) && !defined(CONFIG_ARCH_POSIX)

	obj_size = K_THREAD_STACK_SIZEOF(overflow_stack);
#if defined(CONFIG_USERSPACE)
	obj_size = K_THREAD_STACK_LEN(obj_size);
#endif

	k_mem_region_align(&pin_addr, &pin_size,
			   POINTER_TO_UINT(&overflow_stack),
			   obj_size, CONFIG_MMU_PAGE_SIZE);

	k_mem_pin(UINT_TO_POINTER(pin_addr), pin_size);
#endif /* CONFIG_STACK_SENTINEL && !CONFIG_ARCH_POSIX */

	obj_size = K_THREAD_STACK_SIZEOF(alt_stack);
#if defined(CONFIG_USERSPACE)
	obj_size = K_THREAD_STACK_LEN(obj_size);
#endif

	k_mem_region_align(&pin_addr, &pin_size,
			   POINTER_TO_UINT(&alt_stack),
			   obj_size,
			   CONFIG_MMU_PAGE_SIZE);

	k_mem_pin(UINT_TO_POINTER(pin_addr), pin_size);

	k_mem_region_align(&pin_addr, &pin_size,
			   POINTER_TO_UINT((void *)&expected_reason),
			   sizeof(expected_reason),
			   CONFIG_MMU_PAGE_SIZE);

	k_mem_pin(UINT_TO_POINTER(pin_addr), pin_size);
#endif /* CONFIG_DEMAND_PAGING
	* && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
	*/

	return NULL;
}

ZTEST_SUITE(fatal_exception, NULL, fatal_setup, NULL, NULL, NULL);