1 /*
2  * Copyright (c) 2019 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 
9 #include <kernel_internal.h>
10 #include <zephyr/kernel_structs.h>
11 #include <zephyr/sys/__assert.h>
12 #include <zephyr/arch/cpu.h>
13 #include <zephyr/logging/log_ctrl.h>
14 #include <zephyr/logging/log.h>
15 #include <zephyr/fatal.h>
16 #ifndef	CONFIG_XTENSA
17 #include <zephyr/debug/coredump.h>
18 #endif
19 
20 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
21 
22 /* LCOV_EXCL_START */
arch_system_halt(unsigned int reason)23 FUNC_NORETURN __weak void arch_system_halt(unsigned int reason)
24 {
25 	ARG_UNUSED(reason);
26 
27 	/* TODO: What's the best way to totally halt the system if SMP
28 	 * is enabled?
29 	 */
30 
31 	(void)arch_irq_lock();
32 	for (;;) {
33 		/* Spin endlessly */
34 	}
35 }
36 /* LCOV_EXCL_STOP */
37 
38 /* LCOV_EXCL_START */
__weak void k_sys_fatal_error_handler(unsigned int reason,
				      const z_arch_esf_t *esf)
{
	/* Default (weak) fatal error policy: flush any deferred log
	 * messages, report, and halt permanently.  Applications may
	 * override this function to attempt recovery instead; see the
	 * caller z_fatal_error() for what happens if it returns.
	 *
	 * @param reason K_ERR_* reason code (see reason_to_str())
	 * @param esf    Arch-specific exception stack frame, may be NULL
	 */
	ARG_UNUSED(esf);

	LOG_PANIC();
	LOG_ERR("Halting system");
	arch_system_halt(reason);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
49 /* LCOV_EXCL_STOP */
50 
/* Return a printable name for @p thread; never returns NULL.  Falls
 * back to "unknown" when the thread pointer is NULL or the thread has
 * no (or an empty) name.
 */
static const char *thread_name_get(struct k_thread *thread)
{
	const char *name = NULL;

	if (thread != NULL) {
		name = k_thread_name_get(thread);
	}

	if ((name != NULL) && (name[0] != '\0')) {
		return name;
	}

	return "unknown";
}
61 
reason_to_str(unsigned int reason)62 static const char *reason_to_str(unsigned int reason)
63 {
64 	switch (reason) {
65 	case K_ERR_CPU_EXCEPTION:
66 		return "CPU exception";
67 	case K_ERR_SPURIOUS_IRQ:
68 		return "Unhandled interrupt";
69 	case K_ERR_STACK_CHK_FAIL:
70 		return "Stack overflow";
71 	case K_ERR_KERNEL_OOPS:
72 		return "Kernel oops";
73 	case K_ERR_KERNEL_PANIC:
74 		return "Kernel panic";
75 	default:
76 		return "Unknown error";
77 	}
78 }
79 
80 /* LCOV_EXCL_START */
FUNC_NORETURN void k_fatal_halt(unsigned int reason)
{
	/* Public entry point to halt the system after a fatal error.
	 * Simply defers to the (possibly arch-overridden) weak
	 * arch_system_halt(), which never returns.
	 */
	arch_system_halt(reason);
}
85 /* LCOV_EXCL_STOP */
86 
void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{
	/* Central fatal error path, reached from arch fault handlers and
	 * k_oops()/k_panic().  Logs diagnostics, captures a core dump,
	 * invokes the (possibly application-overridden) fatal error
	 * handler, and — if that handler returns and policy allows —
	 * aborts the offending thread rather than hanging the system.
	 *
	 * @param reason K_ERR_* reason code
	 * @param esf    Arch-specific exception stack frame, may be NULL
	 */

	/* We can't allow this code to be preempted, but don't need to
	 * synchronize between CPUs, so an arch-layer lock is
	 * appropriate.
	 */
	unsigned int key = arch_irq_lock();
	struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ?
			_current : NULL;

	/* twister looks for the "ZEPHYR FATAL ERROR" string, don't
	 * change it without also updating twister
	 */
	LOG_ERR(">>> ZEPHYR FATAL ERROR %d: %s on CPU %d", reason,
		reason_to_str(reason), _current_cpu->id);

	/* FIXME: This doesn't seem to work as expected on all arches.
	 * Need a reliable way to determine whether the fault happened when
	 * an IRQ or exception was being handled, or thread context.
	 *
	 * See #17656
	 */
#if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION)
	if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
		/* NOTE(review): the trailing "\n" looks redundant since
		 * the log backend terminates messages itself (the other
		 * LOG_ERR calls here have none) — confirm before removing.
		 */
		LOG_ERR("Fault during interrupt handling\n");
	}
#endif

	LOG_ERR("Current thread: %p (%s)", thread,
		thread_name_get(thread));

#ifndef CONFIG_XTENSA
	/* Capture a core dump of the faulting context.  Not built on
	 * Xtensa — see the matching guard around the coredump.h include
	 * at the top of this file; presumably handled elsewhere there.
	 */
	coredump(reason, esf, thread);
#endif

	/* May be overridden by the application; the default weak
	 * implementation halts and never returns.
	 */
	k_sys_fatal_error_handler(reason, esf);

	/* If the system fatal error handler returns, then kill the faulting
	 * thread; a policy decision was made not to hang the system.
	 *
	 * Policy for fatal errors in ISRs: unconditionally panic.
	 *
	 * There is one exception to this policy: a stack sentinel
	 * check may be performed (on behalf of the current thread)
	 * during ISR exit, but in this case the thread should be
	 * aborted.
	 *
	 * Note that k_thread_abort() returns on some architectures but
	 * not others; e.g. on ARC, x86_64, Xtensa with ASM2, ARM
	 */
	if (!IS_ENABLED(CONFIG_TEST)) {
		__ASSERT(reason != K_ERR_KERNEL_PANIC,
			 "Attempted to recover from a kernel panic condition");
		/* FIXME: #17656 */
#if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION)
		if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
#if defined(CONFIG_STACK_SENTINEL)
			if (reason != K_ERR_STACK_CHK_FAIL) {
				__ASSERT(0,
				 "Attempted to recover from a fatal error in ISR");
			 }
#endif /* CONFIG_STACK_SENTINEL */
		}
#endif /* CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION */
	} else {
		/* Test mode: tolerate recoveries that would assert above,
		 * so error-injection tests can continue running.
		 */
#if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION)
		if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
			/* Abort the thread only on STACK Sentinel check fail. */
#if defined(CONFIG_STACK_SENTINEL)
			if (reason != K_ERR_STACK_CHK_FAIL) {
				arch_irq_unlock(key);
				return;
			}
#else
			arch_irq_unlock(key);
			return;
#endif /* CONFIG_STACK_SENTINEL */
		} else {
			/* Abort the thread only if the fault is not due to
			 * a spurious ISR handler triggered.
			 */
			if (reason == K_ERR_SPURIOUS_IRQ) {
				arch_irq_unlock(key);
				return;
			}
		}
#endif /*CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION */
	}

	arch_irq_unlock(key);

	/* Falls through here when the error is recoverable by killing
	 * the thread (or when nested-exception detection is unavailable
	 * in test mode).
	 */
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_thread_abort(thread);
	}
}
183