/*
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 * Copyright (c) 2018 Lexmark International, Inc.
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/logging/log.h>
#if defined(CONFIG_GDBSTUB)
#include <zephyr/arch/arm/gdbstub.h>
#include <zephyr/debug/gdbstub.h>
#endif

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

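/*
 * Verbose fault dumping (decoding of the fault status registers below) is
 * only compiled in when CONFIG_FAULT_DUMP is set to 2; otherwise only the
 * abort banner and the generic fatal error handling are produced.
 */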
#define FAULT_DUMP_VERBOSE	(CONFIG_FAULT_DUMP == 2)

#if FAULT_DUMP_VERBOSE
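/**
 * @brief Map the DBGDSCR "Method of Entry" field to a printable string
 *
 * @param moe Method of Entry value read from DBGDSCR
 * @return Human-readable description of the debug entry reason
 */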
static const char *get_dbgdscr_moe_string(uint32_t moe)
{
	switch (moe) {
	case DBGDSCR_MOE_HALT_REQUEST:
		return "Halt Request";
	case DBGDSCR_MOE_BREAKPOINT:
		return "Breakpoint";
	case DBGDSCR_MOE_ASYNC_WATCHPOINT:
		return "Asynchronous Watchpoint";
	case DBGDSCR_MOE_BKPT_INSTRUCTION:
		return "BKPT Instruction";
	case DBGDSCR_MOE_EXT_DEBUG_REQUEST:
		return "External Debug Request";
	case DBGDSCR_MOE_VECTOR_CATCH:
		return "Vector Catch";
	case DBGDSCR_MOE_OS_UNLOCK_CATCH:
		return "OS Unlock Catch";
	case DBGDSCR_MOE_SYNC_WATCHPOINT:
		return "Synchronous Watchpoint";
	default:
		return "Unknown";
	}
}

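/**
 * @brief Dump the reason for a debug event
 *
 * Reads DBGDSCR and logs the decoded Method of Entry field.
 */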
static void dump_debug_event(void)
{
	/* Read and parse debug mode of entry */
	uint32_t dbgdscr = __get_DBGDSCR();
	uint32_t moe = (dbgdscr & DBGDSCR_MOE_Msk) >> DBGDSCR_MOE_Pos;

	/* Print debug event information */
	LOG_ERR("Debug Event (%s)", get_dbgdscr_moe_string(moe));
}

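/**
 * @brief Dump the fault status and translate it to a fatal error reason
 *
 * @param status Fault status (FS) extracted from the DFSR/IFSR
 * @param addr   Faulting address (only meaningful for synchronous faults)
 * @return Matching K_ERR_* reason code, or K_ERR_CPU_EXCEPTION if unknown
 */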
static uint32_t dump_fault(uint32_t status, uint32_t addr)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;
	/*
	 * Dump fault status and, if applicable, status-specific information.
	 * Note that the fault address is only displayed for the synchronous
	 * faults because it is unpredictable for asynchronous faults.
	 */
	switch (status) {
	case FSR_FS_ALIGNMENT_FAULT:
		reason = K_ERR_ARM_ALIGNMENT_FAULT;
		LOG_ERR("Alignment Fault @ 0x%08x", addr);
		break;
	case FSR_FS_PERMISSION_FAULT:
		reason = K_ERR_ARM_PERMISSION_FAULT;
		LOG_ERR("Permission Fault @ 0x%08x", addr);
		break;
	case FSR_FS_SYNC_EXTERNAL_ABORT:
		reason = K_ERR_ARM_SYNC_EXTERNAL_ABORT;
		LOG_ERR("Synchronous External Abort @ 0x%08x", addr);
		break;
	case FSR_FS_ASYNC_EXTERNAL_ABORT:
		reason = K_ERR_ARM_ASYNC_EXTERNAL_ABORT;
		LOG_ERR("Asynchronous External Abort");
		break;
	case FSR_FS_SYNC_PARITY_ERROR:
		reason = K_ERR_ARM_SYNC_PARITY_ERROR;
		LOG_ERR("Synchronous Parity/ECC Error @ 0x%08x", addr);
		break;
	case FSR_FS_ASYNC_PARITY_ERROR:
		reason = K_ERR_ARM_ASYNC_PARITY_ERROR;
		LOG_ERR("Asynchronous Parity/ECC Error");
		break;
	case FSR_FS_DEBUG_EVENT:
		reason = K_ERR_ARM_DEBUG_EVENT;
		dump_debug_event();
		break;
#if defined(CONFIG_AARCH32_ARMV8_R)
	case FSR_FS_TRANSLATION_FAULT:
		reason = K_ERR_ARM_TRANSLATION_FAULT;
		LOG_ERR("Translation Fault @ 0x%08x", addr);
		break;
	case FSR_FS_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT:
		reason = K_ERR_ARM_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT;
		LOG_ERR("Unsupported Exclusive Access Fault @ 0x%08x", addr);
		break;
#else
	case FSR_FS_BACKGROUND_FAULT:
		reason = K_ERR_ARM_BACKGROUND_FAULT;
		LOG_ERR("Background Fault @ 0x%08x", addr);
		break;
#endif
	default:
		LOG_ERR("Unknown (%u)", status);
	}
	return reason;
}
#endif

#if defined(CONFIG_FPU_SHARING)

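/*
 * Spill the caller-saved VFP registers (s0-s15, plus d16-d31 on cores that
 * implement 32 double-precision registers) into the given FP context area.
 */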
static ALWAYS_INLINE void z_arm_fpu_caller_save(struct __fpu_sf *fpu)
{
	__asm__ volatile (
		"vstmia %0, {s0-s15};\n"
		: : "r" (&fpu->s[0])
		: "memory"
		);
#if CONFIG_VFP_FEATURE_REGS_S64_D32
	__asm__ volatile (
		"vstmia %0, {d16-d31};\n\t"
		:
		: "r" (&fpu->d[0])
		: "memory"
		);
#endif
}

/**
 * @brief FPU undefined instruction fault handler
 *
 * @return Returns true if the FPU was already enabled, implying a genuine
 *         undefined instruction; returns false if the FPU was disabled and
 *         has now been enabled.
 */
bool z_arm_fault_undef_instruction_fp(void)
{
	/*
	 * Assume this is a floating point instruction that faulted because
	 * the FP unit was disabled. Enable the FP unit and try again. If
	 * the FP was already enabled then this was an actual undefined
	 * instruction.
	 */
	if (__get_FPEXC() & FPEXC_EN) {
		return true;
	}

	__set_FPEXC(FPEXC_EN);

	if (_current_cpu->nested > 1) {
		/*
		 * If the nested count is greater than 1, the undefined
		 * instruction exception came from an irq/svc context. (The
		 * irq/svc handler would have the nested count at 1 and then
		 * the undef exception would increment it to 2.)
		 */
		struct __fpu_sf *spill_esf =
			(struct __fpu_sf *)_current_cpu->fp_ctx;

		if (spill_esf == NULL) {
			return false;
		}

		_current_cpu->fp_ctx = NULL;

		/*
		 * If the nested count is 2 and the current thread has used the
		 * VFP (whether or not it was actually using the VFP before the
		 * current exception) OR if the nested count is greater than 2
		 * and the VFP was enabled on the irq/svc entrance for the
		 * saved exception stack frame, then save the floating point
		 * context because it is about to be overwritten.
		 */
		if (((_current_cpu->nested == 2)
			&& (arch_current_thread()->base.user_options & K_FP_REGS))
			|| ((_current_cpu->nested > 2)
			&& (spill_esf->undefined & FPEXC_EN))) {
			/*
			 * Spill VFP registers to the specified exception
			 * stack frame
			 */
			spill_esf->undefined |= FPEXC_EN;
			spill_esf->fpscr = __get_FPSCR();
			z_arm_fpu_caller_save(spill_esf);
		}
	} else {
		/*
		 * If the nested count is one, a thread was the faulting
		 * context. Just flag that this thread uses the VFP. This
		 * means that a thread that uses the VFP does not have to,
		 * but should, set K_FP_REGS on thread creation.
		 */
		arch_current_thread()->base.user_options |= K_FP_REGS;
	}

	return false;
}
#endif

/**
 * @brief Undefined instruction fault handler
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_undef_instruction(struct arch_esf *esf)
{
#if defined(CONFIG_FPU_SHARING)
	/*
	 * This is a true undefined instruction and we will be crashing,
	 * so save away the VFP registers.
	 */
	esf->fpu.undefined = __get_FPEXC();
	esf->fpu.fpscr = __get_FPSCR();
	z_arm_fpu_caller_save(&esf->fpu);
#endif

#if defined(CONFIG_GDBSTUB)
	z_gdb_entry(esf, GDB_EXCEPTION_INVALID_INSTRUCTION);
	/* Might not be fatal if the GDB stub placed the instruction in the code. */
	return false;
#endif

	/* Print fault information */
	LOG_ERR("***** UNDEFINED INSTRUCTION ABORT *****");

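	/* Report the architecture-specific reason unless simplified exception
	 * codes were requested.
	 */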
	uint32_t reason = IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) ?
			  K_ERR_CPU_EXCEPTION :
			  K_ERR_ARM_UNDEFINED_INSTRUCTION;

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All undefined instructions are treated as fatal for now */
	return true;
}

/**
 * @brief Prefetch abort fault handler
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_prefetch(struct arch_esf *esf)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	/* Read and parse Instruction Fault Status Register (IFSR) */
	uint32_t ifsr = __get_IFSR();
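	/*
	 * On Armv8-R the fault status is a single contiguous field; on the
	 * other supported architectures it is split across two groups of
	 * IFSR bits, so the upper part is shifted down and merged with the
	 * lower part to form one status value.
	 */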
#if defined(CONFIG_AARCH32_ARMV8_R)
	uint32_t fs = ifsr & IFSR_STATUS_Msk;
#else
	uint32_t fs = ((ifsr & IFSR_FS1_Msk) >> 6) | (ifsr & IFSR_FS0_Msk);
#endif

	/* Read Instruction Fault Address Register (IFAR) */
	uint32_t ifar = __get_IFAR();

#if defined(CONFIG_GDBSTUB)
	/* The BKPT instruction could have caused a software breakpoint */
	if (fs == IFSR_DEBUG_EVENT) {
		/* Debug event, call the gdbstub handler */
		z_gdb_entry(esf, GDB_EXCEPTION_BREAKPOINT);
	} else {
		/* Fatal */
		z_gdb_entry(esf, GDB_EXCEPTION_MEMORY_FAULT);
	}
	return false;
#endif

	/* Print fault information */
	LOG_ERR("***** PREFETCH ABORT *****");
	if (FAULT_DUMP_VERBOSE) {
		reason = dump_fault(fs, ifar);
	}

	/* Simplify exception codes if requested */
	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All prefetch aborts are treated as fatal for now */
	return true;
}

#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_arm_user_string_nlen);

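/*
 * Code ranges that are allowed to fault while accessing user memory
 * (currently only z_arm_user_string_nlen). A fault inside one of these
 * ranges is resolved by redirecting execution to the range's fixup handler
 * instead of being treated as fatal.
 */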
static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_arm_user_string_nlen)
};

/* Assess whether an MPU fault shall be treated as recoverable.
 *
 * @return true if the error is recoverable, otherwise false.
 */
static bool memory_fault_recoverable(struct arch_esf *esf)
{
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		/* Mask out the Thumb state bit in the range addresses */
		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;

		if (esf->basic.pc >= start && esf->basic.pc < end) {
			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
			return true;
		}
	}

	return false;
}
#endif

/**
 * @brief Data abort fault handler
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_data(struct arch_esf *esf)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	/* Read and parse Data Fault Status Register (DFSR) */
	uint32_t dfsr = __get_DFSR();
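	/* As with the IFSR above, merge the split fault status bits into a
	 * single value (Armv8-R keeps the status in one contiguous field).
	 */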
#if defined(CONFIG_AARCH32_ARMV8_R)
	uint32_t fs = dfsr & DFSR_STATUS_Msk;
#else
	uint32_t fs = ((dfsr & DFSR_FS1_Msk) >> 6) | (dfsr & DFSR_FS0_Msk);
#endif

	/* Read Data Fault Address Register (DFAR) */
	uint32_t dfar = __get_DFAR();

#if defined(CONFIG_GDBSTUB)
	z_gdb_entry(esf, GDB_EXCEPTION_MEMORY_FAULT);
	/* return false - non-fatal error */
	return false;
#endif

#if defined(CONFIG_USERSPACE)
	if ((fs == COND_CODE_1(CONFIG_AARCH32_ARMV8_R,
			       (FSR_FS_TRANSLATION_FAULT),
			       (FSR_FS_BACKGROUND_FAULT)))
	    || (fs == FSR_FS_PERMISSION_FAULT)) {
		if (memory_fault_recoverable(esf)) {
			return false;
		}
	}
#endif

	/* Print fault information */
	LOG_ERR("***** DATA ABORT *****");
	if (FAULT_DUMP_VERBOSE) {
		reason = dump_fault(fs, dfar);
	}

	/* Simplify exception codes if requested */
	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All data aborts are treated as fatal for now */
	return true;
}

/**
 * @brief Initialisation of fault handling
 */
void z_arm_fault_init(void)
{
	/* Nothing to do for now */
}