/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 * Copyright (c) 2020 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Common fault handler for ARM Cortex-M
 *
 * Common fault handler for ARM Cortex-M processors.
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <inttypes.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/barrier.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

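/* Logging helpers: PR_EXC() reports fault information, while STORE_xFAR()
 * captures a fault address register into a local variable only when logging
 * is enabled, so the register read is skipped entirely otherwise.
 */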
#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
#define PR_EXC(...) LOG_ERR(__VA_ARGS__)
#define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
#else
#define PR_EXC(...)
#define STORE_xFAR(reg_var, reg)
#endif /* CONFIG_PRINTK || CONFIG_LOG */

#if (CONFIG_FAULT_DUMP == 2)
#define PR_FAULT_INFO(...) PR_EXC(__VA_ARGS__)
#else
#define PR_FAULT_INFO(...)
#endif

#if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU)
#define EMN(edr)   (((edr) & SYSMPU_EDR_EMN_MASK) >> SYSMPU_EDR_EMN_SHIFT)
#define EACD(edr)  (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
#endif

/* Integrity signature for an ARMv8-M implementation */
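/* When the FP extension is implemented, bit [0] of the signature
 * distinguishes a standard stacked frame (1) from an extended frame
 * that also contains floating-point state (0).
 */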
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
#define INTEGRITY_SIGNATURE_STD 0xFEFA125BUL
#define INTEGRITY_SIGNATURE_EXT 0xFEFA125AUL
#else
#define INTEGRITY_SIGNATURE 0xFEFA125BUL
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
/* Size (in words) of the additional state context that is pushed
 * to the Secure stack during a Non-Secure exception entry.
 */
#define ADDITIONAL_STATE_CONTEXT_WORDS 10

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* helpers to access memory/bus/usage faults */
#define SCB_CFSR_MEMFAULTSR \
	(uint32_t)((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) \
		   >> SCB_CFSR_MEMFAULTSR_Pos)
#define SCB_CFSR_BUSFAULTSR \
	(uint32_t)((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) \
		   >> SCB_CFSR_BUSFAULTSR_Pos)
#define SCB_CFSR_USGFAULTSR \
	(uint32_t)((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) \
		   >> SCB_CFSR_USGFAULTSR_Pos)
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */

/**
 *
 * Dump information regarding the fault (FAULT_DUMP == 1)
 *
 * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
 * (short form).
 *
 * e.g. (precise bus error escalated to hard fault):
 *
 * Fault! EXC #3
 * HARD FAULT: Escalation (see below)!
 * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
 * BFAR: 0xff001234
 *
 *
 * Dump information regarding the fault (FAULT_DUMP == 2)
 *
 * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
 * (long form), and return the error code for the kernel to identify the fatal
 * error reason.
 *
 * e.g. (precise bus error escalated to hard fault):
 *
 * ***** HARD FAULT *****
 *    Fault escalation (see below)
 * ***** BUS FAULT *****
 *   Precise data bus error
 *   Address: 0xff001234
 *
 */
#if (CONFIG_FAULT_DUMP == 1)
static void fault_show(const struct arch_esf *esf, int fault)
{
	PR_EXC("Fault! EXC #%d", fault);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	PR_EXC("MMFSR: 0x%x, BFSR: 0x%x, UFSR: 0x%x", SCB_CFSR_MEMFAULTSR,
	       SCB_CFSR_BUSFAULTSR, SCB_CFSR_USGFAULTSR);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	PR_EXC("SFSR: 0x%x", SAU->SFSR);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
}
#else
/* For Dump level 2, detailed information is generated by the
 * fault handling functions for individual fault conditions, so this
 * function is left empty.
 *
 * For Dump level 0, no information needs to be generated.
 */
static void fault_show(const struct arch_esf *esf, int fault)
{
	(void)esf;
	(void)fault;
}
#endif /* FAULT_DUMP == 1 */

#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_arm_user_string_nlen);

static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_arm_user_string_nlen)
};
#endif

/* Assess whether an MPU fault shall be
 * treated as recoverable.
 *
 * @return true if the error is recoverable, otherwise return false.
 */
static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
{
#ifdef CONFIG_USERSPACE
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		/* Mask out instruction mode */
		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
		/* Non-synchronous exceptions (e.g. DebugMonitor) may have
		 * allowed PC to continue to the next instruction.
		 */
		end += (synchronous) ? 0x0 : 0x4;
#else
		ARG_UNUSED(synchronous);
#endif
		if (esf->basic.pc >= start && esf->basic.pc < end) {
			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
			return true;
		}
	}
#endif

	return false;
}

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
	const uint32_t psp);
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */

/**
 *
 * @brief Dump MemManage fault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault,
			      bool *recoverable)
{
	uint32_t reason = K_ERR_ARM_MEM_GENERIC;
	uint32_t mmfar = -EINVAL;

	PR_FAULT_INFO("***** MPU FAULT *****");

	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_STACKING;
		PR_FAULT_INFO("  Stacking error (context area might"
			" not be valid)");
	}
	if ((SCB->CFSR & SCB_CFSR_MUNSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_UNSTACKING;
		PR_FAULT_INFO("  Unstacking error");
	}
	if ((SCB->CFSR & SCB_CFSR_DACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_DATA_ACCESS;
		PR_FAULT_INFO("  Data Access Violation");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the MMFAR value.
		 * 2. Read the MMARVALID bit in the MMFSR.
		 * The MMFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another higher
		 * priority exception might change the MMFAR value.
		 */
		uint32_t temp = SCB->MMFAR;

		if ((SCB->CFSR & SCB_CFSR_MMARVALID_Msk) != 0) {
			mmfar = temp;
			PR_EXC("  MMFAR Address: 0x%x", mmfar);
			if (from_hard_fault != 0) {
				/* clear SCB_MMAR[VALID] to reset */
				SCB->CFSR &= ~SCB_CFSR_MMARVALID_Msk;
			}
		}
	}
	if ((SCB->CFSR & SCB_CFSR_IACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_INSTRUCTION_ACCESS;
		PR_FAULT_INFO("  Instruction Access Violation");
	}
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO(
			"  Floating-point lazy state preservation error");
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* When stack protection is enabled, we need to assess
	 * whether the memory violation error is a stack corruption.
	 *
	 * By design, being a Stacking MemManage fault is a necessary
	 * and sufficient condition for a thread stack corruption.
	 * [The Cortex-M process stack pointer is always descending and
	 * is never modified by code (except for the context-switch
	 * routine); therefore, a stacking error implies the PSP has
	 * crossed into an area beyond the thread stack.]
	 *
	 * Data Access Violation errors may or may not be caused by
	 * thread stack overflows.
	 */
	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) ||
		(SCB->CFSR & SCB_CFSR_DACCVIOL_Msk)) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
		/* MemManage Faults are always banked between security
		 * states. Therefore, we can safely assume the fault
		 * originated from the same security state.
		 *
		 * As we only assess thread stack corruption, we only
		 * process the error further if the stack frame is on
		 * PSP. For the always-banked MemManage Fault, this is
		 * equivalent to inspecting the RETTOBASE flag.
		 *
		 * Note:
		 * It is possible that the MMFAR address is not written by the
		 * Cortex-M core; this occurs when the stacking error is
		 * not accompanied by a data access violation error (i.e.
		 * when the stack overflows due to the exception entry frame
		 * stacking): z_check_thread_stack_fail() shall be able to
		 * handle the case of 'mmfar' holding the -EINVAL value.
		 */
		if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
			uint32_t min_stack_ptr = z_check_thread_stack_fail(mmfar,
				((uint32_t) &esf[0]));

			if (min_stack_ptr) {
				/* When a MemManage Stacking Error has occurred,
				 * the stack context frame might be corrupted
				 * but the stack pointer may have actually
				 * descended below the allowed (thread) stack
				 * area. We may face a problem with un-stacking
				 * the frame, upon the exception return, if we
				 * do not have sufficient access permissions to
				 * read the corrupted stack frame. Therefore,
				 * we manually force the stack pointer to the
				 * lowest allowed position, inside the thread's
				 * stack.
				 *
				 * Note:
				 * The PSP will normally be adjusted in a tail-
				 * chained exception performing context switch,
				 * after aborting the corrupted thread. The
				 * adjustment, here, is required as tail-chaining
				 * cannot always be guaranteed.
				 *
				 * The manual adjustment of PSP is safe, as we
				 * will not be re-scheduling this thread again
				 * for execution; thread stack corruption is a
				 * fatal error and a thread that corrupted its
				 * stack needs to be aborted.
				 */
				__set_PSP(min_stack_ptr);

				reason = K_ERR_STACK_CHK_FAIL;
			} else {
				__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
					"Stacking error not a stack fail\n");
			}
		}
#else
	(void)mmfar;
	__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
		"Stacking or Data Access Violation error "
		"without stack guard, user-mode or null-pointer detection\n");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
	}

	/* While handling this fault, we may have triggered an FP
	 * lazy stacking MemManage fault. At the time of writing, this
	 * can happen when printing. If so, we should clear the
	 * pending flag in addition to clearing the reason for the fault.
	 */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTPENDED_Msk;
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* clear MMFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk;

	/* Assess whether the system shall ignore/recover from this MPU fault. */
	*recoverable = memory_fault_recoverable(esf, true);

	return reason;
}

/**
 *
 * @brief Dump BusFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason.
 *
 */
static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
{
	uint32_t reason = K_ERR_ARM_BUS_GENERIC;

	PR_FAULT_INFO("***** BUS FAULT *****");

	if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
		reason = K_ERR_ARM_BUS_STACKING;
		PR_FAULT_INFO("  Stacking error");
	}
	if (SCB->CFSR & SCB_CFSR_UNSTKERR_Msk) {
		reason = K_ERR_ARM_BUS_UNSTACKING;
		PR_FAULT_INFO("  Unstacking error");
	}
	if (SCB->CFSR & SCB_CFSR_PRECISERR_Msk) {
		reason = K_ERR_ARM_BUS_PRECISE_DATA_BUS;
		PR_FAULT_INFO("  Precise data bus error");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the BFAR value.
		 * 2. Read the BFARVALID bit in the BFSR.
		 * The BFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another
		 * higher priority exception might change the BFAR value.
		 */
		STORE_xFAR(bfar, SCB->BFAR);

		if ((SCB->CFSR & SCB_CFSR_BFARVALID_Msk) != 0) {
			PR_EXC("  BFAR Address: 0x%x", bfar);
			if (from_hard_fault != 0) {
				/* clear SCB_CFSR_BFAR[VALID] to reset */
				SCB->CFSR &= ~SCB_CFSR_BFARVALID_Msk;
			}
		}
	}
	if (SCB->CFSR & SCB_CFSR_IMPRECISERR_Msk) {
		reason = K_ERR_ARM_BUS_IMPRECISE_DATA_BUS;
		PR_FAULT_INFO("  Imprecise data bus error");
	}
	if ((SCB->CFSR & SCB_CFSR_IBUSERR_Msk) != 0) {
		reason = K_ERR_ARM_BUS_INSTRUCTION_BUS;
		PR_FAULT_INFO("  Instruction bus error");
#if !defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	}
#else
	} else if (SCB->CFSR & SCB_CFSR_LSPERR_Msk) {
		reason = K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO("  Floating-point lazy state preservation error");
	} else {
		;
	}
#endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */

#if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU)
	uint32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK;
	uint32_t mask = BIT(31);
	int i;
	uint32_t ear = -EINVAL;

	if (sperr) {
		for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
			if ((sperr & mask) == 0U) {
				continue;
			}
			STORE_xFAR(edr, SYSMPU->SP[i].EDR);
			ear = SYSMPU->SP[i].EAR;

			PR_FAULT_INFO("  NXP MPU error, port %d", i);
			PR_FAULT_INFO("    Mode: %s, %s Address: 0x%x",
			       edr & BIT(2) ? "Supervisor" : "User",
			       edr & BIT(1) ? "Data" : "Instruction",
			       ear);
			PR_FAULT_INFO(
					"    Type: %s, Master: %d, Regions: 0x%x",
			       edr & BIT(0) ? "Write" : "Read",
			       EMN(edr), EACD(edr));

			/* When stack protection is enabled, we need to assess
			 * whether the memory violation error is a stack corruption.
			 *
			 * By design, being a Stacking Bus fault is a necessary
			 * and sufficient condition for a stack corruption.
			 */
			if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
				/* Note: we can assume the fault originated
				 * from the same security state for ARM
				 * platforms implementing the NXP MPU
				 * (CONFIG_CPU_HAS_NXP_SYSMPU=y).
				 *
				 * As we only assess thread stack corruption,
				 * we only process the error further if the
				 * stack frame is on PSP. For NXP MPU-related
				 * Bus Faults (banked), this is equivalent to
				 * inspecting the RETTOBASE flag.
				 */
				if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
					uint32_t min_stack_ptr =
						z_check_thread_stack_fail(ear,
							((uint32_t) &esf[0]));

					if (min_stack_ptr) {
						/* When a BusFault Stacking Error
						 * has occurred, the stack
						 * context frame might be
						 * corrupted but the stack
						 * pointer may have actually
						 * moved. We may face problems
						 * with un-stacking the frame,
						 * upon exception return, if we
						 * do not have sufficient
						 * permissions to read the
						 * corrupted stack frame.
						 * Therefore, we manually force
						 * the stack pointer to the
						 * lowest allowed position.
						 *
						 * Note:
						 * The PSP will normally be
						 * adjusted in a tail-chained
						 * exception performing context
						 * switch, after aborting the
						 * corrupted thread. Here, the
						 * adjustment is required as
						 * tail-chaining cannot always
						 * be guaranteed.
						 */
						__set_PSP(min_stack_ptr);

						reason =
							K_ERR_STACK_CHK_FAIL;
						break;
					}
				}
#else
				(void)ear;
				__ASSERT(0,
					"Stacking error without stack guard "
					"or User-mode support");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
			}
		}
		SYSMPU->CESR &= ~sperr;
	}
#endif /* defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU) */

	/* clear BFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk;

	*recoverable = memory_fault_recoverable(esf, true);

	return reason;
}

/**
 *
 * @brief Dump UsageFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t usage_fault(const struct arch_esf *esf)
{
	uint32_t reason = K_ERR_ARM_USAGE_GENERIC;

	PR_FAULT_INFO("***** USAGE FAULT *****");

	/* bits are sticky: they stack and must be reset */
	if ((SCB->CFSR & SCB_CFSR_DIVBYZERO_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_DIV_0;
		PR_FAULT_INFO("  Division by zero");
	}
	if ((SCB->CFSR & SCB_CFSR_UNALIGNED_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNALIGNED_ACCESS;
		PR_FAULT_INFO("  Unaligned memory access");
	}
#if defined(CONFIG_ARMV8_M_MAINLINE)
	if ((SCB->CFSR & SCB_CFSR_STKOF_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_STACK_OVERFLOW;
		PR_FAULT_INFO("  Stack overflow (context area not valid)");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
		/* Stack Overflows are always reported as stack corruption
		 * errors. Note that the built-in stack overflow mechanism
		 * prevents the context area from being loaded on the stack
		 * upon UsageFault exception entry. As a result, we cannot
		 * rely on the reported faulty instruction address to
		 * determine the instruction that triggered the stack
		 * overflow.
		 */
		reason = K_ERR_STACK_CHK_FAIL;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
	}
#endif /* CONFIG_ARMV8_M_MAINLINE */
	if ((SCB->CFSR & SCB_CFSR_NOCP_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_NO_COPROCESSOR;
		PR_FAULT_INFO("  No coprocessor instructions");
	}
	if ((SCB->CFSR & SCB_CFSR_INVPC_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN;
		PR_FAULT_INFO("  Illegal load of EXC_RETURN into PC");
	}
	if ((SCB->CFSR & SCB_CFSR_INVSTATE_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EPSR;
		PR_FAULT_INFO("  Illegal use of the EPSR");
	}
	if ((SCB->CFSR & SCB_CFSR_UNDEFINSTR_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION;
		PR_FAULT_INFO("  Attempt to execute undefined instruction");
	}

	/* clear UFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_USGFAULTSR_Msk;

	return reason;
}

#if defined(CONFIG_ARM_SECURE_FIRMWARE)
/**
 *
 * @brief Dump SecureFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t secure_fault(const struct arch_esf *esf)
{
	uint32_t reason = K_ERR_ARM_SECURE_GENERIC;

	PR_FAULT_INFO("***** SECURE FAULT *****");

	STORE_xFAR(sfar, SAU->SFAR);
	if ((SAU->SFSR & SAU_SFSR_SFARVALID_Msk) != 0) {
		PR_EXC("  Address: 0x%x", sfar);
	}

	/* bits are sticky: they stack and must be reset */
	if ((SAU->SFSR & SAU_SFSR_INVEP_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ENTRY_POINT;
		PR_FAULT_INFO("  Invalid entry point");
	} else if ((SAU->SFSR & SAU_SFSR_INVIS_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE;
		PR_FAULT_INFO("  Invalid integrity signature");
	} else if ((SAU->SFSR & SAU_SFSR_INVER_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_EXCEPTION_RETURN;
		PR_FAULT_INFO("  Invalid exception return");
	} else if ((SAU->SFSR & SAU_SFSR_AUVIOL_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ATTRIBUTION_UNIT;
		PR_FAULT_INFO("  Attribution unit violation");
	} else if ((SAU->SFSR & SAU_SFSR_INVTRAN_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_TRANSITION;
		PR_FAULT_INFO("  Invalid transition");
	} else if ((SAU->SFSR & SAU_SFSR_LSPERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO("  Lazy state preservation");
	} else if ((SAU->SFSR & SAU_SFSR_LSERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_ERROR;
		PR_FAULT_INFO("  Lazy state error");
	}

	/* clear SFSR sticky bits */
	SAU->SFSR |= 0xFF;

	return reason;
}
#endif /* defined(CONFIG_ARM_SECURE_FIRMWARE) */

/**
 *
 * @brief Dump debug monitor exception information
 *
 * See z_arm_fault_dump() for example.
 *
 */
static void debug_monitor(struct arch_esf *esf, bool *recoverable)
{
	*recoverable = false;

	PR_FAULT_INFO(
		"***** Debug monitor exception *****");

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
	if (!z_arm_debug_monitor_event_error_check()) {
		/* By default, all debug monitor exceptions that are not
		 * treated as errors by z_arm_debug_monitor_event_error_check()
		 * are considered recoverable.
		 */
		*recoverable = true;
	} else {
		*recoverable = memory_fault_recoverable(esf, false);
	}

#endif
}

#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
{
	uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
	/* SVC is a 16-bit instruction. On a synchronous SVC
	 * escalated to Hard Fault, the return address is the
	 * next instruction, i.e. after the SVC.
	 */
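	/* Thumb SVC (T1) encoding: 0xDF in the upper byte; the lower byte
	 * holds the SVC number (immediate).
	 */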
#define _SVC_OPCODE 0xDF00

	/* We are about to de-reference the program counter at the
	 * time of the fault to determine if it was an SVC
	 * instruction. However, we don't know if the pc itself is
	 * valid -- we could have faulted due to trying to execute a
	 * corrupted function pointer.
	 *
	 * We will temporarily ignore BusFaults so a bad program
	 * counter does not trigger an ARM lockup condition.
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && !defined(CONFIG_ARMV8_M_BASELINE)
	/* Note: ARMv6-M does not support CCR.BFHFNMIGN so this access
	 * could generate a fault if the pc was invalid.
	 */
	uint16_t fault_insn = *(ret_addr - 1);
#else
	SCB->CCR |= SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	uint16_t fault_insn = *(ret_addr - 1);

	SCB->CCR &= ~SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
#endif /* ARMV6_M_ARMV8_M_BASELINE && !ARMV8_M_BASELINE */

	if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
		((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
		return true;
	}
#undef _SVC_OPCODE
	return false;
}

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
static inline bool z_arm_is_pc_valid(uintptr_t pc)
{
	/* Is it in a valid text region? */
	if ((((uintptr_t)&__text_region_start) <= pc) && (pc < ((uintptr_t)&__text_region_end))) {
		return true;
	}

	/* Is it in the valid ramfunc range? */
	if ((((uintptr_t)&__ramfunc_start) <= pc) && (pc < ((uintptr_t)&__ramfunc_end))) {
		return true;
	}

#if DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_itcm))
	/* Is it in the ITCM? */
	if ((((uintptr_t)&__itcm_start) <= pc) && (pc < ((uintptr_t)&__itcm_end))) {
		return true;
	}
#endif

	return false;
}
#endif

/**
 *
 * @brief Dump hard fault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	PR_FAULT_INFO("***** HARD FAULT *****");

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Workaround for #18712:
	 * HardFault may be due to escalation, as a result of
	 * an SVC instruction that could not be executed; this
	 * can occur if ARCH_EXCEPT() is called by an ISR,
	 * which executes at priority equal to the SVC handler
	 * priority. We handle the case of Kernel OOPS and Stack
	 * Fail here.
	 */

	if (z_arm_is_pc_valid((uintptr_t)esf->basic.pc) && z_arm_is_synchronous_svc(esf)) {
		PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
		reason = esf->basic.r0;
	}

	*recoverable = memory_fault_recoverable(esf, true);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	*recoverable = false;

	if ((SCB->HFSR & SCB_HFSR_VECTTBL_Msk) != 0) {
		PR_EXC("  Bus fault on vector table read");
	} else if ((SCB->HFSR & SCB_HFSR_DEBUGEVT_Msk) != 0) {
		PR_EXC("  Debug event");
	} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
		PR_EXC("  Fault escalation (see below)");
		if (z_arm_is_synchronous_svc(esf)) {
			PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
			reason = esf->basic.r0;
		} else if ((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) != 0) {
			reason = mem_manage_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) != 0) {
			reason = bus_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) != 0) {
			reason = usage_fault(esf);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
		} else if (SAU->SFSR != 0) {
			reason = secure_fault(esf);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
		} else {
			__ASSERT(0,
			"Fault escalation without FSR info");
		}
	} else {
		__ASSERT(0,
		"HardFault without HFSR info"
		" Shall never occur");
	}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	return reason;
}

/**
 *
 * @brief Dump reserved exception information
 *
 * See z_arm_fault_dump() for example.
 *
 */
static void reserved_exception(const struct arch_esf *esf, int fault)
{
	ARG_UNUSED(esf);

	PR_FAULT_INFO("***** %s %d) *****",
	       fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
	       fault - 16);
}

/* Handler function for ARM fault conditions. */
static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	*recoverable = false;

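	/* Cortex-M exception numbers: 3 = HardFault, 4 = MemManage,
	 * 5 = BusFault, 6 = UsageFault, 7 = SecureFault (ARMv8-M),
	 * 12 = Debug Monitor.
	 */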
	switch (fault) {
	case 3:
		reason = hard_fault(esf, recoverable);
		break;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* HardFault is raised for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	case 4:
		reason = mem_manage_fault(esf, 0, recoverable);
		break;
	case 5:
		reason = bus_fault(esf, 0, recoverable);
		break;
	case 6:
		reason = usage_fault(esf);
		break;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	case 7:
		reason = secure_fault(esf);
		break;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
	case 12:
		debug_monitor(esf, recoverable);
		break;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	default:
		reserved_exception(esf, fault);
		break;
	}

	if ((*recoverable) == false) {
		/* Dump generic information about the fault. */
		fault_show(esf, fault);
	}

	return reason;
}

#if defined(CONFIG_ARM_SECURE_FIRMWARE)
#if (CONFIG_FAULT_DUMP == 2)
/**
 * @brief Dump the Secure Stack information for an exception that
 * has occurred in Non-Secure state.
 *
 * @param secure_esf Pointer to the secure stack frame.
 */
static void secure_stack_dump(const struct arch_esf *secure_esf)
{
	/*
	 * In case a Non-Secure exception interrupted the Secure
	 * execution, the Secure state has stacked the additional
	 * state context and the top of the stack contains the
	 * integrity signature.
	 *
	 * In case of a Non-Secure function call the top of the
	 * stack contains the return address to Secure state.
	 */
	uint32_t *top_of_sec_stack = (uint32_t *)secure_esf;
	uint32_t sec_ret_addr;
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) ||
		(*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) {
#else
	if (*top_of_sec_stack == INTEGRITY_SIGNATURE) {
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
		/* Secure state interrupted by a Non-Secure exception.
		 * The return address is after the additional state
		 * context, stacked by the Secure code upon
		 * Non-Secure exception entry.
		 */
		top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
		secure_esf = (const struct arch_esf *)top_of_sec_stack;
		sec_ret_addr = secure_esf->basic.pc;
	} else {
		/* Exception during Non-Secure function call.
		 * The return address is located on top of stack.
		 */
		sec_ret_addr = *top_of_sec_stack;
	}
	PR_FAULT_INFO("  S instruction address:  0x%x", sec_ret_addr);
}
#define SECURE_STACK_DUMP(esf) secure_stack_dump(esf)
#else
/* We do not dump the Secure stack information for lower dump levels. */
#define SECURE_STACK_DUMP(esf)
#endif /* CONFIG_FAULT_DUMP == 2 */
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

/*
 * This internal function does the following:
 *
 * - Retrieves the exception stack frame
 * - Evaluates whether to report being in a nested exception
 *
 * If the ESF is not successfully retrieved, the function signals
 * an error by returning NULL.
 *
 * @return ESF pointer on success, otherwise return NULL
 */
static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
	bool *nested_exc)
{
	bool alternative_state_exc = false;
	struct arch_esf *ptr_esf = NULL;

	*nested_exc = false;

	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
			EXC_RETURN_INDICATOR_PREFIX) {
		/* Invalid EXC_RETURN value. This is a fatal error. */
		return NULL;
	}

#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
		/* Secure Firmware shall only handle Secure Exceptions.
		 * This is a fatal error.
		 */
		return NULL;
	}

	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
		/* Exception entry occurred in Secure stack. */
	} else {
		/* Exception entry occurred in Non-Secure stack. Therefore,
		 * msp/psp point to the Secure stack; however, the actual
		 * exception stack frame is located in the Non-Secure stack.
		 */
		alternative_state_exc = true;

		/* Dump the Secure stack before handling the actual fault. */
		struct arch_esf *secure_esf;

		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
			/* Secure stack pointed by PSP */
			secure_esf = (struct arch_esf *)psp;
		} else {
			/* Secure stack pointed by MSP */
			secure_esf = (struct arch_esf *)msp;
			*nested_exc = true;
		}

		SECURE_STACK_DUMP(secure_esf);

		/* Handle the actual fault.
		 * Extract the correct stack frame from the Non-Secure state
		 * and supply it to the fault handling function.
		 */
		if (exc_return & EXC_RETURN_MODE_THREAD) {
			ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
		} else {
			ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
		}
	}
#elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
	if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
		/* Non-Secure Firmware shall only handle Non-Secure Exceptions.
		 * This is a fatal error.
		 */
		return NULL;
	}

	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
		/* Exception entry occurred in Secure stack.
		 *
		 * Note that Non-Secure firmware cannot inspect the Secure
		 * stack to determine the root cause of the fault. Fault
		 * inspection will indicate the Non-Secure instruction
		 * that performed the branch to the Secure domain.
		 */
		alternative_state_exc = true;

		PR_FAULT_INFO("Exception occurred in Secure State");

		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
			/* Non-Secure stack frame on PSP */
			ptr_esf = (struct arch_esf *)psp;
		} else {
			/* Non-Secure stack frame on MSP */
			ptr_esf = (struct arch_esf *)msp;
		}
	} else {
		/* Exception entry occurred in Non-Secure stack. */
	}
#else
	/* The processor has a single execution state.
	 * We verify that Thread mode is using PSP.
	 */
	if ((exc_return & EXC_RETURN_MODE_THREAD) &&
		(!(exc_return & EXC_RETURN_SPSEL_PROCESS))) {
		PR_EXC("SPSEL in thread mode does not indicate PSP");
		return NULL;
	}
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

	if (!alternative_state_exc) {
		if (exc_return & EXC_RETURN_MODE_THREAD) {
			/* Returning to thread mode */
			ptr_esf = (struct arch_esf *)psp;

		} else {
			/* Returning to handler mode */
			ptr_esf = (struct arch_esf *)msp;
			*nested_exc = true;
		}
	}

	return ptr_esf;
}

/**
 *
 * @brief ARM Fault handler
 *
 * This routine is called when fatal error conditions are detected by hardware
 * and is responsible for:
 * - resetting the processor fault status registers (for the case when the
 *   error handling policy allows the system to recover from the error),
 * - reporting the error information,
 * - determining the error reason to be provided as input to the user-
 *   provided routine, k_sys_fatal_error_handler().
 * The k_sys_fatal_error_handler() is invoked once the above operations are
 * completed, and is responsible for implementing the error handling policy.
 *
 * The function needs, first, to determine the exception stack frame.
 * Note that the current security state might not be the actual
 * state in which the processor was executing when the exception occurred.
 * The actual state may need to be determined by inspecting the EXC_RETURN
 * value, which is provided as argument to the Fault handler.
 *
 * If the exception occurred in the same security state, the stack frame
 * will be pointed to by either MSP or PSP depending on the processor
 * execution state when the exception occurred. MSP and PSP values are
 * provided as arguments to the Fault handler.
 *
 * @param msp MSP value immediately after the exception occurred
 * @param psp PSP value immediately after the exception occurred
 * @param exc_return EXC_RETURN value present in LR after exception entry.
 * @param callee_regs Callee-saved registers (R4-R11, PSP)
 *
 */
void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
	_callee_saved_t *callee_regs)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;
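	/* VECTACTIVE in SCB->ICSR holds the exception number of the
	 * currently active exception (e.g. 3 for HardFault).
	 */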
	int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
	bool recoverable, nested_exc;
	struct arch_esf *esf;

	/* Create a stacked copy of the ESF to be used during
	 * the fault handling process.
	 */
	struct arch_esf esf_copy;

	/* Force unlock interrupts */
	arch_irq_unlock(0);

	/* Retrieve the Exception Stack Frame (ESF) to be supplied
	 * as argument to the remainder of the fault handling process.
	 */
	esf = get_esf(msp, psp, exc_return, &nested_exc);
	__ASSERT(esf != NULL,
		"ESF could not be retrieved successfully. Shall never occur.");

	z_arm_set_fault_sp(esf, exc_return);

	reason = fault_handle(esf, fault, &recoverable);
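	/* If the fault was assessed as recoverable (e.g. the faulting PC was
	 * fixed up), return and let the interrupted context resume execution.
	 */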
	if (recoverable) {
		return;
	}

	/* Copy the ESF */
#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
	memcpy(&esf_copy, esf, sizeof(struct arch_esf));
	ARG_UNUSED(callee_regs);
#else
	/* The extra exception info is not present in the original esf,
	 * so we only copy the fields before those.
	 */
	memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
	esf_copy.extra_info = (struct __extra_esf_info) {
		.callee = callee_regs,
		.exc_return = exc_return,
		.msp = msp
	};
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */

	/* Overwrite the stacked IPSR to mark a nested exception,
	 * or a return to Thread mode. Note that this may be
	 * required if the retrieved ESF contents are invalid
	 * due to, for instance, a stacking error.
	 */
	if (nested_exc) {
		if ((esf_copy.basic.xpsr & IPSR_ISR_Msk) == 0) {
			esf_copy.basic.xpsr |= IPSR_ISR_Msk;
		}
	} else {
		esf_copy.basic.xpsr &= ~(IPSR_ISR_Msk);
	}

	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	z_arm_fatal_error(reason, &esf_copy);
}

/**
 *
 * @brief Initialization of fault handling
 *
 * Turns on the desired hardware faults.
 *
 */
void z_arm_fault_init(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
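	/* Enable UsageFault generation on division by zero. */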
	SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* If Stack guarding via SP limit checking is enabled, disable
	 * SP limit checking inside HardFault and NMI. This is done
	 * in order to allow the desired fault logging to execute
	 * properly in all cases.
	 *
	 * Note that this could allow a Secure Firmware Main Stack
	 * to descend into the non-secure region during HardFault and
	 * NMI exception entry. To prevent this, non-secure
	 * memory regions must be located higher than secure memory
	 * regions.
	 *
	 * For Non-Secure Firmware this could allow the Non-Secure Main
	 * Stack to attempt to descend into the secure region, in which case a
	 * Secure Hard Fault will occur and we can track the fault from there.
	 */
	SCB->CCR |= SCB_CCR_STKOFHFNMIGN_Msk;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
#ifdef CONFIG_TRAP_UNALIGNED_ACCESS
	SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
#else
	SCB->CCR &= ~SCB_CCR_UNALIGN_TRP_Msk;
#endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
}