1 /*
2  * Copyright (c) 2014 Wind River Systems, Inc.
3  * Copyright (c) 2020 Nordic Semiconductor ASA.
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 /**
9  * @file
10  * @brief Common fault handler for ARM Cortex-M
11  *
12  * Common fault handler for ARM Cortex-M processors.
13  */
14 
15 #include <zephyr/kernel.h>
16 #include <kernel_internal.h>
17 #include <inttypes.h>
18 #include <zephyr/arch/exception.h>
19 #include <zephyr/arch/common/exc_handle.h>
20 #include <zephyr/linker/linker-defs.h>
21 #include <zephyr/logging/log.h>
22 #include <zephyr/sys/barrier.h>
23 #include <cortex_m/debug.h>
24 
25 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
26 
27 #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
28 #define PR_EXC(...)              EXCEPTION_DUMP(__VA_ARGS__)
29 #define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
30 #else
31 #define PR_EXC(...)
32 #define STORE_xFAR(reg_var, reg)
33 #endif /* CONFIG_PRINTK || CONFIG_LOG */
34 
35 #if (CONFIG_FAULT_DUMP == 2)
36 #define PR_FAULT_INFO(...) PR_EXC(__VA_ARGS__)
37 #else
38 #define PR_FAULT_INFO(...)
39 #endif
40 
41 #if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU)
42 #define EMN(edr)  (((edr) & SYSMPU_EDR_EMN_MASK) >> SYSMPU_EDR_EMN_SHIFT)
43 #define EACD(edr) (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
44 #endif
45 
46 /* Integrity signature for an ARMv8-M implementation */
47 #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
48 #define INTEGRITY_SIGNATURE_STD 0xFEFA125BUL
49 #define INTEGRITY_SIGNATURE_EXT 0xFEFA125AUL
50 #else
51 #define INTEGRITY_SIGNATURE 0xFEFA125BUL
52 #endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
53 /* Size (in words) of the additional state context that is pushed
54  * to the Secure stack during a Non-Secure exception entry.
55  */
56 #define ADDITIONAL_STATE_CONTEXT_WORDS 10
57 
58 #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
59 /* helpers to access memory/bus/usage faults */
60 #define SCB_CFSR_MEMFAULTSR                                                                        \
61 	(uint32_t)((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos)
62 #define SCB_CFSR_BUSFAULTSR                                                                        \
63 	(uint32_t)((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) >> SCB_CFSR_BUSFAULTSR_Pos)
64 #define SCB_CFSR_USGFAULTSR                                                                        \
65 	(uint32_t)((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) >> SCB_CFSR_USGFAULTSR_Pos)
66 #endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
67 
68 /**
69  *
70  * Dump information regarding fault (FAULT_DUMP == 1)
71  *
72  * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
73  * (short form).
74  *
75  * eg. (precise bus error escalated to hard fault):
76  *
77  * Fault! EXC #3
78  * HARD FAULT: Escalation (see below)!
79  * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
80  * BFAR: 0xff001234
81  *
82  *
83  *
84  * Dump information regarding fault (FAULT_DUMP == 2)
85  *
86  * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
87  * (long form), and return the error code for the kernel to identify the fatal
88  * error reason.
89  *
90  * eg. (precise bus error escalated to hard fault):
91  *
92  * ***** HARD FAULT *****
93  *    Fault escalation (see below)
94  * ***** BUS FAULT *****
95  *   Precise data bus error
96  *   Address: 0xff001234
97  *
98  */
99 
100 #if (CONFIG_FAULT_DUMP == 1)
/* Dump brief fault information (FAULT_DUMP level 1): the exception
 * number and, on ARMv7-M/ARMv8-M Mainline, the MemManage/Bus/Usage
 * fault status sub-registers (plus SFSR on Secure firmware builds).
 *
 * @param esf Exception stack frame (unused at this dump level)
 * @param fault Exception number that was taken
 */
static void fault_show(const struct arch_esf *esf, int fault)
{
	PR_EXC("Fault! EXC #%d", fault);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* The SCB_CFSR_* helper macros (defined above) mask and shift the
	 * corresponding CFSR sub-register down to bit 0 before printing.
	 */
	PR_EXC("MMFSR: 0x%x, BFSR: 0x%x, UFSR: 0x%x", SCB_CFSR_MEMFAULTSR, SCB_CFSR_BUSFAULTSR,
	       SCB_CFSR_USGFAULTSR);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	PR_EXC("SFSR: 0x%x", SAU->SFSR);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
}
113 #else
114 /* For Dump level 2, detailed information is generated by the
115  * fault handling functions for individual fault conditions, so this
116  * function is left empty.
117  *
118  * For Dump level 0, no information needs to be generated.
119  */
static void fault_show(const struct arch_esf *esf, int fault)
{
	/* Nothing is printed at this dump level; the casts only silence
	 * unused-parameter warnings.
	 */
	(void)fault;
	(void)esf;
}
125 #endif /* FAULT_DUMP == 1 */
126 
127 #ifdef CONFIG_USERSPACE
128 Z_EXC_DECLARE(z_arm_user_string_nlen);
129 
130 static const struct z_exc_handle exceptions[] = {Z_EXC_HANDLE(z_arm_user_string_nlen)};
131 #endif
132 
133 /* Perform an assessment whether an MPU fault shall be
134  * treated as recoverable.
135  *
136  * @return true if error is recoverable, otherwise return false.
137  */
static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
{
#ifdef CONFIG_USERSPACE
	/* If the faulting PC lies inside one of the registered exception
	 * ranges (currently only z_arm_user_string_nlen), redirect
	 * execution to that range's fixup handler and report the fault
	 * as recoverable.
	 */
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		/* Mask out instruction mode */
		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
		/* Non-synchronous exceptions (e.g. DebugMonitor) may have
		 * allowed PC to continue to the next instruction.
		 */
		end += (synchronous) ? 0x0 : 0x4;
#else
		ARG_UNUSED(synchronous);
#endif
		if (esf->basic.pc >= start && esf->basic.pc < end) {
			/* Resume at the fixup routine instead of the
			 * faulting instruction.
			 */
			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
			return true;
		}
	}
#endif

	return false;
}
163 
164 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
165 /* HardFault is used for all fault conditions on ARMv6-M. */
166 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
167 
168 #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
169 uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp);
170 #endif /* CONFIG_MPU_STACK_GUARD || defined(CONFIG_USERSPACE) */
171 
172 /**
173  *
174  * @brief Dump MemManage fault information
175  *
176  * See z_arm_fault_dump() for example.
177  *
178  * @return error code to identify the fatal error reason
179  */
static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
{
	uint32_t reason = K_ERR_ARM_MEM_GENERIC;
	/* -EINVAL marks "MMFAR not valid for this fault";
	 * z_check_thread_stack_fail() is expected to cope with it.
	 */
	uint32_t mmfar = -EINVAL;

	PR_FAULT_INFO("***** MPU FAULT *****");

	/* Several MMFSR bits can be set simultaneously; each matching
	 * check below overwrites 'reason', so the last match determines
	 * the reported error code.
	 */
	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_STACKING;
		PR_FAULT_INFO("  Stacking error (context area might be"
			      " not valid)");
	}
	if ((SCB->CFSR & SCB_CFSR_MUNSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_UNSTACKING;
		PR_FAULT_INFO("  Unstacking error");
	}
	if ((SCB->CFSR & SCB_CFSR_DACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_DATA_ACCESS;
		PR_FAULT_INFO("  Data Access Violation");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the MMFAR value.
		 * 2. Read the MMARVALID bit in the MMFSR.
		 * The MMFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another higher
		 * priority exception might change the MMFAR value.
		 */
		uint32_t temp = SCB->MMFAR;

		if ((SCB->CFSR & SCB_CFSR_MMARVALID_Msk) != 0) {
			mmfar = temp;
			PR_EXC("  MMFAR Address: 0x%x", mmfar);
			if (from_hard_fault != 0) {
				/* clear SCB_MMAR[VALID] to reset */
				SCB->CFSR &= ~SCB_CFSR_MMARVALID_Msk;
			}
		}
	}
	if ((SCB->CFSR & SCB_CFSR_IACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_INSTRUCTION_ACCESS;
		PR_FAULT_INFO("  Instruction Access Violation");
	}
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO("  Floating-point lazy state preservation error");
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* When stack protection is enabled, we need to assess
	 * if the memory violation error is a stack corruption.
	 *
	 * By design, being a Stacking MemManage fault is a necessary
	 * and sufficient condition for a thread stack corruption.
	 * [Cortex-M process stack pointer is always descending and
	 * is never modified by code (except for the context-switch
	 * routine), therefore, a stacking error implies the PSP has
	 * crossed into an area beyond the thread stack.]
	 *
	 * Data Access Violation errors may or may not be caused by
	 * thread stack overflows.
	 */
	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) || (SCB->CFSR & SCB_CFSR_DACCVIOL_Msk)) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
		/* MemManage Faults are always banked between security
		 * states. Therefore, we can safely assume the fault
		 * originated from the same security state.
		 *
		 * As we only assess thread stack corruption, we only
		 * process the error further if the stack frame is on
		 * PSP. For always-banked MemManage Fault, this is
		 * equivalent to inspecting the RETTOBASE flag.
		 *
		 * Note:
		 * It is possible that MMFAR address is not written by the
		 * Cortex-M core; this occurs when the stacking error is
		 * not accompanied by a data access violation error (i.e.
		 * when stack overflows due to the exception entry frame
		 * stacking): z_check_thread_stack_fail() shall be able to
		 * handle the case of 'mmfar' holding the -EINVAL value.
		 */
		if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
			uint32_t min_stack_ptr =
				z_check_thread_stack_fail(mmfar, ((uint32_t)&esf[0]));

			if (min_stack_ptr) {
				/* When MemManage Stacking Error has occurred,
				 * the stack context frame might be corrupted
				 * but the stack pointer may have actually
				 * descent below the allowed (thread) stack
				 * area. We may face a problem with un-stacking
				 * the frame, upon the exception return, if we
				 * do not have sufficient access permissions to
				 * read the corrupted stack frame. Therefore,
				 * we manually force the stack pointer to the
				 * lowest allowed position, inside the thread's
				 * stack.
				 *
				 * Note:
				 * The PSP will normally be adjusted in a tail-
				 * chained exception performing context switch,
				 * after aborting the corrupted thread. The
				 * adjustment, here, is required as tail-chain
				 * cannot always be guaranteed.
				 *
				 * The manual adjustment of PSP is safe, as we
				 * will not be re-scheduling this thread again
				 * for execution; thread stack corruption is a
				 * fatal error and a thread that corrupted its
				 * stack needs to be aborted.
				 */
				__set_PSP(min_stack_ptr);

				reason = K_ERR_STACK_CHK_FAIL;
			} else {
				__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
					 "Stacking error not a stack fail\n");
			}
		}
#else
		(void)mmfar;
		__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
			 "Stacking or Data Access Violation error "
			 "without stack guard, user-mode or null-pointer detection\n");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
	}

	/* When we were handling this fault, we may have triggered a fp
	 * lazy stacking Memory Manage fault. At the time of writing, this
	 * can happen when printing.  If that's true, we should clear the
	 * pending flag in addition to the clearing the reason for the fault
	 */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTPENDED_Msk;
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* clear MMFSR sticky bits (write-one-to-clear) */
	SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk;

	/* Assess whether system shall ignore/recover from this MPU fault. */
	*recoverable = memory_fault_recoverable(esf, true);

	return reason;
}
326 
327 /**
328  *
329  * @brief Dump BusFault information
330  *
331  * See z_arm_fault_dump() for example.
332  *
333  * @return error code to identify the fatal error reason.
334  *
335  */
bus_fault(struct arch_esf * esf,int from_hard_fault,bool * recoverable)336 static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
337 {
338 	uint32_t reason = K_ERR_ARM_BUS_GENERIC;
339 
340 	PR_FAULT_INFO("***** BUS FAULT *****");
341 
342 	if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
343 		reason = K_ERR_ARM_BUS_STACKING;
344 		PR_FAULT_INFO("  Stacking error");
345 	}
346 	if (SCB->CFSR & SCB_CFSR_UNSTKERR_Msk) {
347 		reason = K_ERR_ARM_BUS_UNSTACKING;
348 		PR_FAULT_INFO("  Unstacking error");
349 	}
350 	if (SCB->CFSR & SCB_CFSR_PRECISERR_Msk) {
351 		reason = K_ERR_ARM_BUS_PRECISE_DATA_BUS;
352 		PR_FAULT_INFO("  Precise data bus error");
353 		/* In a fault handler, to determine the true faulting address:
354 		 * 1. Read and save the BFAR value.
355 		 * 2. Read the BFARVALID bit in the BFSR.
356 		 * The BFAR address is valid only if this bit is 1.
357 		 *
358 		 * Software must follow this sequence because another
359 		 * higher priority exception might change the BFAR value.
360 		 */
361 		STORE_xFAR(bfar, SCB->BFAR);
362 
363 		if ((SCB->CFSR & SCB_CFSR_BFARVALID_Msk) != 0) {
364 			PR_EXC("  BFAR Address: 0x%x", bfar);
365 			if (from_hard_fault != 0) {
366 				/* clear SCB_CFSR_BFAR[VALID] to reset */
367 				SCB->CFSR &= ~SCB_CFSR_BFARVALID_Msk;
368 			}
369 		}
370 	}
371 	if (SCB->CFSR & SCB_CFSR_IMPRECISERR_Msk) {
372 		reason = K_ERR_ARM_BUS_IMPRECISE_DATA_BUS;
373 		PR_FAULT_INFO("  Imprecise data bus error");
374 	}
375 	if ((SCB->CFSR & SCB_CFSR_IBUSERR_Msk) != 0) {
376 		reason = K_ERR_ARM_BUS_INSTRUCTION_BUS;
377 		PR_FAULT_INFO("  Instruction bus error");
378 #if !defined(CONFIG_ARMV7_M_ARMV8_M_FP)
379 	}
380 #else
381 	} else if (SCB->CFSR & SCB_CFSR_LSPERR_Msk) {
382 		reason = K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION;
383 		PR_FAULT_INFO("  Floating-point lazy state preservation error");
384 	} else {
385 		;
386 	}
387 #endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */
388 
389 #if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU)
390 	uint32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK;
391 	uint32_t mask = BIT(31);
392 	int i;
393 	uint32_t ear = -EINVAL;
394 
395 	if (sperr) {
396 		for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
397 			if ((sperr & mask) == 0U) {
398 				continue;
399 			}
400 			STORE_xFAR(edr, SYSMPU->SP[i].EDR);
401 			ear = SYSMPU->SP[i].EAR;
402 
403 			PR_FAULT_INFO("  NXP MPU error, port %d", i);
404 			PR_FAULT_INFO("    Mode: %s, %s Address: 0x%x",
405 				      edr & BIT(2) ? "Supervisor" : "User",
406 				      edr & BIT(1) ? "Data" : "Instruction", ear);
407 			PR_FAULT_INFO("    Type: %s, Master: %d, Regions: 0x%x",
408 				      edr & BIT(0) ? "Write" : "Read", EMN(edr), EACD(edr));
409 
410 			/* When stack protection is enabled, we need to assess
411 			 * if the memory violation error is a stack corruption.
412 			 *
413 			 * By design, being a Stacking Bus fault is a necessary
414 			 * and sufficient condition for a stack corruption.
415 			 */
416 			if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
417 #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
418 				/* Note: we can assume the fault originated
419 				 * from the same security state for ARM
420 				 * platforms implementing the NXP MPU
421 				 * (CONFIG_CPU_HAS_NXP_SYSMPU=y).
422 				 *
423 				 * As we only assess thread stack corruption,
424 				 * we only process the error further, if the
425 				 * stack frame is on PSP. For NXP MPU-related
426 				 * Bus Faults (banked), this is equivalent to
427 				 * inspecting the RETTOBASE flag.
428 				 */
429 				if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
430 					uint32_t min_stack_ptr =
431 						z_check_thread_stack_fail(ear, ((uint32_t)&esf[0]));
432 
433 					if (min_stack_ptr) {
434 						/* When BusFault Stacking Error
435 						 * has occurred, the stack
436 						 * context frame might be
437 						 * corrupted but the stack
438 						 * pointer may have actually
439 						 * moved. We may face problems
440 						 * with un-stacking the frame,
441 						 * upon exception return, if we
442 						 * do not have sufficient
443 						 * permissions to read the
444 						 * corrupted stack frame.
445 						 * Therefore, we manually force
446 						 * the stack pointer to the
447 						 * lowest allowed position.
448 						 *
449 						 * Note:
450 						 * The PSP will normally be
451 						 * adjusted in a tail-chained
452 						 * exception performing context
453 						 * switch, after aborting the
454 						 * corrupted thread. Here, the
455 						 * adjustment is required as
456 						 * tail-chain cannot always be
457 						 * guaranteed.
458 						 */
459 						__set_PSP(min_stack_ptr);
460 
461 						reason = K_ERR_STACK_CHK_FAIL;
462 						break;
463 					}
464 				}
465 #else
466 				(void)ear;
467 				__ASSERT(0,
468 					 "Stacking error without stack guard or User-mode support");
469 #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
470 			}
471 		}
472 		SYSMPU->CESR &= ~sperr;
473 	}
474 #endif /* defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU) */
475 
476 	/* clear BFSR sticky bits */
477 	SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk;
478 
479 	*recoverable = memory_fault_recoverable(esf, true);
480 
481 	return reason;
482 }
483 
484 /**
485  *
486  * @brief Dump UsageFault information
487  *
488  * See z_arm_fault_dump() for example.
489  *
490  * @return error code to identify the fatal error reason
491  */
static uint32_t usage_fault(const struct arch_esf *esf)
{
	/* 'esf' is accepted for signature uniformity with the other fault
	 * handlers; it is not inspected here.
	 */
	uint32_t reason = K_ERR_ARM_USAGE_GENERIC;

	PR_FAULT_INFO("***** USAGE FAULT *****");

	/* bits are sticky: they stack and must be reset.
	 * Several UFSR bits can be set simultaneously; each matching check
	 * below overwrites 'reason', so the last match determines the
	 * reported error code.
	 */
	if ((SCB->CFSR & SCB_CFSR_DIVBYZERO_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_DIV_0;
		PR_FAULT_INFO("  Division by zero");
	}
	if ((SCB->CFSR & SCB_CFSR_UNALIGNED_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNALIGNED_ACCESS;
		PR_FAULT_INFO("  Unaligned memory access");
	}
#if defined(CONFIG_ARMV8_M_MAINLINE)
	if ((SCB->CFSR & SCB_CFSR_STKOF_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_STACK_OVERFLOW;
		PR_FAULT_INFO("  Stack overflow (context area not valid)");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
		/* Stack Overflows are always reported as stack corruption
		 * errors. Note that the built-in stack overflow mechanism
		 * prevents the context area to be loaded on the stack upon
		 * UsageFault exception entry. As a result, we cannot rely
		 * on the reported faulty instruction address, to determine
		 * the instruction that triggered the stack overflow.
		 */
		reason = K_ERR_STACK_CHK_FAIL;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
	}
#endif /* CONFIG_ARMV8_M_MAINLINE */
	if ((SCB->CFSR & SCB_CFSR_NOCP_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_NO_COPROCESSOR;
		PR_FAULT_INFO("  No coprocessor instructions");
	}
	if ((SCB->CFSR & SCB_CFSR_INVPC_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN;
		PR_FAULT_INFO("  Illegal load of EXC_RETURN into PC");
	}
	if ((SCB->CFSR & SCB_CFSR_INVSTATE_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EPSR;
		PR_FAULT_INFO("  Illegal use of the EPSR");
	}
	if ((SCB->CFSR & SCB_CFSR_UNDEFINSTR_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION;
		PR_FAULT_INFO("  Attempt to execute undefined instruction");
	}

	/* clear UFSR sticky bits (write-one-to-clear) */
	SCB->CFSR |= SCB_CFSR_USGFAULTSR_Msk;

	return reason;
}
545 
546 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
547 /**
548  *
549  * @brief Dump SecureFault information
550  *
551  * See z_arm_fault_dump() for example.
552  *
553  * @return error code to identify the fatal error reason
554  */
static uint32_t secure_fault(const struct arch_esf *esf)
{
	/* 'esf' is accepted for signature uniformity with the other fault
	 * handlers; it is not inspected here.
	 */
	uint32_t reason = K_ERR_ARM_SECURE_GENERIC;

	PR_FAULT_INFO("***** SECURE FAULT *****");

	/* Read SFAR first, then check SFARVALID — same sequence rationale
	 * as for MMFAR/BFAR: a higher-priority exception could overwrite
	 * the fault address register in between.
	 */
	STORE_xFAR(sfar, SAU->SFAR);
	if ((SAU->SFSR & SAU_SFSR_SFARVALID_Msk) != 0) {
		PR_EXC("  Address: 0x%x", sfar);
	}

	/* bits are sticky: they stack and must be reset.
	 * Unlike the CFSR-based handlers, only the first matching
	 * condition in this else-if chain is reported.
	 */
	if ((SAU->SFSR & SAU_SFSR_INVEP_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ENTRY_POINT;
		PR_FAULT_INFO("  Invalid entry point");
	} else if ((SAU->SFSR & SAU_SFSR_INVIS_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE;
		PR_FAULT_INFO("  Invalid integrity signature");
	} else if ((SAU->SFSR & SAU_SFSR_INVER_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_EXCEPTION_RETURN;
		PR_FAULT_INFO("  Invalid exception return");
	} else if ((SAU->SFSR & SAU_SFSR_AUVIOL_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ATTRIBUTION_UNIT;
		PR_FAULT_INFO("  Attribution unit violation");
	} else if ((SAU->SFSR & SAU_SFSR_INVTRAN_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_TRANSITION;
		PR_FAULT_INFO("  Invalid transition");
	} else if ((SAU->SFSR & SAU_SFSR_LSPERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO("  Lazy state preservation");
	} else if ((SAU->SFSR & SAU_SFSR_LSERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_ERROR;
		PR_FAULT_INFO("  Lazy state error");
	}

	/* clear SFSR sticky bits by writing ones to all status bits */
	SAU->SFSR |= 0xFF;

	return reason;
}
595 #endif /* defined(CONFIG_ARM_SECURE_FIRMWARE) */
596 
597 /**
598  *
599  * @brief Dump debug monitor exception information
600  *
601  * See z_arm_fault_dump() for example.
602  *
603  */
static void debug_monitor(struct arch_esf *esf, bool *recoverable)
{
	PR_FAULT_INFO("***** Debug monitor exception *****");

	/* A debug monitor exception is fatal unless proven otherwise
	 * below.
	 */
	*recoverable = false;

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
	if (z_arm_debug_monitor_event_error_check()) {
		/* A genuine error event: recoverable only if the faulting
		 * PC falls inside a registered fixup range (non-synchronous
		 * check, since the DebugMonitor exception may be imprecise).
		 */
		*recoverable = memory_fault_recoverable(esf, false);
	} else {
		/* Events not classified as errors are treated as
		 * recoverable by default.
		 */
		*recoverable = true;
	}
#endif
}
624 
625 #else
626 #error Unknown ARM architecture
627 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
628 
/* Check whether the instruction preceding the stacked return address is
 * an `svc #_SVC_CALL_RUNTIME_EXCEPT`, i.e. whether the fault was caused
 * by a synchronous SVC that escalated (ARCH_EXCEPT from an ISR context).
 *
 * @param esf Exception stack frame holding the faulting PC
 * @return true if the faulting instruction is the runtime-except SVC
 */
static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
{
	uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
	/* SVC is a 16-bit instruction. On a synchronous SVC
	 * escalated to Hard Fault, the return address is the
	 * next instruction, i.e. after the SVC.
	 */
#define _SVC_OPCODE 0xDF00

	/* We are about to de-reference the program counter at the
	 * time of fault to determine if it was a SVC
	 * instruction. However, we don't know if the pc itself is
	 * valid -- we could have faulted due to trying to execute a
	 * corrupted function pointer.
	 *
	 * We will temporarily ignore BusFault's so a bad program
	 * counter does not trigger ARM lockup condition.
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && !defined(CONFIG_ARMV8_M_BASELINE)
	/* Note: ARMv6-M does not support CCR.BFHFNMIGN so this access
	 * could generate a fault if the pc was invalid.
	 */
	uint16_t fault_insn = *(ret_addr - 1);
#else
	/* Set CCR.BFHFNMIGN, with barriers on either side of the probe
	 * load so the ignore-BusFault window covers exactly that access.
	 */
	SCB->CCR |= SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	uint16_t fault_insn = *(ret_addr - 1);

	SCB->CCR &= ~SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
#endif /* ARMV6_M_ARMV8_M_BASELINE && !ARMV8_M_BASELINE */

	/* Match both the SVC opcode (high byte) and the specific
	 * immediate used by the runtime-exception SVC (low byte).
	 */
	if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
	    ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
		return true;
	}
#undef _SVC_OPCODE
	return false;
}
671 
672 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
z_arm_is_pc_valid(uintptr_t pc)673 static inline bool z_arm_is_pc_valid(uintptr_t pc)
674 {
675 	/* Is it in valid text region */
676 	if ((((uintptr_t)&__text_region_start) <= pc) && (pc < ((uintptr_t)&__text_region_end))) {
677 		return true;
678 	}
679 
680 	/* Is it in valid ramfunc range */
681 	if ((((uintptr_t)&__ramfunc_start) <= pc) && (pc < ((uintptr_t)&__ramfunc_end))) {
682 		return true;
683 	}
684 
685 #if DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_itcm))
686 	/* Is it in the ITCM */
687 	if ((((uintptr_t)&__itcm_start) <= pc) && (pc < ((uintptr_t)&__itcm_end))) {
688 		return true;
689 	}
690 #endif
691 
692 	return false;
693 }
694 #endif
695 
696 /**
697  *
698  * @brief Dump hard fault information
699  *
700  * See z_arm_fault_dump() for example.
701  *
702  * @return error code to identify the fatal error reason
703  */
static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	PR_FAULT_INFO("***** HARD FAULT *****");

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Workaround for #18712:
	 * HardFault may be due to escalation, as a result of
	 * an SVC instruction that could not be executed; this
	 * can occur if ARCH_EXCEPT() is called by an ISR,
	 * which executes at priority equal to the SVC handler
	 * priority. We handle the case of Kernel OOPS and Stack
	 * Fail here.
	 */

	/* Validate the PC before z_arm_is_synchronous_svc() dereferences
	 * it: ARMv6-M has no CCR.BFHFNMIGN, so probing a bogus PC would
	 * lock up the core.
	 */
	if (z_arm_is_pc_valid((uintptr_t)esf->basic.pc) && z_arm_is_synchronous_svc(esf)) {
		PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
		reason = esf->basic.r0;
	}

	*recoverable = memory_fault_recoverable(esf, true);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	*recoverable = false;

	/* Triage via the HardFault Status Register; on escalation
	 * (FORCED) dispatch to the handler of the original fault,
	 * whose CFSR/SFSR bits are still set.
	 */
	if ((SCB->HFSR & SCB_HFSR_VECTTBL_Msk) != 0) {
		PR_EXC("  Bus fault on vector table read");
	} else if ((SCB->HFSR & SCB_HFSR_DEBUGEVT_Msk) != 0) {
		PR_EXC("  Debug event");
	} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
		PR_EXC("  Fault escalation (see below)");
		if (z_arm_is_synchronous_svc(esf)) {
			PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
			reason = esf->basic.r0;
		} else if ((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) != 0) {
			reason = mem_manage_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) != 0) {
			reason = bus_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) != 0) {
			reason = usage_fault(esf);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
		} else if (SAU->SFSR != 0) {
			reason = secure_fault(esf);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
		} else {
			__ASSERT(0, "Fault escalation without FSR info");
		}
	} else {
		__ASSERT(0, "HardFault without HFSR info"
			    " Shall never occur");
	}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	return reason;
}
761 
762 /**
763  *
764  * @brief Dump reserved exception information
765  *
766  * See z_arm_fault_dump() for example.
767  *
768  */
static void reserved_exception(const struct arch_esf *esf, int fault)
{
	ARG_UNUSED(esf);

	/* Exceptions below 16 are architectural (reserved) vectors;
	 * anything above is an external interrupt line (IRQ = fault - 16).
	 */
	const char *label;

	if (fault < 16) {
		label = "Reserved Exception (";
	} else {
		label = "Spurious interrupt (IRQ ";
	}

	PR_FAULT_INFO("***** %s %d) *****", label, fault - 16);
}
776 
777 /* Handler function for ARM fault conditions. */
/* Dispatch to the per-fault handler based on the exception number and
 * return the fatal-error reason code; sets *recoverable when the fault
 * can be ignored/resumed.
 */
static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	*recoverable = false;

	/* Case labels are the ARM exception numbers. */
	switch (fault) {
	case 3: /* HardFault */
		reason = hard_fault(esf, recoverable);
		break;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
		/* HardFault is raised for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	case 4: /* MemManage fault */
		reason = mem_manage_fault(esf, 0, recoverable);
		break;
	case 5: /* BusFault */
		reason = bus_fault(esf, 0, recoverable);
		break;
	case 6: /* UsageFault */
		reason = usage_fault(esf);
		break;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	case 7: /* SecureFault */
		reason = secure_fault(esf);
		break;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
	case 12: /* DebugMonitor exception */
		debug_monitor(esf, recoverable);
		break;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	default:
		reserved_exception(esf, fault);
		break;
	}

	if ((*recoverable) == false) {
		/* Dump generic information about the fault. */
		fault_show(esf, fault);
	}

	return reason;
}
823 
824 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
825 #if (CONFIG_FAULT_DUMP == 2)
826 /**
827  * @brief Dump the Secure Stack information for an exception that
828  * has occurred in Non-Secure state.
829  *
830  * @param secure_esf Pointer to the secure stack frame.
831  */
static void secure_stack_dump(const struct arch_esf *secure_esf)
{
	/*
	 * In case a Non-Secure exception interrupted the Secure
	 * execution, the Secure state has stacked the additional
	 * state context and the top of the stack contains the
	 * integrity signature.
	 *
	 * In case of a Non-Secure function call the top of the
	 * stack contains the return address to Secure state.
	 */
	uint32_t *top_of_sec_stack = (uint32_t *)secure_esf;
	uint32_t sec_ret_addr;
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	/* With the FP extension there are two possible signatures,
	 * depending on whether the extended (FP) state context was
	 * stacked (see INTEGRITY_SIGNATURE_STD/_EXT above).
	 */
	if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) ||
	    (*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) {
#else
	if (*top_of_sec_stack == INTEGRITY_SIGNATURE) {
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
		/* Secure state interrupted by a Non-Secure exception.
		 * The return address after the additional state
		 * context, stacked by the Secure code upon
		 * Non-Secure exception entry.
		 */
		top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
		secure_esf = (const struct arch_esf *)top_of_sec_stack;
		sec_ret_addr = secure_esf->basic.pc;
	} else {
		/* Exception during Non-Secure function call.
		 * The return address is located on top of stack.
		 */
		sec_ret_addr = *top_of_sec_stack;
	}
	PR_FAULT_INFO("  S instruction address:  0x%x", sec_ret_addr);
}
867 #define SECURE_STACK_DUMP(esf) secure_stack_dump(esf)
868 #else
869 /* We do not dump the Secure stack information for lower dump levels. */
870 #define SECURE_STACK_DUMP(esf)
871 #endif /* CONFIG_FAULT_DUMP== 2 */
872 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
873 
874 /*
875  * This internal function does the following:
876  *
877  * - Retrieves the exception stack frame
878  * - Evaluates whether to report being in a nested exception
879  *
880  * If the ESF is not successfully retrieved, the function signals
881  * an error by returning NULL.
882  *
883  * @return ESF pointer on success, otherwise return NULL
884  */
885 static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
886 				       bool *nested_exc)
887 {
888 	bool alternative_state_exc = false;
889 	struct arch_esf *ptr_esf = NULL;
890 
891 	*nested_exc = false;
892 
893 	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) != EXC_RETURN_INDICATOR_PREFIX) {
894 		/* Invalid EXC_RETURN value. This is a fatal error. */
895 		return NULL;
896 	}
897 
898 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
899 	if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
900 		/* Secure Firmware shall only handle Secure Exceptions.
901 		 * This is a fatal error.
902 		 */
903 		return NULL;
904 	}
905 
906 	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
907 		/* Exception entry occurred in Secure stack. */
908 	} else {
909 		/* Exception entry occurred in Non-Secure stack. Therefore,
910 		 * msp/psp point to the Secure stack, however, the actual
911 		 * exception stack frame is located in the Non-Secure stack.
912 		 */
913 		alternative_state_exc = true;
914 
915 		/* Dump the Secure stack before handling the actual fault. */
916 		struct arch_esf *secure_esf;
917 
918 		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
919 			/* Secure stack pointed by PSP */
920 			secure_esf = (struct arch_esf *)psp;
921 		} else {
922 			/* Secure stack pointed by MSP */
923 			secure_esf = (struct arch_esf *)msp;
924 			*nested_exc = true;
925 		}
926 
927 		SECURE_STACK_DUMP(secure_esf);
928 
929 		/* Handle the actual fault.
930 		 * Extract the correct stack frame from the Non-Secure state
931 		 * and supply it to the fault handing function.
932 		 */
933 		if (exc_return & EXC_RETURN_MODE_THREAD) {
934 			ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
935 		} else {
936 			ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
937 		}
938 	}
939 #elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
940 	if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
941 		/* Non-Secure Firmware shall only handle Non-Secure Exceptions.
942 		 * This is a fatal error.
943 		 */
944 		return NULL;
945 	}
946 
947 	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
948 		/* Exception entry occurred in Secure stack.
949 		 *
950 		 * Note that Non-Secure firmware cannot inspect the Secure
951 		 * stack to determine the root cause of the fault. Fault
952 		 * inspection will indicate the Non-Secure instruction
953 		 * that performed the branch to the Secure domain.
954 		 */
955 		alternative_state_exc = true;
956 
957 		PR_FAULT_INFO("Exception occurred in Secure State");
958 
959 		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
960 			/* Non-Secure stack frame on PSP */
961 			ptr_esf = (struct arch_esf *)psp;
962 		} else {
963 			/* Non-Secure stack frame on MSP */
964 			ptr_esf = (struct arch_esf *)msp;
965 		}
966 	} else {
967 		/* Exception entry occurred in Non-Secure stack. */
968 	}
969 #else
970 	/* The processor has a single execution state.
971 	 * We verify that the Thread mode is using PSP.
972 	 */
973 	if ((exc_return & EXC_RETURN_MODE_THREAD) && (!(exc_return & EXC_RETURN_SPSEL_PROCESS))) {
974 		PR_EXC("SPSEL in thread mode does not indicate PSP");
975 		return NULL;
976 	}
977 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
978 
979 	if (!alternative_state_exc) {
980 		if (exc_return & EXC_RETURN_MODE_THREAD) {
981 			/* Returning to thread mode */
982 			ptr_esf = (struct arch_esf *)psp;
983 
984 		} else {
985 			/* Returning to handler mode */
986 			ptr_esf = (struct arch_esf *)msp;
987 			*nested_exc = true;
988 		}
989 	}
990 
991 	return ptr_esf;
992 }
993 
994 /**
995  *
996  * @brief ARM Fault handler
997  *
998  * This routine is called when fatal error conditions are detected by hardware
999  * and is responsible for:
1000  * - resetting the processor fault status registers (for the case when the
1001  *   error handling policy allows the system to recover from the error),
1002  * - reporting the error information,
1003  * - determining the error reason to be provided as input to the user-
1004  *   provided routine, k_sys_fatal_error_handler().
1005  * The k_sys_fatal_error_handler() is invoked once the above operations are
1006  * completed, and is responsible for implementing the error handling policy.
1007  *
1008  * The function needs, first, to determine the exception stack frame.
1009  * Note that the current security state might not be the actual
1010  * state in which the processor was executing, when the exception occurred.
1011  * The actual state may need to be determined by inspecting the EXC_RETURN
1012  * value, which is provided as argument to the Fault handler.
1013  *
1014  * If the exception occurred in the same security state, the stack frame
1015  * will be pointed to by either MSP or PSP depending on the processor
1016  * execution state when the exception occurred. MSP and PSP values are
1017  * provided as arguments to the Fault handler.
1018  *
1019  * @param msp MSP value immediately after the exception occurred
1020  * @param psp PSP value immediately after the exception occurred
1021  * @param exc_return EXC_RETURN value present in LR after exception entry.
1022  * @param callee_regs Callee-saved registers (R4-R11, PSP)
1023  *
1024  */
1025 void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return, _callee_saved_t *callee_regs)
1026 {
1027 	uint32_t reason = K_ERR_CPU_EXCEPTION;
1028 	int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
1029 	bool recoverable, nested_exc;
1030 	struct arch_esf *esf;
1031 
1032 	/* Create a stack-ed copy of the ESF to be used during
1033 	 * the fault handling process.
1034 	 */
1035 	struct arch_esf esf_copy;
1036 
1037 	/* Force unlock interrupts */
1038 	arch_irq_unlock(0);
1039 
1040 	/* Retrieve the Exception Stack Frame (ESF) to be supplied
1041 	 * as argument to the remainder of the fault handling process.
1042 	 */
1043 	esf = get_esf(msp, psp, exc_return, &nested_exc);
1044 	__ASSERT(esf != NULL, "ESF could not be retrieved successfully. Shall never occur.");
1045 
1046 	z_arm_set_fault_sp(esf, exc_return);
1047 
1048 	reason = fault_handle(esf, fault, &recoverable);
1049 	if (recoverable) {
1050 		return;
1051 	}
1052 
1053 	/* Copy ESF */
1054 #if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
1055 	memcpy(&esf_copy, esf, sizeof(struct arch_esf));
1056 	ARG_UNUSED(callee_regs);
1057 #else
1058 	/* the extra exception info is not present in the original esf
1059 	 * so we only copy the fields before those.
1060 	 */
1061 	memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
1062 	esf_copy.extra_info = (struct __extra_esf_info){
1063 		.callee = callee_regs, .exc_return = exc_return, .msp = msp};
1064 #endif /* CONFIG_EXTRA_EXCEPTION_INFO */
1065 
1066 	/* Overwrite stacked IPSR to mark a nested exception,
1067 	 * or a return to Thread mode. Note that this may be
1068 	 * required, if the retrieved ESF contents are invalid
1069 	 * due to, for instance, a stacking error.
1070 	 */
1071 	if (nested_exc) {
1072 		if ((esf_copy.basic.xpsr & IPSR_ISR_Msk) == 0) {
1073 			esf_copy.basic.xpsr |= IPSR_ISR_Msk;
1074 		}
1075 	} else {
1076 		esf_copy.basic.xpsr &= ~(IPSR_ISR_Msk);
1077 	}
1078 
1079 	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
1080 		reason = K_ERR_CPU_EXCEPTION;
1081 	}
1082 
1083 	z_arm_fatal_error(reason, &esf_copy);
1084 }
1085 
1086 /**
1087  *
1088  * @brief Initialization of fault handling
1089  *
1090  * Turns on the desired hardware faults.
1091  *
1092  */
1093 void z_arm_fault_init(void)
1094 {
1095 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
1096 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
1097 	SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk;
1098 #else
1099 #error Unknown ARM architecture
1100 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
1101 #if defined(CONFIG_BUILTIN_STACK_GUARD)
1102 	/* If Stack guarding via SP limit checking is enabled, disable
1103 	 * SP limit checking inside HardFault and NMI. This is done
1104 	 * in order to allow for the desired fault logging to execute
1105 	 * properly in all cases.
1106 	 *
1107 	 * Note that this could allow a Secure Firmware Main Stack
1108 	 * to descend into non-secure region during HardFault and
1109 	 * NMI exception entry. To prevent from this, non-secure
1110 	 * memory regions must be located higher than secure memory
1111 	 * regions.
1112 	 *
1113 	 * For Non-Secure Firmware this could allow the Non-Secure Main
1114 	 * Stack to attempt to descend into secure region, in which case a
1115 	 * Secure Hard Fault will occur and we can track the fault from there.
1116 	 */
1117 	SCB->CCR |= SCB_CCR_STKOFHFNMIGN_Msk;
1118 #endif /* CONFIG_BUILTIN_STACK_GUARD */
1119 #ifdef CONFIG_TRAP_UNALIGNED_ACCESS
1120 	SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
1121 #else
1122 	SCB->CCR &= ~SCB_CCR_UNALIGN_TRP_Msk;
1123 #endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
1124 }
1125