1 /*
2  * Copyright (c) 2014 Wind River Systems, Inc.
3  * Copyright (c) 2020 Nordic Semiconductor ASA.
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 /**
9  * @file
10  * @brief Common fault handler for ARM Cortex-M
11  *
12  * Common fault handler for ARM Cortex-M processors.
13  */
14 
15 #include <zephyr/kernel.h>
16 #include <kernel_internal.h>
17 #include <inttypes.h>
18 #include <zephyr/arch/common/exc_handle.h>
19 #include <zephyr/linker/linker-defs.h>
20 #include <zephyr/logging/log.h>
21 #include <zephyr/sys/barrier.h>
22 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
23 
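/* Helpers for reporting fault information. PR_EXC() forwards to the logger
 * when logging or printk support is enabled and compiles away otherwise;
 * STORE_xFAR() captures a fault address register into a local variable so
 * it is read only once before being logged.
 */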
24 #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
25 #define PR_EXC(...)              LOG_ERR(__VA_ARGS__)
26 #define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
27 #else
28 #define PR_EXC(...)
29 #define STORE_xFAR(reg_var, reg)
30 #endif /* CONFIG_PRINTK || CONFIG_LOG */
31 
32 #if (CONFIG_FAULT_DUMP == 2)
33 #define PR_FAULT_INFO(...) PR_EXC(__VA_ARGS__)
34 #else
35 #define PR_FAULT_INFO(...)
36 #endif
37 
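/* Helpers to extract the error master number (EMN) and the error access
 * control detail (EACD) fields from an NXP SYSMPU Error Detail Register
 * (EDR) value.
 */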
38 #if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU)
39 #define EMN(edr)  (((edr) & SYSMPU_EDR_EMN_MASK) >> SYSMPU_EDR_EMN_SHIFT)
40 #define EACD(edr) (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
41 #endif
42 
43 /* Integrity signature for an ARMv8-M implementation */
44 #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
45 #define INTEGRITY_SIGNATURE_STD 0xFEFA125BUL
46 #define INTEGRITY_SIGNATURE_EXT 0xFEFA125AUL
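/* The two signature values differ only in bit[0], which records whether the
 * Secure stack frame is a standard frame (no floating-point state) or an
 * extended frame (with floating-point state).
 */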
47 #else
48 #define INTEGRITY_SIGNATURE 0xFEFA125BUL
49 #endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
50 /* Size (in words) of the additional state context that is pushed
51  * to the Secure stack during a Non-Secure exception entry.
52  */
53 #define ADDITIONAL_STATE_CONTEXT_WORDS 10
54 
55 #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
56 /* helpers to access memory/bus/usage faults */
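/* In the ARMv7-M/ARMv8-M Configurable Fault Status Register, MMFSR occupies
 * bits [7:0], BFSR bits [15:8] and UFSR bits [31:16].
 */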
57 #define SCB_CFSR_MEMFAULTSR                                                                        \
58 	(uint32_t)((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos)
59 #define SCB_CFSR_BUSFAULTSR                                                                        \
60 	(uint32_t)((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) >> SCB_CFSR_BUSFAULTSR_Pos)
61 #define SCB_CFSR_USGFAULTSR                                                                        \
62 	(uint32_t)((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) >> SCB_CFSR_USGFAULTSR_Pos)
63 #endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
64 
65 /**
66  *
67  * Dump information regarding fault (FAULT_DUMP == 1)
68  *
69  * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
70  * (short form).
71  *
 * e.g. (precise bus error escalated to hard fault):
73  *
74  * Fault! EXC #3
75  * HARD FAULT: Escalation (see below)!
76  * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
77  * BFAR: 0xff001234
78  *
79  *
80  *
81  * Dump information regarding fault (FAULT_DUMP == 2)
82  *
83  * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
84  * (long form), and return the error code for the kernel to identify the fatal
85  * error reason.
86  *
 * e.g. (precise bus error escalated to hard fault):
88  *
89  * ***** HARD FAULT *****
90  *    Fault escalation (see below)
91  * ***** BUS FAULT *****
92  *   Precise data bus error
93  *   Address: 0xff001234
94  *
95  */
96 
97 #if (CONFIG_FAULT_DUMP == 1)
static void fault_show(const struct arch_esf *esf, int fault)
99 {
100 	PR_EXC("Fault! EXC #%d", fault);
101 
102 #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
103 	PR_EXC("MMFSR: 0x%x, BFSR: 0x%x, UFSR: 0x%x", SCB_CFSR_MEMFAULTSR, SCB_CFSR_BUSFAULTSR,
104 	       SCB_CFSR_USGFAULTSR);
105 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
106 	PR_EXC("SFSR: 0x%x", SAU->SFSR);
107 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
108 #endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
109 }
110 #else
111 /* For Dump level 2, detailed information is generated by the
112  * fault handling functions for individual fault conditions, so this
113  * function is left empty.
114  *
115  * For Dump level 0, no information needs to be generated.
116  */
static void fault_show(const struct arch_esf *esf, int fault)
118 {
119 	(void)esf;
120 	(void)fault;
121 }
122 #endif /* FAULT_DUMP == 1 */
123 
124 #ifdef CONFIG_USERSPACE
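/* Table of kernel routines that may legitimately fault while accessing user
 * memory. If a fault occurs while the PC is inside one of these ranges,
 * execution is resumed at the corresponding fixup address instead of
 * treating the fault as fatal (see memory_fault_recoverable()).
 */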
125 Z_EXC_DECLARE(z_arm_user_string_nlen);
126 
127 static const struct z_exc_handle exceptions[] = {Z_EXC_HANDLE(z_arm_user_string_nlen)};
128 #endif
129 
/* Assess whether an MPU fault shall be
 * treated as recoverable.
132  *
133  * @return true if error is recoverable, otherwise return false.
134  */
static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
136 {
137 #ifdef CONFIG_USERSPACE
138 	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
139 		/* Mask out instruction mode */
140 		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
141 		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;
142 
143 #if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
144 		/* Non-synchronous exceptions (e.g. DebugMonitor) may have
145 		 * allowed PC to continue to the next instruction.
146 		 */
147 		end += (synchronous) ? 0x0 : 0x4;
148 #else
149 		ARG_UNUSED(synchronous);
150 #endif
151 		if (esf->basic.pc >= start && esf->basic.pc < end) {
152 			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
153 			return true;
154 		}
155 	}
156 #endif
157 
158 	return false;
159 }
160 
161 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
162 /* HardFault is used for all fault conditions on ARMv6-M. */
163 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
164 
165 #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
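/* Implemented by the ARM MPU support code: returns the lowest allowed
 * (thread) stack pointer if a thread stack overflow is detected for the
 * given fault address and PSP, or 0 if no stack fault is detected.
 */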
166 uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp);
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
168 
169 /**
170  *
171  * @brief Dump MemManage fault information
172  *
173  * See z_arm_fault_dump() for example.
174  *
175  * @return error code to identify the fatal error reason
176  */
static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
178 {
179 	uint32_t reason = K_ERR_ARM_MEM_GENERIC;
180 	uint32_t mmfar = -EINVAL;
181 
182 	PR_FAULT_INFO("***** MPU FAULT *****");
183 
184 	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) != 0) {
185 		reason = K_ERR_ARM_MEM_STACKING;
		PR_FAULT_INFO("  Stacking error (context area might"
			      " not be valid)");
188 	}
189 	if ((SCB->CFSR & SCB_CFSR_MUNSTKERR_Msk) != 0) {
190 		reason = K_ERR_ARM_MEM_UNSTACKING;
191 		PR_FAULT_INFO("  Unstacking error");
192 	}
193 	if ((SCB->CFSR & SCB_CFSR_DACCVIOL_Msk) != 0) {
194 		reason = K_ERR_ARM_MEM_DATA_ACCESS;
195 		PR_FAULT_INFO("  Data Access Violation");
196 		/* In a fault handler, to determine the true faulting address:
197 		 * 1. Read and save the MMFAR value.
198 		 * 2. Read the MMARVALID bit in the MMFSR.
199 		 * The MMFAR address is valid only if this bit is 1.
200 		 *
201 		 * Software must follow this sequence because another higher
202 		 * priority exception might change the MMFAR value.
203 		 */
204 		uint32_t temp = SCB->MMFAR;
205 
206 		if ((SCB->CFSR & SCB_CFSR_MMARVALID_Msk) != 0) {
207 			mmfar = temp;
208 			PR_EXC("  MMFAR Address: 0x%x", mmfar);
209 			if (from_hard_fault != 0) {
210 				/* clear SCB_MMAR[VALID] to reset */
211 				SCB->CFSR &= ~SCB_CFSR_MMARVALID_Msk;
212 			}
213 		}
214 	}
215 	if ((SCB->CFSR & SCB_CFSR_IACCVIOL_Msk) != 0) {
216 		reason = K_ERR_ARM_MEM_INSTRUCTION_ACCESS;
217 		PR_FAULT_INFO("  Instruction Access Violation");
218 	}
219 #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
220 	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
221 		reason = K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION;
222 		PR_FAULT_INFO("  Floating-point lazy state preservation error");
223 	}
224 #endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
225 
226 	/* When stack protection is enabled, we need to assess
227 	 * if the memory violation error is a stack corruption.
228 	 *
229 	 * By design, being a Stacking MemManage fault is a necessary
230 	 * and sufficient condition for a thread stack corruption.
231 	 * [Cortex-M process stack pointer is always descending and
232 	 * is never modified by code (except for the context-switch
233 	 * routine), therefore, a stacking error implies the PSP has
234 	 * crossed into an area beyond the thread stack.]
235 	 *
236 	 * Data Access Violation errors may or may not be caused by
237 	 * thread stack overflows.
238 	 */
239 	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) || (SCB->CFSR & SCB_CFSR_DACCVIOL_Msk)) {
240 #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
241 		/* MemManage Faults are always banked between security
242 		 * states. Therefore, we can safely assume the fault
243 		 * originated from the same security state.
244 		 *
245 		 * As we only assess thread stack corruption, we only
246 		 * process the error further if the stack frame is on
247 		 * PSP. For always-banked MemManage Fault, this is
248 		 * equivalent to inspecting the RETTOBASE flag.
249 		 *
250 		 * Note:
251 		 * It is possible that MMFAR address is not written by the
252 		 * Cortex-M core; this occurs when the stacking error is
253 		 * not accompanied by a data access violation error (i.e.
254 		 * when stack overflows due to the exception entry frame
255 		 * stacking): z_check_thread_stack_fail() shall be able to
256 		 * handle the case of 'mmfar' holding the -EINVAL value.
257 		 */
258 		if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
259 			uint32_t min_stack_ptr =
260 				z_check_thread_stack_fail(mmfar, ((uint32_t)&esf[0]));
261 
262 			if (min_stack_ptr) {
263 				/* When MemManage Stacking Error has occurred,
264 				 * the stack context frame might be corrupted
265 				 * but the stack pointer may have actually
				 * descended below the allowed (thread) stack
267 				 * area. We may face a problem with un-stacking
268 				 * the frame, upon the exception return, if we
269 				 * do not have sufficient access permissions to
270 				 * read the corrupted stack frame. Therefore,
271 				 * we manually force the stack pointer to the
272 				 * lowest allowed position, inside the thread's
273 				 * stack.
274 				 *
275 				 * Note:
276 				 * The PSP will normally be adjusted in a tail-
277 				 * chained exception performing context switch,
278 				 * after aborting the corrupted thread. The
279 				 * adjustment, here, is required as tail-chain
280 				 * cannot always be guaranteed.
281 				 *
282 				 * The manual adjustment of PSP is safe, as we
283 				 * will not be re-scheduling this thread again
284 				 * for execution; thread stack corruption is a
285 				 * fatal error and a thread that corrupted its
286 				 * stack needs to be aborted.
287 				 */
288 				__set_PSP(min_stack_ptr);
289 
290 				reason = K_ERR_STACK_CHK_FAIL;
291 			} else {
292 				__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
293 					 "Stacking error not a stack fail\n");
294 			}
295 		}
296 #else
297 		(void)mmfar;
298 		__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
299 			 "Stacking or Data Access Violation error "
300 			 "without stack guard, user-mode or null-pointer detection\n");
301 #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
302 	}
303 
	/* While handling this fault, we may have triggered a floating-point
	 * lazy stacking MemManage fault. At the time of writing, this can
	 * happen when printing. If so, we should clear the pending flag in
	 * addition to clearing the reason for the fault.
	 */
309 #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
310 	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
311 		SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTPENDED_Msk;
312 	}
313 #endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
314 
315 	/* clear MMFSR sticky bits */
316 	SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk;
317 
318 	/* Assess whether system shall ignore/recover from this MPU fault. */
319 	*recoverable = memory_fault_recoverable(esf, true);
320 
321 	return reason;
322 }
323 
324 /**
325  *
326  * @brief Dump BusFault information
327  *
328  * See z_arm_fault_dump() for example.
329  *
330  * @return error code to identify the fatal error reason.
331  *
332  */
static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
334 {
335 	uint32_t reason = K_ERR_ARM_BUS_GENERIC;
336 
337 	PR_FAULT_INFO("***** BUS FAULT *****");
338 
339 	if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
340 		reason = K_ERR_ARM_BUS_STACKING;
341 		PR_FAULT_INFO("  Stacking error");
342 	}
343 	if (SCB->CFSR & SCB_CFSR_UNSTKERR_Msk) {
344 		reason = K_ERR_ARM_BUS_UNSTACKING;
345 		PR_FAULT_INFO("  Unstacking error");
346 	}
347 	if (SCB->CFSR & SCB_CFSR_PRECISERR_Msk) {
348 		reason = K_ERR_ARM_BUS_PRECISE_DATA_BUS;
349 		PR_FAULT_INFO("  Precise data bus error");
350 		/* In a fault handler, to determine the true faulting address:
351 		 * 1. Read and save the BFAR value.
352 		 * 2. Read the BFARVALID bit in the BFSR.
353 		 * The BFAR address is valid only if this bit is 1.
354 		 *
355 		 * Software must follow this sequence because another
356 		 * higher priority exception might change the BFAR value.
357 		 */
358 		STORE_xFAR(bfar, SCB->BFAR);
359 
360 		if ((SCB->CFSR & SCB_CFSR_BFARVALID_Msk) != 0) {
361 			PR_EXC("  BFAR Address: 0x%x", bfar);
362 			if (from_hard_fault != 0) {
363 				/* clear SCB_CFSR_BFAR[VALID] to reset */
364 				SCB->CFSR &= ~SCB_CFSR_BFARVALID_Msk;
365 			}
366 		}
367 	}
368 	if (SCB->CFSR & SCB_CFSR_IMPRECISERR_Msk) {
369 		reason = K_ERR_ARM_BUS_IMPRECISE_DATA_BUS;
370 		PR_FAULT_INFO("  Imprecise data bus error");
371 	}
372 	if ((SCB->CFSR & SCB_CFSR_IBUSERR_Msk) != 0) {
373 		reason = K_ERR_ARM_BUS_INSTRUCTION_BUS;
374 		PR_FAULT_INFO("  Instruction bus error");
375 #if !defined(CONFIG_ARMV7_M_ARMV8_M_FP)
376 	}
377 #else
378 	} else if (SCB->CFSR & SCB_CFSR_LSPERR_Msk) {
379 		reason = K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION;
380 		PR_FAULT_INFO("  Floating-point lazy state preservation error");
381 	} else {
382 		;
383 	}
384 #endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */
385 
386 #if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU)
387 	uint32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK;
388 	uint32_t mask = BIT(31);
389 	int i;
390 	uint32_t ear = -EINVAL;
391 
392 	if (sperr) {
393 		for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
394 			if ((sperr & mask) == 0U) {
395 				continue;
396 			}
397 			STORE_xFAR(edr, SYSMPU->SP[i].EDR);
398 			ear = SYSMPU->SP[i].EAR;
399 
400 			PR_FAULT_INFO("  NXP MPU error, port %d", i);
401 			PR_FAULT_INFO("    Mode: %s, %s Address: 0x%x",
402 				      edr & BIT(2) ? "Supervisor" : "User",
403 				      edr & BIT(1) ? "Data" : "Instruction", ear);
404 			PR_FAULT_INFO("    Type: %s, Master: %d, Regions: 0x%x",
405 				      edr & BIT(0) ? "Write" : "Read", EMN(edr), EACD(edr));
406 
407 			/* When stack protection is enabled, we need to assess
408 			 * if the memory violation error is a stack corruption.
409 			 *
410 			 * By design, being a Stacking Bus fault is a necessary
411 			 * and sufficient condition for a stack corruption.
412 			 */
413 			if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
414 #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
415 				/* Note: we can assume the fault originated
416 				 * from the same security state for ARM
417 				 * platforms implementing the NXP MPU
418 				 * (CONFIG_CPU_HAS_NXP_SYSMPU=y).
419 				 *
420 				 * As we only assess thread stack corruption,
421 				 * we only process the error further, if the
422 				 * stack frame is on PSP. For NXP MPU-related
423 				 * Bus Faults (banked), this is equivalent to
424 				 * inspecting the RETTOBASE flag.
425 				 */
426 				if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
427 					uint32_t min_stack_ptr =
428 						z_check_thread_stack_fail(ear, ((uint32_t)&esf[0]));
429 
430 					if (min_stack_ptr) {
431 						/* When BusFault Stacking Error
432 						 * has occurred, the stack
433 						 * context frame might be
434 						 * corrupted but the stack
435 						 * pointer may have actually
436 						 * moved. We may face problems
437 						 * with un-stacking the frame,
438 						 * upon exception return, if we
439 						 * do not have sufficient
440 						 * permissions to read the
441 						 * corrupted stack frame.
442 						 * Therefore, we manually force
443 						 * the stack pointer to the
444 						 * lowest allowed position.
445 						 *
446 						 * Note:
447 						 * The PSP will normally be
448 						 * adjusted in a tail-chained
449 						 * exception performing context
450 						 * switch, after aborting the
451 						 * corrupted thread. Here, the
452 						 * adjustment is required as
453 						 * tail-chain cannot always be
454 						 * guaranteed.
455 						 */
456 						__set_PSP(min_stack_ptr);
457 
458 						reason = K_ERR_STACK_CHK_FAIL;
459 						break;
460 					}
461 				}
462 #else
463 				(void)ear;
464 				__ASSERT(0,
465 					 "Stacking error without stack guard or User-mode support");
466 #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
467 			}
468 		}
469 		SYSMPU->CESR &= ~sperr;
470 	}
471 #endif /* defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_SYSMPU) */
472 
473 	/* clear BFSR sticky bits */
474 	SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk;
475 
476 	*recoverable = memory_fault_recoverable(esf, true);
477 
478 	return reason;
479 }
480 
481 /**
482  *
483  * @brief Dump UsageFault information
484  *
485  * See z_arm_fault_dump() for example.
486  *
487  * @return error code to identify the fatal error reason
488  */
static uint32_t usage_fault(const struct arch_esf *esf)
490 {
491 	uint32_t reason = K_ERR_ARM_USAGE_GENERIC;
492 
493 	PR_FAULT_INFO("***** USAGE FAULT *****");
494 
495 	/* bits are sticky: they stack and must be reset */
496 	if ((SCB->CFSR & SCB_CFSR_DIVBYZERO_Msk) != 0) {
497 		reason = K_ERR_ARM_USAGE_DIV_0;
498 		PR_FAULT_INFO("  Division by zero");
499 	}
500 	if ((SCB->CFSR & SCB_CFSR_UNALIGNED_Msk) != 0) {
501 		reason = K_ERR_ARM_USAGE_UNALIGNED_ACCESS;
502 		PR_FAULT_INFO("  Unaligned memory access");
503 	}
504 #if defined(CONFIG_ARMV8_M_MAINLINE)
505 	if ((SCB->CFSR & SCB_CFSR_STKOF_Msk) != 0) {
506 		reason = K_ERR_ARM_USAGE_STACK_OVERFLOW;
507 		PR_FAULT_INFO("  Stack overflow (context area not valid)");
508 #if defined(CONFIG_BUILTIN_STACK_GUARD)
		/* Stack overflows are always reported as stack corruption
		 * errors. Note that the built-in stack overflow mechanism
		 * prevents the context area from being pushed on the stack
		 * upon UsageFault exception entry. As a result, we cannot
		 * rely on the reported faulting instruction address to
		 * determine the instruction that triggered the stack
		 * overflow.
		 */
516 		reason = K_ERR_STACK_CHK_FAIL;
517 #endif /* CONFIG_BUILTIN_STACK_GUARD */
518 	}
519 #endif /* CONFIG_ARMV8_M_MAINLINE */
520 	if ((SCB->CFSR & SCB_CFSR_NOCP_Msk) != 0) {
521 		reason = K_ERR_ARM_USAGE_NO_COPROCESSOR;
522 		PR_FAULT_INFO("  No coprocessor instructions");
523 	}
524 	if ((SCB->CFSR & SCB_CFSR_INVPC_Msk) != 0) {
525 		reason = K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN;
526 		PR_FAULT_INFO("  Illegal load of EXC_RETURN into PC");
527 	}
528 	if ((SCB->CFSR & SCB_CFSR_INVSTATE_Msk) != 0) {
529 		reason = K_ERR_ARM_USAGE_ILLEGAL_EPSR;
530 		PR_FAULT_INFO("  Illegal use of the EPSR");
531 	}
532 	if ((SCB->CFSR & SCB_CFSR_UNDEFINSTR_Msk) != 0) {
533 		reason = K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION;
534 		PR_FAULT_INFO("  Attempt to execute undefined instruction");
535 	}
536 
537 	/* clear UFSR sticky bits */
538 	SCB->CFSR |= SCB_CFSR_USGFAULTSR_Msk;
539 
540 	return reason;
541 }
542 
543 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
544 /**
545  *
546  * @brief Dump SecureFault information
547  *
548  * See z_arm_fault_dump() for example.
549  *
550  * @return error code to identify the fatal error reason
551  */
static uint32_t secure_fault(const struct arch_esf *esf)
553 {
554 	uint32_t reason = K_ERR_ARM_SECURE_GENERIC;
555 
556 	PR_FAULT_INFO("***** SECURE FAULT *****");
557 
558 	STORE_xFAR(sfar, SAU->SFAR);
559 	if ((SAU->SFSR & SAU_SFSR_SFARVALID_Msk) != 0) {
560 		PR_EXC("  Address: 0x%x", sfar);
561 	}
562 
563 	/* bits are sticky: they stack and must be reset */
564 	if ((SAU->SFSR & SAU_SFSR_INVEP_Msk) != 0) {
565 		reason = K_ERR_ARM_SECURE_ENTRY_POINT;
566 		PR_FAULT_INFO("  Invalid entry point");
567 	} else if ((SAU->SFSR & SAU_SFSR_INVIS_Msk) != 0) {
568 		reason = K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE;
569 		PR_FAULT_INFO("  Invalid integrity signature");
570 	} else if ((SAU->SFSR & SAU_SFSR_INVER_Msk) != 0) {
571 		reason = K_ERR_ARM_SECURE_EXCEPTION_RETURN;
572 		PR_FAULT_INFO("  Invalid exception return");
573 	} else if ((SAU->SFSR & SAU_SFSR_AUVIOL_Msk) != 0) {
574 		reason = K_ERR_ARM_SECURE_ATTRIBUTION_UNIT;
575 		PR_FAULT_INFO("  Attribution unit violation");
576 	} else if ((SAU->SFSR & SAU_SFSR_INVTRAN_Msk) != 0) {
577 		reason = K_ERR_ARM_SECURE_TRANSITION;
578 		PR_FAULT_INFO("  Invalid transition");
579 	} else if ((SAU->SFSR & SAU_SFSR_LSPERR_Msk) != 0) {
580 		reason = K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION;
581 		PR_FAULT_INFO("  Lazy state preservation");
582 	} else if ((SAU->SFSR & SAU_SFSR_LSERR_Msk) != 0) {
583 		reason = K_ERR_ARM_SECURE_LAZY_STATE_ERROR;
584 		PR_FAULT_INFO("  Lazy state error");
585 	}
586 
587 	/* clear SFSR sticky bits */
588 	SAU->SFSR |= 0xFF;
589 
590 	return reason;
591 }
592 #endif /* defined(CONFIG_ARM_SECURE_FIRMWARE) */
593 
594 /**
595  *
596  * @brief Dump debug monitor exception information
597  *
598  * See z_arm_fault_dump() for example.
599  *
600  */
static void debug_monitor(struct arch_esf *esf, bool *recoverable)
602 {
603 	*recoverable = false;
604 
605 	PR_FAULT_INFO("***** Debug monitor exception *****");
606 
607 #if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
608 	if (!z_arm_debug_monitor_event_error_check()) {
		/* By default, all debug monitor exceptions that are not
		 * treated as errors by z_arm_debug_monitor_event_error_check()
		 * are considered recoverable.
		 */
613 		*recoverable = true;
614 	} else {
615 
616 		*recoverable = memory_fault_recoverable(esf, false);
617 	}
618 
619 #endif
620 }
621 
622 #else
623 #error Unknown ARM architecture
624 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
625 
static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
627 {
628 	uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
629 	/* SVC is a 16-bit instruction. On a synchronous SVC
630 	 * escalated to Hard Fault, the return address is the
631 	 * next instruction, i.e. after the SVC.
632 	 */
633 #define _SVC_OPCODE 0xDF00
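	/* The low byte of the SVC encoding holds the immediate; it is
	 * matched below against the SVC number used by ARCH_EXCEPT() for
	 * runtime exceptions (_SVC_CALL_RUNTIME_EXCEPT).
	 */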
634 
635 	/* We are about to de-reference the program counter at the
636 	 * time of fault to determine if it was a SVC
637 	 * instruction. However, we don't know if the pc itself is
638 	 * valid -- we could have faulted due to trying to execute a
639 	 * corrupted function pointer.
640 	 *
	 * We will temporarily ignore BusFaults so that a bad program
	 * counter does not trigger the ARM lockup condition.
643 	 */
644 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && !defined(CONFIG_ARMV8_M_BASELINE)
645 	/* Note: ARMv6-M does not support CCR.BFHFNMIGN so this access
646 	 * could generate a fault if the pc was invalid.
647 	 */
648 	uint16_t fault_insn = *(ret_addr - 1);
649 #else
650 	SCB->CCR |= SCB_CCR_BFHFNMIGN_Msk;
651 	barrier_dsync_fence_full();
652 	barrier_isync_fence_full();
653 
654 	uint16_t fault_insn = *(ret_addr - 1);
655 
656 	SCB->CCR &= ~SCB_CCR_BFHFNMIGN_Msk;
657 	barrier_dsync_fence_full();
658 	barrier_isync_fence_full();
659 #endif /* ARMV6_M_ARMV8_M_BASELINE && !ARMV8_M_BASELINE */
660 
661 	if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
662 	    ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
663 		return true;
664 	}
665 #undef _SVC_OPCODE
666 	return false;
667 }
668 
669 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
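/* Check whether the faulting PC points into an executable region
 * (text, ramfunc or, when present, ITCM) before it is dereferenced.
 */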
static inline bool z_arm_is_pc_valid(uintptr_t pc)
671 {
672 	/* Is it in valid text region */
673 	if ((((uintptr_t)&__text_region_start) <= pc) && (pc < ((uintptr_t)&__text_region_end))) {
674 		return true;
675 	}
676 
677 	/* Is it in valid ramfunc range */
678 	if ((((uintptr_t)&__ramfunc_start) <= pc) && (pc < ((uintptr_t)&__ramfunc_end))) {
679 		return true;
680 	}
681 
682 #if DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_itcm))
683 	/* Is it in the ITCM */
684 	if ((((uintptr_t)&__itcm_start) <= pc) && (pc < ((uintptr_t)&__itcm_end))) {
685 		return true;
686 	}
687 #endif
688 
689 	return false;
690 }
691 #endif
692 
693 /**
694  *
695  * @brief Dump hard fault information
696  *
697  * See z_arm_fault_dump() for example.
698  *
699  * @return error code to identify the fatal error reason
700  */
static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
702 {
703 	uint32_t reason = K_ERR_CPU_EXCEPTION;
704 
705 	PR_FAULT_INFO("***** HARD FAULT *****");
706 
707 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
708 	/* Workaround for #18712:
709 	 * HardFault may be due to escalation, as a result of
710 	 * an SVC instruction that could not be executed; this
711 	 * can occur if ARCH_EXCEPT() is called by an ISR,
712 	 * which executes at priority equal to the SVC handler
713 	 * priority. We handle the case of Kernel OOPS and Stack
714 	 * Fail here.
715 	 */
716 
717 	if (z_arm_is_pc_valid((uintptr_t)esf->basic.pc) && z_arm_is_synchronous_svc(esf)) {
718 		PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
719 		reason = esf->basic.r0;
720 	}
721 
722 	*recoverable = memory_fault_recoverable(esf, true);
723 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
724 	*recoverable = false;
725 
726 	if ((SCB->HFSR & SCB_HFSR_VECTTBL_Msk) != 0) {
727 		PR_EXC("  Bus fault on vector table read");
728 	} else if ((SCB->HFSR & SCB_HFSR_DEBUGEVT_Msk) != 0) {
729 		PR_EXC("  Debug event");
730 	} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
731 		PR_EXC("  Fault escalation (see below)");
732 		if (z_arm_is_synchronous_svc(esf)) {
733 			PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
734 			reason = esf->basic.r0;
735 		} else if ((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) != 0) {
736 			reason = mem_manage_fault(esf, 1, recoverable);
737 		} else if ((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) != 0) {
738 			reason = bus_fault(esf, 1, recoverable);
739 		} else if ((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) != 0) {
740 			reason = usage_fault(esf);
741 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
742 		} else if (SAU->SFSR != 0) {
743 			reason = secure_fault(esf);
744 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
745 		} else {
746 			__ASSERT(0, "Fault escalation without FSR info");
747 		}
748 	} else {
		__ASSERT(0, "HardFault without HFSR info."
			    " Shall never occur");
751 	}
752 #else
753 #error Unknown ARM architecture
754 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
755 
756 	return reason;
757 }
758 
759 /**
760  *
761  * @brief Dump reserved exception information
762  *
763  * See z_arm_fault_dump() for example.
764  *
765  */
static void reserved_exception(const struct arch_esf *esf, int fault)
767 {
768 	ARG_UNUSED(esf);
769 
	PR_FAULT_INFO("***** %s %d) *****",
		      fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
		      fault < 16 ? fault : fault - 16);
772 }
773 
774 /* Handler function for ARM fault conditions. */
static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
776 {
777 	uint32_t reason = K_ERR_CPU_EXCEPTION;
778 
779 	*recoverable = false;
780 
781 	switch (fault) {
782 	case 3:
783 		reason = hard_fault(esf, recoverable);
784 		break;
785 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
786 		/* HardFault is raised for all fault conditions on ARMv6-M. */
787 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
788 	case 4:
789 		reason = mem_manage_fault(esf, 0, recoverable);
790 		break;
791 	case 5:
792 		reason = bus_fault(esf, 0, recoverable);
793 		break;
794 	case 6:
795 		reason = usage_fault(esf);
796 		break;
797 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
798 	case 7:
799 		reason = secure_fault(esf);
800 		break;
801 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
802 	case 12:
803 		debug_monitor(esf, recoverable);
804 		break;
805 #else
806 #error Unknown ARM architecture
807 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
808 	default:
809 		reserved_exception(esf, fault);
810 		break;
811 	}
812 
813 	if ((*recoverable) == false) {
814 		/* Dump generic information about the fault. */
815 		fault_show(esf, fault);
816 	}
817 
818 	return reason;
819 }
820 
821 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
822 #if (CONFIG_FAULT_DUMP == 2)
823 /**
824  * @brief Dump the Secure Stack information for an exception that
825  * has occurred in Non-Secure state.
826  *
827  * @param secure_esf Pointer to the secure stack frame.
828  */
static void secure_stack_dump(const struct arch_esf *secure_esf)
830 {
831 	/*
832 	 * In case a Non-Secure exception interrupted the Secure
833 	 * execution, the Secure state has stacked the additional
834 	 * state context and the top of the stack contains the
835 	 * integrity signature.
836 	 *
837 	 * In case of a Non-Secure function call the top of the
838 	 * stack contains the return address to Secure state.
839 	 */
840 	uint32_t *top_of_sec_stack = (uint32_t *)secure_esf;
841 	uint32_t sec_ret_addr;
842 #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
843 	if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) ||
844 	    (*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) {
845 #else
846 	if (*top_of_sec_stack == INTEGRITY_SIGNATURE) {
847 #endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
		/* Secure state interrupted by a Non-Secure exception.
		 * The return address is located after the additional
		 * state context stacked by the Secure code upon
		 * Non-Secure exception entry.
		 */
853 		top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
854 		secure_esf = (const struct arch_esf *)top_of_sec_stack;
855 		sec_ret_addr = secure_esf->basic.pc;
856 	} else {
857 		/* Exception during Non-Secure function call.
858 		 * The return address is located on top of stack.
859 		 */
860 		sec_ret_addr = *top_of_sec_stack;
861 	}
862 	PR_FAULT_INFO("  S instruction address:  0x%x", sec_ret_addr);
863 }
864 #define SECURE_STACK_DUMP(esf) secure_stack_dump(esf)
865 #else
866 /* We do not dump the Secure stack information for lower dump levels. */
867 #define SECURE_STACK_DUMP(esf)
#endif /* CONFIG_FAULT_DUMP == 2 */
869 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
870 
871 /*
872  * This internal function does the following:
873  *
874  * - Retrieves the exception stack frame
875  * - Evaluates whether to report being in a nested exception
876  *
877  * If the ESF is not successfully retrieved, the function signals
878  * an error by returning NULL.
879  *
880  * @return ESF pointer on success, otherwise return NULL
881  */
882 static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
883 				       bool *nested_exc)
884 {
885 	bool alternative_state_exc = false;
886 	struct arch_esf *ptr_esf = NULL;
887 
888 	*nested_exc = false;
889 
890 	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) != EXC_RETURN_INDICATOR_PREFIX) {
891 		/* Invalid EXC_RETURN value. This is a fatal error. */
892 		return NULL;
893 	}
894 
895 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
896 	if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
897 		/* Secure Firmware shall only handle Secure Exceptions.
898 		 * This is a fatal error.
899 		 */
900 		return NULL;
901 	}
902 
903 	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
904 		/* Exception entry occurred in Secure stack. */
905 	} else {
906 		/* Exception entry occurred in Non-Secure stack. Therefore,
907 		 * msp/psp point to the Secure stack, however, the actual
908 		 * exception stack frame is located in the Non-Secure stack.
909 		 */
910 		alternative_state_exc = true;
911 
912 		/* Dump the Secure stack before handling the actual fault. */
913 		struct arch_esf *secure_esf;
914 
915 		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
916 			/* Secure stack pointed by PSP */
917 			secure_esf = (struct arch_esf *)psp;
918 		} else {
919 			/* Secure stack pointed by MSP */
920 			secure_esf = (struct arch_esf *)msp;
921 			*nested_exc = true;
922 		}
923 
924 		SECURE_STACK_DUMP(secure_esf);
925 
926 		/* Handle the actual fault.
927 		 * Extract the correct stack frame from the Non-Secure state
		 * and supply it to the fault handling function.
929 		 */
930 		if (exc_return & EXC_RETURN_MODE_THREAD) {
931 			ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
932 		} else {
933 			ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
934 		}
935 	}
936 #elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
937 	if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
938 		/* Non-Secure Firmware shall only handle Non-Secure Exceptions.
939 		 * This is a fatal error.
940 		 */
941 		return NULL;
942 	}
943 
944 	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
945 		/* Exception entry occurred in Secure stack.
946 		 *
947 		 * Note that Non-Secure firmware cannot inspect the Secure
948 		 * stack to determine the root cause of the fault. Fault
949 		 * inspection will indicate the Non-Secure instruction
950 		 * that performed the branch to the Secure domain.
951 		 */
952 		alternative_state_exc = true;
953 
954 		PR_FAULT_INFO("Exception occurred in Secure State");
955 
956 		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
957 			/* Non-Secure stack frame on PSP */
958 			ptr_esf = (struct arch_esf *)psp;
959 		} else {
960 			/* Non-Secure stack frame on MSP */
961 			ptr_esf = (struct arch_esf *)msp;
962 		}
963 	} else {
964 		/* Exception entry occurred in Non-Secure stack. */
965 	}
966 #else
967 	/* The processor has a single execution state.
968 	 * We verify that the Thread mode is using PSP.
969 	 */
970 	if ((exc_return & EXC_RETURN_MODE_THREAD) && (!(exc_return & EXC_RETURN_SPSEL_PROCESS))) {
971 		PR_EXC("SPSEL in thread mode does not indicate PSP");
972 		return NULL;
973 	}
974 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
975 
976 	if (!alternative_state_exc) {
977 		if (exc_return & EXC_RETURN_MODE_THREAD) {
978 			/* Returning to thread mode */
979 			ptr_esf = (struct arch_esf *)psp;
980 
981 		} else {
982 			/* Returning to handler mode */
983 			ptr_esf = (struct arch_esf *)msp;
984 			*nested_exc = true;
985 		}
986 	}
987 
988 	return ptr_esf;
989 }
990 
991 /**
992  *
993  * @brief ARM Fault handler
994  *
995  * This routine is called when fatal error conditions are detected by hardware
996  * and is responsible for:
997  * - resetting the processor fault status registers (for the case when the
998  *   error handling policy allows the system to recover from the error),
999  * - reporting the error information,
1000  * - determining the error reason to be provided as input to the user-
1001  *   provided routine, k_sys_fatal_error_handler().
1002  * The k_sys_fatal_error_handler() is invoked once the above operations are
1003  * completed, and is responsible for implementing the error handling policy.
1004  *
1005  * The function needs, first, to determine the exception stack frame.
1006  * Note that the current security state might not be the actual
1007  * state in which the processor was executing, when the exception occurred.
1008  * The actual state may need to be determined by inspecting the EXC_RETURN
1009  * value, which is provided as argument to the Fault handler.
1010  *
1011  * If the exception occurred in the same security state, the stack frame
1012  * will be pointed to by either MSP or PSP depending on the processor
1013  * execution state when the exception occurred. MSP and PSP values are
1014  * provided as arguments to the Fault handler.
1015  *
1016  * @param msp MSP value immediately after the exception occurred
1017  * @param psp PSP value immediately after the exception occurred
1018  * @param exc_return EXC_RETURN value present in LR after exception entry.
1019  * @param callee_regs Callee-saved registers (R4-R11, PSP)
1020  *
1021  */
1022 void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return, _callee_saved_t *callee_regs)
1023 {
1024 	uint32_t reason = K_ERR_CPU_EXCEPTION;
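	/* VECTACTIVE in ICSR holds the exception number of the currently
	 * executing handler, which identifies the fault being serviced.
	 */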
1025 	int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
1026 	bool recoverable, nested_exc;
1027 	struct arch_esf *esf;
1028 
	/* Create a local (stack-allocated) copy of the ESF to be used during
	 * the fault handling process.
	 */
1032 	struct arch_esf esf_copy;
1033 
1034 	/* Force unlock interrupts */
1035 	arch_irq_unlock(0);
1036 
1037 	/* Retrieve the Exception Stack Frame (ESF) to be supplied
1038 	 * as argument to the remainder of the fault handling process.
1039 	 */
1040 	esf = get_esf(msp, psp, exc_return, &nested_exc);
1041 	__ASSERT(esf != NULL, "ESF could not be retrieved successfully. Shall never occur.");
1042 
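	/* Record the stack pointer in use at the time of the fault so it can
	 * be reported along with the fatal error information.
	 */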
1043 	z_arm_set_fault_sp(esf, exc_return);
1044 
1045 	reason = fault_handle(esf, fault, &recoverable);
1046 	if (recoverable) {
1047 		return;
1048 	}
1049 
1050 	/* Copy ESF */
1051 #if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
1052 	memcpy(&esf_copy, esf, sizeof(struct arch_esf));
1053 	ARG_UNUSED(callee_regs);
1054 #else
	/* The extra exception info is not present in the original esf,
	 * so we only copy the fields that precede it.
	 */
1058 	memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
1059 	esf_copy.extra_info = (struct __extra_esf_info){
1060 		.callee = callee_regs, .exc_return = exc_return, .msp = msp};
1061 #endif /* CONFIG_EXTRA_EXCEPTION_INFO */
1062 
1063 	/* Overwrite stacked IPSR to mark a nested exception,
1064 	 * or a return to Thread mode. Note that this may be
1065 	 * required, if the retrieved ESF contents are invalid
1066 	 * due to, for instance, a stacking error.
1067 	 */
1068 	if (nested_exc) {
1069 		if ((esf_copy.basic.xpsr & IPSR_ISR_Msk) == 0) {
1070 			esf_copy.basic.xpsr |= IPSR_ISR_Msk;
1071 		}
1072 	} else {
1073 		esf_copy.basic.xpsr &= ~(IPSR_ISR_Msk);
1074 	}
1075 
1076 	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
1077 		reason = K_ERR_CPU_EXCEPTION;
1078 	}
1079 
1080 	z_arm_fatal_error(reason, &esf_copy);
1081 }
1082 
1083 /**
1084  *
1085  * @brief Initialization of fault handling
1086  *
1087  * Turns on the desired hardware faults.
1088  *
1089  */
1090 void z_arm_fault_init(void)
1091 {
1092 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
1093 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
1094 	SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk;
1095 #else
1096 #error Unknown ARM architecture
1097 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
1098 #if defined(CONFIG_BUILTIN_STACK_GUARD)
1099 	/* If Stack guarding via SP limit checking is enabled, disable
1100 	 * SP limit checking inside HardFault and NMI. This is done
1101 	 * in order to allow for the desired fault logging to execute
1102 	 * properly in all cases.
1103 	 *
1104 	 * Note that this could allow a Secure Firmware Main Stack
1105 	 * to descend into non-secure region during HardFault and
	 * NMI exception entry. To prevent this, non-secure
1107 	 * memory regions must be located higher than secure memory
1108 	 * regions.
1109 	 *
1110 	 * For Non-Secure Firmware this could allow the Non-Secure Main
1111 	 * Stack to attempt to descend into secure region, in which case a
1112 	 * Secure Hard Fault will occur and we can track the fault from there.
1113 	 */
1114 	SCB->CCR |= SCB_CCR_STKOFHFNMIGN_Msk;
1115 #endif /* CONFIG_BUILTIN_STACK_GUARD */
1116 #ifdef CONFIG_TRAP_UNALIGNED_ACCESS
1117 	SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
1118 #else
1119 	SCB->CCR &= ~SCB_CCR_UNALIGN_TRP_Msk;
1120 #endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
1121 }
1122