/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 * Copyright (c) 2020 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Common fault handler for ARM Cortex-M
 *
 * Common fault handler for ARM Cortex-M processors.
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <inttypes.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/barrier.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
#define PR_EXC(...) LOG_ERR(__VA_ARGS__)
#define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
#else
#define PR_EXC(...)
#define STORE_xFAR(reg_var, reg)
#endif /* CONFIG_PRINTK || CONFIG_LOG */

#if (CONFIG_FAULT_DUMP == 2)
#define PR_FAULT_INFO(...) PR_EXC(__VA_ARGS__)
#else
#define PR_FAULT_INFO(...)
#endif

#if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU)
#define EMN(edr)   (((edr) & SYSMPU_EDR_EMN_MASK) >> SYSMPU_EDR_EMN_SHIFT)
#define EACD(edr)  (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
#endif

/* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
 * It is used to perform an exception return and to detect possible state
 * transition upon exception.
 */

/* Prefix. Indicates that this is an EXC_RETURN value.
 * This field reads as 0b11111111.
 */
#define EXC_RETURN_INDICATOR_PREFIX     (0xFF << 24)
/* bit[0]: Exception Secure. The security domain the exception was taken to. */
#define EXC_RETURN_EXCEPTION_SECURE_Pos 0
#define EXC_RETURN_EXCEPTION_SECURE_Msk \
		BIT(EXC_RETURN_EXCEPTION_SECURE_Pos)
#define EXC_RETURN_EXCEPTION_SECURE_Non_Secure 0
#define EXC_RETURN_EXCEPTION_SECURE_Secure EXC_RETURN_EXCEPTION_SECURE_Msk
/* bit[2]: Stack Pointer selection. */
#define EXC_RETURN_SPSEL_Pos 2
#define EXC_RETURN_SPSEL_Msk BIT(EXC_RETURN_SPSEL_Pos)
#define EXC_RETURN_SPSEL_MAIN 0
#define EXC_RETURN_SPSEL_PROCESS EXC_RETURN_SPSEL_Msk
/* bit[3]: Mode. Indicates the Mode that was stacked from. */
#define EXC_RETURN_MODE_Pos 3
#define EXC_RETURN_MODE_Msk BIT(EXC_RETURN_MODE_Pos)
#define EXC_RETURN_MODE_HANDLER 0
#define EXC_RETURN_MODE_THREAD EXC_RETURN_MODE_Msk
/* bit[4]: Stack frame type. Indicates whether the stack frame is a standard
 * integer only stack frame or an extended floating-point stack frame.
 */
#define EXC_RETURN_STACK_FRAME_TYPE_Pos 4
#define EXC_RETURN_STACK_FRAME_TYPE_Msk BIT(EXC_RETURN_STACK_FRAME_TYPE_Pos)
#define EXC_RETURN_STACK_FRAME_TYPE_EXTENDED 0
#define EXC_RETURN_STACK_FRAME_TYPE_STANDARD EXC_RETURN_STACK_FRAME_TYPE_Msk
/* bit[5]: Default callee register stacking. Indicates whether the default
 * stacking rules apply, or whether the callee registers are already on the
 * stack.
 */
#define EXC_RETURN_CALLEE_STACK_Pos 5
#define EXC_RETURN_CALLEE_STACK_Msk BIT(EXC_RETURN_CALLEE_STACK_Pos)
#define EXC_RETURN_CALLEE_STACK_SKIPPED 0
#define EXC_RETURN_CALLEE_STACK_DEFAULT EXC_RETURN_CALLEE_STACK_Msk
/* bit[6]: Secure or Non-secure stack. Indicates whether a Secure or
 * Non-secure stack is used to restore stack frame on exception return.
 */
#define EXC_RETURN_RETURN_STACK_Pos 6
#define EXC_RETURN_RETURN_STACK_Msk BIT(EXC_RETURN_RETURN_STACK_Pos)
#define EXC_RETURN_RETURN_STACK_Non_Secure 0
#define EXC_RETURN_RETURN_STACK_Secure EXC_RETURN_RETURN_STACK_Msk

/* Integrity signature for an ARMv8-M implementation */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
#define INTEGRITY_SIGNATURE_STD 0xFEFA125BUL
#define INTEGRITY_SIGNATURE_EXT 0xFEFA125AUL
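/* The two values differ only in bit[0], which reflects the type of state
 * context that was stacked (1: standard frame, 0: extended FP frame).
 */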
#else
#define INTEGRITY_SIGNATURE 0xFEFA125BUL
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
/* Size (in words) of the additional state context that is pushed
 * to the Secure stack during a Non-Secure exception entry.
 */
#define ADDITIONAL_STATE_CONTEXT_WORDS 10

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* helpers to access memory/bus/usage faults */
#define SCB_CFSR_MEMFAULTSR \
	(uint32_t)((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) \
		   >> SCB_CFSR_MEMFAULTSR_Pos)
#define SCB_CFSR_BUSFAULTSR \
	(uint32_t)((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) \
		   >> SCB_CFSR_BUSFAULTSR_Pos)
#define SCB_CFSR_USGFAULTSR \
	(uint32_t)((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) \
		   >> SCB_CFSR_USGFAULTSR_Pos)
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */

/**
 *
 * Dump information regarding fault (FAULT_DUMP == 1)
 *
 * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
 * (short form).
 *
 * eg. (precise bus error escalated to hard fault):
 *
 * Fault! EXC #3
 * HARD FAULT: Escalation (see below)!
 * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
 * BFAR: 0xff001234
 *
 *
 *
 * Dump information regarding fault (FAULT_DUMP == 2)
 *
 * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
 * (long form), and return the error code for the kernel to identify the fatal
 * error reason.
 *
 * eg. (precise bus error escalated to hard fault):
 *
 * ***** HARD FAULT *****
 *    Fault escalation (see below)
 * ***** BUS FAULT *****
 *   Precise data bus error
 *   Address: 0xff001234
 *
 */

#if (CONFIG_FAULT_DUMP == 1)
static void fault_show(const z_arch_esf_t *esf, int fault)
{
	PR_EXC("Fault! EXC #%d", fault);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	PR_EXC("MMFSR: 0x%x, BFSR: 0x%x, UFSR: 0x%x", SCB_CFSR_MEMFAULTSR,
	       SCB_CFSR_BUSFAULTSR, SCB_CFSR_USGFAULTSR);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	PR_EXC("SFSR: 0x%x", SAU->SFSR);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
}
#else
/* For Dump level 2, detailed information is generated by the
 * fault handling functions for individual fault conditions, so this
 * function is left empty.
 *
 * For Dump level 0, no information needs to be generated.
 */
static void fault_show(const z_arch_esf_t *esf, int fault)
{
	(void)esf;
	(void)fault;
}
#endif /* FAULT_DUMP == 1 */

#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_arm_user_string_nlen);

static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_arm_user_string_nlen)
};
#endif

/* Perform an assessment whether an MPU fault shall be
 * treated as recoverable.
 *
 * @return true if error is recoverable, otherwise return false.
 */
static bool memory_fault_recoverable(z_arch_esf_t *esf, bool synchronous)
{
#ifdef CONFIG_USERSPACE
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		/* Mask out instruction mode */
		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
		/* Non-synchronous exceptions (e.g. DebugMonitor) may have
		 * allowed PC to continue to the next instruction.
		 */
		end += (synchronous) ? 0x0 : 0x4;
#else
		ARG_UNUSED(synchronous);
#endif
		if (esf->basic.pc >= start && esf->basic.pc < end) {
			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
			return true;
		}
	}
#endif

	return false;
}

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
	const uint32_t psp);
#endif /* CONFIG_MPU_STACK_GUARD || defined(CONFIG_USERSPACE) */

/**
 *
 * @brief Dump MemManage fault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
			      bool *recoverable)
{
	uint32_t reason = K_ERR_ARM_MEM_GENERIC;
	uint32_t mmfar = -EINVAL;

	PR_FAULT_INFO("***** MPU FAULT *****");

	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_STACKING;
		PR_FAULT_INFO("  Stacking error (context area might not be valid)");
	}
	if ((SCB->CFSR & SCB_CFSR_MUNSTKERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_UNSTACKING;
		PR_FAULT_INFO("  Unstacking error");
	}
	if ((SCB->CFSR & SCB_CFSR_DACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_DATA_ACCESS;
		PR_FAULT_INFO("  Data Access Violation");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the MMFAR value.
		 * 2. Read the MMARVALID bit in the MMFSR.
		 * The MMFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another higher
		 * priority exception might change the MMFAR value.
		 */
		uint32_t temp = SCB->MMFAR;

		if ((SCB->CFSR & SCB_CFSR_MMARVALID_Msk) != 0) {
			mmfar = temp;
			PR_EXC("  MMFAR Address: 0x%x", mmfar);
			if (from_hard_fault != 0) {
				/* clear SCB_MMAR[VALID] to reset */
				SCB->CFSR &= ~SCB_CFSR_MMARVALID_Msk;
			}
		}
	}
	if ((SCB->CFSR & SCB_CFSR_IACCVIOL_Msk) != 0) {
		reason = K_ERR_ARM_MEM_INSTRUCTION_ACCESS;
		PR_FAULT_INFO("  Instruction Access Violation");
	}
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		reason = K_ERR_ARM_MEM_FP_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO(
			"  Floating-point lazy state preservation error");
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* When stack protection is enabled, we need to assess
	 * if the memory violation error is a stack corruption.
	 *
	 * By design, being a Stacking MemManage fault is a necessary
	 * and sufficient condition for a thread stack corruption.
	 * [Cortex-M process stack pointer is always descending and
	 * is never modified by code (except for the context-switch
	 * routine), therefore, a stacking error implies the PSP has
	 * crossed into an area beyond the thread stack.]
	 *
	 * Data Access Violation errors may or may not be caused by
	 * thread stack overflows.
	 */
	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) ||
		(SCB->CFSR & SCB_CFSR_DACCVIOL_Msk)) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
		/* MemManage Faults are always banked between security
		 * states. Therefore, we can safely assume the fault
		 * originated from the same security state.
		 *
		 * As we only assess thread stack corruption, we only
		 * process the error further if the stack frame is on
		 * PSP. For always-banked MemManage Fault, this is
		 * equivalent to inspecting the RETTOBASE flag.
		 *
		 * Note:
		 * It is possible that MMFAR address is not written by the
		 * Cortex-M core; this occurs when the stacking error is
		 * not accompanied by a data access violation error (i.e.
		 * when stack overflows due to the exception entry frame
		 * stacking): z_check_thread_stack_fail() shall be able to
		 * handle the case of 'mmfar' holding the -EINVAL value.
		 */
		if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
			uint32_t min_stack_ptr = z_check_thread_stack_fail(mmfar,
				((uint32_t) &esf[0]));

			if (min_stack_ptr) {
				/* When MemManage Stacking Error has occurred,
				 * the stack context frame might be corrupted
				 * but the stack pointer may actually have
				 * descended below the allowed (thread) stack
				 * area. We may face a problem with un-stacking
				 * the frame, upon the exception return, if we
				 * do not have sufficient access permissions to
				 * read the corrupted stack frame. Therefore,
				 * we manually force the stack pointer to the
				 * lowest allowed position, inside the thread's
				 * stack.
				 *
				 * Note:
				 * The PSP will normally be adjusted in a tail-
				 * chained exception performing context switch,
				 * after aborting the corrupted thread. The
				 * adjustment, here, is required as tail-chain
				 * cannot always be guaranteed.
				 *
				 * The manual adjustment of PSP is safe, as we
				 * will not be re-scheduling this thread again
				 * for execution; thread stack corruption is a
				 * fatal error and a thread that corrupted its
				 * stack needs to be aborted.
				 */
				__set_PSP(min_stack_ptr);

				reason = K_ERR_STACK_CHK_FAIL;
			} else {
				__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
					"Stacking error not a stack fail\n");
			}
		}
#else
	(void)mmfar;
	__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
		"Stacking or Data Access Violation error "
		"without stack guard, user-mode or null-pointer detection\n");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
	}

	/* While handling this fault we may have triggered an FP lazy-stacking
	 * MemManage fault (at the time of writing, this can happen when
	 * printing). If so, clear the pending flag in addition to clearing
	 * the fault status bits.
	 */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTPENDED_Msk;
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* clear MMFSR sticky bits */
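	/* (CFSR status bits are write-one-to-clear: writing the set bits back clears them) */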
	SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk;

	/* Assess whether system shall ignore/recover from this MPU fault. */
	*recoverable = memory_fault_recoverable(esf, true);

	return reason;
}

/**
 *
 * @brief Dump BusFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason.
 *
 */
static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
{
	uint32_t reason = K_ERR_ARM_BUS_GENERIC;

	PR_FAULT_INFO("***** BUS FAULT *****");

	if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
		reason = K_ERR_ARM_BUS_STACKING;
		PR_FAULT_INFO("  Stacking error");
	}
	if (SCB->CFSR & SCB_CFSR_UNSTKERR_Msk) {
		reason = K_ERR_ARM_BUS_UNSTACKING;
		PR_FAULT_INFO("  Unstacking error");
	}
	if (SCB->CFSR & SCB_CFSR_PRECISERR_Msk) {
		reason = K_ERR_ARM_BUS_PRECISE_DATA_BUS;
		PR_FAULT_INFO("  Precise data bus error");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the BFAR value.
		 * 2. Read the BFARVALID bit in the BFSR.
		 * The BFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another
		 * higher priority exception might change the BFAR value.
		 */
		STORE_xFAR(bfar, SCB->BFAR);

		if ((SCB->CFSR & SCB_CFSR_BFARVALID_Msk) != 0) {
			PR_EXC("  BFAR Address: 0x%x", bfar);
			if (from_hard_fault != 0) {
				/* clear SCB_CFSR_BFAR[VALID] to reset */
				SCB->CFSR &= ~SCB_CFSR_BFARVALID_Msk;
			}
		}
	}
	if (SCB->CFSR & SCB_CFSR_IMPRECISERR_Msk) {
		reason = K_ERR_ARM_BUS_IMPRECISE_DATA_BUS;
		PR_FAULT_INFO("  Imprecise data bus error");
	}
	if ((SCB->CFSR & SCB_CFSR_IBUSERR_Msk) != 0) {
		reason = K_ERR_ARM_BUS_INSTRUCTION_BUS;
		PR_FAULT_INFO("  Instruction bus error");
#if !defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	}
#else
	} else if (SCB->CFSR & SCB_CFSR_LSPERR_Msk) {
		reason = K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO("  Floating-point lazy state preservation error");
	} else {
		;
	}
#endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */

#if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU)
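	/* The NXP SYSMPU latches one SPERR flag per slave port (walked
	 * MSB-first below); a flagged port records the faulting address and
	 * access details in its EAR/EDR register pair.
	 */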
	uint32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK;
	uint32_t mask = BIT(31);
	int i;
	uint32_t ear = -EINVAL;

	if (sperr) {
		for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
			if ((sperr & mask) == 0U) {
				continue;
			}
			STORE_xFAR(edr, SYSMPU->SP[i].EDR);
			ear = SYSMPU->SP[i].EAR;

			PR_FAULT_INFO("  NXP MPU error, port %d", i);
			PR_FAULT_INFO("    Mode: %s, %s Address: 0x%x",
			       edr & BIT(2) ? "Supervisor" : "User",
			       edr & BIT(1) ? "Data" : "Instruction",
			       ear);
			PR_FAULT_INFO(
					"    Type: %s, Master: %d, Regions: 0x%x",
			       edr & BIT(0) ? "Write" : "Read",
			       EMN(edr), EACD(edr));

			/* When stack protection is enabled, we need to assess
			 * if the memory violation error is a stack corruption.
			 *
			 * By design, being a Stacking Bus fault is a necessary
			 * and sufficient condition for a stack corruption.
			 */
			if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
				/* Note: we can assume the fault originated
				 * from the same security state for ARM
				 * platforms implementing the NXP MPU
				 * (CONFIG_CPU_HAS_NXP_MPU=y).
				 *
				 * As we only assess thread stack corruption,
				 * we only process the error further, if the
				 * stack frame is on PSP. For NXP MPU-related
				 * Bus Faults (banked), this is equivalent to
				 * inspecting the RETTOBASE flag.
				 */
				if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
					uint32_t min_stack_ptr =
						z_check_thread_stack_fail(ear,
							((uint32_t) &esf[0]));

					if (min_stack_ptr) {
						/* When BusFault Stacking Error
						 * has occurred, the stack
						 * context frame might be
						 * corrupted but the stack
						 * pointer may have actually
						 * moved. We may face problems
						 * with un-stacking the frame,
						 * upon exception return, if we
						 * do not have sufficient
						 * permissions to read the
						 * corrupted stack frame.
						 * Therefore, we manually force
						 * the stack pointer to the
						 * lowest allowed position.
						 *
						 * Note:
						 * The PSP will normally be
						 * adjusted in a tail-chained
						 * exception performing context
						 * switch, after aborting the
						 * corrupted thread. Here, the
						 * adjustment is required as
						 * tail-chain cannot always be
						 * guaranteed.
						 */
						__set_PSP(min_stack_ptr);

						reason =
							K_ERR_STACK_CHK_FAIL;
						break;
					}
				}
#else
				(void)ear;
				__ASSERT(0,
					"Stacking error without stack guard "
					"or User-mode support");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
			}
		}
		SYSMPU->CESR &= ~sperr;
	}
#endif /* defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU) */

	/* clear BFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk;

	*recoverable = memory_fault_recoverable(esf, true);

	return reason;
}

/**
 *
 * @brief Dump UsageFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t usage_fault(const z_arch_esf_t *esf)
{
	uint32_t reason = K_ERR_ARM_USAGE_GENERIC;

	PR_FAULT_INFO("***** USAGE FAULT *****");

	/* bits are sticky: they stack and must be reset */
	if ((SCB->CFSR & SCB_CFSR_DIVBYZERO_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_DIV_0;
		PR_FAULT_INFO("  Division by zero");
	}
	if ((SCB->CFSR & SCB_CFSR_UNALIGNED_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNALIGNED_ACCESS;
		PR_FAULT_INFO("  Unaligned memory access");
	}
#if defined(CONFIG_ARMV8_M_MAINLINE)
	if ((SCB->CFSR & SCB_CFSR_STKOF_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_STACK_OVERFLOW;
		PR_FAULT_INFO("  Stack overflow (context area not valid)");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
		/* Stack Overflows are always reported as stack corruption
		 * errors. Note that the built-in stack overflow mechanism
		 * prevents the context area from being stacked upon
		 * UsageFault exception entry. As a result, we cannot rely
		 * on the reported faulting instruction address to determine
		 * the instruction that triggered the stack overflow.
		 */
		reason = K_ERR_STACK_CHK_FAIL;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
	}
#endif /* CONFIG_ARMV8_M_MAINLINE */
	if ((SCB->CFSR & SCB_CFSR_NOCP_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_NO_COPROCESSOR;
		PR_FAULT_INFO("  No coprocessor instructions");
	}
	if ((SCB->CFSR & SCB_CFSR_INVPC_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EXC_RETURN;
		PR_FAULT_INFO("  Illegal load of EXC_RETURN into PC");
	}
	if ((SCB->CFSR & SCB_CFSR_INVSTATE_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_ILLEGAL_EPSR;
		PR_FAULT_INFO("  Illegal use of the EPSR");
	}
	if ((SCB->CFSR & SCB_CFSR_UNDEFINSTR_Msk) != 0) {
		reason = K_ERR_ARM_USAGE_UNDEFINED_INSTRUCTION;
		PR_FAULT_INFO("  Attempt to execute undefined instruction");
	}

	/* clear UFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_USGFAULTSR_Msk;

	return reason;
}

#if defined(CONFIG_ARM_SECURE_FIRMWARE)
/**
 *
 * @brief Dump SecureFault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t secure_fault(const z_arch_esf_t *esf)
{
	uint32_t reason = K_ERR_ARM_SECURE_GENERIC;

	PR_FAULT_INFO("***** SECURE FAULT *****");

	STORE_xFAR(sfar, SAU->SFAR);
	if ((SAU->SFSR & SAU_SFSR_SFARVALID_Msk) != 0) {
		PR_EXC("  Address: 0x%x", sfar);
	}

	/* bits are sticky: they stack and must be reset */
	if ((SAU->SFSR & SAU_SFSR_INVEP_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ENTRY_POINT;
		PR_FAULT_INFO("  Invalid entry point");
	} else if ((SAU->SFSR & SAU_SFSR_INVIS_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE;
		PR_FAULT_INFO("  Invalid integrity signature");
	} else if ((SAU->SFSR & SAU_SFSR_INVER_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_EXCEPTION_RETURN;
		PR_FAULT_INFO("  Invalid exception return");
	} else if ((SAU->SFSR & SAU_SFSR_AUVIOL_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_ATTRIBUTION_UNIT;
		PR_FAULT_INFO("  Attribution unit violation");
	} else if ((SAU->SFSR & SAU_SFSR_INVTRAN_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_TRANSITION;
		PR_FAULT_INFO("  Invalid transition");
	} else if ((SAU->SFSR & SAU_SFSR_LSPERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO("  Lazy state preservation");
	} else if ((SAU->SFSR & SAU_SFSR_LSERR_Msk) != 0) {
		reason = K_ERR_ARM_SECURE_LAZY_STATE_ERROR;
		PR_FAULT_INFO("  Lazy state error");
	}

	/* clear SFSR sticky bits */
	SAU->SFSR |= 0xFF;

	return reason;
}
#endif /* defined(CONFIG_ARM_SECURE_FIRMWARE) */

/**
 *
 * @brief Dump debug monitor exception information
 *
 * See z_arm_fault_dump() for example.
 *
 */
static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
{
	*recoverable = false;

	PR_FAULT_INFO(
		"***** Debug monitor exception *****");

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
	if (!z_arm_debug_monitor_event_error_check()) {
		/* Debug monitor exceptions that are not treated as errors
		 * by z_arm_debug_monitor_event_error_check() are considered
		 * recoverable.
		 */
		*recoverable = true;
	} else {

		*recoverable = memory_fault_recoverable(esf, false);
	}

#endif
}

#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

static inline bool z_arm_is_synchronous_svc(z_arch_esf_t *esf)
{
	uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
	/* SVC is a 16-bit instruction. On a synchronous SVC
	 * escalated to Hard Fault, the return address is the
	 * next instruction, i.e. after the SVC.
	 */
#define _SVC_OPCODE 0xDF00

	/* We are about to de-reference the program counter at the
	 * time of fault to determine if it was a SVC
	 * instruction. However, we don't know if the pc itself is
	 * valid -- we could have faulted due to trying to execute a
	 * corrupted function pointer.
	 *
	 * We will temporarily ignore BusFaults so that a bad program
	 * counter does not trigger an ARM lockup condition.
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && !defined(CONFIG_ARMV8_M_BASELINE)
	/* Note: ARMv6-M does not support CCR.BFHFNMIGN so this access
	 * could generate a fault if the pc was invalid.
	 */
	uint16_t fault_insn = *(ret_addr - 1);
#else
	SCB->CCR |= SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	uint16_t fault_insn = *(ret_addr - 1);

	SCB->CCR &= ~SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
#endif /* ARMV6_M_ARMV8_M_BASELINE && !ARMV8_M_BASELINE */

	if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
		((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
		return true;
	}
#undef _SVC_OPCODE
	return false;
}

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
static inline bool z_arm_is_pc_valid(uintptr_t pc)
{
	/* Is it in valid text region */
	if ((((uintptr_t)&__text_region_start) <= pc) && (pc < ((uintptr_t)&__text_region_end))) {
		return true;
	}

	/* Is it in valid ramfunc range */
	if ((((uintptr_t)&__ramfunc_start) <= pc) && (pc < ((uintptr_t)&__ramfunc_end))) {
		return true;
	}

#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay)
	/* Is it in the ITCM */
	if ((((uintptr_t)&__itcm_start) <= pc) && (pc < ((uintptr_t)&__itcm_end))) {
		return true;
	}
#endif

	return false;
}
#endif

/**
 *
 * @brief Dump hard fault information
 *
 * See z_arm_fault_dump() for example.
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	PR_FAULT_INFO("***** HARD FAULT *****");

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Workaround for #18712:
	 * HardFault may be due to escalation, as a result of
	 * an SVC instruction that could not be executed; this
	 * can occur if ARCH_EXCEPT() is called by an ISR,
	 * which executes at priority equal to the SVC handler
	 * priority. We handle the case of Kernel OOPS and Stack
	 * Fail here.
	 */

	if (z_arm_is_pc_valid((uintptr_t)esf->basic.pc) && z_arm_is_synchronous_svc(esf)) {
		PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
		reason = esf->basic.r0;
	}

	*recoverable = memory_fault_recoverable(esf, true);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	*recoverable = false;

	if ((SCB->HFSR & SCB_HFSR_VECTTBL_Msk) != 0) {
		PR_EXC("  Bus fault on vector table read");
	} else if ((SCB->HFSR & SCB_HFSR_DEBUGEVT_Msk) != 0) {
		PR_EXC("  Debug event");
	} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
		PR_EXC("  Fault escalation (see below)");
		if (z_arm_is_synchronous_svc(esf)) {
			PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
			reason = esf->basic.r0;
		} else if ((SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) != 0) {
			reason = mem_manage_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk) != 0) {
			reason = bus_fault(esf, 1, recoverable);
		} else if ((SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk) != 0) {
			reason = usage_fault(esf);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
		} else if (SAU->SFSR != 0) {
			reason = secure_fault(esf);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
		} else {
			__ASSERT(0,
			"Fault escalation without FSR info");
		}
	} else {
		__ASSERT(0,
		"HardFault without HFSR info."
		" Shall never occur.");
	}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	return reason;
}

/**
 *
 * @brief Dump reserved exception information
 *
 * See z_arm_fault_dump() for example.
 *
 */
static void reserved_exception(const z_arch_esf_t *esf, int fault)
{
	ARG_UNUSED(esf);

	PR_FAULT_INFO("***** %s %d) *****",
	       fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
	       fault - 16);
}

/* Handler function for ARM fault conditions. */
static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	*recoverable = false;

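	/* Handlers are selected by the Cortex-M exception number:
	 * 3 HardFault, 4 MemManage, 5 BusFault, 6 UsageFault,
	 * 7 SecureFault (ARMv8-M), 12 DebugMonitor.
	 */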
	switch (fault) {
	case 3:
		reason = hard_fault(esf, recoverable);
		break;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* HardFault is raised for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	case 4:
		reason = mem_manage_fault(esf, 0, recoverable);
		break;
	case 5:
		reason = bus_fault(esf, 0, recoverable);
		break;
	case 6:
		reason = usage_fault(esf);
		break;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	case 7:
		secure_fault(esf);
		break;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
	case 12:
		debug_monitor(esf, recoverable);
		break;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	default:
		reserved_exception(esf, fault);
		break;
	}

	if ((*recoverable) == false) {
		/* Dump generic information about the fault. */
		fault_show(esf, fault);
	}

	return reason;
}

#if defined(CONFIG_ARM_SECURE_FIRMWARE)
#if (CONFIG_FAULT_DUMP == 2)
/**
 * @brief Dump the Secure Stack information for an exception that
 * has occurred in Non-Secure state.
 *
 * @param secure_esf Pointer to the secure stack frame.
 */
static void secure_stack_dump(const z_arch_esf_t *secure_esf)
{
	/*
	 * In case a Non-Secure exception interrupted the Secure
	 * execution, the Secure state has stacked the additional
	 * state context and the top of the stack contains the
	 * integrity signature.
	 *
	 * In case of a Non-Secure function call the top of the
	 * stack contains the return address to Secure state.
	 */
	uint32_t *top_of_sec_stack = (uint32_t *)secure_esf;
	uint32_t sec_ret_addr;
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) ||
		(*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) {
#else
	if (*top_of_sec_stack == INTEGRITY_SIGNATURE) {
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
		/* Secure state interrupted by a Non-Secure exception.
		 * The return address after the additional state
		 * context, stacked by the Secure code upon
		 * Non-Secure exception entry.
		 */
		top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
		secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
		sec_ret_addr = secure_esf->basic.pc;
	} else {
		/* Exception during Non-Secure function call.
		 * The return address is located on top of stack.
		 */
		sec_ret_addr = *top_of_sec_stack;
	}
	PR_FAULT_INFO("  S instruction address:  0x%x", sec_ret_addr);

}
#define SECURE_STACK_DUMP(esf) secure_stack_dump(esf)
#else
/* We do not dump the Secure stack information for lower dump levels. */
#define SECURE_STACK_DUMP(esf)
#endif /* CONFIG_FAULT_DUMP == 2 */
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

/*
 * This internal function does the following:
 *
 * - Retrieves the exception stack frame
 * - Evaluates whether to report being in a nested exception
 *
 * If the ESF is not successfully retrieved, the function signals
 * an error by returning NULL.
 *
 * @return ESF pointer on success, otherwise return NULL
 */
static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
	bool *nested_exc)
{
	bool alternative_state_exc = false;
	z_arch_esf_t *ptr_esf = NULL;

	*nested_exc = false;

	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
			EXC_RETURN_INDICATOR_PREFIX) {
		/* Invalid EXC_RETURN value. This is a fatal error. */
		return NULL;
	}

#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
		/* Secure Firmware shall only handle Secure Exceptions.
		 * This is a fatal error.
		 */
		return NULL;
	}

	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
		/* Exception entry occurred in Secure stack. */
	} else {
		/* Exception entry occurred in Non-Secure stack. Therefore,
		 * msp/psp point to the Secure stack, however, the actual
		 * exception stack frame is located in the Non-Secure stack.
		 */
		alternative_state_exc = true;

		/* Dump the Secure stack before handling the actual fault. */
		z_arch_esf_t *secure_esf;

		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
			/* Secure stack pointed by PSP */
			secure_esf = (z_arch_esf_t *)psp;
		} else {
			/* Secure stack pointed by MSP */
			secure_esf = (z_arch_esf_t *)msp;
			*nested_exc = true;
		}

		SECURE_STACK_DUMP(secure_esf);

		/* Handle the actual fault.
		 * Extract the correct stack frame from the Non-Secure state
		 * and supply it to the fault handling function.
		 */
		if (exc_return & EXC_RETURN_MODE_THREAD) {
			ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
		} else {
			ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
		}
	}
#elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
	if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
		/* Non-Secure Firmware shall only handle Non-Secure Exceptions.
		 * This is a fatal error.
		 */
		return NULL;
	}

	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
		/* Exception entry occurred in Secure stack.
		 *
		 * Note that Non-Secure firmware cannot inspect the Secure
		 * stack to determine the root cause of the fault. Fault
		 * inspection will indicate the Non-Secure instruction
		 * that performed the branch to the Secure domain.
		 */
		alternative_state_exc = true;

		PR_FAULT_INFO("Exception occurred in Secure State");

		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
			/* Non-Secure stack frame on PSP */
			ptr_esf = (z_arch_esf_t *)psp;
		} else {
			/* Non-Secure stack frame on MSP */
			ptr_esf = (z_arch_esf_t *)msp;
		}
	} else {
		/* Exception entry occurred in Non-Secure stack. */
	}
#else
	/* The processor has a single execution state.
	 * We verify that the Thread mode is using PSP.
	 */
	if ((exc_return & EXC_RETURN_MODE_THREAD) &&
		(!(exc_return & EXC_RETURN_SPSEL_PROCESS))) {
		PR_EXC("SPSEL in thread mode does not indicate PSP");
		return NULL;
	}
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

	if (!alternative_state_exc) {
		if (exc_return & EXC_RETURN_MODE_THREAD) {
			/* Returning to thread mode */
			ptr_esf = (z_arch_esf_t *)psp;

		} else {
			/* Returning to handler mode */
			ptr_esf = (z_arch_esf_t *)msp;
			*nested_exc = true;
		}
	}

	return ptr_esf;
}

/**
 *
 * @brief ARM Fault handler
 *
 * This routine is called when fatal error conditions are detected by hardware
 * and is responsible for:
 * - resetting the processor fault status registers (for the case when the
 *   error handling policy allows the system to recover from the error),
 * - reporting the error information,
 * - determining the error reason to be provided as input to the user-
 *   provided routine, k_sys_fatal_error_handler().
 * The k_sys_fatal_error_handler() is invoked once the above operations are
 * completed, and is responsible for implementing the error handling policy.
 *
 * The function needs, first, to determine the exception stack frame.
 * Note that the current security state might not be the actual
 * state in which the processor was executing, when the exception occurred.
 * The actual state may need to be determined by inspecting the EXC_RETURN
 * value, which is provided as argument to the Fault handler.
 *
 * If the exception occurred in the same security state, the stack frame
 * will be pointed to by either MSP or PSP depending on the processor
 * execution state when the exception occurred. MSP and PSP values are
 * provided as arguments to the Fault handler.
 *
 * @param msp MSP value immediately after the exception occurred
 * @param psp PSP value immediately after the exception occurred
 * @param exc_return EXC_RETURN value present in LR after exception entry.
 * @param callee_regs Callee-saved registers (R4-R11, PSP)
 *
 */
void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
	_callee_saved_t *callee_regs)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;
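	/* The low bits of ICSR (VECTACTIVE) hold the number of the exception
	 * currently being serviced.
	 */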
	int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
	bool recoverable, nested_exc;
	z_arch_esf_t *esf;

	/* Create a stack-ed copy of the ESF to be used during
	 * the fault handling process.
	 */
	z_arch_esf_t esf_copy;

	/* Force unlock interrupts */
	arch_irq_unlock(0);

	/* Retrieve the Exception Stack Frame (ESF) to be supplied
	 * as argument to the remainder of the fault handling process.
	 */
	esf = get_esf(msp, psp, exc_return, &nested_exc);
	__ASSERT(esf != NULL,
		"ESF could not be retrieved successfully. Shall never occur.");

#ifdef CONFIG_DEBUG_COREDUMP
	z_arm_coredump_fault_sp = POINTER_TO_UINT(esf);
#endif

	reason = fault_handle(esf, fault, &recoverable);
	if (recoverable) {
		return;
	}

	/* Copy ESF */
#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
	memcpy(&esf_copy, esf, sizeof(z_arch_esf_t));
	ARG_UNUSED(callee_regs);
#else
	/* the extra exception info is not present in the original esf
	 * so we only copy the fields before those.
	 */
	memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
	esf_copy.extra_info = (struct __extra_esf_info) {
		.callee = callee_regs,
		.exc_return = exc_return,
		.msp = msp
	};
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */

	/* Overwrite stacked IPSR to mark a nested exception,
	 * or a return to Thread mode. Note that this may be
	 * required, if the retrieved ESF contents are invalid
	 * due to, for instance, a stacking error.
	 */
	if (nested_exc) {
		if ((esf_copy.basic.xpsr & IPSR_ISR_Msk) == 0) {
			esf_copy.basic.xpsr |= IPSR_ISR_Msk;
		}
	} else {
		esf_copy.basic.xpsr &= ~(IPSR_ISR_Msk);
	}

	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	z_arm_fatal_error(reason, &esf_copy);
}

/**
 *
 * @brief Initialization of fault handling
 *
 * Turns on the desired hardware faults.
 *
 */
void z_arm_fault_init(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
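	/* Trap integer division by zero (UsageFault: DIVBYZERO). */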
	SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* If Stack guarding via SP limit checking is enabled, disable
	 * SP limit checking inside HardFault and NMI. This is done
	 * in order to allow for the desired fault logging to execute
	 * properly in all cases.
	 *
	 * Note that this could allow a Secure Firmware Main Stack
	 * to descend into non-secure region during HardFault and
	 * NMI exception entry. To prevent this, non-secure
	 * memory regions must be located higher than secure memory
	 * regions.
	 *
	 * For Non-Secure Firmware this could allow the Non-Secure Main
	 * Stack to attempt to descend into secure region, in which case a
	 * Secure Hard Fault will occur and we can track the fault from there.
	 */
	SCB->CCR |= SCB_CCR_STKOFHFNMIGN_Msk;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
#ifdef CONFIG_TRAP_UNALIGNED_ACCESS
	SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
#endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
}