/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel fatal error handler for ARM Cortex-M and Cortex-R
 *
 * This module provides the z_arm_fatal_error() routine for ARM Cortex-M
 * and Cortex-R CPUs.
 */

#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

static void esf_dump(const z_arch_esf_t *esf)
{
	LOG_ERR("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x",
		esf->basic.a1, esf->basic.a2, esf->basic.a3);
	LOG_ERR("r3/a4: 0x%08x r12/ip: 0x%08x r14/lr: 0x%08x",
		esf->basic.a4, esf->basic.ip, esf->basic.lr);
	LOG_ERR(" xpsr: 0x%08x", esf->basic.xpsr);
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	for (int i = 0; i < ARRAY_SIZE(esf->fpu.s); i += 4) {
		LOG_ERR("s[%2d]: 0x%08x s[%2d]: 0x%08x"
			" s[%2d]: 0x%08x s[%2d]: 0x%08x",
			i, (uint32_t)esf->fpu.s[i],
			i + 1, (uint32_t)esf->fpu.s[i + 1],
			i + 2, (uint32_t)esf->fpu.s[i + 2],
			i + 3, (uint32_t)esf->fpu.s[i + 3]);
	}
	LOG_ERR("fpscr: 0x%08x", esf->fpu.fpscr);
#endif
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	const struct _callee_saved *callee = esf->extra_info.callee;

	if (callee != NULL) {
		LOG_ERR("r4/v1: 0x%08x r5/v2: 0x%08x r6/v3: 0x%08x",
			callee->v1, callee->v2, callee->v3);
		LOG_ERR("r7/v4: 0x%08x r8/v5: 0x%08x r9/v6: 0x%08x",
			callee->v4, callee->v5, callee->v6);
		LOG_ERR("r10/v7: 0x%08x r11/v8: 0x%08x psp: 0x%08x",
			callee->v7, callee->v8, callee->psp);
	}

	LOG_ERR("EXC_RETURN: 0x%0x", esf->extra_info.exc_return);

#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
	LOG_ERR("Faulting instruction address (r15/pc): 0x%08x",
		esf->basic.pc);
}
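
/**
 * @brief ARM Cortex-M/Cortex-R fatal error handler
 *
 * Dumps the exception stack frame, when one is available, and then hands the
 * error off to the common kernel fatal error handling path.
 *
 * @param reason fatal error reason
 * @param esf exception stack frame, or NULL if one is not available
 */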
void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{
	if (esf != NULL) {
		esf_dump(esf);
	}
	z_fatal_error(reason, esf);
}

/**
 * @brief Handle a software-generated fatal exception
 * (e.g. kernel oops, panic, etc.).
 *
 * Notes:
 * - the function is invoked from the SVC Handler
 * - if triggered from nPRIV mode, only the oops and stack-check-failure
 *   error reasons may be propagated to the fault handling process.
 * - We expect the supplied exception stack frame to always be a valid
 *   frame. That is because, if the ESF cannot be stacked during an SVC,
 *   a processor fault (e.g. stacking error) will be generated, and the
 *   fault handler will be executed instead of the SVC.
 *
 * @param esf exception frame
 * @param callee_regs Callee-saved registers (R4-R11)
 */
void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs)
{
#if !(defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
	ARG_UNUSED(callee_regs);
#endif
	/* Stacked R0 holds the exception reason. */
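	/*
	 * How the reason ends up in R0 (an illustrative sketch, not the
	 * verbatim Zephyr macro): a kernel oops is raised by loading the
	 * error code into R0 and issuing the runtime-exception SVC, e.g.
	 *
	 *     register uint32_t r0 __asm__("r0") = K_ERR_KERNEL_OOPS;
	 *     __asm__ volatile("svc %[id]"
	 *                      :
	 *                      : "r" (r0), [id] "i" (_SVC_CALL_RUNTIME_EXCEPT)
	 *                      : "memory");
	 *
	 * R0 is therefore part of the ESF stacked on SVC entry and can be
	 * read back here as esf->basic.r0.
	 */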
	unsigned int reason = esf->basic.r0;

#if defined(CONFIG_USERSPACE)
	if (z_arm_preempted_thread_in_user_mode(esf)) {
		/*
		 * Exception triggered from user mode.
		 *
		 * User mode is only allowed to induce oopses and stack check
		 * failures via software-triggered system fatal exceptions.
		 */
		if (!((esf->basic.r0 == K_ERR_KERNEL_OOPS) ||
		      (esf->basic.r0 == K_ERR_STACK_CHK_FAIL))) {
			reason = K_ERR_KERNEL_OOPS;
		}
	}

#endif /* CONFIG_USERSPACE */

#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
	z_arm_fatal_error(reason, esf);
#else
	z_arch_esf_t esf_copy;

	memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* The extra exception info is collected in the callee_regs parameter
	 * on CONFIG_ARMV7_M_ARMV8_M_MAINLINE.
	 */
	esf_copy.extra_info = (struct __extra_esf_info) {
		.callee = callee_regs,
	};
#else
	/* Extra exception info is not collected for the kernel oops path
	 * today, so we make a copy of the ESF and zero out that information.
	 */
	esf_copy.extra_info = (struct __extra_esf_info) { 0 };
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */

	z_arm_fatal_error(reason, &esf_copy);
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
}

FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
	uint32_t *ssf_contents = ssf_ptr;
	z_arch_esf_t oops_esf = { 0 };

	/* TODO: Copy the rest of the register set out of ssf_ptr */
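	/*
	 * Only the return address is recovered here: the word at index 3 of
	 * the supervisor stack frame is used as the faulting PC. The exact
	 * frame layout is assumed to match what the AArch32 syscall entry
	 * code pushes.
	 */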
	oops_esf.basic.pc = ssf_contents[3];

	z_arm_fatal_error(K_ERR_KERNEL_OOPS, &oops_esf);
	CODE_UNREACHABLE;
}