/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <inttypes.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_riscv_user_string_nlen);

static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_riscv_user_string_nlen),
};
#endif /* CONFIG_USERSPACE */

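/*
 * Register dump formatting: registers are printed as 8 hex digits on RV32
 * and 16 on RV64; NO_REG is blank padding of the same width for an empty
 * register column.
 */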
#if __riscv_xlen == 32
#define PR_REG "%08" PRIxPTR
#define NO_REG "        "
#elif __riscv_xlen == 64
#define PR_REG "%016" PRIxPTR
#define NO_REG "                "
#endif

/* Stack trace function */
void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf);

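/* Return the stack pointer value that was in effect just before this exception. */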
uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf)
{
	/*
	 * Kernel stack pointer prior to this exception, i.e. before
	 * the exception stack frame was stored.
	 */
	uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);

#ifdef CONFIG_USERSPACE
	if ((esf->mstatus & MSTATUS_MPP) == PRV_U) {
		/*
		 * Exception happened in user space:
		 * consider the saved user stack pointer instead.
		 */
		sp = esf->sp;
	}
#endif

	return sp;
}

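/* Report a fatal error without callee-saved register context. */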
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
				       const struct arch_esf *esf)
{
	z_riscv_fatal_error_csf(reason, esf, NULL);
}

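/*
 * Dump the exception stack frame and, when available, the callee-saved
 * registers, then hand the error off to the kernel fatal error handler.
 */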
FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
					   const _callee_saved_t *csf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
	if (esf != NULL) {
		LOG_ERR("     a0: " PR_REG "    t0: " PR_REG, esf->a0, esf->t0);
		LOG_ERR("     a1: " PR_REG "    t1: " PR_REG, esf->a1, esf->t1);
		LOG_ERR("     a2: " PR_REG "    t2: " PR_REG, esf->a2, esf->t2);
#if defined(CONFIG_RISCV_ISA_RV32E)
		LOG_ERR("     a3: " PR_REG, esf->a3);
		LOG_ERR("     a4: " PR_REG, esf->a4);
		LOG_ERR("     a5: " PR_REG, esf->a5);
#else
		LOG_ERR("     a3: " PR_REG "    t3: " PR_REG, esf->a3, esf->t3);
		LOG_ERR("     a4: " PR_REG "    t4: " PR_REG, esf->a4, esf->t4);
		LOG_ERR("     a5: " PR_REG "    t5: " PR_REG, esf->a5, esf->t5);
		LOG_ERR("     a6: " PR_REG "    t6: " PR_REG, esf->a6, esf->t6);
		LOG_ERR("     a7: " PR_REG, esf->a7);
#endif /* CONFIG_RISCV_ISA_RV32E */
		LOG_ERR("     sp: " PR_REG, z_riscv_get_sp_before_exc(esf));
		LOG_ERR("     ra: " PR_REG, esf->ra);
		LOG_ERR("   mepc: " PR_REG, esf->mepc);
		LOG_ERR("mstatus: " PR_REG, esf->mstatus);
		LOG_ERR("");
	}

	if (csf != NULL) {
#if defined(CONFIG_RISCV_ISA_RV32E)
		LOG_ERR("     s0: " PR_REG, csf->s0);
		LOG_ERR("     s1: " PR_REG, csf->s1);
#else
		LOG_ERR("     s0: " PR_REG "    s6: " PR_REG, csf->s0, csf->s6);
		LOG_ERR("     s1: " PR_REG "    s7: " PR_REG, csf->s1, csf->s7);
		LOG_ERR("     s2: " PR_REG "    s8: " PR_REG, csf->s2, csf->s8);
		LOG_ERR("     s3: " PR_REG "    s9: " PR_REG, csf->s3, csf->s9);
		LOG_ERR("     s4: " PR_REG "   s10: " PR_REG, csf->s4, csf->s10);
		LOG_ERR("     s5: " PR_REG "   s11: " PR_REG, csf->s5, csf->s11);
#endif /* CONFIG_RISCV_ISA_RV32E */
		LOG_ERR("");
	}

	if (IS_ENABLED(CONFIG_EXCEPTION_STACK_TRACE)) {
		z_riscv_unwind_stack(esf, csf);
	}

#endif /* CONFIG_EXCEPTION_DEBUG */
	z_fatal_error(reason, esf);
	CODE_UNREACHABLE;
}

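/* Map an mcause exception code to a human-readable description. */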
static char *cause_str(unsigned long cause)
{
	switch (cause) {
	case 0:
		return "Instruction address misaligned";
	case 1:
		return "Instruction access fault";
	case 2:
		return "Illegal instruction";
	case 3:
		return "Breakpoint";
	case 4:
		return "Load address misaligned";
	case 5:
		return "Load access fault";
	case 6:
		return "Store/AMO address misaligned";
	case 7:
		return "Store/AMO access fault";
	case 8:
		return "Environment call from U-mode";
	case 9:
		return "Environment call from S-mode";
	case 11:
		return "Environment call from M-mode";
	case 12:
		return "Instruction page fault";
	case 13:
		return "Load page fault";
	case 15:
		return "Store/AMO page fault";
	default:
		return "unknown";
	}
}

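/*
 * Decide whether this exception was caused by a stack overflow or by a
 * stack pointer that moved outside its allowed region.
 */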
static bool bad_stack_pointer(struct arch_esf *esf)
{
#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * Check if the kernel stack pointer prior to this exception (before
	 * the exception stack frame was stored) was in the stack guard area.
	 */
	uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);

#ifdef CONFIG_USERSPACE
	if (_current->arch.priv_stack_start != 0 &&
	    sp >= _current->arch.priv_stack_start &&
	    sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}

	if (z_stack_is_user_capable(_current->stack_obj) &&
	    sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED &&
	    sp < _current->stack_info.start - K_THREAD_STACK_RESERVED
		 + Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}
#endif /* CONFIG_USERSPACE */

	if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED &&
	    sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED
		 + Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_USERSPACE
	if ((esf->mstatus & MSTATUS_MPP) == 0 &&
	    (esf->sp < _current->stack_info.start ||
	     esf->sp > _current->stack_info.start +
		       _current->stack_info.size -
		       _current->stack_info.delta)) {
		/* user stack pointer moved outside of its allowed stack */
		return true;
	}
#endif

	return false;
}

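/*
 * Handle a synchronous CPU exception: apply a recoverable PMP fixup when one
 * is registered for the faulting address, otherwise log the cause and
 * escalate to a fatal error.
 */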
void _Fault(struct arch_esf *esf)
{
#ifdef CONFIG_USERSPACE
	/*
	 * Assess whether a PMP fault should be treated as recoverable:
	 * if the faulting instruction lies within a registered exception
	 * range, redirect execution to its fixup handler and return.
	 */
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		unsigned long start = (unsigned long)exceptions[i].start;
		unsigned long end = (unsigned long)exceptions[i].end;

		if (esf->mepc >= start && esf->mepc < end) {
			esf->mepc = (unsigned long)exceptions[i].fixup;
			return;
		}
	}
#endif /* CONFIG_USERSPACE */

	unsigned long mcause;

	__asm__ volatile("csrr %0, mcause" : "=r" (mcause));

#ifndef CONFIG_SOC_OPENISA_RV32M1
	unsigned long mtval;

	__asm__ volatile("csrr %0, mtval" : "=r" (mtval));
#endif

	mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;
	LOG_ERR("");
	LOG_ERR(" mcause: %ld, %s", mcause, cause_str(mcause));
#ifndef CONFIG_SOC_OPENISA_RV32M1
	LOG_ERR("  mtval: %lx", mtval);
#endif

	unsigned int reason = K_ERR_CPU_EXCEPTION;

	if (bad_stack_pointer(esf)) {
#ifdef CONFIG_PMP_STACK_GUARD
		/*
		 * Remove the thread's PMP stack guard setting so the previous
		 * configuration does not trigger the stack overflow exception
		 * again while handling this error.
		 */
		z_riscv_pmp_stackguard_disable();
#endif /* CONFIG_PMP_STACK_GUARD */
		reason = K_ERR_STACK_CHK_FAIL;
	}

	z_riscv_fatal_error(reason, esf);
}

#ifdef CONFIG_USERSPACE
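/* Generate a kernel oops on behalf of a system call. */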
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
	user_fault(K_ERR_KERNEL_OOPS);
	CODE_UNREACHABLE;
}

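/*
 * Implementation of the user_fault() syscall: report a fatal error for the
 * current thread using its saved syscall frame.
 */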
void z_impl_user_fault(unsigned int reason)
{
	struct arch_esf *oops_esf = _current->syscall_frame;

	if (((_current->base.user_options & K_USER) != 0) &&
	    reason != K_ERR_STACK_CHK_FAIL) {
		reason = K_ERR_KERNEL_OOPS;
	}
	z_riscv_fatal_error(reason, oops_esf);
}

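/* Syscall verification wrapper: simply forwards to the implementation. */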
static void z_vrfy_user_fault(unsigned int reason)
{
	z_impl_user_fault(reason);
}

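/* Pull in the generated syscall marshalling code for user_fault(). */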
#include <zephyr/syscalls/user_fault_mrsh.c>

#endif /* CONFIG_USERSPACE */