/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <inttypes.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_riscv_user_string_nlen);

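/*
 * Code ranges registered as recoverable: when a fault hits while the PC
 * is inside one of these ranges (e.g. while z_riscv_user_string_nlen
 * probes user memory), _Fault() below resumes execution at the range's
 * fixup entry instead of declaring a fatal error.
 */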
static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_riscv_user_string_nlen),
};
#endif /* CONFIG_USERSPACE */

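/*
 * Register dump format: two hex digits per byte of the native register
 * width. NO_REG is blank padding of the same width, for columns that
 * have no value to print.
 */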
#if __riscv_xlen == 32
 #define PR_REG "%08" PRIxPTR
 #define NO_REG "        "
#elif __riscv_xlen == 64
 #define PR_REG "%016" PRIxPTR
 #define NO_REG "                "
#endif

/* Stack trace function */
void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf);

uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf)
{
	/*
	 * Kernel stack pointer prior to this exception, i.e. before
	 * storing the exception stack frame.
	 */
	uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);

#ifdef CONFIG_USERSPACE
	if ((esf->mstatus & MSTATUS_MPP) == PRV_U) {
		/*
		 * Exception happened in user space:
		 * consider the saved user stack instead.
		 */
		sp = esf->sp;
	}
#endif

	return sp;
}

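/*
 * Map an mcause exception code to a human readable string, following the
 * RISC-V privileged spec. Codes the spec leaves reserved (10 and 14)
 * read "unknown", and anything at or above 16 is clamped to "unknown"
 * by the MIN() below.
 */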
const char *z_riscv_mcause_str(unsigned long cause)
{
	static const char *const mcause_str[17] = {
		[0] = "Instruction address misaligned",
		[1] = "Instruction access fault",
		[2] = "Illegal instruction",
		[3] = "Breakpoint",
		[4] = "Load address misaligned",
		[5] = "Load access fault",
		[6] = "Store/AMO address misaligned",
		[7] = "Store/AMO access fault",
		[8] = "Environment call from U-mode",
		[9] = "Environment call from S-mode",
		[10] = "unknown",
		[11] = "Environment call from M-mode",
		[12] = "Instruction page fault",
		[13] = "Load page fault",
		[14] = "unknown",
		[15] = "Store/AMO page fault",
		[16] = "unknown",
	};

	return mcause_str[MIN(cause, ARRAY_SIZE(mcause_str) - 1)];
}

FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
				       const struct arch_esf *esf)
{
	z_riscv_fatal_error_csf(reason, esf, NULL);
}

FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
					   const _callee_saved_t *csf)
{
	unsigned long mcause;

	__asm__ volatile("csrr %0, mcause" : "=r" (mcause));

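	/*
	 * Mask off any extra, implementation specific mcause bits so that
	 * only the standard exception code is interpreted below.
	 */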
	mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;
	LOG_ERR("");
	LOG_ERR(" mcause: %lu, %s", mcause, z_riscv_mcause_str(mcause));

#ifndef CONFIG_SOC_OPENISA_RV32M1
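	/*
	 * mtval holds exception specific information, typically the faulting
	 * address; the OpenISA RV32M1 does not implement it, hence the guard.
	 */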
	unsigned long mtval;

	__asm__ volatile("csrr %0, mtval" : "=r" (mtval));
	LOG_ERR("  mtval: %lx", mtval);
#endif /* CONFIG_SOC_OPENISA_RV32M1 */

#ifdef CONFIG_EXCEPTION_DEBUG
	if (esf != NULL) {
		LOG_ERR("     a0: " PR_REG "    t0: " PR_REG, esf->a0, esf->t0);
		LOG_ERR("     a1: " PR_REG "    t1: " PR_REG, esf->a1, esf->t1);
		LOG_ERR("     a2: " PR_REG "    t2: " PR_REG, esf->a2, esf->t2);
#if defined(CONFIG_RISCV_ISA_RV32E)
		LOG_ERR("     a3: " PR_REG, esf->a3);
		LOG_ERR("     a4: " PR_REG, esf->a4);
		LOG_ERR("     a5: " PR_REG, esf->a5);
#else
		LOG_ERR("     a3: " PR_REG "    t3: " PR_REG, esf->a3, esf->t3);
		LOG_ERR("     a4: " PR_REG "    t4: " PR_REG, esf->a4, esf->t4);
		LOG_ERR("     a5: " PR_REG "    t5: " PR_REG, esf->a5, esf->t5);
		LOG_ERR("     a6: " PR_REG "    t6: " PR_REG, esf->a6, esf->t6);
		LOG_ERR("     a7: " PR_REG, esf->a7);
#endif /* CONFIG_RISCV_ISA_RV32E */
		LOG_ERR("     sp: " PR_REG, z_riscv_get_sp_before_exc(esf));
		LOG_ERR("     ra: " PR_REG, esf->ra);
		LOG_ERR("   mepc: " PR_REG, esf->mepc);
		LOG_ERR("mstatus: " PR_REG, esf->mstatus);
		LOG_ERR("");
	}

	if (csf != NULL) {
#if defined(CONFIG_RISCV_ISA_RV32E)
		LOG_ERR("     s0: " PR_REG, csf->s0);
		LOG_ERR("     s1: " PR_REG, csf->s1);
#else
		LOG_ERR("     s0: " PR_REG "    s6: " PR_REG, csf->s0, csf->s6);
		LOG_ERR("     s1: " PR_REG "    s7: " PR_REG, csf->s1, csf->s7);
		LOG_ERR("     s2: " PR_REG "    s8: " PR_REG, csf->s2, csf->s8);
		LOG_ERR("     s3: " PR_REG "    s9: " PR_REG, csf->s3, csf->s9);
		LOG_ERR("     s4: " PR_REG "   s10: " PR_REG, csf->s4, csf->s10);
		LOG_ERR("     s5: " PR_REG "   s11: " PR_REG, csf->s5, csf->s11);
#endif /* CONFIG_RISCV_ISA_RV32E */
		LOG_ERR("");
	}
#endif /* CONFIG_EXCEPTION_DEBUG */

#ifdef CONFIG_EXCEPTION_STACK_TRACE
	z_riscv_unwind_stack(esf, csf);
#endif /* CONFIG_EXCEPTION_STACK_TRACE */

	z_fatal_error(reason, esf);
	CODE_UNREACHABLE;
}

static bool bad_stack_pointer(struct arch_esf *esf)
{
#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * Check if the kernel stack pointer prior to this exception (before
	 * storing the exception stack frame) was in the stack guard area.
	 */
	uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);

#ifdef CONFIG_USERSPACE
	if (arch_current_thread()->arch.priv_stack_start != 0 &&
	    sp >= arch_current_thread()->arch.priv_stack_start &&
	    sp <  arch_current_thread()->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}

	if (z_stack_is_user_capable(arch_current_thread()->stack_obj) &&
	    sp >= arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED &&
	    sp <  arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED
		  + Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}
#endif /* CONFIG_USERSPACE */

#if CONFIG_MULTITHREADING
	if (sp >= arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED &&
	    sp <  arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED
		  + Z_RISCV_STACK_GUARD_SIZE) {
		return true;
	}
#else
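	/*
	 * Without multithreading there are only two stacks to consider:
	 * the interrupt stack and the main stack, each with its guard
	 * area at the lowest addresses.
	 */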
	uintptr_t isr_stack = (uintptr_t)z_interrupt_stacks;
	uintptr_t main_stack = (uintptr_t)z_main_stack;

	if ((sp >= isr_stack && sp < isr_stack + Z_RISCV_STACK_GUARD_SIZE) ||
	    (sp >= main_stack && sp < main_stack + Z_RISCV_STACK_GUARD_SIZE)) {
		return true;
	}
#endif /* CONFIG_MULTITHREADING */
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_USERSPACE
	if ((esf->mstatus & MSTATUS_MPP) == PRV_U &&
	    (esf->sp < arch_current_thread()->stack_info.start ||
	     esf->sp > arch_current_thread()->stack_info.start +
		       arch_current_thread()->stack_info.size -
		       arch_current_thread()->stack_info.delta)) {
		/* user stack pointer moved outside of its allowed stack */
		return true;
	}
#endif

	return false;
}

void _Fault(struct arch_esf *esf)
{
#ifdef CONFIG_USERSPACE
	/*
	 * Assess whether this PMP fault shall be treated as recoverable:
	 * if the faulting instruction lies within a registered code range,
	 * resume execution at that range's fixup handler.
	 */
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		unsigned long start = (unsigned long)exceptions[i].start;
		unsigned long end = (unsigned long)exceptions[i].end;

		if (esf->mepc >= start && esf->mepc < end) {
			esf->mepc = (unsigned long)exceptions[i].fixup;
			return;
		}
	}
#endif /* CONFIG_USERSPACE */

	unsigned int reason = K_ERR_CPU_EXCEPTION;

	if (bad_stack_pointer(esf)) {
#ifdef CONFIG_PMP_STACK_GUARD
		/*
		 * Remove the thread's PMP setting to prevent triggering a stack
		 * overflow error again due to the previous configuration.
		 */
		z_riscv_pmp_stackguard_disable();
#endif /* CONFIG_PMP_STACK_GUARD */
		reason = K_ERR_STACK_CHK_FAIL;
	}

	z_riscv_fatal_error(reason, esf);
}

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
	user_fault(K_ERR_KERNEL_OOPS);
	CODE_UNREACHABLE;
}

void z_impl_user_fault(unsigned int reason)
{
	struct arch_esf *oops_esf = arch_current_thread()->syscall_frame;

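	/*
	 * Do not let a user thread claim an arbitrary fatal reason: anything
	 * other than a stack check failure is coerced to a kernel oops.
	 */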
	if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
	    reason != K_ERR_STACK_CHK_FAIL) {
		reason = K_ERR_KERNEL_OOPS;
	}
	z_riscv_fatal_error(reason, oops_esf);
}

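/*
 * Verification handler for the user_fault() syscall: the lone integer
 * argument needs no validation, so it forwards straight to the
 * implementation.
 */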
static void z_vrfy_user_fault(unsigned int reason)
{
	z_impl_user_fault(reason);
}

#include <zephyr/syscalls/user_fault_mrsh.c>

#endif /* CONFIG_USERSPACE */