1 /*
2  * Copyright (c) 2024 Meta Platforms
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/debug/symtab.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/kernel_structs.h>
10 #include <zephyr/linker/linker-defs.h>
11 #include <kernel_internal.h>
12 #include <zephyr/logging/log.h>
13 
14 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
15 
16 uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf);
17 
18 typedef bool (*riscv_stacktrace_cb)(void *cookie, unsigned long addr, unsigned long sfp);
19 
20 #define MAX_STACK_FRAMES CONFIG_ARCH_STACKWALK_MAX_FRAMES
21 
22 struct stackframe {
23 	uintptr_t fp;
24 	uintptr_t ra;
25 };
26 
27 typedef bool (*stack_verify_fn)(uintptr_t, const struct k_thread *const, const struct arch_esf *);
28 
in_irq_stack_bound(uintptr_t addr,uint8_t cpu_id)29 static inline bool in_irq_stack_bound(uintptr_t addr, uint8_t cpu_id)
30 {
31 	uintptr_t start, end;
32 
33 	start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
34 	end = start + CONFIG_ISR_STACK_SIZE;
35 
36 	return (addr >= start) && (addr < end);
37 }
38 
/*
 * Test whether @addr falls inside @thread's kernel-mode stack region.
 *
 * Requires CONFIG_THREAD_STACK_INFO to know the stack bounds; without it
 * this conservatively reports false for every address.
 */
static inline bool in_kernel_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
{
#ifdef CONFIG_THREAD_STACK_INFO
	uintptr_t start, end;

	/*
	 * Special handling to support stacktrace in dummy thread during system initialization,
	 * as its stack info isn't initialized.
	 */
	if (is_thread_dummy(thread)) {
		/* The dummy thread executes on the interrupt stack area */
		start = (uintptr_t)z_interrupt_stacks;
		end = Z_STACK_PTR_ALIGN(start + __z_interrupt_stack_SIZEOF);
	} else {
		start = thread->stack_info.start;
		end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);
	}

	/* Half-open interval: [start, end) */
	return (addr >= start) && (addr < end);
#else
	ARG_UNUSED(addr);
	ARG_UNUSED(thread);
	/* Return false as we can't check if the addr is in the thread stack without stack info */
	return false;
#endif
}
64 
65 #ifdef CONFIG_USERSPACE
/*
 * Test whether @addr falls inside the privileged stack of a user-mode thread.
 *
 * The location of the privileged stack relative to priv_stack_start depends on
 * the PMP alignment scheme; see the referenced arch header for the layout.
 */
static inline bool in_user_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
{
	uintptr_t start, end;

	/* See: zephyr/include/zephyr/arch/riscv/arch.h */
	if (IS_ENABLED(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)) {
		/* Usable region starts past the leading stack guard */
		start = thread->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE;
	} else {
		/* Privileged stack is carved out below the user stack buffer */
		start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
	}
	end = Z_STACK_PTR_ALIGN(thread->arch.priv_stack_start + K_KERNEL_STACK_RESERVED +
				CONFIG_PRIVILEGED_STACK_SIZE);

	/* Half-open interval: [start, end) */
	return (addr >= start) && (addr < end);
}
81 #endif /* CONFIG_USERSPACE */
82 
/*
 * Stack-pointer validator used during normal (non-fatal) stack walks.
 *
 * Rejects misaligned addresses, then dispatches to the user- or kernel-stack
 * bound check depending on the thread's privilege level. @esf is unused here
 * but kept so the signature matches stack_verify_fn.
 */
static bool in_stack_bound(uintptr_t addr, const struct k_thread *const thread,
			   const struct arch_esf *esf)
{
	ARG_UNUSED(esf);

	/* A valid stack slot is always pointer-aligned */
	if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
		return false;
	}

#ifdef CONFIG_USERSPACE
	if ((thread->base.user_options & K_USER) != 0) {
		return in_user_thread_stack_bound(addr, thread);
	}
#endif /* CONFIG_USERSPACE */

	return in_kernel_thread_stack_bound(addr, thread);
}
100 
in_text_region(uintptr_t addr)101 static inline bool in_text_region(uintptr_t addr)
102 {
103 	return (addr >= (uintptr_t)__text_region_start) && (addr < (uintptr_t)__text_region_end);
104 }
105 
106 #ifdef CONFIG_FRAME_POINTER
/*
 * Walk the frame-pointer chain and invoke @cb for each return address found.
 *
 * Starting state is taken, in priority order, from:
 *  - @esf   (unwinding an exception frame),
 *  - the caller's own frame (current thread, nothing provided), or
 *  - @csf   (unwinding another thread's callee-saved registers).
 *
 * Each candidate frame pointer is validated with @vrfy before dereferencing,
 * and the walk stops at MAX_STACK_FRAMES, on a non-increasing fp (loop
 * protection), or when @cb returns false. Only addresses inside the text
 * region are reported.
 */
static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
			    const struct arch_esf *esf, stack_verify_fn vrfy,
			    const _callee_saved_t *csf)
{
	uintptr_t fp, last_fp = 0;
	uintptr_t ra;
	struct stackframe *frame;

	if (esf != NULL) {
		/* Unwind the provided exception stack frame */
		fp = esf->s0;
		ra = esf->mepc;
	} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
		/* Unwind current thread (default case when nothing is provided ) */
		fp = (uintptr_t)__builtin_frame_address(0);
		ra = (uintptr_t)walk_stackframe;
	} else {
		/* Unwind the provided thread */
		fp = csf->s0;
		ra = csf->ra;
	}

	/* `fp > last_fp` guards against cycles: the stack grows down, so each
	 * saved frame pointer must be strictly above the previous one.
	 */
	for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy(fp, thread, esf) && (fp > last_fp); i++) {
		if (in_text_region(ra) && !cb(cookie, ra, fp)) {
			break;
		}
		last_fp = fp;

		/* Unwind to the previous frame */
		frame = (struct stackframe *)fp - 1;

		if ((i == 0) && (esf != NULL)) {
			/* Print `esf->ra` if we are at the top of the stack */
			if (in_text_region(esf->ra) && !cb(cookie, esf->ra, fp)) {
				break;
			}
			/**
			 * For the first stack frame, the `ra` is not stored in the frame if the
			 * preempted function doesn't call any other function, we can observe:
			 *
			 *                     .-------------.
			 *   frame[0]->fp ---> | frame[0] fp |
			 *                     :-------------:
			 *   frame[0]->ra ---> | frame[1] fp |
			 *                     | frame[1] ra |
			 *                     :~~~~~~~~~~~~~:
			 *                     | frame[N] fp |
			 *
			 * Instead of:
			 *
			 *                     .-------------.
			 *   frame[0]->fp ---> | frame[0] fp |
			 *   frame[0]->ra ---> | frame[1] ra |
			 *                     :-------------:
			 *                     | frame[1] fp |
			 *                     | frame[1] ra |
			 *                     :~~~~~~~~~~~~~:
			 *                     | frame[N] fp |
			 *
			 * Check if `frame->ra` actually points to a `fp`, and adjust accordingly
			 */
			if (vrfy(frame->ra, thread, esf)) {
				fp = frame->ra;
				frame = (struct stackframe *)fp;
			}
		}

		fp = frame->fp;
		ra = frame->ra;
	}
}
178 #else  /* !CONFIG_FRAME_POINTER */
/* Named register variable: reads the live stack pointer of the caller. */
register uintptr_t current_stack_pointer __asm__("sp");
/*
 * Frame-pointer-less stack walk: scan the stack word by word, treating each
 * slot as a potential saved `ra` and reporting only values that land in the
 * text region. Starting state comes from @esf, the current stack pointer, or
 * @csf, as in the CONFIG_FRAME_POINTER variant. @vrfy bounds-checks each
 * scanned address; the monotonic `last_ksp` comparison prevents cycles.
 *
 * NOTE(review): the walk advances `ksp` one word at a time but reads through
 * a `struct arch_esf *` cast to pick the `ra` field offset — presumably this
 * matches how `ra` is laid out relative to the scanned slot; confirm against
 * the arch_esf definition.
 */
static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
			    const struct arch_esf *esf, stack_verify_fn vrfy,
			    const _callee_saved_t *csf)
{
	uintptr_t sp;
	uintptr_t ra;
	uintptr_t *ksp, last_ksp = 0;

	if (esf != NULL) {
		/* Unwind the provided exception stack frame */
		sp = z_riscv_get_sp_before_exc(esf);
		ra = esf->mepc;
	} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
		/* Unwind current thread (default case when nothing is provided ) */
		sp = current_stack_pointer;
		ra = (uintptr_t)walk_stackframe;
	} else {
		/* Unwind the provided thread */
		sp = csf->sp;
		ra = csf->ra;
	}

	ksp = (uintptr_t *)sp;
	for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy((uintptr_t)ksp, thread, esf) &&
			((uintptr_t)ksp > last_ksp);) {
		if (in_text_region(ra)) {
			if (!cb(cookie, ra, POINTER_TO_UINT(ksp))) {
				break;
			}
			/*
			 * Increment the iterator only if `ra` is within the text region to get the
			 * most out of it
			 */
			i++;
		}
		last_ksp = (uintptr_t)ksp;
		/* Unwind to the previous frame */
		ra = ((struct arch_esf *)ksp++)->ra;
	}
}
220 #endif /* CONFIG_FRAME_POINTER */
221 
/*
 * Public entry point for architecture stack walking.
 *
 * @callback_fn is invoked once per discovered frame; @thread selects whose
 * stack to unwind (NULL means the current thread); @esf, when non-NULL,
 * provides an exception frame to start from.
 */
void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
		     const struct k_thread *thread, const struct arch_esf *esf)
{
	/* Default to the running thread when the caller passes NULL */
	const struct k_thread *target = (thread != NULL) ? thread : _current;

	walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, target, esf, in_stack_bound,
			&target->callee_saved);
}
233 
234 #ifdef CONFIG_EXCEPTION_STACK_TRACE
in_fatal_stack_bound(uintptr_t addr,const struct k_thread * const thread,const struct arch_esf * esf)235 static bool in_fatal_stack_bound(uintptr_t addr, const struct k_thread *const thread,
236 				 const struct arch_esf *esf)
237 {
238 	if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
239 		return false;
240 	}
241 
242 	if ((thread == NULL) || arch_is_in_isr()) {
243 		/* We were servicing an interrupt */
244 		uint8_t cpu_id = IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0U;
245 
246 		return in_irq_stack_bound(addr, cpu_id);
247 	}
248 
249 	return in_stack_bound(addr, thread, esf);
250 }
251 
252 #if __riscv_xlen == 32
253 #define PR_REG "%08" PRIxPTR
254 #elif __riscv_xlen == 64
255 #define PR_REG "%016" PRIxPTR
256 #endif
257 
258 #ifdef CONFIG_FRAME_POINTER
259 #define SFP "fp"
260 #else
261 #define SFP "sp"
262 #endif /* CONFIG_FRAME_POINTER */
263 
264 #ifdef CONFIG_SYMTAB
265 #define LOG_STACK_TRACE(idx, sfp, ra, name, offset)                                                \
266 	EXCEPTION_DUMP("     %2d: " SFP ": " PR_REG " ra: " PR_REG " [%s+0x%x]",		   \
267 			idx, sfp, ra, name,  offset)
268 #else
269 #define LOG_STACK_TRACE(idx, sfp, ra, name, offset)                                                \
270 	EXCEPTION_DUMP("     %2d: " SFP ": " PR_REG " ra: " PR_REG, idx, sfp, ra)
271 #endif /* CONFIG_SYMTAB */
272 
/*
 * Stack-walk callback that logs one trace entry per frame.
 *
 * @arg is a pointer to the running frame index, incremented on each call.
 * Always returns true so the walk continues to MAX_STACK_FRAMES.
 *
 * When CONFIG_SYMTAB is disabled, `name`/`offset` are passed to
 * LOG_STACK_TRACE but discarded unexpanded by that macro variant, so the
 * undeclared identifiers never reach the compiler.
 */
static bool print_trace_address(void *arg, unsigned long ra, unsigned long sfp)
{
	int *i = arg;
#ifdef CONFIG_SYMTAB
	uint32_t offset = 0;
	const char *name = symtab_find_symbol_name(ra, &offset);
#endif /* CONFIG_SYMTAB */

	LOG_STACK_TRACE((*i)++, sfp, ra, name, offset);

	return true;
}
285 
z_riscv_unwind_stack(const struct arch_esf * esf,const _callee_saved_t * csf)286 void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf)
287 {
288 	int i = 0;
289 
290 	EXCEPTION_DUMP("call trace:");
291 	walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
292 	EXCEPTION_DUMP("");
293 }
294 #endif /* CONFIG_EXCEPTION_STACK_TRACE */
295