/*
 * Copyright (c) 2024 Meta Platforms
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/debug/symtab.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

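/*
 * Defined elsewhere in the RISC-V arch code; recovers the stack pointer that was in
 * use before the exception was taken
 */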
uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf);

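/*
 * Per-frame callback: `addr` is a return address inside the text region and `sfp` the
 * stack or frame pointer of the frame it was found in; returning false stops the walk
 */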
typedef bool (*riscv_stacktrace_cb)(void *cookie, unsigned long addr, unsigned long sfp);

#define MAX_STACK_FRAMES CONFIG_ARCH_STACKWALK_MAX_FRAMES

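/*
 * Frame record layout with CONFIG_FRAME_POINTER: the function prologue saves the
 * caller's `fp` and the return address just below the new frame pointer, so the
 * record is found at `fp - sizeof(struct stackframe)`
 */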
struct stackframe {
	uintptr_t fp;
	uintptr_t ra;
};

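/* Predicate deciding whether a candidate frame address still lies in a valid stack region */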
typedef bool (*stack_verify_fn)(uintptr_t, const struct k_thread *const, const struct arch_esf *);

static inline bool in_irq_stack_bound(uintptr_t addr, uint8_t cpu_id)
{
	uintptr_t start, end;

	start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
	end = start + CONFIG_ISR_STACK_SIZE;

	return (addr >= start) && (addr < end);
}

static inline bool in_kernel_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
{
#ifdef CONFIG_THREAD_STACK_INFO
	uintptr_t start, end;

	start = thread->stack_info.start;
	end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);

	return (addr >= start) && (addr < end);
#else
	ARG_UNUSED(addr);
	ARG_UNUSED(thread);
	/* Return false as we can't check if the addr is in the thread stack without stack info */
	return false;
#endif
}

#ifdef CONFIG_USERSPACE
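/*
 * User threads run privileged code (e.g. during syscalls) on a separate privileged
 * stack, so the bound check is done against that region rather than the user stack
 */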
static inline bool in_user_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
{
	uintptr_t start, end;

	/* See: zephyr/include/zephyr/arch/riscv/arch.h */
	if (IS_ENABLED(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)) {
		start = thread->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE;
	} else {
		start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
	}
	end = Z_STACK_PTR_ALIGN(thread->arch.priv_stack_start + K_KERNEL_STACK_RESERVED +
				CONFIG_PRIVILEGED_STACK_SIZE);

	return (addr >= start) && (addr < end);
}
#endif /* CONFIG_USERSPACE */

static bool in_stack_bound(uintptr_t addr, const struct k_thread *const thread,
			   const struct arch_esf *esf)
{
	ARG_UNUSED(esf);

	if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
		return false;
	}

#ifdef CONFIG_USERSPACE
	if ((thread->base.user_options & K_USER) != 0) {
		return in_user_thread_stack_bound(addr, thread);
	}
#endif /* CONFIG_USERSPACE */

	return in_kernel_thread_stack_bound(addr, thread);
}

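/* `__text_region_start` and `__text_region_end` are linker-provided text region bounds */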
static inline bool in_text_region(uintptr_t addr)
{
	extern uintptr_t __text_region_start, __text_region_end;

	return (addr >= (uintptr_t)&__text_region_start) && (addr < (uintptr_t)&__text_region_end);
}

#ifdef CONFIG_FRAME_POINTER
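/*
 * Frame-pointer unwinder: follow the chain of saved frame records, reporting every
 * return address that lands in the text region. The walk stops once a frame fails
 * verification, stops moving toward higher addresses, or MAX_STACK_FRAMES is reached
 */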
static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
			    const struct arch_esf *esf, stack_verify_fn vrfy,
			    const _callee_saved_t *csf)
{
	uintptr_t fp, last_fp = 0;
	uintptr_t ra;
	struct stackframe *frame;

	if (esf != NULL) {
		/* Unwind the provided exception stack frame */
		fp = esf->s0;
		ra = esf->mepc;
	} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
		/* Unwind the current thread (default case when nothing is provided) */
		fp = (uintptr_t)__builtin_frame_address(0);
		ra = (uintptr_t)walk_stackframe;
	} else {
		/* Unwind the provided thread */
		fp = csf->s0;
		ra = csf->ra;
	}

	for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy(fp, thread, esf) && (fp > last_fp); i++) {
		if (in_text_region(ra) && !cb(cookie, ra, fp)) {
			break;
		}
		last_fp = fp;

		/* Unwind to the previous frame */
		frame = (struct stackframe *)fp - 1;

		if ((i == 0) && (esf != NULL)) {
			/* Report `esf->ra` if we are at the top of the stack */
			if (in_text_region(esf->ra) && !cb(cookie, esf->ra, fp)) {
				break;
			}
			/*
			 * For the first stack frame, `ra` is not stored in the frame if the
			 * preempted function is a leaf (it doesn't call any other function),
			 * in which case we observe:
			 *
			 *                   .-------------.
			 * frame[0]->fp ---> | frame[0] fp |
			 *                   :-------------:
			 * frame[0]->ra ---> | frame[1] fp |
			 *                   | frame[1] ra |
			 *                   :~~~~~~~~~~~~~:
			 *                   | frame[N] fp |
			 *
			 * Instead of:
			 *
			 *                   .-------------.
			 * frame[0]->fp ---> | frame[0] fp |
			 * frame[0]->ra ---> | frame[1] ra |
			 *                   :-------------:
			 *                   | frame[1] fp |
			 *                   | frame[1] ra |
			 *                   :~~~~~~~~~~~~~:
			 *                   | frame[N] fp |
			 *
			 * Check if `frame->ra` actually points to a `fp`, and adjust accordingly
			 */
			if (vrfy(frame->ra, thread, esf)) {
				fp = frame->ra;
				frame = (struct stackframe *)fp;
			}
		}

		fp = frame->fp;
		ra = frame->ra;
	}
}
#else /* !CONFIG_FRAME_POINTER */
register uintptr_t current_stack_pointer __asm__("sp");
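/*
 * Without frame pointers the stack cannot be unwound precisely: scan it one word at a
 * time and report every value that lands in the text region as a candidate return
 * address. This is a conservative heuristic and can report false positives
 */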
static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
			    const struct arch_esf *esf, stack_verify_fn vrfy,
			    const _callee_saved_t *csf)
{
	uintptr_t sp;
	uintptr_t ra;
	uintptr_t *ksp, last_ksp = 0;

	if (esf != NULL) {
		/* Unwind the provided exception stack frame */
		sp = z_riscv_get_sp_before_exc(esf);
		ra = esf->mepc;
	} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
		/* Unwind the current thread (default case when nothing is provided) */
		sp = current_stack_pointer;
		ra = (uintptr_t)walk_stackframe;
	} else {
		/* Unwind the provided thread */
		sp = csf->sp;
		ra = csf->ra;
	}

	ksp = (uintptr_t *)sp;
	for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy((uintptr_t)ksp, thread, esf) &&
			((uintptr_t)ksp > last_ksp);) {
		if (in_text_region(ra)) {
			if (!cb(cookie, ra, POINTER_TO_UINT(ksp))) {
				break;
			}
			/*
			 * Increment the frame counter only when `ra` falls within the text
			 * region, so that the MAX_STACK_FRAMES budget is spent on plausible
			 * return addresses
			 */
			i++;
		}
		last_ksp = (uintptr_t)ksp;
		/* Unwind to the previous frame */
		ra = ((struct arch_esf *)ksp++)->ra;
	}
}
#endif /* CONFIG_FRAME_POINTER */

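/*
 * Generic entry point of the kernel's stack-walking API. A minimal usage sketch,
 * assuming the `stack_trace_callback_fn` signature from the kernel headers and a
 * hypothetical `dump_cb` callback:
 *
 *   static bool dump_cb(void *cookie, unsigned long addr)
 *   {
 *           printk("ra: %lx\n", addr);
 *           return true;    // keep walking
 *   }
 *
 *   arch_stack_walk(dump_cb, NULL, NULL, NULL);    // walk the current thread
 */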
void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
		     const struct k_thread *thread, const struct arch_esf *esf)
{
	if (thread == NULL) {
		/* If `thread` is NULL, default to `arch_current_thread()` and try to unwind */
		thread = arch_current_thread();
	}

	walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound,
			&thread->callee_saved);
}

#ifdef CONFIG_EXCEPTION_STACK_TRACE
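/*
 * Fatal-path verifier: unlike `in_stack_bound()`, it also accepts addresses on the
 * per-CPU interrupt stack, since a fatal error may occur while servicing an IRQ
 */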
static bool in_fatal_stack_bound(uintptr_t addr, const struct k_thread *const thread,
				 const struct arch_esf *esf)
{
	if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
		return false;
	}

	if ((thread == NULL) || arch_is_in_isr()) {
		/* We were servicing an interrupt */
		uint8_t cpu_id = IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0U;

		return in_irq_stack_bound(addr, cpu_id);
	}

	return in_stack_bound(addr, thread, esf);
}

#if __riscv_xlen == 32
#define PR_REG "%08" PRIxPTR
#elif __riscv_xlen == 64
#define PR_REG "%016" PRIxPTR
#endif

#ifdef CONFIG_FRAME_POINTER
#define SFP "fp"
#else
#define SFP "sp"
#endif /* CONFIG_FRAME_POINTER */

#ifdef CONFIG_SYMTAB
#define LOG_STACK_TRACE(idx, sfp, ra, name, offset)                                        \
	LOG_ERR(" %2d: " SFP ": " PR_REG " ra: " PR_REG " [%s+0x%x]", idx, sfp, ra, name,  \
		offset)
#else
#define LOG_STACK_TRACE(idx, sfp, ra, name, offset)                                        \
	LOG_ERR(" %2d: " SFP ": " PR_REG " ra: " PR_REG, idx, sfp, ra)
#endif /* CONFIG_SYMTAB */

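/*
 * Per-frame print callback for the fatal path; always returns true so the walk only
 * stops on the frame limit or verification failure. When CONFIG_SYMTAB is disabled,
 * the unused `name`/`offset` macro arguments are discarded during expansion, so the
 * identifiers below do not need to exist
 */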
static bool print_trace_address(void *arg, unsigned long ra, unsigned long sfp)
{
	int *i = arg;
#ifdef CONFIG_SYMTAB
	uint32_t offset = 0;
	const char *name = symtab_find_symbol_name(ra, &offset);
#endif /* CONFIG_SYMTAB */

	LOG_STACK_TRACE((*i)++, sfp, ra, name, offset);

	return true;
}

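/*
 * Entry point used by the RISC-V fatal error handler to dump the call trace of the
 * faulting context
 */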
void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf)
{
	int i = 0;

	LOG_ERR("call trace:");
	walk_stackframe(print_trace_address, &i, arch_current_thread(), esf, in_fatal_stack_bound,
			csf);
	LOG_ERR("");
}
#endif /* CONFIG_EXCEPTION_STACK_TRACE */