// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
					 struct pt_regs *regs)
{
	unwind_init_common(state, current);

	state->fp = regs->regs[29];
	state->pc = regs->pc;
}
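
/* regs->regs[29] above is x29, which AAPCS64 reserves as the frame pointer. */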

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
	unwind_init_common(state, current);

	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}
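
/*
 * Usage sketch (illustrative only; dump_my_stack() is a hypothetical caller,
 * not part of this file). The caller must be noinline so that
 * __builtin_frame_address(1) refers to a real frame record:
 *
 *	static noinline void dump_my_stack(void)
 *	{
 *		struct unwind_state state;
 *
 *		unwind_init_from_caller(&state);
 *		... then walk the stack with unwind_next() ...
 *	}
 */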

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static inline void unwind_init_from_task(struct unwind_state *state,
					 struct task_struct *task)
{
	unwind_init_common(state, task);

	state->fp = thread_saved_fp(task);
	state->pc = thread_saved_pc(task);
}
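
/*
 * Note: thread_saved_fp() and thread_saved_pc() above return the fp/pc that
 * cpu_switch_to() stashed in the task's saved context when it was last
 * scheduled out, which is why the task must remain blocked for the duration
 * of the unwind.
 */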

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(state);
	if (err)
		return err;

	state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
	    (state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * This is a case where function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->pc))
		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

	return 0;
}
NOKPROBE_SYMBOL(unwind_next);
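
/*
 * For reference, an AAPCS64 frame record is a pair of 64-bit values stored at
 * the address the frame pointer holds; a sketch of the layout that
 * unwind_next_frame_record() follows (the struct name is illustrative, not a
 * real kernel type):
 *
 *	struct frame_record {
 *		u64 fp;		x29 of the caller, linking to the next record
 *		u64 lr;		x30, the return address for this frame
 *	};
 */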

static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);

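/*
 * Print one trace entry. The %pSb format below prints the symbolized address
 * and, for module addresses, appends the module's build ID so the trace can
 * be matched against the correct debug info.
 */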
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
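	/* likely here to inhibit tail-call optimization of dump_backtrace() */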
	barrier();
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})
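
/*
 * For example, STACKINFO_CPU(irq) expands to a (task == current) &&
 * !preemptible() check that yields stackinfo_get_irq() when the IRQ stack
 * may be accessed, and stackinfo_get_unknown() otherwise.
 */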

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
	};
	struct unwind_state state = {
		.stacks = stacks,
		.nr_stacks = ARRAY_SIZE(stacks),
	};

	if (regs) {
		if (task != current)
			return;
		unwind_init_from_regs(&state, regs);
	} else if (task == current) {
		unwind_init_from_caller(&state);
	} else {
		unwind_init_from_task(&state, task);
	}

	unwind(&state, consume_entry, cookie);
}
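
/*
 * Usage sketch (illustrative; collect_pcs() and struct my_trace are
 * hypothetical, not part of this file): a caller passes a
 * stack_trace_consume_fn that records each PC and returns false to stop the
 * walk early:
 *
 *	static bool collect_pcs(void *cookie, unsigned long pc)
 *	{
 *		struct my_trace *t = cookie;
 *
 *		if (t->nr >= t->max)
 *			return false;
 *		t->pcs[t->nr++] = pc;
 *		return true;
 *	}
 *
 *	struct my_trace trace = { ... };
 *	arch_stack_walk(collect_pcs, &trace, current, NULL);
 */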