// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction to be replaced is first checked
 * against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
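	/*
	 * aarch64_insn_patch_text_nosync() writes the new instruction and
	 * performs cache maintenance, but does not serialize against other
	 * CPUs: it relies on the architecture's guarantee that a single
	 * B/BL/NOP may be concurrently modified and executed.
	 */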
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

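	/*
	 * The instruction currently at 'ftrace_call' may be a NOP or a BL
	 * to a previous tracer, so skip validation and overwrite it
	 * unconditionally.
	 */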
	return ftrace_modify_code(pc, 0, new, false);
}

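/*
 * Return the ftrace trampoline PLT entry a module callsite should branch
 * to in order to reach 'addr' (one of the core ftrace entry points).
 */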
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'BL' instructions, modules may be placed too far
 * away to branch directly and must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
				      struct module *mod,
				      unsigned long *addr)
{
	unsigned long pc = rec->ip;
	long offset = (long)*addr - (long)pc;
	struct plt_entry *plt;

	/*
	 * When the target is within range of the 'BL' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
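	/* 'BL' encodes a signed 26-bit word offset, i.e. a range of +/-128MiB. */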
	if (offset >= -SZ_128M && offset < SZ_128M)
		return true;

	/*
	 * When the target is outside of the range of a 'BL' instruction, we
	 * must use a PLT to reach it. We can only place PLTs for modules, and
	 * only when module PLT support is built-in.
	 */
	if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		return false;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}

/*
 * Turn on the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

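	/*
	 * Both the old and new instructions are BLs; only the branch target
	 * changes (e.g. when a callsite is moved between the regular and the
	 * register-saving ftrace trampolines).
	 */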
	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

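	/*
	 * 'pc' is the first of the two NOPs, one instruction before the BL
	 * site at rec->ip; patch it to <MOV X9, LR>.
	 */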
	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

/*
 * Turn off the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old = 0, new;

	new = aarch64_insn_gen_nop();

	/*
	 * When using mcount, callsites in modules may have been initialized to
	 * call an arbitrary module PLT (which redirects to the _mcount stub)
	 * rather than the ftrace PLT we'll use at runtime (which redirects to
	 * the ftrace trampoline). We can ignore the old PLT when initializing
	 * the callsite.
	 *
	 * Note: 'mod' is only set at module load time.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
	    IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
		return aarch64_insn_patch_text_nosync((void *)pc, new);
	}

	if (!ftrace_find_callable_addr(rec, mod, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

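/*
 * Patching on arm64 does not use stop_machine(), so a bulk update may
 * cond_resched() between callsites; FTRACE_MAY_SLEEP tells the core
 * ftrace code that this is safe.
 */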
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent) on
 * the call stack with return_to_handler.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

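	/*
	 * function_graph_enter() returns 0 when the function is successfully
	 * hooked; only then divert the on-stack return address to
	 * return_to_handler.
	 */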
	if (!function_graph_enter(old, self_addr, frame_pointer,
				  (void *)frame_pointer)) {
		*parent = return_hooker;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/*
	 * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
	 * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
	 * in which we can safely modify the LR.
	 */
	struct pt_regs *regs = arch_ftrace_get_regs(fregs);
	unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);

	prepare_ftrace_return(ip, parent, frame_pointer(regs));
}
#else
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

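	/*
	 * 'ftrace_graph_call' labels a NOP placeholder inside
	 * ftrace_caller(); toggle it between a NOP and a plain (non-link)
	 * branch to ftrace_graph_caller().
	 */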
	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */