// SPDX-License-Identifier: GPL-2.0
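/*
 * Function tracer support for the nds32 architecture: the _mcount entry
 * points, the dynamic ftrace code-patching helpers and the function graph
 * tracer hooks.
 */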

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#ifndef CONFIG_DYNAMIC_FTRACE
extern void (*ftrace_trace_function)(unsigned long, unsigned long,
				     struct ftrace_ops*, struct pt_regs*);
extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
extern void ftrace_graph_caller(void);

noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	__asm__ (""); /* avoid being optimized away as a pure function */
}

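/*
 * Without dynamic ftrace, every traceable function calls _mcount from its
 * compiler-generated prologue.  _mcount hands the call site and its parent
 * to the registered tracer and, if the graph tracer is active, to
 * ftrace_graph_caller().
 */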
noinline void _mcount(unsigned long parent_ip)
{
	/* all state is saved by the compiler prologue */

	unsigned long ip = (unsigned long)__builtin_return_address(0);

	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
				      NULL, NULL);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
	    || ftrace_graph_entry != ftrace_graph_entry_stub)
		ftrace_graph_caller();
#endif

	/* all state is restored by the compiler epilogue */
}
EXPORT_SYMBOL(_mcount);

#else /* CONFIG_DYNAMIC_FTRACE */

noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	__asm__ (""); /* avoid being optimized away as a pure function */
}

noinline void __naked _mcount(unsigned long parent_ip)
{
	__asm__ (""); /* avoid being optimized away as a pure function */
}
EXPORT_SYMBOL(_mcount);

#define XSTR(s) STR(s)
#define STR(s) #s
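
/*
 * With dynamic ftrace each mcount call site is patched at run time: the
 * compiler-emitted call to _mcount is replaced with nops at boot, and the
 * nops become a call to _ftrace_caller when tracing is enabled.  The
 * ftrace_call and ftrace_graph_call labels inside _ftrace_caller are the
 * placeholders rewritten by ftrace_update_ftrace_func() and
 * ftrace_modify_graph_caller().  XSTR() stringifies MCOUNT_INSN_SIZE for
 * use in the inline assembly below.
 */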
void _ftrace_caller(unsigned long parent_ip)
{
	/* all state needed is saved by the compiler prologue */

	/*
	 * prepare arguments for the real tracing function
	 * first  arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
	 * second arg : parent_ip
	 */
	__asm__ __volatile__ (
		"move $r1, %0                              \n\t"
		"addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
		:
		: "r" (parent_ip), "r" (__builtin_return_address(0)));

	/* a placeholder for the call to the real tracing function */
	__asm__ __volatile__ (
		"ftrace_call:		\n\t"
		"nop			\n\t"
		"nop			\n\t"
		"nop			\n\t");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* a placeholder for the call to ftrace_graph_caller */
	__asm__ __volatile__ (
		"ftrace_graph_call:	\n\t"
		"nop			\n\t"
		"nop			\n\t"
		"nop			\n\t");
#endif
	/* all state needed is restored by the compiler epilogue */
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

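/*
 * Module text is made writable for the duration of ftrace code patching
 * and switched back to read-only afterwards.
 */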
int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	return 0;
}

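/*
 * A patched call site is a three-instruction sequence that builds the
 * target address in $r15 and branches through it:
 *
 *	sethi $r15, hi20(addr)       - upper 20 bits of the target address
 *	ori   $r15, $r15, lo12(addr) - lower 12 bits of the target address
 *	jral  $lp, $r15              - call via $r15, return address in $lp
 *
 * The helpers below assemble these instructions from fixed opcodes and
 * register fields ($r15 is encoded as 0xf, $lp as 0x1e).
 */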
static unsigned long gen_sethi_insn(unsigned long addr)
{
	unsigned long opcode = 0x46000000;
	unsigned long imm = addr >> 12;
	unsigned long rt_num = 0xf << 20;

	return ENDIAN_CONVERT(opcode | rt_num | imm);
}

static unsigned long gen_ori_insn(unsigned long addr)
{
	unsigned long opcode = 0x58000000;
	unsigned long imm = addr & 0x00000fff;
	unsigned long rt_num = 0xf << 20;
	unsigned long ra_num = 0xf << 15;

	return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
}

static unsigned long gen_jral_insn(unsigned long addr)
{
	unsigned long opcode = 0x4a000001;
	unsigned long rt_num = 0x1e << 20;
	unsigned long rb_num = 0xf << 10;

	return ENDIAN_CONVERT(opcode | rt_num | rb_num);
}

static void ftrace_gen_call_insn(unsigned long *call_insns,
				 unsigned long addr)
{
	call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u */
	call_insns[1] = gen_ori_insn(addr);   /* ori $r15, $r15, imm15u */
	call_insns[2] = gen_jral_insn(addr);  /* jral $lp, $r15 */
}

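/*
 * Patch a single call site: optionally verify that the instructions about
 * to be replaced match old_insn, write new_insn with probe_kernel_write(),
 * then flush the icache so the new code is actually executed.
 */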
static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
				unsigned long *new_insn, bool validate)
{
	unsigned long orig_insn[3];

	if (validate) {
		if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;
		if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
			      unsigned long *new_insn, bool validate)
{
	int ret;

	ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
	if (ret)
		return ret;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return ret;
}

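/*
 * Point the ftrace_call placeholder in _ftrace_caller at the current
 * tracer, or rewrite it back to nops when the tracer is ftrace_stub.  The
 * site is patched unconditionally (validate == false).
 */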
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc = (unsigned long)&ftrace_call;
	unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	if (func != ftrace_stub)
		ftrace_gen_call_insn(new_insn, (unsigned long)func);

	return ftrace_modify_code(pc, old_insn, new_insn, false);
}

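/*
 * Enable or disable tracing of the call site recorded in rec->ip by
 * swapping between the nop sequence and a generated call to addr; both
 * directions validate the existing instructions first.
 */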
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	ftrace_gen_call_insn(call_insn, addr);

	return ftrace_modify_code(pc, nop_insn, call_insn, true);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	ftrace_gen_call_insn(call_insn, addr);

	return ftrace_modify_code(pc, call_insn, nop_insn, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
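/*
 * Hook a function's return: *parent is the location of the saved return
 * address in the traced function's frame.  The call is pushed onto the
 * return stack and the saved address is replaced with return_to_handler
 * so that the function's exit is traced as well.
 */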
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);

	if (err == -EBUSY)
		return;

	*parent = return_hooker;
}

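/*
 * Called via the ftrace_graph_call placeholder in _ftrace_caller.  The
 * __builtin_frame_address()/__builtin_return_address() levels reach back
 * through the trampoline frames to recover the slot holding the traced
 * function's return address, its call-site address and the frame pointer
 * that prepare_ftrace_return() expects.
 */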
noinline void ftrace_graph_caller(void)
{
	unsigned long *parent_ip =
		(unsigned long *)(__builtin_frame_address(2) - 4);

	unsigned long selfpc =
		(unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);

	unsigned long frame_pointer =
		(unsigned long)__builtin_frame_address(3);

	prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
}

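/*
 * Trampoline installed as the fake return address by
 * prepare_ftrace_return(): it asks ftrace_return_to_handler() for the
 * original return address and moves it into $lp before restoring the
 * registers it saved.
 */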
extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
void __naked return_to_handler(void)
{
	__asm__ __volatile__ (
		/* save state needed by the ABI */
		"smw.adm $r0,[$sp],$r1,#0x0  \n\t"

		/* get original return address */
		"move $r0, $fp               \n\t"
		"bal ftrace_return_to_handler\n\t"
		"move $lp, $r0               \n\t"

		/* restore state needed by the ABI */
		"lmw.bim $r0,[$sp],$r1,#0x0  \n\t");
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;

static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);

	if (enable)
		return ftrace_modify_code(pc, nop_insn, call_insn, true);
	else
		return ftrace_modify_code(pc, call_insn, nop_insn, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACE_IRQFLAGS
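/*
 * Out-of-line wrappers around trace_hardirqs_off()/trace_hardirqs_on(),
 * presumably kept as separate noinline symbols so they can be called from
 * assembly entry code.
 */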
noinline void __trace_hardirqs_off(void)
{
	trace_hardirqs_off();
}
noinline void __trace_hardirqs_on(void)
{
	trace_hardirqs_on();
}
#endif /* CONFIG_TRACE_IRQFLAGS */