/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 *
 * Released under the GPL v2.
 */

#include "builtin.h"
#include "perf.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <fcntl.h>
#include <poll.h>

#include "debug.h"
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "util/config.h"


#define DEFAULT_TRACER  "function_graph"

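/*
 * Options and state for one 'perf ftrace' session.  The filter lists
 * collect function name patterns from the command line before they are
 * written to the corresponding tracefs files.
 */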
struct perf_ftrace {
	struct perf_evlist	*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	int			graph_depth;
};

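/* a single function filter pattern, kept on one of the lists above */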
struct filter_entry {
	struct list_head list;
	char name[];
};

static bool done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
 * since we asked for it by setting its exec_error callback to the
 * function below, ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	/* workload_exec_errno = info->si_value.sival_int; */
	done = true;
}

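/*
 * Write (or append) a value to a file under the tracefs mount point
 * (e.g. /sys/kernel/debug/tracing).  Returns 0 on success, -1 on error.
 */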
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';	/* overwrites the NUL; size + 1 bytes are written below */

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}

static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

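/*
 * Put tracing back to a clean default state before (and after) a
 * session: tracer off, 'nop' tracer selected, and no pid, cpu, depth
 * or function filter restrictions.
 */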
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	reset_tracing_filters();
	return 0;
}

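/*
 * Restrict tracing to the workload's threads by appending each tid to
 * set_ftrace_pid.  Skipped when tracing is restricted by cpu instead.
 */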
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < thread_map__nr(ftrace->evlist->threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  ftrace->evlist->threads->map[i]);
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}

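/*
 * Write a hex cpu mask string to tracing_cpumask.  The buffer needs one
 * hex digit for every 4 cpus, a ',' separator for every 32 cpus, and a
 * terminating NUL.
 */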
static int set_tracing_cpumask(struct cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* ',' is needed for every 32 cpus */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}


static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct cpu_map *cpumap = ftrace->evlist->cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}

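/* restore tracing_cpumask to cover all online cpus */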
static int reset_tracing_cpu(void)
{
	struct cpu_map *cpumap = cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	cpu_map__put(cpumap);
	return ret;
}

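/* append each pattern on 'funcs' to the given tracefs filter file */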
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}

static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* old kernels do not have this filter */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}

static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}

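/* limit the function_graph tracer to 'graph_depth' levels, if requested */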
static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	char buf[16];

	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);

	if (write_tracing_file("max_graph_depth", buf) < 0)
		return -1;

	return 0;
}

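/*
 * Run one tracing session: reset the ftrace state, apply the
 * pid/cpu/filter/depth settings, select the tracer, then stream
 * trace_pipe to stdout until the workload exits or the user
 * interrupts us.
 */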
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (geteuid() != 0) {
		pr_err("ftrace only works for root!\n");
		return -1;
	}

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		goto out_reset;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		goto out_reset;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		goto out_reset;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		goto out_reset;
	}

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	if (write_tracing_file("tracing_on", "1") < 0) {
		pr_err("can't enable tracing\n");
		goto out_close_fd;
	}

	perf_evlist__start_workload(ftrace->evlist);

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return done ? 0 : -1;
}

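/*
 * Handle the 'ftrace.' section of the perf config file.  Currently only
 * 'ftrace.tracer' is recognized, and it must name one of the two
 * supported tracers.
 */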
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}

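/* option callback: add one function name pattern to the option's list */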
static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}

static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		free(pos);
	}
}

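/*
 * Entry point for 'perf ftrace'.  Typical invocations look like:
 *
 *   perf ftrace -- ls                  # function_graph trace of 'ls'
 *   perf ftrace -t function -p 1234    # function trace of an existing pid
 *   perf ftrace -a -T 'sched*'         # system-wide, sched* functions only
 *
 * (the pid and patterns above are illustrative)
 */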
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "tracer to use: function_graph (default) or function"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "trace on existing process id"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "trace given functions only", parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "do not trace given functions", parse_filter_func),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "set graph filter on given functions", parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "set nograph filter on given functions", parse_filter_func),
	OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
		    "max depth for function graph tracer"),
	OPT_END()
	};

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&ftrace.target))
		usage_with_options(ftrace_usage, ftrace_options);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, sizeof(errbuf));
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = perf_evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	perf_evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}