1 /*
2 * builtin-trace.c
3 *
4 * Builtin 'trace' command:
5 *
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
9 *
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11 *
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
13 *
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15 */
16
17 #include "util/record.h"
18 #include <traceevent/event-parse.h>
19 #include <api/fs/tracing_path.h>
20 #include <bpf/bpf.h>
21 #include "util/bpf_map.h"
22 #include "util/rlimit.h"
23 #include "builtin.h"
24 #include "util/cgroup.h"
25 #include "util/color.h"
26 #include "util/config.h"
27 #include "util/debug.h"
28 #include "util/dso.h"
29 #include "util/env.h"
30 #include "util/event.h"
31 #include "util/evsel.h"
32 #include "util/evsel_fprintf.h"
33 #include "util/synthetic-events.h"
34 #include "util/evlist.h"
35 #include "util/evswitch.h"
36 #include "util/mmap.h"
37 #include <subcmd/pager.h>
38 #include <subcmd/exec-cmd.h>
39 #include "util/machine.h"
40 #include "util/map.h"
41 #include "util/symbol.h"
42 #include "util/path.h"
43 #include "util/session.h"
44 #include "util/thread.h"
45 #include <subcmd/parse-options.h>
46 #include "util/strlist.h"
47 #include "util/intlist.h"
48 #include "util/thread_map.h"
49 #include "util/stat.h"
50 #include "util/tool.h"
51 #include "util/util.h"
52 #include "trace/beauty/beauty.h"
53 #include "trace-event.h"
54 #include "util/parse-events.h"
55 #include "util/bpf-loader.h"
56 #include "callchain.h"
57 #include "print_binary.h"
58 #include "string2.h"
59 #include "syscalltbl.h"
60 #include "rb_resort.h"
61 #include "../perf.h"
62
63 #include <errno.h>
64 #include <inttypes.h>
65 #include <poll.h>
66 #include <signal.h>
67 #include <stdlib.h>
68 #include <string.h>
69 #include <linux/err.h>
70 #include <linux/filter.h>
71 #include <linux/kernel.h>
72 #include <linux/random.h>
73 #include <linux/stringify.h>
74 #include <linux/time64.h>
75 #include <linux/zalloc.h>
76 #include <fcntl.h>
77 #include <sys/sysmacros.h>
78
79 #include <linux/ctype.h>
80 #include <perf/mmap.h>
81
82 #ifndef O_CLOEXEC
83 # define O_CLOEXEC 02000000
84 #endif
85
86 #ifndef F_LINUX_SPECIFIC_BASE
87 # define F_LINUX_SPECIFIC_BASE 1024
88 #endif
89
90 /*
91 * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
92 */
93 struct syscall_arg_fmt {
94 size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
95 bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
96 unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
97 void *parm;
98 const char *name;
99 u16 nr_entries; // for arrays
100 bool show_zero;
101 };
102
103 struct syscall_fmt {
104 const char *name;
105 const char *alias;
106 struct {
107 const char *sys_enter,
108 *sys_exit;
109 } bpf_prog_name;
110 struct syscall_arg_fmt arg[6];
111 u8 nr_args;
112 bool errpid;
113 bool timeout;
114 bool hexret;
115 };
116
117 struct trace {
118 struct perf_tool tool;
119 struct syscalltbl *sctbl;
120 struct {
121 struct syscall *table;
122 struct bpf_map *map;
123 struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
124 struct bpf_map *sys_enter,
125 *sys_exit;
126 } prog_array;
127 struct {
128 struct evsel *sys_enter,
129 *sys_exit,
130 *augmented;
131 } events;
132 struct bpf_program *unaugmented_prog;
133 } syscalls;
134 struct {
135 struct bpf_map *map;
136 } dump;
137 struct record_opts opts;
138 struct evlist *evlist;
139 struct machine *host;
140 struct thread *current;
141 struct bpf_object *bpf_obj;
142 struct cgroup *cgroup;
143 u64 base_time;
144 FILE *output;
145 unsigned long nr_events;
146 unsigned long nr_events_printed;
147 unsigned long max_events;
148 struct evswitch evswitch;
149 struct strlist *ev_qualifier;
150 struct {
151 size_t nr;
152 int *entries;
153 } ev_qualifier_ids;
154 struct {
155 size_t nr;
156 pid_t *entries;
157 struct bpf_map *map;
158 } filter_pids;
159 double duration_filter;
160 double runtime_ms;
161 struct {
162 u64 vfs_getname,
163 proc_getname;
164 } stats;
165 unsigned int max_stack;
166 unsigned int min_stack;
167 int raw_augmented_syscalls_args_size;
168 bool raw_augmented_syscalls;
169 bool fd_path_disabled;
170 bool sort_events;
171 bool not_ev_qualifier;
172 bool live;
173 bool full_time;
174 bool sched;
175 bool multiple_threads;
176 bool summary;
177 bool summary_only;
178 bool errno_summary;
179 bool failure_only;
180 bool show_comm;
181 bool print_sample;
182 bool show_tool_stats;
183 bool trace_syscalls;
184 bool libtraceevent_print;
185 bool kernel_syscallchains;
186 s16 args_alignment;
187 bool show_tstamp;
188 bool show_duration;
189 bool show_zeros;
190 bool show_arg_names;
191 bool show_string_prefix;
192 bool force;
193 bool vfs_getname;
194 int trace_pgfaults;
195 char *perfconfig_events;
196 struct {
197 struct ordered_events data;
198 u64 last;
199 } oe;
200 };
201
202 struct tp_field {
203 int offset;
204 union {
205 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
206 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
207 };
208 };
209
210 #define TP_UINT_FIELD(bits) \
211 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
212 { \
213 u##bits value; \
214 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
215 return value; \
216 }
217
218 TP_UINT_FIELD(8);
219 TP_UINT_FIELD(16);
220 TP_UINT_FIELD(32);
221 TP_UINT_FIELD(64);
222
223 #define TP_UINT_FIELD__SWAPPED(bits) \
224 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
225 { \
226 u##bits value; \
227 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
228 return bswap_##bits(value);\
229 }
230
231 TP_UINT_FIELD__SWAPPED(16);
232 TP_UINT_FIELD__SWAPPED(32);
233 TP_UINT_FIELD__SWAPPED(64);
234
235 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
236 {
237 field->offset = offset;
238
239 switch (size) {
240 case 1:
241 field->integer = tp_field__u8;
242 break;
243 case 2:
244 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
245 break;
246 case 4:
247 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
248 break;
249 case 8:
250 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
251 break;
252 default:
253 return -1;
254 }
255
256 return 0;
257 }
258
259 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
260 {
261 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
262 }
263
264 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
265 {
266 return sample->raw_data + field->offset;
267 }
268
269 static int __tp_field__init_ptr(struct tp_field *field, int offset)
270 {
271 field->offset = offset;
272 field->pointer = tp_field__ptr;
273 return 0;
274 }
275
276 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
277 {
278 return __tp_field__init_ptr(field, format_field->offset);
279 }
280
281 struct syscall_tp {
282 struct tp_field id;
283 union {
284 struct tp_field args, ret;
285 };
286 };
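
/*
 * The union works because each tracepoint only ever needs one of the two
 * members: sys_enter uses 'args' (a pointer into the raw payload where the
 * six syscall argument words start), while sys_exit uses 'ret' (the return
 * value), both located via the accessors initialized below.
 */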
287
288 /*
289 * The evsel->priv as used by 'perf trace'
290 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
291 * fmt: for all the other tracepoints
292 */
293 struct evsel_trace {
294 struct syscall_tp sc;
295 struct syscall_arg_fmt *fmt;
296 };
297
298 static struct evsel_trace *evsel_trace__new(void)
299 {
300 return zalloc(sizeof(struct evsel_trace));
301 }
302
303 static void evsel_trace__delete(struct evsel_trace *et)
304 {
305 if (et == NULL)
306 return;
307
308 zfree(&et->fmt);
309 free(et);
310 }
311
312 /*
313 * Used with raw_syscalls:sys_{enter,exit} and with the
314 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
315 */
316 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
317 {
318 struct evsel_trace *et = evsel->priv;
319
320 return &et->sc;
321 }
322
323 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
324 {
325 if (evsel->priv == NULL) {
326 evsel->priv = evsel_trace__new();
327 if (evsel->priv == NULL)
328 return NULL;
329 }
330
331 return __evsel__syscall_tp(evsel);
332 }
333
334 /*
335 * Used with all the other tracepoints.
336 */
337 static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
338 {
339 struct evsel_trace *et = evsel->priv;
340
341 return et->fmt;
342 }
343
344 static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
345 {
346 struct evsel_trace *et = evsel->priv;
347
348 if (evsel->priv == NULL) {
349 et = evsel->priv = evsel_trace__new();
350
351 if (et == NULL)
352 return NULL;
353 }
354
355 if (et->fmt == NULL) {
356 et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
357 if (et->fmt == NULL)
358 goto out_delete;
359 }
360
361 return __evsel__syscall_arg_fmt(evsel);
362
363 out_delete:
364 evsel_trace__delete(evsel->priv);
365 evsel->priv = NULL;
366 return NULL;
367 }
368
369 static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
370 {
371 struct tep_format_field *format_field = evsel__field(evsel, name);
372
373 if (format_field == NULL)
374 return -1;
375
376 return tp_field__init_uint(field, format_field, evsel->needs_swap);
377 }
378
379 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
380 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
381 evsel__init_tp_uint_field(evsel, &sc->name, #name); })
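
/*
 * For instance, perf_evsel__init_sc_tp_uint_field(evsel, id) expands to
 * roughly:
 *
 *	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);
 *	   evsel__init_tp_uint_field(evsel, &sc->id, "id"); })
 *
 * i.e. the 'name' token selects both the struct syscall_tp member and, via
 * the preprocessor's # operator, the tracepoint field name to look up.
 */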
382
383 static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
384 {
385 struct tep_format_field *format_field = evsel__field(evsel, name);
386
387 if (format_field == NULL)
388 return -1;
389
390 return tp_field__init_ptr(field, format_field);
391 }
392
393 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
394 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
395 evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
396
397 static void evsel__delete_priv(struct evsel *evsel)
398 {
399 zfree(&evsel->priv);
400 evsel__delete(evsel);
401 }
402
403 static int evsel__init_syscall_tp(struct evsel *evsel)
404 {
405 struct syscall_tp *sc = evsel__syscall_tp(evsel);
406
407 if (sc != NULL) {
408 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
409 evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
410 return -ENOENT;
411 return 0;
412 }
413
414 return -ENOMEM;
415 }
416
417 static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
418 {
419 struct syscall_tp *sc = evsel__syscall_tp(evsel);
420
421 if (sc != NULL) {
422 struct tep_format_field *syscall_id = evsel__field(tp, "id");
423 if (syscall_id == NULL)
424 syscall_id = evsel__field(tp, "__syscall_nr");
425 if (syscall_id == NULL ||
426 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
427 return -EINVAL;
428
429 return 0;
430 }
431
432 return -ENOMEM;
433 }
434
435 static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
436 {
437 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
438
439 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
440 }
441
442 static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
443 {
444 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
445
446 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
447 }
448
449 static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
450 {
451 if (evsel__syscall_tp(evsel) != NULL) {
452 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
453 return -ENOENT;
454
455 evsel->handler = handler;
456 return 0;
457 }
458
459 return -ENOMEM;
460 }
461
462 static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
463 {
464 struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
465
466 /* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
467 if (IS_ERR(evsel))
468 evsel = evsel__newtp("syscalls", direction);
469
470 if (IS_ERR(evsel))
471 return NULL;
472
473 if (evsel__init_raw_syscall_tp(evsel, handler))
474 goto out_delete;
475
476 return evsel;
477
478 out_delete:
479 evsel__delete_priv(evsel);
480 return NULL;
481 }
482
483 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
484 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
485 fields->name.integer(&fields->name, sample); })
486
487 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
488 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
489 fields->name.pointer(&fields->name, sample); })
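
/*
 * Sketch of how these get used when handling a sys_enter sample further
 * down in this file:
 *
 *	u64 id     = perf_evsel__sc_tp_uint(evsel, id, sample);
 *	void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 *
 * i.e. they go through the accessors set up by the init macros above to
 * read the syscall number and the packed argument payload.
 */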
490
491 size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
492 {
493 int idx = val - sa->offset;
494
495 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
496 size_t printed = scnprintf(bf, size, intfmt, val);
497 if (show_suffix)
498 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
499 return printed;
500 }
501
502 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
503 }
504
505 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
506 {
507 int idx = val - sa->offset;
508
509 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
510 size_t printed = scnprintf(bf, size, intfmt, val);
511 if (show_prefix)
512 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
513 return printed;
514 }
515
516 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
517 }
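
/*
 * Example: with strarray__itimers defined below (entries "REAL", "VIRTUAL",
 * "PROF", prefix "ITIMER_", offset 0), val 0 is printed as "REAL", or as
 * "ITIMER_REAL" when show_prefix is set, while an out of range value such
 * as 7 falls back to the intfmt ("7") followed by an "ITIMER_???" marker
 * if the prefix is being shown.
 */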
518
519 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
520 const char *intfmt,
521 struct syscall_arg *arg)
522 {
523 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
524 }
525
526 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
527 struct syscall_arg *arg)
528 {
529 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
530 }
531
532 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
533
534 bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
535 {
536 return strarray__strtoul(arg->parm, bf, size, ret);
537 }
538
539 bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
540 {
541 return strarray__strtoul_flags(arg->parm, bf, size, ret);
542 }
543
544 bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
545 {
546 return strarrays__strtoul(arg->parm, bf, size, ret);
547 }
548
549 size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
550 {
551 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
552 }
553
554 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
555 {
556 size_t printed;
557 int i;
558
559 for (i = 0; i < sas->nr_entries; ++i) {
560 struct strarray *sa = sas->entries[i];
561 int idx = val - sa->offset;
562
563 if (idx >= 0 && idx < sa->nr_entries) {
564 if (sa->entries[idx] == NULL)
565 break;
566 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
567 }
568 }
569
570 printed = scnprintf(bf, size, intfmt, val);
571 if (show_prefix)
572 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
573 return printed;
574 }
575
576 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
577 {
578 int i;
579
580 for (i = 0; i < sa->nr_entries; ++i) {
581 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
582 *ret = sa->offset + i;
583 return true;
584 }
585 }
586
587 return false;
588 }
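
/*
 * Example: strarray__strtoul(&strarray__whences (defined below), "CUR", 3, &ret)
 * matches entry 1 and sets ret to offset + 1 == 1, i.e. SEEK_CUR. The 'size'
 * limit allows matching a token that is not NUL terminated, as long as the
 * entry itself ends exactly there. This is how the ->strtoul hooks map
 * symbolic names back to numeric values.
 */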
589
590 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
591 {
592 u64 val = 0;
593 char *tok = bf, *sep, *end;
594
595 *ret = 0;
596
597 while (size != 0) {
598 int toklen = size;
599
600 sep = memchr(tok, '|', size);
601 if (sep != NULL) {
602 size -= sep - tok + 1;
603
604 end = sep - 1;
605 while (end > tok && isspace(*end))
606 --end;
607
608 toklen = end - tok + 1;
609 }
610
611 while (isspace(*tok))
612 ++tok;
613
614 if (isalpha(*tok) || *tok == '_') {
615 if (!strarray__strtoul(sa, tok, toklen, &val))
616 return false;
617 } else {
618 bool is_hexa = tok[0] == '0' && (tok[1] == 'x' || tok[1] == 'X');
619
620 val = strtoul(tok, NULL, is_hexa ? 16 : 0);
621 }
622
623 *ret |= (1 << (val - 1));
624
625 if (sep == NULL)
626 break;
627 tok = sep + 1;
628 }
629
630 return true;
631 }
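
/*
 * Example: with the fsmount_flags strarray below, the token "CLOEXEC"
 * resolves to index 1, so the loop ORs in (1 << 0) and *ret ends up as 0x1,
 * i.e. FSMOUNT_CLOEXEC. Several names may be combined with '|' in the
 * input string; purely numeric tokens are handled by strtoul().
 */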
632
633 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
634 {
635 int i;
636
637 for (i = 0; i < sas->nr_entries; ++i) {
638 struct strarray *sa = sas->entries[i];
639
640 if (strarray__strtoul(sa, bf, size, ret))
641 return true;
642 }
643
644 return false;
645 }
646
647 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
648 struct syscall_arg *arg)
649 {
650 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
651 }
652
653 #ifndef AT_FDCWD
654 #define AT_FDCWD -100
655 #endif
656
657 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
658 struct syscall_arg *arg)
659 {
660 int fd = arg->val;
661 const char *prefix = "AT_FD";
662
663 if (fd == AT_FDCWD)
664 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
665
666 return syscall_arg__scnprintf_fd(bf, size, arg);
667 }
668
669 #define SCA_FDAT syscall_arg__scnprintf_fd_at
670
671 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
672 struct syscall_arg *arg);
673
674 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
675
676 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
677 {
678 return scnprintf(bf, size, "%#lx", arg->val);
679 }
680
681 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
682 {
683 if (arg->val == 0)
684 return scnprintf(bf, size, "NULL");
685 return syscall_arg__scnprintf_hex(bf, size, arg);
686 }
687
688 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
689 {
690 return scnprintf(bf, size, "%d", arg->val);
691 }
692
693 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
694 {
695 return scnprintf(bf, size, "%ld", arg->val);
696 }
697
698 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
699 {
700 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
701 // fill missing comms using thread__set_comm()...
702 // here or in a special syscall_arg__scnprintf_pid_sched_tp...
703 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
704 }
705
706 #define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
707
708 static const char *bpf_cmd[] = {
709 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
710 "MAP_GET_NEXT_KEY", "PROG_LOAD",
711 };
712 static DEFINE_STRARRAY(bpf_cmd, "BPF_");
713
714 static const char *fsmount_flags[] = {
715 [1] = "CLOEXEC",
716 };
717 static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
718
719 #include "trace/beauty/generated/fsconfig_arrays.c"
720
721 static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
722
723 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
724 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
725
726 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
727 static DEFINE_STRARRAY(itimers, "ITIMER_");
728
729 static const char *keyctl_options[] = {
730 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
731 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
732 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
733 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
734 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
735 };
736 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
737
738 static const char *whences[] = { "SET", "CUR", "END",
739 #ifdef SEEK_DATA
740 "DATA",
741 #endif
742 #ifdef SEEK_HOLE
743 "HOLE",
744 #endif
745 };
746 static DEFINE_STRARRAY(whences, "SEEK_");
747
748 static const char *fcntl_cmds[] = {
749 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
750 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
751 "SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
752 "GETOWNER_UIDS",
753 };
754 static DEFINE_STRARRAY(fcntl_cmds, "F_");
755
756 static const char *fcntl_linux_specific_cmds[] = {
757 "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
758 "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
759 "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
760 };
761
762 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
763
764 static struct strarray *fcntl_cmds_arrays[] = {
765 &strarray__fcntl_cmds,
766 &strarray__fcntl_linux_specific_cmds,
767 };
768
769 static DEFINE_STRARRAYS(fcntl_cmds_arrays);
770
771 static const char *rlimit_resources[] = {
772 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
773 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
774 "RTTIME",
775 };
776 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
777
778 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
779 static DEFINE_STRARRAY(sighow, "SIG_");
780
781 static const char *clockid[] = {
782 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
783 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
784 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
785 };
786 static DEFINE_STRARRAY(clockid, "CLOCK_");
787
788 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
789 struct syscall_arg *arg)
790 {
791 bool show_prefix = arg->show_string_prefix;
792 const char *suffix = "_OK";
793 size_t printed = 0;
794 int mode = arg->val;
795
796 if (mode == F_OK) /* 0 */
797 return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
798 #define P_MODE(n) \
799 if (mode & n##_OK) { \
800 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
801 mode &= ~n##_OK; \
802 }
803
804 P_MODE(R);
805 P_MODE(W);
806 P_MODE(X);
807 #undef P_MODE
808
809 if (mode)
810 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
811
812 return printed;
813 }
814
815 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
816
817 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
818 struct syscall_arg *arg);
819
820 #define SCA_FILENAME syscall_arg__scnprintf_filename
821
822 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
823 struct syscall_arg *arg)
824 {
825 bool show_prefix = arg->show_string_prefix;
826 const char *prefix = "O_";
827 int printed = 0, flags = arg->val;
828
829 #define P_FLAG(n) \
830 if (flags & O_##n) { \
831 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
832 flags &= ~O_##n; \
833 }
834
835 P_FLAG(CLOEXEC);
836 P_FLAG(NONBLOCK);
837 #undef P_FLAG
838
839 if (flags)
840 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
841
842 return printed;
843 }
844
845 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
846
847 #ifndef GRND_NONBLOCK
848 #define GRND_NONBLOCK 0x0001
849 #endif
850 #ifndef GRND_RANDOM
851 #define GRND_RANDOM 0x0002
852 #endif
853
854 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
855 struct syscall_arg *arg)
856 {
857 bool show_prefix = arg->show_string_prefix;
858 const char *prefix = "GRND_";
859 int printed = 0, flags = arg->val;
860
861 #define P_FLAG(n) \
862 if (flags & GRND_##n) { \
863 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
864 flags &= ~GRND_##n; \
865 }
866
867 P_FLAG(RANDOM);
868 P_FLAG(NONBLOCK);
869 #undef P_FLAG
870
871 if (flags)
872 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
873
874 return printed;
875 }
876
877 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
878
879 #define STRARRAY(name, array) \
880 { .scnprintf = SCA_STRARRAY, \
881 .strtoul = STUL_STRARRAY, \
882 .parm = &strarray__##array, }
883
884 #define STRARRAY_FLAGS(name, array) \
885 { .scnprintf = SCA_STRARRAY_FLAGS, \
886 .strtoul = STUL_STRARRAY_FLAGS, \
887 .parm = &strarray__##array, }
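
/*
 * These initializers are used in the syscall_fmts table below, e.g.:
 *
 *	{ .name = "bpf", .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
 *
 * wires the first bpf() argument to SCA_STRARRAY/STUL_STRARRAY with
 * strarray__bpf_cmd, so cmd 0 is rendered as "MAP_CREATE". The 'name'
 * parameter is documentation only and doesn't appear in the expansion.
 */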
888
889 #include "trace/beauty/arch_errno_names.c"
890 #include "trace/beauty/eventfd.c"
891 #include "trace/beauty/futex_op.c"
892 #include "trace/beauty/futex_val3.c"
893 #include "trace/beauty/mmap.c"
894 #include "trace/beauty/mode_t.c"
895 #include "trace/beauty/msg_flags.c"
896 #include "trace/beauty/open_flags.c"
897 #include "trace/beauty/perf_event_open.c"
898 #include "trace/beauty/pid.c"
899 #include "trace/beauty/sched_policy.c"
900 #include "trace/beauty/seccomp.c"
901 #include "trace/beauty/signum.c"
902 #include "trace/beauty/socket_type.c"
903 #include "trace/beauty/waitid_options.c"
904
905 static struct syscall_fmt syscall_fmts[] = {
906 { .name = "access",
907 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
908 { .name = "arch_prctl",
909 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
910 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
911 { .name = "bind",
912 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
913 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
914 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
915 { .name = "bpf",
916 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
917 { .name = "brk", .hexret = true,
918 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
919 { .name = "clock_gettime",
920 .arg = { [0] = STRARRAY(clk_id, clockid), }, },
921 { .name = "clone", .errpid = true, .nr_args = 5,
922 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
923 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
924 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
925 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
926 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
927 { .name = "close",
928 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
929 { .name = "connect",
930 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
931 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
932 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
933 { .name = "epoll_ctl",
934 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
935 { .name = "eventfd2",
936 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
937 { .name = "fchmodat",
938 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
939 { .name = "fchownat",
940 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
941 { .name = "fcntl",
942 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
943 .strtoul = STUL_STRARRAYS,
944 .parm = &strarrays__fcntl_cmds_arrays,
945 .show_zero = true, },
946 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
947 { .name = "flock",
948 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
949 { .name = "fsconfig",
950 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
951 { .name = "fsmount",
952 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
953 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
954 { .name = "fspick",
955 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
956 [1] = { .scnprintf = SCA_FILENAME, /* path */ },
957 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
958 { .name = "fstat", .alias = "newfstat", },
959 { .name = "fstatat", .alias = "newfstatat", },
960 { .name = "futex",
961 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
962 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
963 { .name = "futimesat",
964 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
965 { .name = "getitimer",
966 .arg = { [0] = STRARRAY(which, itimers), }, },
967 { .name = "getpid", .errpid = true, },
968 { .name = "getpgid", .errpid = true, },
969 { .name = "getppid", .errpid = true, },
970 { .name = "getrandom",
971 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
972 { .name = "getrlimit",
973 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
974 { .name = "gettid", .errpid = true, },
975 { .name = "ioctl",
976 .arg = {
977 #if defined(__i386__) || defined(__x86_64__)
978 /*
979 * FIXME: Make this available to all arches.
980 */
981 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
982 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
983 #else
984 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
985 #endif
986 { .name = "kcmp", .nr_args = 5,
987 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
988 [1] = { .name = "pid2", .scnprintf = SCA_PID, },
989 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
990 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
991 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
992 { .name = "keyctl",
993 .arg = { [0] = STRARRAY(option, keyctl_options), }, },
994 { .name = "kill",
995 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
996 { .name = "linkat",
997 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
998 { .name = "lseek",
999 .arg = { [2] = STRARRAY(whence, whences), }, },
1000 { .name = "lstat", .alias = "newlstat", },
1001 { .name = "madvise",
1002 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1003 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
1004 { .name = "mkdirat",
1005 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1006 { .name = "mknodat",
1007 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1008 { .name = "mmap", .hexret = true,
1009 /* The standard mmap maps to old_mmap on s390x */
1010 #if defined(__s390x__)
1011 .alias = "old_mmap",
1012 #endif
1013 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1014 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
1015 .strtoul = STUL_STRARRAY_FLAGS,
1016 .parm = &strarray__mmap_flags, },
1017 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
1018 { .name = "mount",
1019 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
1020 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
1021 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
1022 { .name = "move_mount",
1023 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
1024 [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
1025 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
1026 [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
1027 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
1028 { .name = "mprotect",
1029 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1030 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
1031 { .name = "mq_unlink",
1032 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
1033 { .name = "mremap", .hexret = true,
1034 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
1035 { .name = "name_to_handle_at",
1036 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1037 { .name = "newfstatat",
1038 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1039 { .name = "open",
1040 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1041 { .name = "open_by_handle_at",
1042 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1043 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1044 { .name = "openat",
1045 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1046 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1047 { .name = "perf_event_open",
1048 .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
1049 [3] = { .scnprintf = SCA_FD, /* group_fd */ },
1050 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
1051 { .name = "pipe2",
1052 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
1053 { .name = "pkey_alloc",
1054 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
1055 { .name = "pkey_free",
1056 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
1057 { .name = "pkey_mprotect",
1058 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1059 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1060 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
1061 { .name = "poll", .timeout = true, },
1062 { .name = "ppoll", .timeout = true, },
1063 { .name = "prctl",
1064 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
1065 .strtoul = STUL_STRARRAY,
1066 .parm = &strarray__prctl_options, },
1067 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
1068 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
1069 { .name = "pread", .alias = "pread64", },
1070 { .name = "preadv", .alias = "pread", },
1071 { .name = "prlimit64",
1072 .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
1073 { .name = "pwrite", .alias = "pwrite64", },
1074 { .name = "readlinkat",
1075 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1076 { .name = "recvfrom",
1077 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1078 { .name = "recvmmsg",
1079 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1080 { .name = "recvmsg",
1081 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1082 { .name = "renameat",
1083 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1084 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
1085 { .name = "renameat2",
1086 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1087 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
1088 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
1089 { .name = "rt_sigaction",
1090 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1091 { .name = "rt_sigprocmask",
1092 .arg = { [0] = STRARRAY(how, sighow), }, },
1093 { .name = "rt_sigqueueinfo",
1094 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1095 { .name = "rt_tgsigqueueinfo",
1096 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1097 { .name = "sched_setscheduler",
1098 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
1099 { .name = "seccomp",
1100 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
1101 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
1102 { .name = "select", .timeout = true, },
1103 { .name = "sendfile", .alias = "sendfile64", },
1104 { .name = "sendmmsg",
1105 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1106 { .name = "sendmsg",
1107 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1108 { .name = "sendto",
1109 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
1110 [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
1111 { .name = "set_tid_address", .errpid = true, },
1112 { .name = "setitimer",
1113 .arg = { [0] = STRARRAY(which, itimers), }, },
1114 { .name = "setrlimit",
1115 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
1116 { .name = "socket",
1117 .arg = { [0] = STRARRAY(family, socket_families),
1118 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1119 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1120 { .name = "socketpair",
1121 .arg = { [0] = STRARRAY(family, socket_families),
1122 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1123 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1124 { .name = "stat", .alias = "newstat", },
1125 { .name = "statx",
1126 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
1127 [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
1128 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
1129 { .name = "swapoff",
1130 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1131 { .name = "swapon",
1132 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1133 { .name = "symlinkat",
1134 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1135 { .name = "sync_file_range",
1136 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
1137 { .name = "tgkill",
1138 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1139 { .name = "tkill",
1140 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1141 { .name = "umount2", .alias = "umount",
1142 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
1143 { .name = "uname", .alias = "newuname", },
1144 { .name = "unlinkat",
1145 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1146 { .name = "utimensat",
1147 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
1148 { .name = "wait4", .errpid = true,
1149 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1150 { .name = "waitid", .errpid = true,
1151 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1152 };
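
/*
 * Note: syscall_fmt__find() below looks this table up with bsearch(), so
 * the entries must be kept sorted by ->name.
 */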
1153
1154 static int syscall_fmt__cmp(const void *name, const void *fmtp)
1155 {
1156 const struct syscall_fmt *fmt = fmtp;
1157 return strcmp(name, fmt->name);
1158 }
1159
1160 static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
1161 {
1162 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1163 }
1164
1165 static struct syscall_fmt *syscall_fmt__find(const char *name)
1166 {
1167 const int nmemb = ARRAY_SIZE(syscall_fmts);
1168 return __syscall_fmt__find(syscall_fmts, nmemb, name);
1169 }
1170
1171 static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
1172 {
1173 int i;
1174
1175 for (i = 0; i < nmemb; ++i) {
1176 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1177 return &fmts[i];
1178 }
1179
1180 return NULL;
1181 }
1182
1183 static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1184 {
1185 const int nmemb = ARRAY_SIZE(syscall_fmts);
1186 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1187 }
1188
1189 /*
1190 * is_exit: is this "exit" or "exit_group"?
1191 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
1192 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
1193 * nonexistent: Just a hole in the syscall table, syscall id not allocated
1194 */
1195 struct syscall {
1196 struct tep_event *tp_format;
1197 int nr_args;
1198 int args_size;
1199 struct {
1200 struct bpf_program *sys_enter,
1201 *sys_exit;
1202 } bpf_prog;
1203 bool is_exit;
1204 bool is_open;
1205 bool nonexistent;
1206 struct tep_format_field *args;
1207 const char *name;
1208 struct syscall_fmt *fmt;
1209 struct syscall_arg_fmt *arg_fmt;
1210 };
1211
1212 /*
1213 * Must match what is in the BPF program:
1214 *
1215 * tools/perf/examples/bpf/augmented_raw_syscalls.c
1216 */
1217 struct bpf_map_syscall_entry {
1218 bool enabled;
1219 u16 string_args_len[6];
1220 };
1221
1222 /*
1223 * We need to have this 'calculated' boolean because in some cases we really
1224 * don't know what the duration of a syscall is, for instance, when we start
1225 * a session and some threads are waiting for a syscall to finish, say 'poll',
1226 * in which case all we can do is to print "( ? )" for the duration and for
1227 * the start timestamp.
1228 */
1229 static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1230 {
1231 double duration = (double)t / NSEC_PER_MSEC;
1232 size_t printed = fprintf(fp, "(");
1233
1234 if (!calculated)
1235 printed += fprintf(fp, " ");
1236 else if (duration >= 1.0)
1237 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1238 else if (duration >= 0.01)
1239 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1240 else
1241 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1242 return printed + fprintf(fp, "): ");
1243 }
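
/*
 * Example output: a 2.5ms syscall prints as "( 2.500 ms): " in red, a 50us
 * one as "( 0.050 ms): " in yellow, and when the duration isn't known
 * (calculated == false) only a blank placeholder is printed between the
 * parentheses.
 */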
1244
1245 /**
1246 * filename.ptr: The filename char pointer that will be vfs_getname'd
1247 * filename.entry_str_pos: Where to insert the string translated from
1248 * filename.ptr by the vfs_getname tracepoint/kprobe.
1249 * ret_scnprintf: syscall args may set this to a different syscall return
1250 * formatter, for instance, fcntl may return fds, file flags, etc.
1251 */
1252 struct thread_trace {
1253 u64 entry_time;
1254 bool entry_pending;
1255 unsigned long nr_events;
1256 unsigned long pfmaj, pfmin;
1257 char *entry_str;
1258 double runtime_ms;
1259 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1260 struct {
1261 unsigned long ptr;
1262 short int entry_str_pos;
1263 bool pending_open;
1264 unsigned int namelen;
1265 char *name;
1266 } filename;
1267 struct {
1268 int max;
1269 struct file *table;
1270 } files;
1271
1272 struct intlist *syscall_stats;
1273 };
1274
1275 static struct thread_trace *thread_trace__new(void)
1276 {
1277 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
1278
1279 if (ttrace) {
1280 ttrace->files.max = -1;
1281 ttrace->syscall_stats = intlist__new(NULL);
1282 }
1283
1284 return ttrace;
1285 }
1286
1287 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1288 {
1289 struct thread_trace *ttrace;
1290
1291 if (thread == NULL)
1292 goto fail;
1293
1294 if (thread__priv(thread) == NULL)
1295 thread__set_priv(thread, thread_trace__new());
1296
1297 if (thread__priv(thread) == NULL)
1298 goto fail;
1299
1300 ttrace = thread__priv(thread);
1301 ++ttrace->nr_events;
1302
1303 return ttrace;
1304 fail:
1305 color_fprintf(fp, PERF_COLOR_RED,
1306 "WARNING: not enough memory, dropping samples!\n");
1307 return NULL;
1308 }
1309
1310
1311 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1312 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1313 {
1314 struct thread_trace *ttrace = thread__priv(arg->thread);
1315
1316 ttrace->ret_scnprintf = ret_scnprintf;
1317 }
1318
1319 #define TRACE_PFMAJ (1 << 0)
1320 #define TRACE_PFMIN (1 << 1)
1321
1322 static const size_t trace__entry_str_size = 2048;
1323
1324 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1325 {
1326 if (fd < 0)
1327 return NULL;
1328
1329 if (fd > ttrace->files.max) {
1330 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1331
1332 if (nfiles == NULL)
1333 return NULL;
1334
1335 if (ttrace->files.max != -1) {
1336 memset(nfiles + ttrace->files.max + 1, 0,
1337 (fd - ttrace->files.max) * sizeof(struct file));
1338 } else {
1339 memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1340 }
1341
1342 ttrace->files.table = nfiles;
1343 ttrace->files.max = fd;
1344 }
1345
1346 return ttrace->files.table + fd;
1347 }
1348
1349 struct file *thread__files_entry(struct thread *thread, int fd)
1350 {
1351 return thread_trace__files_entry(thread__priv(thread), fd);
1352 }
1353
1354 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1355 {
1356 struct thread_trace *ttrace = thread__priv(thread);
1357 struct file *file = thread_trace__files_entry(ttrace, fd);
1358
1359 if (file != NULL) {
1360 struct stat st;
1361 if (stat(pathname, &st) == 0)
1362 file->dev_maj = major(st.st_rdev);
1363 file->pathname = strdup(pathname);
1364 if (file->pathname)
1365 return 0;
1366 }
1367
1368 return -1;
1369 }
1370
1371 static int thread__read_fd_path(struct thread *thread, int fd)
1372 {
1373 char linkname[PATH_MAX], pathname[PATH_MAX];
1374 struct stat st;
1375 int ret;
1376
1377 if (thread->pid_ == thread->tid) {
1378 scnprintf(linkname, sizeof(linkname),
1379 "/proc/%d/fd/%d", thread->pid_, fd);
1380 } else {
1381 scnprintf(linkname, sizeof(linkname),
1382 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1383 }
1384
1385 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1386 return -1;
1387
1388 ret = readlink(linkname, pathname, sizeof(pathname));
1389
1390 if (ret < 0 || ret > st.st_size)
1391 return -1;
1392
1393 pathname[ret] = '\0';
1394 return trace__set_fd_pathname(thread, fd, pathname);
1395 }
1396
1397 static const char *thread__fd_path(struct thread *thread, int fd,
1398 struct trace *trace)
1399 {
1400 struct thread_trace *ttrace = thread__priv(thread);
1401
1402 if (ttrace == NULL || trace->fd_path_disabled)
1403 return NULL;
1404
1405 if (fd < 0)
1406 return NULL;
1407
1408 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1409 if (!trace->live)
1410 return NULL;
1411 ++trace->stats.proc_getname;
1412 if (thread__read_fd_path(thread, fd))
1413 return NULL;
1414 }
1415
1416 return ttrace->files.table[fd].pathname;
1417 }
1418
1419 size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1420 {
1421 int fd = arg->val;
1422 size_t printed = scnprintf(bf, size, "%d", fd);
1423 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1424
1425 if (path)
1426 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1427
1428 return printed;
1429 }
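
/*
 * Example: on a live session where /proc/<pid>/fd/<fd> can be read (see
 * thread__read_fd_path() above), fd 3 open on /var/log/messages is printed
 * as "3</var/log/messages>"; otherwise just "3".
 */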
1430
1431 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1432 {
1433 size_t printed = scnprintf(bf, size, "%d", fd);
1434 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1435
1436 if (thread) {
1437 const char *path = thread__fd_path(thread, fd, trace);
1438
1439 if (path)
1440 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1441
1442 thread__put(thread);
1443 }
1444
1445 return printed;
1446 }
1447
1448 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1449 struct syscall_arg *arg)
1450 {
1451 int fd = arg->val;
1452 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1453 struct thread_trace *ttrace = thread__priv(arg->thread);
1454
1455 if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1456 zfree(&ttrace->files.table[fd].pathname);
1457
1458 return printed;
1459 }
1460
1461 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1462 unsigned long ptr)
1463 {
1464 struct thread_trace *ttrace = thread__priv(thread);
1465
1466 ttrace->filename.ptr = ptr;
1467 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1468 }
1469
1470 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1471 {
1472 struct augmented_arg *augmented_arg = arg->augmented.args;
1473 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1474 /*
1475 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1476 * we would have two strings, each prefixed by its size.
1477 */
1478 int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1479
1480 arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1481 arg->augmented.size -= consumed;
1482
1483 return printed;
1484 }
1485
1486 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1487 struct syscall_arg *arg)
1488 {
1489 unsigned long ptr = arg->val;
1490
1491 if (arg->augmented.args)
1492 return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1493
1494 if (!arg->trace->vfs_getname)
1495 return scnprintf(bf, size, "%#x", ptr);
1496
1497 thread__set_filename_pos(arg->thread, bf, ptr);
1498 return 0;
1499 }
1500
1501 static bool trace__filter_duration(struct trace *trace, double t)
1502 {
1503 return t < (trace->duration_filter * NSEC_PER_MSEC);
1504 }
1505
1506 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1507 {
1508 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1509
1510 return fprintf(fp, "%10.3f ", ts);
1511 }
1512
1513 /*
1514 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1515 * using ttrace->entry_time for a thread that receives a sys_exit without
1516 * first having received a sys_enter ("poll" issued before tracing session
1517 * starts, or a sys_enter lost due to a ring buffer overflow).
1518 */
1519 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1520 {
1521 if (tstamp > 0)
1522 return __trace__fprintf_tstamp(trace, tstamp, fp);
1523
1524 return fprintf(fp, " ? ");
1525 }
1526
1527 static bool done = false;
1528 static bool interrupted = false;
1529
1530 static void sig_handler(int sig)
1531 {
1532 done = true;
1533 interrupted = sig == SIGINT;
1534 }
1535
1536 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1537 {
1538 size_t printed = 0;
1539
1540 if (trace->multiple_threads) {
1541 if (trace->show_comm)
1542 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1543 printed += fprintf(fp, "%d ", thread->tid);
1544 }
1545
1546 return printed;
1547 }
1548
1549 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1550 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1551 {
1552 size_t printed = 0;
1553
1554 if (trace->show_tstamp)
1555 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1556 if (trace->show_duration)
1557 printed += fprintf_duration(duration, duration_calculated, fp);
1558 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1559 }
1560
1561 static int trace__process_event(struct trace *trace, struct machine *machine,
1562 union perf_event *event, struct perf_sample *sample)
1563 {
1564 int ret = 0;
1565
1566 switch (event->header.type) {
1567 case PERF_RECORD_LOST:
1568 color_fprintf(trace->output, PERF_COLOR_RED,
1569 "LOST %" PRIu64 " events!\n", event->lost.lost);
1570 ret = machine__process_lost_event(machine, event, sample);
1571 break;
1572 default:
1573 ret = machine__process_event(machine, event, sample);
1574 break;
1575 }
1576
1577 return ret;
1578 }
1579
1580 static int trace__tool_process(struct perf_tool *tool,
1581 union perf_event *event,
1582 struct perf_sample *sample,
1583 struct machine *machine)
1584 {
1585 struct trace *trace = container_of(tool, struct trace, tool);
1586 return trace__process_event(trace, machine, event, sample);
1587 }
1588
1589 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1590 {
1591 struct machine *machine = vmachine;
1592
1593 if (machine->kptr_restrict_warned)
1594 return NULL;
1595
1596 if (symbol_conf.kptr_restrict) {
1597 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1598 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1599 "Kernel samples will not be resolved.\n");
1600 machine->kptr_restrict_warned = true;
1601 return NULL;
1602 }
1603
1604 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1605 }
1606
1607 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1608 {
1609 int err = symbol__init(NULL);
1610
1611 if (err)
1612 return err;
1613
1614 trace->host = machine__new_host();
1615 if (trace->host == NULL)
1616 return -ENOMEM;
1617
1618 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1619 if (err < 0)
1620 goto out;
1621
1622 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1623 evlist->core.threads, trace__tool_process, false,
1624 1);
1625 out:
1626 if (err)
1627 symbol__exit();
1628
1629 return err;
1630 }
1631
1632 static void trace__symbols__exit(struct trace *trace)
1633 {
1634 machine__exit(trace->host);
1635 trace->host = NULL;
1636
1637 symbol__exit();
1638 }
1639
1640 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1641 {
1642 int idx;
1643
1644 if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
1645 nr_args = sc->fmt->nr_args;
1646
1647 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1648 if (sc->arg_fmt == NULL)
1649 return -1;
1650
1651 for (idx = 0; idx < nr_args; ++idx) {
1652 if (sc->fmt)
1653 sc->arg_fmt[idx] = sc->fmt->arg[idx];
1654 }
1655
1656 sc->nr_args = nr_args;
1657 return 0;
1658 }
1659
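/*
 * Arg beautifiers looked up by field name alone, used as a last resort in
 * syscall_arg_fmt__init_array(). Must be kept sorted by ->name, since
 * __syscall_arg_fmt__find_by_name() does a bsearch() on it.
 */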
1660 static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
1661 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
1662 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
1663 };
1664
1665 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
1666 {
1667 const struct syscall_arg_fmt *fmt = fmtp;
1668 return strcmp(name, fmt->name);
1669 }
1670
1671 static struct syscall_arg_fmt *
1672 __syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
1673 {
1674 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
1675 }
1676
1677 static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
1678 {
1679 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
1680 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
1681 }
1682
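/*
 * Pick a default scnprintf beautifier for each tracepoint field based on its
 * type and name: "const char *" fields ending in "name" or containing "path"
 * get SCA_FILENAME, pointer or "addr" fields SCA_PTR, pid_t SCA_PID, umode_t
 * SCA_MODE_T, char arrays SCA_CHAR_ARRAY, and int/long fields ending in "fd"
 * SCA_FD; anything else falls back to the by-name table above.
 */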
1683 static struct tep_format_field *
1684 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
1685 {
1686 struct tep_format_field *last_field = NULL;
1687 int len;
1688
1689 for (; field; field = field->next, ++arg) {
1690 last_field = field;
1691
1692 if (arg->scnprintf)
1693 continue;
1694
1695 len = strlen(field->name);
1696
1697 if (strcmp(field->type, "const char *") == 0 &&
1698 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
1699 strstr(field->name, "path") != NULL))
1700 arg->scnprintf = SCA_FILENAME;
1701 else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
1702 arg->scnprintf = SCA_PTR;
1703 else if (strcmp(field->type, "pid_t") == 0)
1704 arg->scnprintf = SCA_PID;
1705 else if (strcmp(field->type, "umode_t") == 0)
1706 arg->scnprintf = SCA_MODE_T;
1707 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
1708 arg->scnprintf = SCA_CHAR_ARRAY;
1709 arg->nr_entries = field->arraylen;
1710 } else if ((strcmp(field->type, "int") == 0 ||
1711 strcmp(field->type, "unsigned int") == 0 ||
1712 strcmp(field->type, "long") == 0) &&
1713 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
1714 /*
1715 * /sys/kernel/tracing/events/syscalls/sys_enter*
1716 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1717 * 65 int
1718 * 23 unsigned int
1719 * 7 unsigned long
1720 */
1721 arg->scnprintf = SCA_FD;
1722 } else {
1723 struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
1724
1725 if (fmt) {
1726 arg->scnprintf = fmt->scnprintf;
1727 arg->strtoul = fmt->strtoul;
1728 }
1729 }
1730 }
1731
1732 return last_field;
1733 }
1734
1735 static int syscall__set_arg_fmts(struct syscall *sc)
1736 {
1737 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
1738
1739 if (last_field)
1740 sc->args_size = last_field->offset + last_field->size;
1741
1742 return 0;
1743 }
1744
1745 static int trace__read_syscall_info(struct trace *trace, int id)
1746 {
1747 char tp_name[128];
1748 struct syscall *sc;
1749 const char *name = syscalltbl__name(trace->sctbl, id);
1750
1751 #ifdef HAVE_SYSCALL_TABLE_SUPPORT
1752 if (trace->syscalls.table == NULL) {
1753 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1754 if (trace->syscalls.table == NULL)
1755 return -ENOMEM;
1756 }
1757 #else
1758 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1759 // When using libaudit we don't know beforehand what the max syscall id is
1760 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1761
1762 if (table == NULL)
1763 return -ENOMEM;
1764
1765 // Zero the whole table if it is brand new, otherwise just the newly added entries
1766 if (trace->syscalls.table == NULL)
1767 memset(table, 0, (id + 1) * sizeof(*sc));
1768 else
1769 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1770
1771 trace->syscalls.table = table;
1772 trace->sctbl->syscalls.max_id = id;
1773 }
1774 #endif
1775 sc = trace->syscalls.table + id;
1776 if (sc->nonexistent)
1777 return 0;
1778
1779 if (name == NULL) {
1780 sc->nonexistent = true;
1781 return 0;
1782 }
1783
1784 sc->name = name;
1785 sc->fmt = syscall_fmt__find(sc->name);
1786
1787 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1788 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1789
1790 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1791 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1792 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1793 }
1794
1795 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
1796 return -ENOMEM;
1797
1798 if (IS_ERR(sc->tp_format))
1799 return PTR_ERR(sc->tp_format);
1800
1801 sc->args = sc->tp_format->format.fields;
1802 /*
1803 * The first field, '__syscall_nr' or 'nr', holds the syscall number and
1804 * is redundant here, so drop it. Note that it may not be present on
1805 * older kernels.
1806 */
1807 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1808 sc->args = sc->args->next;
1809 --sc->nr_args;
1810 }
1811
1812 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1813 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1814
1815 return syscall__set_arg_fmts(sc);
1816 }
1817
1818 static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
1819 {
1820 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
1821
1822 if (fmt != NULL) {
1823 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
1824 return 0;
1825 }
1826
1827 return -ENOMEM;
1828 }
1829
1830 static int intcmp(const void *a, const void *b)
1831 {
1832 const int *one = a, *another = b;
1833
1834 return *one - *another;
1835 }
1836
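/*
 * Turn the syscall name list given on the command line (trace->ev_qualifier)
 * into a sorted array of syscall ids. Names may be globs, so a hypothetical
 * qualifier like "open*" would expand via syscalltbl__strglobmatch_*() to
 * openat, openat2, etc. Unknown names are skipped with a debug message
 * instead of failing the whole run.
 */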
1837 static int trace__validate_ev_qualifier(struct trace *trace)
1838 {
1839 int err = 0;
1840 bool printed_invalid_prefix = false;
1841 struct str_node *pos;
1842 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1843
1844 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1845 sizeof(trace->ev_qualifier_ids.entries[0]));
1846
1847 if (trace->ev_qualifier_ids.entries == NULL) {
1848 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1849 trace->output);
1850 err = -EINVAL;
1851 goto out;
1852 }
1853
1854 strlist__for_each_entry(pos, trace->ev_qualifier) {
1855 const char *sc = pos->s;
1856 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1857
1858 if (id < 0) {
1859 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1860 if (id >= 0)
1861 goto matches;
1862
1863 if (!printed_invalid_prefix) {
1864 pr_debug("Skipping unknown syscalls: ");
1865 printed_invalid_prefix = true;
1866 } else {
1867 pr_debug(", ");
1868 }
1869
1870 pr_debug("%s", sc);
1871 continue;
1872 }
1873 matches:
1874 trace->ev_qualifier_ids.entries[nr_used++] = id;
1875 if (match_next == -1)
1876 continue;
1877
1878 while (1) {
1879 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1880 if (id < 0)
1881 break;
1882 if (nr_allocated == nr_used) {
1883 void *entries;
1884
1885 nr_allocated += 8;
1886 entries = realloc(trace->ev_qualifier_ids.entries,
1887 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1888 if (entries == NULL) {
1889 err = -ENOMEM;
1890 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1891 goto out_free;
1892 }
1893 trace->ev_qualifier_ids.entries = entries;
1894 }
1895 trace->ev_qualifier_ids.entries[nr_used++] = id;
1896 }
1897 }
1898
1899 trace->ev_qualifier_ids.nr = nr_used;
1900 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1901 out:
1902 if (printed_invalid_prefix)
1903 pr_debug("\n");
1904 return err;
1905 out_free:
1906 zfree(&trace->ev_qualifier_ids.entries);
1907 trace->ev_qualifier_ids.nr = 0;
1908 goto out;
1909 }
1910
1911 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1912 {
1913 bool in_ev_qualifier;
1914
1915 if (trace->ev_qualifier_ids.nr == 0)
1916 return true;
1917
1918 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1919 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1920
1921 if (in_ev_qualifier)
1922 return !trace->not_ev_qualifier;
1923
1924 return trace->not_ev_qualifier;
1925 }
1926
1927 /*
1928 * args is to be interpreted as a series of longs but we need to handle
1929 * 8-byte unaligned accesses. args points to raw_data within the event
1930 * and raw_data is guaranteed not to be 8-byte aligned because it is
1931 * preceded by raw_size which is a u32. So we need to copy args to a temp
1932 * variable to read it. Most notably this avoids extended load instructions
1933 * on unaligned addresses.
1934 */
1935 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1936 {
1937 unsigned long val;
1938 unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1939
1940 memcpy(&val, p, sizeof(val));
1941 return val;
1942 }
1943
1944 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1945 struct syscall_arg *arg)
1946 {
1947 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1948 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1949
1950 return scnprintf(bf, size, "arg%d: ", arg->idx);
1951 }
1952
1953 /*
1954 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1955 * as the mount 'flags' argument, where some magic flags need to be ignored;
1956 * see the comment in tools/perf/trace/beauty/mount_flags.c
1957 */
1958 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
1959 {
1960 if (fmt && fmt->mask_val)
1961 return fmt->mask_val(arg, val);
1962
1963 return val;
1964 }
1965
1966 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
1967 struct syscall_arg *arg, unsigned long val)
1968 {
1969 if (fmt && fmt->scnprintf) {
1970 arg->val = val;
1971 if (fmt->parm)
1972 arg->parm = fmt->parm;
1973 return fmt->scnprintf(bf, size, arg);
1974 }
1975 return scnprintf(bf, size, "%ld", val);
1976 }
1977
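/*
 * Format the syscall argument list into bf as a comma separated string,
 * honoring per-arg beautifiers, zero suppression (trace->show_zeros) and
 * trace->show_arg_names, so a typical result (illustrative only) would look
 * roughly like:
 *
 *   dfd: CWD, filename: "/etc/passwd", flags: RDONLY
 */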
1978 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1979 unsigned char *args, void *augmented_args, int augmented_args_size,
1980 struct trace *trace, struct thread *thread)
1981 {
1982 size_t printed = 0;
1983 unsigned long val;
1984 u8 bit = 1;
1985 struct syscall_arg arg = {
1986 .args = args,
1987 .augmented = {
1988 .size = augmented_args_size,
1989 .args = augmented_args,
1990 },
1991 .idx = 0,
1992 .mask = 0,
1993 .trace = trace,
1994 .thread = thread,
1995 .show_string_prefix = trace->show_string_prefix,
1996 };
1997 struct thread_trace *ttrace = thread__priv(thread);
1998
1999 /*
2000 * Things like fcntl will set this in its 'cmd' formatter to pick the
2001 * right formatter for the return value (an fd? file flags?), which is
2002 * not needed for syscalls that always return a given type, say an fd.
2003 */
2004 ttrace->ret_scnprintf = NULL;
2005
2006 if (sc->args != NULL) {
2007 struct tep_format_field *field;
2008
2009 for (field = sc->args; field;
2010 field = field->next, ++arg.idx, bit <<= 1) {
2011 if (arg.mask & bit)
2012 continue;
2013
2014 arg.fmt = &sc->arg_fmt[arg.idx];
2015 val = syscall_arg__val(&arg, arg.idx);
2016 /*
2017 * Some syscall args need some mask, most don't and
2018 * return val untouched.
2019 */
2020 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
2021
2022 /*
2023 * Suppress this argument if its value is zero and
2024 * we don't have a string associated with it in a
2025 * strarray.
2026 */
2027 if (val == 0 &&
2028 !trace->show_zeros &&
2029 !(sc->arg_fmt &&
2030 (sc->arg_fmt[arg.idx].show_zero ||
2031 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
2032 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
2033 sc->arg_fmt[arg.idx].parm))
2034 continue;
2035
2036 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2037
2038 if (trace->show_arg_names)
2039 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2040
2041 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
2042 bf + printed, size - printed, &arg, val);
2043 }
2044 } else if (IS_ERR(sc->tp_format)) {
2045 /*
2046 * If we managed to read the tracepoint /format file, then we
2047 * may end up not having any args, like with gettid(), so only
2048 * print the raw args when we didn't manage to read it.
2049 */
2050 while (arg.idx < sc->nr_args) {
2051 if (arg.mask & bit)
2052 goto next_arg;
2053 val = syscall_arg__val(&arg, arg.idx);
2054 if (printed)
2055 printed += scnprintf(bf + printed, size - printed, ", ");
2056 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
2057 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
2058 next_arg:
2059 ++arg.idx;
2060 bit <<= 1;
2061 }
2062 }
2063
2064 return printed;
2065 }
2066
2067 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2068 union perf_event *event,
2069 struct perf_sample *sample);
2070
2071 static struct syscall *trace__syscall_info(struct trace *trace,
2072 struct evsel *evsel, int id)
2073 {
2074 int err = 0;
2075
2076 if (id < 0) {
2077
2078 /*
2079 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
2080 * before that, leaving at a higher verbosity level till that is
2081 * explained. Reproduced with plain ftrace with:
2082 *
2083 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
2084 * grep "NR -1 " /t/trace_pipe
2085 *
2086 * After generating some load on the machine.
2087 */
2088 if (verbose > 1) {
2089 static u64 n;
2090 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2091 id, evsel__name(evsel), ++n);
2092 }
2093 return NULL;
2094 }
2095
2096 err = -EINVAL;
2097
2098 #ifdef HAVE_SYSCALL_TABLE_SUPPORT
2099 if (id > trace->sctbl->syscalls.max_id) {
2100 #else
2101 if (id >= trace->sctbl->syscalls.max_id) {
2102 /*
2103 * With libaudit we don't know beforehand what is the max_id,
2104 * so we let trace__read_syscall_info() figure that out as we
2105 * go on reading syscalls.
2106 */
2107 err = trace__read_syscall_info(trace, id);
2108 if (err)
2109 #endif
2110 goto out_cant_read;
2111 }
2112
2113 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2114 (err = trace__read_syscall_info(trace, id)) != 0)
2115 goto out_cant_read;
2116
2117 if (trace->syscalls.table[id].name == NULL) {
2118 if (trace->syscalls.table[id].nonexistent)
2119 return NULL;
2120 goto out_cant_read;
2121 }
2122
2123 return &trace->syscalls.table[id];
2124
2125 out_cant_read:
2126 if (verbose > 0) {
2127 char sbuf[STRERR_BUFSIZE];
2128 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2129 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2130 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2131 fputs(" information\n", trace->output);
2132 }
2133 return NULL;
2134 }
2135
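/*
 * Per-syscall summary data kept in each thread's intlist: call duration stats
 * plus, when errno_summary is enabled, a lazily grown array where
 * errnos[e - 1] counts how many times the syscall failed with errno e.
 */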
2136 struct syscall_stats {
2137 struct stats stats;
2138 u64 nr_failures;
2139 int max_errno;
2140 u32 *errnos;
2141 };
2142
2143 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
2144 int id, struct perf_sample *sample, long err, bool errno_summary)
2145 {
2146 struct int_node *inode;
2147 struct syscall_stats *stats;
2148 u64 duration = 0;
2149
2150 inode = intlist__findnew(ttrace->syscall_stats, id);
2151 if (inode == NULL)
2152 return;
2153
2154 stats = inode->priv;
2155 if (stats == NULL) {
2156 stats = malloc(sizeof(*stats));
2157 if (stats == NULL)
2158 return;
2159
2160 stats->nr_failures = 0;
2161 stats->max_errno = 0;
2162 stats->errnos = NULL;
2163 init_stats(&stats->stats);
2164 inode->priv = stats;
2165 }
2166
2167 if (ttrace->entry_time && sample->time > ttrace->entry_time)
2168 duration = sample->time - ttrace->entry_time;
2169
2170 update_stats(&stats->stats, duration);
2171
2172 if (err < 0) {
2173 ++stats->nr_failures;
2174
2175 if (!errno_summary)
2176 return;
2177
2178 err = -err;
2179 if (err > stats->max_errno) {
2180 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
2181
2182 if (new_errnos) {
2183 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
2184 } else {
2185 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
2186 thread__comm_str(thread), thread->pid_, thread->tid);
2187 return;
2188 }
2189
2190 stats->errnos = new_errnos;
2191 stats->max_errno = err;
2192 }
2193
2194 ++stats->errnos[err - 1];
2195 }
2196 }
2197
2198 static int trace__printf_interrupted_entry(struct trace *trace)
2199 {
2200 struct thread_trace *ttrace;
2201 size_t printed;
2202 int len;
2203
2204 if (trace->failure_only || trace->current == NULL)
2205 return 0;
2206
2207 ttrace = thread__priv(trace->current);
2208
2209 if (!ttrace->entry_pending)
2210 return 0;
2211
2212 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2213 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2214
2215 if (len < trace->args_alignment - 4)
2216 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2217
2218 printed += fprintf(trace->output, " ...\n");
2219
2220 ttrace->entry_pending = false;
2221 ++trace->nr_events_printed;
2222
2223 return printed;
2224 }
2225
2226 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2227 struct perf_sample *sample, struct thread *thread)
2228 {
2229 int printed = 0;
2230
2231 if (trace->print_sample) {
2232 double ts = (double)sample->time / NSEC_PER_MSEC;
2233
2234 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2235 evsel__name(evsel), ts,
2236 thread__comm_str(thread),
2237 sample->pid, sample->tid, sample->cpu);
2238 }
2239
2240 return printed;
2241 }
2242
2243 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
2244 {
2245 void *augmented_args = NULL;
2246 /*
2247 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
2248 * and there we get all 6 syscall args plus the tracepoint common fields
2249 * that gets calculated at the start and the syscall_nr (another long).
2250 * So we check if that is the case and, if so, look for the augmented args
2251 * not after sc->args_size but after the full raw_syscalls:sys_enter payload,
2252 * whose size is fixed.
2253 *
2254 * We'll revisit this later to pass sc->args_size to the BPF augmenter
2255 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
2256 * copies only what we need for each syscall, like what happens when we
2257 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
2258 * traffic to just what is needed for each syscall.
2259 */
2260 int args_size = raw_augmented_args_size ?: sc->args_size;
2261
2262 *augmented_args_size = sample->raw_size - args_size;
2263 if (*augmented_args_size > 0)
2264 augmented_args = sample->raw_data + args_size;
2265
2266 return augmented_args;
2267 }
2268
2269 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2270 union perf_event *event __maybe_unused,
2271 struct perf_sample *sample)
2272 {
2273 char *msg;
2274 void *args;
2275 int printed = 0;
2276 struct thread *thread;
2277 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2278 int augmented_args_size = 0;
2279 void *augmented_args = NULL;
2280 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2281 struct thread_trace *ttrace;
2282
2283 if (sc == NULL)
2284 return -1;
2285
2286 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2287 ttrace = thread__trace(thread, trace->output);
2288 if (ttrace == NULL)
2289 goto out_put;
2290
2291 trace__fprintf_sample(trace, evsel, sample, thread);
2292
2293 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2294
2295 if (ttrace->entry_str == NULL) {
2296 ttrace->entry_str = malloc(trace__entry_str_size);
2297 if (!ttrace->entry_str)
2298 goto out_put;
2299 }
2300
2301 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2302 trace__printf_interrupted_entry(trace);
2303 /*
2304 * If this is raw_syscalls.sys_enter, then it always comes with all 6 possible
2305 * arguments, even if the syscall being handled, say "openat", uses only 4.
2306 * That breaks the syscall__augmented_args() check for augmented args, as we
2307 * calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs
2308 * format file; so when handling, say, the openat syscall, we get 6 args for
2309 * the raw_syscalls:sys_enter event when we expected just 4, and mistakenly
2310 * take the extra 2 u64 args for the augmented filename. So just check here
2311 * and avoid using augmented syscalls when the evsel is the raw_syscalls one.
2312 */
2313 if (evsel != trace->syscalls.events.sys_enter)
2314 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2315 ttrace->entry_time = sample->time;
2316 msg = ttrace->entry_str;
2317 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2318
2319 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2320 args, augmented_args, augmented_args_size, trace, thread);
2321
2322 if (sc->is_exit) {
2323 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2324 int alignment = 0;
2325
2326 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2327 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2328 if (trace->args_alignment > printed)
2329 alignment = trace->args_alignment - printed;
2330 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2331 }
2332 } else {
2333 ttrace->entry_pending = true;
2334 /* See trace__vfs_getname & trace__sys_exit */
2335 ttrace->filename.pending_open = false;
2336 }
2337
2338 if (trace->current != thread) {
2339 thread__put(trace->current);
2340 trace->current = thread__get(thread);
2341 }
2342 err = 0;
2343 out_put:
2344 thread__put(thread);
2345 return err;
2346 }
2347
2348 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2349 struct perf_sample *sample)
2350 {
2351 struct thread_trace *ttrace;
2352 struct thread *thread;
2353 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2354 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2355 char msg[1024];
2356 void *args, *augmented_args = NULL;
2357 int augmented_args_size;
2358
2359 if (sc == NULL)
2360 return -1;
2361
2362 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2363 ttrace = thread__trace(thread, trace->output);
2364 /*
2365 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2366 * and the rest of the beautifiers access it via struct syscall_arg.
2367 */
2368 if (ttrace == NULL)
2369 goto out_put;
2370
2371 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2372 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2373 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2374 fprintf(trace->output, "%s", msg);
2375 err = 0;
2376 out_put:
2377 thread__put(thread);
2378 return err;
2379 }
2380
2381 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2382 struct perf_sample *sample,
2383 struct callchain_cursor *cursor)
2384 {
2385 struct addr_location al;
2386 int max_stack = evsel->core.attr.sample_max_stack ?
2387 evsel->core.attr.sample_max_stack :
2388 trace->max_stack;
2389 int err;
2390
2391 if (machine__resolve(trace->host, &al, sample) < 0)
2392 return -1;
2393
2394 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2395 addr_location__put(&al);
2396 return err;
2397 }
2398
2399 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2400 {
2401 /* TODO: user-configurable print_opts */
2402 const unsigned int print_opts = EVSEL__PRINT_SYM |
2403 EVSEL__PRINT_DSO |
2404 EVSEL__PRINT_UNKNOWN_AS_ADDR;
2405
2406 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2407 }
2408
2409 static const char *errno_to_name(struct evsel *evsel, int err)
2410 {
2411 struct perf_env *env = evsel__env(evsel);
2412 const char *arch_name = perf_env__arch(env);
2413
2414 return arch_syscalls__strerrno(arch_name, err);
2415 }
2416
2417 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2418 union perf_event *event __maybe_unused,
2419 struct perf_sample *sample)
2420 {
2421 long ret;
2422 u64 duration = 0;
2423 bool duration_calculated = false;
2424 struct thread *thread;
2425 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2426 int alignment = trace->args_alignment;
2427 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2428 struct thread_trace *ttrace;
2429
2430 if (sc == NULL)
2431 return -1;
2432
2433 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2434 ttrace = thread__trace(thread, trace->output);
2435 if (ttrace == NULL)
2436 goto out_put;
2437
2438 trace__fprintf_sample(trace, evsel, sample, thread);
2439
2440 ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2441
2442 if (trace->summary)
2443 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2444
2445 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2446 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2447 ttrace->filename.pending_open = false;
2448 ++trace->stats.vfs_getname;
2449 }
2450
2451 if (ttrace->entry_time) {
2452 duration = sample->time - ttrace->entry_time;
2453 if (trace__filter_duration(trace, duration))
2454 goto out;
2455 duration_calculated = true;
2456 } else if (trace->duration_filter)
2457 goto out;
2458
2459 if (sample->callchain) {
2460 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2461 if (callchain_ret == 0) {
2462 if (callchain_cursor.nr < trace->min_stack)
2463 goto out;
2464 callchain_ret = 1;
2465 }
2466 }
2467
2468 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2469 goto out;
2470
2471 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2472
2473 if (ttrace->entry_pending) {
2474 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2475 } else {
2476 printed += fprintf(trace->output, " ... [");
2477 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2478 printed += 9;
2479 printed += fprintf(trace->output, "]: %s()", sc->name);
2480 }
2481
2482 printed++; /* the closing ')' */
2483
2484 if (alignment > printed)
2485 alignment -= printed;
2486 else
2487 alignment = 0;
2488
2489 fprintf(trace->output, ")%*s= ", alignment, " ");
2490
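/*
 * Render the return value: errno name plus strerror text for failures,
 * "Timeout" for zero when the syscall fmt marks it, a per-call ret_scnprintf
 * set by an argument beautifier (e.g. fcntl's 'cmd'), hex for hexret
 * syscalls, the returned pid plus its comm for errpid ones, and plain signed
 * decimal otherwise.
 */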
2491 if (sc->fmt == NULL) {
2492 if (ret < 0)
2493 goto errno_print;
2494 signed_print:
2495 fprintf(trace->output, "%ld", ret);
2496 } else if (ret < 0) {
2497 errno_print: {
2498 char bf[STRERR_BUFSIZE];
2499 const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2500 *e = errno_to_name(evsel, -ret);
2501
2502 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2503 }
2504 } else if (ret == 0 && sc->fmt->timeout)
2505 fprintf(trace->output, "0 (Timeout)");
2506 else if (ttrace->ret_scnprintf) {
2507 char bf[1024];
2508 struct syscall_arg arg = {
2509 .val = ret,
2510 .thread = thread,
2511 .trace = trace,
2512 };
2513 ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2514 ttrace->ret_scnprintf = NULL;
2515 fprintf(trace->output, "%s", bf);
2516 } else if (sc->fmt->hexret)
2517 fprintf(trace->output, "%#lx", ret);
2518 else if (sc->fmt->errpid) {
2519 struct thread *child = machine__find_thread(trace->host, ret, ret);
2520
2521 if (child != NULL) {
2522 fprintf(trace->output, "%ld", ret);
2523 if (child->comm_set)
2524 fprintf(trace->output, " (%s)", thread__comm_str(child));
2525 thread__put(child);
2526 }
2527 } else
2528 goto signed_print;
2529
2530 fputc('\n', trace->output);
2531
2532 /*
2533 * For the sake of --max-events, we only count as an 'event' a non-filtered
2534 * sys_enter + sys_exit pair and other tracepoint events.
2535 */
2536 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2537 interrupted = true;
2538
2539 if (callchain_ret > 0)
2540 trace__fprintf_callchain(trace, sample);
2541 else if (callchain_ret < 0)
2542 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2543 out:
2544 ttrace->entry_pending = false;
2545 err = 0;
2546 out_put:
2547 thread__put(thread);
2548 return err;
2549 }
2550
2551 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2552 union perf_event *event __maybe_unused,
2553 struct perf_sample *sample)
2554 {
2555 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2556 struct thread_trace *ttrace;
2557 size_t filename_len, entry_str_len, to_move;
2558 ssize_t remaining_space;
2559 char *pos;
2560 const char *filename = evsel__rawptr(evsel, sample, "pathname");
2561
2562 if (!thread)
2563 goto out;
2564
2565 ttrace = thread__priv(thread);
2566 if (!ttrace)
2567 goto out_put;
2568
2569 filename_len = strlen(filename);
2570 if (filename_len == 0)
2571 goto out_put;
2572
2573 if (ttrace->filename.namelen < filename_len) {
2574 char *f = realloc(ttrace->filename.name, filename_len + 1);
2575
2576 if (f == NULL)
2577 goto out_put;
2578
2579 ttrace->filename.namelen = filename_len;
2580 ttrace->filename.name = f;
2581 }
2582
2583 strcpy(ttrace->filename.name, filename);
2584 ttrace->filename.pending_open = true;
2585
2586 if (!ttrace->filename.ptr)
2587 goto out_put;
2588
2589 entry_str_len = strlen(ttrace->entry_str);
2590 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2591 if (remaining_space <= 0)
2592 goto out_put;
2593
2594 if (filename_len > (size_t)remaining_space) {
2595 filename += filename_len - remaining_space;
2596 filename_len = remaining_space;
2597 }
2598
2599 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2600 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2601 memmove(pos + filename_len, pos, to_move);
2602 memcpy(pos, filename, filename_len);
2603
2604 ttrace->filename.ptr = 0;
2605 ttrace->filename.entry_str_pos = 0;
2606 out_put:
2607 thread__put(thread);
2608 out:
2609 return 0;
2610 }
2611
2612 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2613 union perf_event *event __maybe_unused,
2614 struct perf_sample *sample)
2615 {
2616 u64 runtime = evsel__intval(evsel, sample, "runtime");
2617 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2618 struct thread *thread = machine__findnew_thread(trace->host,
2619 sample->pid,
2620 sample->tid);
2621 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2622
2623 if (ttrace == NULL)
2624 goto out_dump;
2625
2626 ttrace->runtime_ms += runtime_ms;
2627 trace->runtime_ms += runtime_ms;
2628 out_put:
2629 thread__put(thread);
2630 return 0;
2631
2632 out_dump:
2633 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2634 evsel->name,
2635 evsel__strval(evsel, sample, "comm"),
2636 (pid_t)evsel__intval(evsel, sample, "pid"),
2637 runtime,
2638 evsel__intval(evsel, sample, "vruntime"));
2639 goto out_put;
2640 }
2641
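/*
 * Printer callback for binary__fprintf(): emit only the character column of
 * the dump, i.e. printable bytes as-is and '.' for the rest, 8 bytes per
 * line as requested in bpf_output__fprintf() below.
 */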
2642 static int bpf_output__printer(enum binary_printer_ops op,
2643 unsigned int val, void *extra __maybe_unused, FILE *fp)
2644 {
2645 unsigned char ch = (unsigned char)val;
2646
2647 switch (op) {
2648 case BINARY_PRINT_CHAR_DATA:
2649 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2650 case BINARY_PRINT_DATA_BEGIN:
2651 case BINARY_PRINT_LINE_BEGIN:
2652 case BINARY_PRINT_ADDR:
2653 case BINARY_PRINT_NUM_DATA:
2654 case BINARY_PRINT_NUM_PAD:
2655 case BINARY_PRINT_SEP:
2656 case BINARY_PRINT_CHAR_PAD:
2657 case BINARY_PRINT_LINE_END:
2658 case BINARY_PRINT_DATA_END:
2659 default:
2660 break;
2661 }
2662
2663 return 0;
2664 }
2665
2666 static void bpf_output__fprintf(struct trace *trace,
2667 struct perf_sample *sample)
2668 {
2669 binary__fprintf(sample->raw_data, sample->raw_size, 8,
2670 bpf_output__printer, NULL, trace->output);
2671 ++trace->nr_events_printed;
2672 }
2673
2674 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
2675 struct thread *thread, void *augmented_args, int augmented_args_size)
2676 {
2677 char bf[2048];
2678 size_t size = sizeof(bf);
2679 struct tep_format_field *field = evsel->tp_format->format.fields;
2680 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
2681 size_t printed = 0;
2682 unsigned long val;
2683 u8 bit = 1;
2684 struct syscall_arg syscall_arg = {
2685 .augmented = {
2686 .size = augmented_args_size,
2687 .args = augmented_args,
2688 },
2689 .idx = 0,
2690 .mask = 0,
2691 .trace = trace,
2692 .thread = thread,
2693 .show_string_prefix = trace->show_string_prefix,
2694 };
2695
2696 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
2697 if (syscall_arg.mask & bit)
2698 continue;
2699
2700 syscall_arg.len = 0;
2701 syscall_arg.fmt = arg;
2702 if (field->flags & TEP_FIELD_IS_ARRAY) {
2703 int offset = field->offset;
2704
2705 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2706 offset = format_field__intval(field, sample, evsel->needs_swap);
2707 syscall_arg.len = offset >> 16;
2708 offset &= 0xffff;
2709 }
2710
2711 val = (uintptr_t)(sample->raw_data + offset);
2712 } else
2713 val = format_field__intval(field, sample, evsel->needs_swap);
2714 /*
2715 * Some syscall args need some mask, most don't and
2716 * return val untouched.
2717 */
2718 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
2719
2720 /*
2721 * Suppress this argument if its value is zero and
2722 * we don't have a string associated with it in a
2723 * strarray.
2724 */
2725 if (val == 0 &&
2726 !trace->show_zeros &&
2727 !((arg->show_zero ||
2728 arg->scnprintf == SCA_STRARRAY ||
2729 arg->scnprintf == SCA_STRARRAYS) &&
2730 arg->parm))
2731 continue;
2732
2733 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2734
2735 /*
2736 * XXX Perhaps we should have a show_tp_arg_names,
2737 * leaving show_arg_names just for syscalls?
2738 */
2739 if (1 || trace->show_arg_names)
2740 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2741
2742 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
2743 }
2744
2745 return printed + fprintf(trace->output, "%s", bf);
2746 }
2747
2748 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2749 union perf_event *event __maybe_unused,
2750 struct perf_sample *sample)
2751 {
2752 struct thread *thread;
2753 int callchain_ret = 0;
2754 /*
2755 * Check if we called perf_evsel__disable(evsel) due to, for instance,
2756 * this event's max_events having been hit and this is an entry coming
2757 * from the ring buffer that we should discard, since the max events
2758 * have already been considered/printed.
2759 */
2760 if (evsel->disabled)
2761 return 0;
2762
2763 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2764
2765 if (sample->callchain) {
2766 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2767 if (callchain_ret == 0) {
2768 if (callchain_cursor.nr < trace->min_stack)
2769 goto out;
2770 callchain_ret = 1;
2771 }
2772 }
2773
2774 trace__printf_interrupted_entry(trace);
2775 trace__fprintf_tstamp(trace, sample->time, trace->output);
2776
2777 if (trace->trace_syscalls && trace->show_duration)
2778 fprintf(trace->output, "( ): ");
2779
2780 if (thread)
2781 trace__fprintf_comm_tid(trace, thread, trace->output);
2782
2783 if (evsel == trace->syscalls.events.augmented) {
2784 int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2785 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2786
2787 if (sc) {
2788 fprintf(trace->output, "%s(", sc->name);
2789 trace__fprintf_sys_enter(trace, evsel, sample);
2790 fputc(')', trace->output);
2791 goto newline;
2792 }
2793
2794 /*
2795 * XXX: Not having the associated syscall info or not finding/adding
2796 * the thread should never happen, but if it does...
2797 * fall thru and print it as a bpf_output event.
2798 */
2799 }
2800
2801 fprintf(trace->output, "%s(", evsel->name);
2802
2803 if (evsel__is_bpf_output(evsel)) {
2804 bpf_output__fprintf(trace, sample);
2805 } else if (evsel->tp_format) {
2806 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2807 trace__fprintf_sys_enter(trace, evsel, sample)) {
2808 if (trace->libtraceevent_print) {
2809 event_format__fprintf(evsel->tp_format, sample->cpu,
2810 sample->raw_data, sample->raw_size,
2811 trace->output);
2812 } else {
2813 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2814 }
2815 }
2816 }
2817
2818 newline:
2819 fprintf(trace->output, ")\n");
2820
2821 if (callchain_ret > 0)
2822 trace__fprintf_callchain(trace, sample);
2823 else if (callchain_ret < 0)
2824 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2825
2826 ++trace->nr_events_printed;
2827
2828 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2829 evsel__disable(evsel);
2830 evsel__close(evsel);
2831 }
2832 out:
2833 thread__put(thread);
2834 return 0;
2835 }
2836
2837 static void print_location(FILE *f, struct perf_sample *sample,
2838 struct addr_location *al,
2839 bool print_dso, bool print_sym)
2840 {
2841
2842 if ((verbose > 0 || print_dso) && al->map)
2843 fprintf(f, "%s@", al->map->dso->long_name);
2844
2845 if ((verbose > 0 || print_sym) && al->sym)
2846 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2847 al->addr - al->sym->start);
2848 else if (al->map)
2849 fprintf(f, "0x%" PRIx64, al->addr);
2850 else
2851 fprintf(f, "0x%" PRIx64, sample->addr);
2852 }
2853
2854 static int trace__pgfault(struct trace *trace,
2855 struct evsel *evsel,
2856 union perf_event *event __maybe_unused,
2857 struct perf_sample *sample)
2858 {
2859 struct thread *thread;
2860 struct addr_location al;
2861 char map_type = 'd';
2862 struct thread_trace *ttrace;
2863 int err = -1;
2864 int callchain_ret = 0;
2865
2866 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2867
2868 if (sample->callchain) {
2869 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2870 if (callchain_ret == 0) {
2871 if (callchain_cursor.nr < trace->min_stack)
2872 goto out_put;
2873 callchain_ret = 1;
2874 }
2875 }
2876
2877 ttrace = thread__trace(thread, trace->output);
2878 if (ttrace == NULL)
2879 goto out_put;
2880
2881 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2882 ttrace->pfmaj++;
2883 else
2884 ttrace->pfmin++;
2885
2886 if (trace->summary_only)
2887 goto out;
2888
2889 thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2890
2891 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2892
2893 fprintf(trace->output, "%sfault [",
2894 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2895 "maj" : "min");
2896
2897 print_location(trace->output, sample, &al, false, true);
2898
2899 fprintf(trace->output, "] => ");
2900
2901 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2902
2903 if (!al.map) {
2904 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2905
2906 if (al.map)
2907 map_type = 'x';
2908 else
2909 map_type = '?';
2910 }
2911
2912 print_location(trace->output, sample, &al, true, false);
2913
2914 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2915
2916 if (callchain_ret > 0)
2917 trace__fprintf_callchain(trace, sample);
2918 else if (callchain_ret < 0)
2919 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2920
2921 ++trace->nr_events_printed;
2922 out:
2923 err = 0;
2924 out_put:
2925 thread__put(thread);
2926 return err;
2927 }
2928
2929 static void trace__set_base_time(struct trace *trace,
2930 struct evsel *evsel,
2931 struct perf_sample *sample)
2932 {
2933 /*
2934 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2935 * and don't use sample->time unconditionally; we may end up having
2936 * some other event in the future without PERF_SAMPLE_TIME for good
2937 * reason, i.e. we may not be interested in its timestamps, just in
2938 * it taking place, picking some piece of information when it
2939 * appears in our event stream (vfs_getname comes to mind).
2940 */
2941 if (trace->base_time == 0 && !trace->full_time &&
2942 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2943 trace->base_time = sample->time;
2944 }
2945
2946 static int trace__process_sample(struct perf_tool *tool,
2947 union perf_event *event,
2948 struct perf_sample *sample,
2949 struct evsel *evsel,
2950 struct machine *machine __maybe_unused)
2951 {
2952 struct trace *trace = container_of(tool, struct trace, tool);
2953 struct thread *thread;
2954 int err = 0;
2955
2956 tracepoint_handler handler = evsel->handler;
2957
2958 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2959 if (thread && thread__is_filtered(thread))
2960 goto out;
2961
2962 trace__set_base_time(trace, evsel, sample);
2963
2964 if (handler) {
2965 ++trace->nr_events;
2966 handler(trace, evsel, event, sample);
2967 }
2968 out:
2969 thread__put(thread);
2970 return err;
2971 }
2972
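/*
 * 'perf trace record' just forwards to 'perf record' with a canned set of
 * options, so the resulting command line is roughly (illustrative only, the
 * exact filter string is built by asprintf__tp_filter_pids()):
 *
 *   perf record -R -m 1024 -c 1 \
 *        -e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *        --filter "common_pid != <perf pid>" [-e major-faults ...] <argv>
 */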
2973 static int trace__record(struct trace *trace, int argc, const char **argv)
2974 {
2975 unsigned int rec_argc, i, j;
2976 const char **rec_argv;
2977 const char * const record_args[] = {
2978 "record",
2979 "-R",
2980 "-m", "1024",
2981 "-c", "1",
2982 };
2983 pid_t pid = getpid();
2984 char *filter = asprintf__tp_filter_pids(1, &pid);
2985 const char * const sc_args[] = { "-e", };
2986 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2987 const char * const majpf_args[] = { "-e", "major-faults" };
2988 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2989 const char * const minpf_args[] = { "-e", "minor-faults" };
2990 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2991 int err = -1;
2992
2993 /* +3 is for the event string below and the pid filter */
2994 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
2995 majpf_args_nr + minpf_args_nr + argc;
2996 rec_argv = calloc(rec_argc + 1, sizeof(char *));
2997
2998 if (rec_argv == NULL || filter == NULL)
2999 goto out_free;
3000
3001 j = 0;
3002 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3003 rec_argv[j++] = record_args[i];
3004
3005 if (trace->trace_syscalls) {
3006 for (i = 0; i < sc_args_nr; i++)
3007 rec_argv[j++] = sc_args[i];
3008
3009 /* event string may be different for older kernels - e.g., RHEL6 */
3010 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
3011 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
3012 else if (is_valid_tracepoint("syscalls:sys_enter"))
3013 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
3014 else {
3015 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
3016 goto out_free;
3017 }
3018 }
3019
3020 rec_argv[j++] = "--filter";
3021 rec_argv[j++] = filter;
3022
3023 if (trace->trace_pgfaults & TRACE_PFMAJ)
3024 for (i = 0; i < majpf_args_nr; i++)
3025 rec_argv[j++] = majpf_args[i];
3026
3027 if (trace->trace_pgfaults & TRACE_PFMIN)
3028 for (i = 0; i < minpf_args_nr; i++)
3029 rec_argv[j++] = minpf_args[i];
3030
3031 for (i = 0; i < (unsigned int)argc; i++)
3032 rec_argv[j++] = argv[i];
3033
3034 err = cmd_record(j, rec_argv);
3035 out_free:
3036 free(filter);
3037 free(rec_argv);
3038 return err;
3039 }
3040
3041 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3042
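/*
 * If the user has previously set up a probe on getname_flags(), e.g. with
 * something like (the exact line number and probe spec vary by kernel):
 *
 *   perf probe 'vfs_getname=getname_flags:72 pathname=result->name:string'
 *
 * hook its "pathname" field so that open/openat and friends can show the
 * filename instead of just a pointer.
 */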
3043 static bool evlist__add_vfs_getname(struct evlist *evlist)
3044 {
3045 bool found = false;
3046 struct evsel *evsel, *tmp;
3047 struct parse_events_error err;
3048 int ret;
3049
3050 bzero(&err, sizeof(err));
3051 ret = parse_events(evlist, "probe:vfs_getname*", &err);
3052 if (ret) {
3053 free(err.str);
3054 free(err.help);
3055 free(err.first_str);
3056 free(err.first_help);
3057 return false;
3058 }
3059
3060 evlist__for_each_entry_safe(evlist, evsel, tmp) {
3061 if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
3062 continue;
3063
3064 if (evsel__field(evsel, "pathname")) {
3065 evsel->handler = trace__vfs_getname;
3066 found = true;
3067 continue;
3068 }
3069
3070 list_del_init(&evsel->core.node);
3071 evsel->evlist = NULL;
3072 evsel__delete(evsel);
3073 }
3074
3075 return found;
3076 }
3077
3078 static struct evsel *evsel__new_pgfault(u64 config)
3079 {
3080 struct evsel *evsel;
3081 struct perf_event_attr attr = {
3082 .type = PERF_TYPE_SOFTWARE,
3083 .mmap_data = 1,
3084 };
3085
3086 attr.config = config;
3087 attr.sample_period = 1;
3088
3089 event_attr_init(&attr);
3090
3091 evsel = evsel__new(&attr);
3092 if (evsel)
3093 evsel->handler = trace__pgfault;
3094
3095 return evsel;
3096 }
3097
3098 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3099 {
3100 const u32 type = event->header.type;
3101 struct evsel *evsel;
3102
3103 if (type != PERF_RECORD_SAMPLE) {
3104 trace__process_event(trace, trace->host, event, sample);
3105 return;
3106 }
3107
3108 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
3109 if (evsel == NULL) {
3110 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3111 return;
3112 }
3113
3114 if (evswitch__discard(&trace->evswitch, evsel))
3115 return;
3116
3117 trace__set_base_time(trace, evsel, sample);
3118
3119 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
3120 sample->raw_data == NULL) {
3121 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3122 evsel__name(evsel), sample->tid,
3123 sample->cpu, sample->raw_size);
3124 } else {
3125 tracepoint_handler handler = evsel->handler;
3126 handler(trace, evsel, event, sample);
3127 }
3128
3129 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3130 interrupted = true;
3131 }
3132
3133 static int trace__add_syscall_newtp(struct trace *trace)
3134 {
3135 int ret = -1;
3136 struct evlist *evlist = trace->evlist;
3137 struct evsel *sys_enter, *sys_exit;
3138
3139 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
3140 if (sys_enter == NULL)
3141 goto out;
3142
3143 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
3144 goto out_delete_sys_enter;
3145
3146 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
3147 if (sys_exit == NULL)
3148 goto out_delete_sys_enter;
3149
3150 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
3151 goto out_delete_sys_exit;
3152
3153 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3154 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3155
3156 evlist__add(evlist, sys_enter);
3157 evlist__add(evlist, sys_exit);
3158
3159 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3160 /*
3161 * We're interested only in the user space callchain
3162 * leading to the syscall, so allow overriding that for
3163 * debugging reasons using --kernel_syscall_callchains
3164 */
3165 sys_exit->core.attr.exclude_callchain_kernel = 1;
3166 }
3167
3168 trace->syscalls.events.sys_enter = sys_enter;
3169 trace->syscalls.events.sys_exit = sys_exit;
3170
3171 ret = 0;
3172 out:
3173 return ret;
3174
3175 out_delete_sys_exit:
3176 evsel__delete_priv(sys_exit);
3177 out_delete_sys_enter:
3178 evsel__delete_priv(sys_enter);
3179 goto out;
3180 }
3181
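/*
 * Translate the validated syscall id list into a tracepoint filter on the
 * raw_syscalls events, i.e. an expression along the lines of
 * "id == 257 || id == 2" (or the "id != ..." form when the qualifier is
 * negated), appended to both the sys_enter and sys_exit evsels.
 */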
3182 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3183 {
3184 int err = -1;
3185 struct evsel *sys_exit;
3186 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3187 trace->ev_qualifier_ids.nr,
3188 trace->ev_qualifier_ids.entries);
3189
3190 if (filter == NULL)
3191 goto out_enomem;
3192
3193 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3194 sys_exit = trace->syscalls.events.sys_exit;
3195 err = evsel__append_tp_filter(sys_exit, filter);
3196 }
3197
3198 free(filter);
3199 out:
3200 return err;
3201 out_enomem:
3202 errno = ENOMEM;
3203 goto out;
3204 }
3205
3206 #ifdef HAVE_LIBBPF_SUPPORT
3207 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
3208 {
3209 if (trace->bpf_obj == NULL)
3210 return NULL;
3211
3212 return bpf_object__find_map_by_name(trace->bpf_obj, name);
3213 }
3214
3215 static void trace__set_bpf_map_filtered_pids(struct trace *trace)
3216 {
3217 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
3218 }
3219
3220 static void trace__set_bpf_map_syscalls(struct trace *trace)
3221 {
3222 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
3223 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
3224 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
3225 }
3226
3227 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3228 {
3229 if (trace->bpf_obj == NULL)
3230 return NULL;
3231
3232 return bpf_object__find_program_by_title(trace->bpf_obj, name);
3233 }
3234
3235 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3236 const char *prog_name, const char *type)
3237 {
3238 struct bpf_program *prog;
3239
3240 if (prog_name == NULL) {
3241 char default_prog_name[256];
3242 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
3243 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3244 if (prog != NULL)
3245 goto out_found;
3246 if (sc->fmt && sc->fmt->alias) {
3247 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
3248 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3249 if (prog != NULL)
3250 goto out_found;
3251 }
3252 goto out_unaugmented;
3253 }
3254
3255 prog = trace__find_bpf_program_by_title(trace, prog_name);
3256
3257 if (prog != NULL) {
3258 out_found:
3259 return prog;
3260 }
3261
3262 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
3263 prog_name, type, sc->name);
3264 out_unaugmented:
3265 return trace->syscalls.unaugmented_prog;
3266 }
3267
3268 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3269 {
3270 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3271
3272 if (sc == NULL)
3273 return;
3274
3275 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3276 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3277 }
3278
3279 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3280 {
3281 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3282 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3283 }
3284
3285 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3286 {
3287 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3288 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3289 }
3290
3291 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
3292 {
3293 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3294 int arg = 0;
3295
3296 if (sc == NULL)
3297 goto out;
3298
3299 for (; arg < sc->nr_args; ++arg) {
3300 entry->string_args_len[arg] = 0;
3301 if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
3302 /* Should be set like strace -s strsize */
3303 entry->string_args_len[arg] = PATH_MAX;
3304 }
3305 }
3306 out:
3307 for (; arg < 6; ++arg)
3308 entry->string_args_len[arg] = 0;
3309 }
3310 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
3311 {
3312 int fd = bpf_map__fd(trace->syscalls.map);
3313 struct bpf_map_syscall_entry value = {
3314 .enabled = !trace->not_ev_qualifier,
3315 };
3316 int err = 0;
3317 size_t i;
3318
3319 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
3320 int key = trace->ev_qualifier_ids.entries[i];
3321
3322 if (value.enabled) {
3323 trace__init_bpf_map_syscall_args(trace, key, &value);
3324 trace__init_syscall_bpf_progs(trace, key);
3325 }
3326
3327 err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
3328 if (err)
3329 break;
3330 }
3331
3332 return err;
3333 }
3334
3335 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
3336 {
3337 int fd = bpf_map__fd(trace->syscalls.map);
3338 struct bpf_map_syscall_entry value = {
3339 .enabled = enabled,
3340 };
3341 int err = 0, key;
3342
3343 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3344 if (enabled)
3345 trace__init_bpf_map_syscall_args(trace, key, &value);
3346
3347 err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
3348 if (err)
3349 break;
3350 }
3351
3352 return err;
3353 }
3354
3355 static int trace__init_syscalls_bpf_map(struct trace *trace)
3356 {
3357 bool enabled = true;
3358
3359 if (trace->ev_qualifier_ids.nr)
3360 enabled = trace->not_ev_qualifier;
3361
3362 return __trace__init_syscalls_bpf_map(trace, enabled);
3363 }
3364
3365 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
3366 {
3367 struct tep_format_field *field, *candidate_field;
3368 int id;
3369
3370 /*
3371 * We're only interested in syscalls that have a pointer:
3372 */
3373 for (field = sc->args; field; field = field->next) {
3374 if (field->flags & TEP_FIELD_IS_POINTER)
3375 goto try_to_find_pair;
3376 }
3377
3378 return NULL;
3379
3380 try_to_find_pair:
3381 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3382 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3383 struct bpf_program *pair_prog;
3384 bool is_candidate = false;
3385
3386 if (pair == NULL || pair == sc ||
3387 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3388 continue;
3389
3390 for (field = sc->args, candidate_field = pair->args;
3391 field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
3392 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3393 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3394
3395 if (is_pointer) {
3396 if (!candidate_is_pointer) {
3397 // The candidate just doesn't copy our pointer arg; it might still copy other pointers we want.
3398 continue;
3399 }
3400 } else {
3401 if (candidate_is_pointer) {
3402 // The candidate might copy a pointer we don't have, skip it.
3403 goto next_candidate;
3404 }
3405 continue;
3406 }
3407
3408 if (strcmp(field->type, candidate_field->type))
3409 goto next_candidate;
3410
3411 is_candidate = true;
3412 }
3413
3414 if (!is_candidate)
3415 goto next_candidate;
3416
3417 /*
3418 * Check if the tentative pair syscall augmenter has more pointer args; if it
3419 * does, it may be collecting those too and then we can't use it, as it would
3420 * collect more than what is common to the two syscalls.
3421 */
3422 if (candidate_field) {
3423 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3424 if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3425 goto next_candidate;
3426 }
3427
3428 pair_prog = pair->bpf_prog.sys_enter;
3429 /*
3430 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
3431 * have been searched for, so search it here and if it returns the
3432 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
3433 * program for a filtered syscall on a non-filtered one.
3434 *
3435 * For instance, we have "!syscalls:sys_enter_renameat" and that is
3436 * useful for "renameat2".
3437 */
3438 if (pair_prog == NULL) {
3439 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3440 if (pair_prog == trace->syscalls.unaugmented_prog)
3441 goto next_candidate;
3442 }
3443
3444 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3445 return pair_prog;
3446 next_candidate:
3447 continue;
3448 }
3449
3450 return NULL;
3451 }
3452
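/*
 * Populate the sys_enter/sys_exit BPF_MAP_TYPE_PROG_ARRAY maps used for tail
 * calls from the raw_syscalls tracepoints: first with each enabled syscall's
 * own augmenter (or the unaugmented fallback), then, in a second pass, reusing
 * augmenters from syscalls with compatible signatures for the ones that ended
 * up unaugmented.
 */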
3453 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3454 {
3455 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3456 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3457 int err = 0, key;
3458
3459 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3460 int prog_fd;
3461
3462 if (!trace__syscall_enabled(trace, key))
3463 continue;
3464
3465 trace__init_syscall_bpf_progs(trace, key);
3466
3467 // It'll get at least the "!raw_syscalls:unaugmented" program
3468 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3469 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3470 if (err)
3471 break;
3472 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3473 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3474 if (err)
3475 break;
3476 }
3477
3478 /*
3479 * Now lets do a second pass looking for enabled syscalls without
3480 * an augmenter that have a signature that is a superset of another
3481 * syscall with an augmenter so that we can auto-reuse it.
3482 *
3483 * I.e. if we have an augmenter for the "open" syscall that has
3484 * this signature:
3485 *
3486 * int open(const char *pathname, int flags, mode_t mode);
3487 *
3488 * That augmenter will collect just the first string argument, so we
3489 * can reuse it for the 'creat' syscall, which has this signature:
3490 *
3491 * int creat(const char *pathname, mode_t mode);
3492 *
3493 * and for:
3494 *
3495 * int stat(const char *pathname, struct stat *statbuf);
3496 * int lstat(const char *pathname, struct stat *statbuf);
3497 *
3498 * Because the 'open' augmenter will collect the first arg as a string,
3499 * and leave alone all the other args, which already helps with
3500 * beautifying 'stat' and 'lstat's pathname arg.
3501 *
3502 * Then, in time, when 'stat' gets an augmenter that collects both
3503 * the first and second args (this one on the raw_syscalls:sys_exit prog
3504 * array tail call), then that one will be used.
3505 */
3506 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3507 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3508 struct bpf_program *pair_prog;
3509 int prog_fd;
3510
3511 if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3512 continue;
3513
3514 /*
3515 * For now we're just reusing the sys_enter prog, and if it
3516 * already has an augmenter, we don't need to find one.
3517 */
3518 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3519 continue;
3520
3521 /*
3522 * Look at all the other syscalls for one that has a signature
3523 * that is close enough that we can share:
3524 */
3525 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3526 if (pair_prog == NULL)
3527 continue;
3528
3529 sc->bpf_prog.sys_enter = pair_prog;
3530
3531 /*
3532 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
3533 * with the fd for the program we're reusing:
3534 */
3535 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3536 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3537 if (err)
3538 break;
3539 }
3540
3542 return err;
3543 }
3544
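/*
 * Drop the __augmented_syscalls__ event and every other evsel that came from
 * the same BPF object, then close that object; used when strace-like syscall
 * tracing ends up not being wanted.
 */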
3545 static void trace__delete_augmented_syscalls(struct trace *trace)
3546 {
3547 struct evsel *evsel, *tmp;
3548
3549 evlist__remove(trace->evlist, trace->syscalls.events.augmented);
3550 evsel__delete(trace->syscalls.events.augmented);
3551 trace->syscalls.events.augmented = NULL;
3552
3553 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
3554 if (evsel->bpf_obj == trace->bpf_obj) {
3555 evlist__remove(trace->evlist, evsel);
3556 evsel__delete(evsel);
3557 }
3559 }
3560
3561 bpf_object__close(trace->bpf_obj);
3562 trace->bpf_obj = NULL;
3563 }
3564 #else // HAVE_LIBBPF_SUPPORT
3565 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3566 const char *name __maybe_unused)
3567 {
3568 return NULL;
3569 }
3570
3571 static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
3572 {
3573 }
3574
3575 static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
3576 {
3577 }
3578
3579 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3580 {
3581 return 0;
3582 }
3583
3584 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3585 {
3586 return 0;
3587 }
3588
3589 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3590 const char *name __maybe_unused)
3591 {
3592 return NULL;
3593 }
3594
3595 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3596 {
3597 return 0;
3598 }
3599
3600 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
3601 {
3602 }
3603 #endif // HAVE_LIBBPF_SUPPORT
3604
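/*
 * true if every evsel in the list is either the __augmented_syscalls__ event
 * or comes from the same BPF object, i.e. no other events were asked for.
 */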
3605 static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
3606 {
3607 struct evsel *evsel;
3608
3609 evlist__for_each_entry(trace->evlist, evsel) {
3610 if (evsel == trace->syscalls.events.augmented ||
3611 evsel->bpf_obj == trace->bpf_obj)
3612 continue;
3613
3614 return false;
3615 }
3616
3617 return true;
3618 }
3619
3620 static int trace__set_ev_qualifier_filter(struct trace *trace)
3621 {
3622 if (trace->syscalls.map)
3623 return trace__set_ev_qualifier_bpf_filter(trace);
3624 if (trace->syscalls.events.sys_enter)
3625 return trace__set_ev_qualifier_tp_filter(trace);
3626 return 0;
3627 }
3628
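/*
 * Mark each of the given pids in the BPF filter map so that the BPF programs
 * can skip events for them; a no-op without libbpf support.
 */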
3629 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3630 size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3631 {
3632 int err = 0;
3633 #ifdef HAVE_LIBBPF_SUPPORT
3634 bool value = true;
3635 int map_fd = bpf_map__fd(map);
3636 size_t i;
3637
3638 for (i = 0; i < npids; ++i) {
3639 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3640 if (err)
3641 break;
3642 }
3643 #endif
3644 return err;
3645 }
3646
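/*
 * Filter out perf trace's own pid and, when running under sshd or
 * gnome-terminal, the terminal's pid as well, to avoid feedback loops where
 * displaying the trace output generates yet more events to trace.
 */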
3647 static int trace__set_filter_loop_pids(struct trace *trace)
3648 {
3649 unsigned int nr = 1, err;
3650 pid_t pids[32] = {
3651 getpid(),
3652 };
3653 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3654
3655 while (thread && nr < ARRAY_SIZE(pids)) {
3656 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3657
3658 if (parent == NULL)
3659 break;
3660
3661 if (!strcmp(thread__comm_str(parent), "sshd") ||
3662 strstarts(thread__comm_str(parent), "gnome-terminal")) {
3663 pids[nr++] = parent->tid;
3664 break;
3665 }
3666 thread = parent;
3667 }
3668
3669 err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3670 if (!err && trace->filter_pids.map)
3671 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3672
3673 return err;
3674 }
3675
3676 static int trace__set_filter_pids(struct trace *trace)
3677 {
3678 int err = 0;
3679 /*
3680 * Better not use !target__has_task() here because we need to cover the
3681 * case where no threads were specified in the command line, but a
3682 * workload was, and in that case we will fill in the thread_map when
3683 * we fork the workload in perf_evlist__prepare_workload.
3684 */
3685 if (trace->filter_pids.nr > 0) {
3686 err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3687 trace->filter_pids.entries);
3688 if (!err && trace->filter_pids.map) {
3689 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3690 trace->filter_pids.entries);
3691 }
3692 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3693 err = trace__set_filter_loop_pids(trace);
3694 }
3695
3696 return err;
3697 }
3698
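/*
 * Parse the sample and hand the event to trace__handle_event(); parse errors
 * are reported but not treated as fatal.
 */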
3699 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3700 {
3701 struct evlist *evlist = trace->evlist;
3702 struct perf_sample sample;
3703 int err;
3704
3705 err = perf_evlist__parse_sample(evlist, event, &sample);
3706 if (err)
3707 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3708 else
3709 trace__handle_event(trace, event, &sample);
3710
3711 return 0;
3712 }
3713
3714 static int __trace__flush_events(struct trace *trace)
3715 {
3716 u64 first = ordered_events__first_time(&trace->oe.data);
3717 u64 flush = trace->oe.last - NSEC_PER_SEC;
3718
3719 /* Is there something to flush? */
3720 if (first && first < flush)
3721 return ordered_events__flush_time(&trace->oe.data, flush);
3722
3723 return 0;
3724 }
3725
3726 static int trace__flush_events(struct trace *trace)
3727 {
3728 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3729 }
3730
3731 static int trace__deliver_event(struct trace *trace, union perf_event *event)
3732 {
3733 int err;
3734
3735 if (!trace->sort_events)
3736 return __trace__deliver_event(trace, event);
3737
3738 err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3739 if (err && err != -1)
3740 return err;
3741
3742 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3743 if (err)
3744 return err;
3745
3746 return trace__flush_events(trace);
3747 }
3748
3749 static int ordered_events__deliver_event(struct ordered_events *oe,
3750 struct ordered_event *event)
3751 {
3752 struct trace *trace = container_of(oe, struct trace, oe.data);
3753
3754 return __trace__deliver_event(trace, event->event);
3755 }
3756
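/* Find the syscall_arg_fmt for the tracepoint field named 'arg', if any. */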
3757 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
3758 {
3759 struct tep_format_field *field;
3760 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
3761
3762 if (evsel->tp_format == NULL || fmt == NULL)
3763 return NULL;
3764
3765 for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
3766 if (strcmp(field->name, arg) == 0)
3767 return fmt;
3768
3769 return NULL;
3770 }
3771
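/*
 * Expand symbolic names on the right hand side of filter comparisons into
 * numeric values using the argument's strtoul resolver, so that the filter
 * handed to the kernel only ever contains numbers.
 */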
3772 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3773 {
3774 char *tok, *left = evsel->filter, *new_filter = evsel->filter;
3775
3776 while ((tok = strpbrk(left, "=<>!")) != NULL) {
3777 char *right = tok + 1, *right_end;
3778
3779 if (*right == '=')
3780 ++right;
3781
3782 while (isspace(*right))
3783 ++right;
3784
3785 if (*right == '\0')
3786 break;
3787
3788 while (!isalpha(*left))
3789 if (++left == tok) {
3790 /*
3791 * Bail out, we can't find the name of the argument that is being
3792 * used in the filter; just let this filter be set as-is, it will fail later.
3793 */
3794 return 0;
3795 }
3796
3797 right_end = right + 1;
3798 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
3799 ++right_end;
3800
3801 if (isalpha(*right)) {
3802 struct syscall_arg_fmt *fmt;
3803 int left_size = tok - left,
3804 right_size = right_end - right;
3805 char arg[128];
3806
3807 while (isspace(left[left_size - 1]))
3808 --left_size;
3809
3810 scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
3811
3812 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
3813 if (fmt == NULL) {
3814 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
3815 arg, evsel->name, evsel->filter);
3816 return -1;
3817 }
3818
3819 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
3820 arg, (int)(right - tok), tok, right_size, right);
3821
3822 if (fmt->strtoul) {
3823 u64 val;
3824 struct syscall_arg syscall_arg = {
3825 .parm = fmt->parm,
3826 };
3827
3828 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
3829 char *n, expansion[19];
3830 int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
3831 int expansion_offset = right - new_filter;
3832
3833 pr_debug("%s", expansion);
3834
3835 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
3836 pr_debug(" out of memory!\n");
3837 free(new_filter);
3838 return -1;
3839 }
3840 if (new_filter != evsel->filter)
3841 free(new_filter);
3842 left = n + expansion_offset + expansion_length;
3843 new_filter = n;
3844 } else {
3845 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3846 right_size, right, arg, evsel->name, evsel->filter);
3847 return -1;
3848 }
3849 } else {
3850 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3851 arg, evsel->name, evsel->filter);
3852 return -1;
3853 }
3854
3855 pr_debug("\n");
3856 } else {
3857 left = right_end;
3858 }
3859 }
3860
3861 if (new_filter != evsel->filter) {
3862 pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
3863 evsel__set_filter(evsel, new_filter);
3864 free(new_filter);
3865 }
3866
3867 return 0;
3868 }
3869
3870 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3871 {
3872 struct evlist *evlist = trace->evlist;
3873 struct evsel *evsel;
3874
3875 evlist__for_each_entry(evlist, evsel) {
3876 if (evsel->filter == NULL)
3877 continue;
3878
3879 if (trace__expand_filter(trace, evsel)) {
3880 *err_evsel = evsel;
3881 return -1;
3882 }
3883 }
3884
3885 return 0;
3886 }
3887
3888 static int trace__run(struct trace *trace, int argc, const char **argv)
3889 {
3890 struct evlist *evlist = trace->evlist;
3891 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3892 int err = -1, i;
3893 unsigned long before;
3894 const bool forks = argc > 0;
3895 bool draining = false;
3896
3897 trace->live = true;
3898
3899 if (!trace->raw_augmented_syscalls) {
3900 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3901 goto out_error_raw_syscalls;
3902
3903 if (trace->trace_syscalls)
3904 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3905 }
3906
3907 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3908 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3909 if (pgfault_maj == NULL)
3910 goto out_error_mem;
3911 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3912 evlist__add(evlist, pgfault_maj);
3913 }
3914
3915 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3916 pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3917 if (pgfault_min == NULL)
3918 goto out_error_mem;
3919 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3920 evlist__add(evlist, pgfault_min);
3921 }
3922
3923 if (trace->sched &&
3924 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
3925 goto out_error_sched_stat_runtime;
3926 /*
3927 * If a global cgroup was set, apply it to all the events without an
3928 * explicit cgroup. I.e.:
3929 *
3930 * trace -G A -e sched:*switch
3931 *
3932 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3933 * _and_ sched:sched_switch to the 'A' cgroup, while:
3934 *
3935 * trace -e sched:*switch -G A
3936 *
3937 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3938 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
3939 * a cgroup (on the root cgroup, sys wide, etc).
3940 *
3941 * Multiple cgroups:
3942 *
3943 * trace -G A -e sched:*switch -G B
3944 *
3945 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3946 * to the 'B' cgroup.
3947 *
3948 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3949 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3950 */
3951 if (trace->cgroup)
3952 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3953
3954 err = perf_evlist__create_maps(evlist, &trace->opts.target);
3955 if (err < 0) {
3956 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3957 goto out_delete_evlist;
3958 }
3959
3960 err = trace__symbols_init(trace, evlist);
3961 if (err < 0) {
3962 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3963 goto out_delete_evlist;
3964 }
3965
3966 perf_evlist__config(evlist, &trace->opts, &callchain_param);
3967
3968 signal(SIGCHLD, sig_handler);
3969 signal(SIGINT, sig_handler);
3970
3971 if (forks) {
3972 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
3973 argv, false, NULL);
3974 if (err < 0) {
3975 fprintf(trace->output, "Couldn't run the workload!\n");
3976 goto out_delete_evlist;
3977 }
3978 }
3979
3980 err = evlist__open(evlist);
3981 if (err < 0)
3982 goto out_error_open;
3983
3984 err = bpf__apply_obj_config();
3985 if (err) {
3986 char errbuf[BUFSIZ];
3987
3988 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
3989 pr_err("ERROR: Apply config to BPF failed: %s\n",
3990 errbuf);
3991 goto out_error_open;
3992 }
3993
3994 err = trace__set_filter_pids(trace);
3995 if (err < 0)
3996 goto out_error_mem;
3997
3998 if (trace->syscalls.map)
3999 trace__init_syscalls_bpf_map(trace);
4000
4001 if (trace->syscalls.prog_array.sys_enter)
4002 trace__init_syscalls_bpf_prog_array_maps(trace);
4003
4004 if (trace->ev_qualifier_ids.nr > 0) {
4005 err = trace__set_ev_qualifier_filter(trace);
4006 if (err < 0)
4007 goto out_errno;
4008
4009 if (trace->syscalls.events.sys_exit) {
4010 pr_debug("event qualifier tracepoint filter: %s\n",
4011 trace->syscalls.events.sys_exit->filter);
4012 }
4013 }
4014
4015 /*
4016 * If the "close" syscall is not traced, then we will not have the
4017 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the
4018 * fd->pathname table, and we'd end up showing the last value set by
4019 * syscalls opening a pathname and associating it with a descriptor, or
4020 * reading it from /proc/pid/fd/ in cases where that doesn't make
4021 * sense.
4022 *
4023 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
4024 * not in use.
4025 */
4026 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
4027
4028 err = trace__expand_filters(trace, &evsel);
4029 if (err)
4030 goto out_delete_evlist;
4031 err = perf_evlist__apply_filters(evlist, &evsel);
4032 if (err < 0)
4033 goto out_error_apply_filters;
4034
4035 if (trace->dump.map)
4036 bpf_map__fprintf(trace->dump.map, trace->output);
4037
4038 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4039 if (err < 0)
4040 goto out_error_mmap;
4041
4042 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
4043 evlist__enable(evlist);
4044
4045 if (forks)
4046 perf_evlist__start_workload(evlist);
4047
4048 if (trace->opts.initial_delay) {
4049 usleep(trace->opts.initial_delay * 1000);
4050 evlist__enable(evlist);
4051 }
4052
4053 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4054 evlist->core.threads->nr > 1 ||
4055 evlist__first(evlist)->core.attr.inherit;
4056
4057 /*
4058 * Now that we have already used evsel->core.attr to ask the kernel to set up
4059 * the events, let's reuse evsel->core.attr.sample_max_stack as the limit in
4060 * trace__resolve_callchain(), allowing per-event max-stack settings
4061 * to override an explicitly set --max-stack global setting.
4062 */
4063 evlist__for_each_entry(evlist, evsel) {
4064 if (evsel__has_callchain(evsel) &&
4065 evsel->core.attr.sample_max_stack == 0)
4066 evsel->core.attr.sample_max_stack = trace->max_stack;
4067 }
4068 again:
4069 before = trace->nr_events;
4070
4071 for (i = 0; i < evlist->core.nr_mmaps; i++) {
4072 union perf_event *event;
4073 struct mmap *md;
4074
4075 md = &evlist->mmap[i];
4076 if (perf_mmap__read_init(&md->core) < 0)
4077 continue;
4078
4079 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
4080 ++trace->nr_events;
4081
4082 err = trace__deliver_event(trace, event);
4083 if (err)
4084 goto out_disable;
4085
4086 perf_mmap__consume(&md->core);
4087
4088 if (interrupted)
4089 goto out_disable;
4090
4091 if (done && !draining) {
4092 evlist__disable(evlist);
4093 draining = true;
4094 }
4095 }
4096 perf_mmap__read_done(&md->core);
4097 }
4098
4099 if (trace->nr_events == before) {
4100 int timeout = done ? 100 : -1;
4101
4102 if (!draining && evlist__poll(evlist, timeout) > 0) {
4103 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
4104 draining = true;
4105
4106 goto again;
4107 } else {
4108 if (trace__flush_events(trace))
4109 goto out_disable;
4110 }
4111 } else {
4112 goto again;
4113 }
4114
4115 out_disable:
4116 thread__zput(trace->current);
4117
4118 evlist__disable(evlist);
4119
4120 if (trace->sort_events)
4121 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4122
4123 if (!err) {
4124 if (trace->summary)
4125 trace__fprintf_thread_summary(trace, trace->output);
4126
4127 if (trace->show_tool_stats) {
4128 fprintf(trace->output, "Stats:\n "
4129 " vfs_getname : %" PRIu64 "\n"
4130 " proc_getname: %" PRIu64 "\n",
4131 trace->stats.vfs_getname,
4132 trace->stats.proc_getname);
4133 }
4134 }
4135
4136 out_delete_evlist:
4137 trace__symbols__exit(trace);
4138
4139 evlist__delete(evlist);
4140 cgroup__put(trace->cgroup);
4141 trace->evlist = NULL;
4142 trace->live = false;
4143 return err;
4144 {
4145 char errbuf[BUFSIZ];
4146
4147 out_error_sched_stat_runtime:
4148 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
4149 goto out_error;
4150
4151 out_error_raw_syscalls:
4152 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
4153 goto out_error;
4154
4155 out_error_mmap:
4156 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
4157 goto out_error;
4158
4159 out_error_open:
4160 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
4161
4162 out_error:
4163 fprintf(trace->output, "%s\n", errbuf);
4164 goto out_delete_evlist;
4165
4166 out_error_apply_filters:
4167 fprintf(trace->output,
4168 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
4169 evsel->filter, evsel__name(evsel), errno,
4170 str_error_r(errno, errbuf, sizeof(errbuf)));
4171 goto out_delete_evlist;
4172 }
4173 out_error_mem:
4174 fprintf(trace->output, "Not enough memory to run!\n");
4175 goto out_delete_evlist;
4176
4177 out_errno:
4178 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4179 goto out_delete_evlist;
4180 }
4181
4182 static int trace__replay(struct trace *trace)
4183 {
4184 const struct evsel_str_handler handlers[] = {
4185 { "probe:vfs_getname", trace__vfs_getname, },
4186 };
4187 struct perf_data data = {
4188 .path = input_name,
4189 .mode = PERF_DATA_MODE_READ,
4190 .force = trace->force,
4191 };
4192 struct perf_session *session;
4193 struct evsel *evsel;
4194 int err = -1;
4195
4196 trace->tool.sample = trace__process_sample;
4197 trace->tool.mmap = perf_event__process_mmap;
4198 trace->tool.mmap2 = perf_event__process_mmap2;
4199 trace->tool.comm = perf_event__process_comm;
4200 trace->tool.exit = perf_event__process_exit;
4201 trace->tool.fork = perf_event__process_fork;
4202 trace->tool.attr = perf_event__process_attr;
4203 trace->tool.tracing_data = perf_event__process_tracing_data;
4204 trace->tool.build_id = perf_event__process_build_id;
4205 trace->tool.namespaces = perf_event__process_namespaces;
4206
4207 trace->tool.ordered_events = true;
4208 trace->tool.ordering_requires_timestamps = true;
4209
4210 /* add tid to output */
4211 trace->multiple_threads = true;
4212
4213 session = perf_session__new(&data, false, &trace->tool);
4214 if (IS_ERR(session))
4215 return PTR_ERR(session);
4216
4217 if (trace->opts.target.pid)
4218 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4219
4220 if (trace->opts.target.tid)
4221 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4222
4223 if (symbol__init(&session->header.env) < 0)
4224 goto out;
4225
4226 trace->host = &session->machines.host;
4227
4228 err = perf_session__set_tracepoints_handlers(session, handlers);
4229 if (err)
4230 goto out;
4231
4232 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
4233 "raw_syscalls:sys_enter");
4234 /* older kernels have syscalls tp versus raw_syscalls */
4235 if (evsel == NULL)
4236 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
4237 "syscalls:sys_enter");
4238
4239 if (evsel &&
4240 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
4241 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
4242 pr_err("Error during initialize raw_syscalls:sys_enter event\n");
4243 goto out;
4244 }
4245
4246 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
4247 "raw_syscalls:sys_exit");
4248 if (evsel == NULL)
4249 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
4250 "syscalls:sys_exit");
4251 if (evsel &&
4252 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
4253 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
4254 pr_err("Error during initialize raw_syscalls:sys_exit event\n");
4255 goto out;
4256 }
4257
4258 evlist__for_each_entry(session->evlist, evsel) {
4259 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
4260 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
4261 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
4262 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
4263 evsel->handler = trace__pgfault;
4264 }
4265
4266 setup_pager();
4267
4268 err = perf_session__process_events(session);
4269 if (err)
4270 pr_err("Failed to process events, error %d\n", err);
4272 else if (trace->summary)
4273 trace__fprintf_thread_summary(trace, trace->output);
4274
4275 out:
4276 perf_session__delete(session);
4277
4278 return err;
4279 }
4280
4281 static size_t trace__fprintf_threads_header(FILE *fp)
4282 {
4283 size_t printed;
4284
4285 printed = fprintf(fp, "\n Summary of events:\n\n");
4286
4287 return printed;
4288 }
4289
4290 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
4291 struct syscall_stats *stats;
4292 double msecs;
4293 int syscall;
4294 )
4295 {
4296 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
4297 struct syscall_stats *stats = source->priv;
4298
4299 entry->syscall = source->i;
4300 entry->stats = stats;
4301 entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
4302 }
4303
4304 static size_t thread__dump_stats(struct thread_trace *ttrace,
4305 struct trace *trace, FILE *fp)
4306 {
4307 size_t printed = 0;
4308 struct syscall *sc;
4309 struct rb_node *nd;
4310 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
4311
4312 if (syscall_stats == NULL)
4313 return 0;
4314
4315 printed += fprintf(fp, "\n");
4316
4317 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
4318 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
4319 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
4320
4321 resort_rb__for_each_entry(nd, syscall_stats) {
4322 struct syscall_stats *stats = syscall_stats_entry->stats;
4323 if (stats) {
4324 double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
4325 double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
4326 double avg = avg_stats(&stats->stats);
4327 double pct;
4328 u64 n = (u64)stats->stats.n;
4329
4330 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
4331 avg /= NSEC_PER_MSEC;
4332
4333 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4334 printed += fprintf(fp, " %-15s", sc->name);
4335 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
4336 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
4337 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
4338
4339 if (trace->errno_summary && stats->nr_failures) {
4340 const char *arch_name = perf_env__arch(trace->host->env);
4341 int e;
4342
4343 for (e = 0; e < stats->max_errno; ++e) {
4344 if (stats->errnos[e] != 0)
4345 fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
4346 }
4347 }
4348 }
4349 }
4350
4351 resort_rb__delete(syscall_stats);
4352 printed += fprintf(fp, "\n\n");
4353
4354 return printed;
4355 }
4356
4357 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4358 {
4359 size_t printed = 0;
4360 struct thread_trace *ttrace = thread__priv(thread);
4361 double ratio;
4362
4363 if (ttrace == NULL)
4364 return 0;
4365
4366 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4367
4368 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
4369 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
4370 printed += fprintf(fp, "%.1f%%", ratio);
4371 if (ttrace->pfmaj)
4372 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
4373 if (ttrace->pfmin)
4374 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
4375 if (trace->sched)
4376 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
4377 else if (fputc('\n', fp) != EOF)
4378 ++printed;
4379
4380 printed += thread__dump_stats(ttrace, trace, fp);
4381
4382 return printed;
4383 }
4384
4385 static unsigned long thread__nr_events(struct thread_trace *ttrace)
4386 {
4387 return ttrace ? ttrace->nr_events : 0;
4388 }
4389
4390 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
4391 struct thread *thread;
4392 )
4393 {
4394 entry->thread = rb_entry(nd, struct thread, rb_node);
4395 }
4396
4397 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4398 {
4399 size_t printed = trace__fprintf_threads_header(fp);
4400 struct rb_node *nd;
4401 int i;
4402
4403 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
4404 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4405
4406 if (threads == NULL) {
4407 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
4408 return 0;
4409 }
4410
4411 resort_rb__for_each_entry(nd, threads)
4412 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4413
4414 resort_rb__delete(threads);
4415 }
4416 return printed;
4417 }
4418
4419 static int trace__set_duration(const struct option *opt, const char *str,
4420 int unset __maybe_unused)
4421 {
4422 struct trace *trace = opt->value;
4423
4424 trace->duration_filter = atof(str);
4425 return 0;
4426 }
4427
4428 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
4429 int unset __maybe_unused)
4430 {
4431 int ret = -1;
4432 size_t i;
4433 struct trace *trace = opt->value;
4434 /*
4435 * FIXME: introduce an intarray class, just parse the CSV and create a
4436 * { int nr, int entries[] } struct...
4437 */
4438 struct intlist *list = intlist__new(str);
4439
4440 if (list == NULL)
4441 return -1;
4442
4443 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4444 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4445
4446 if (trace->filter_pids.entries == NULL)
4447 goto out;
4448
4449 trace->filter_pids.entries[0] = getpid();
4450
4451 for (i = 1; i < trace->filter_pids.nr; ++i)
4452 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4453
4454 intlist__delete(list);
4455 ret = 0;
4456 out:
4457 return ret;
4458 }
4459
4460 static int trace__open_output(struct trace *trace, const char *filename)
4461 {
4462 struct stat st;
4463
4464 if (!stat(filename, &st) && st.st_size) {
4465 char oldname[PATH_MAX];
4466
4467 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
4468 unlink(oldname);
4469 rename(filename, oldname);
4470 }
4471
4472 trace->output = fopen(filename, "w");
4473
4474 return trace->output == NULL ? -errno : 0;
4475 }
4476
4477 static int parse_pagefaults(const struct option *opt, const char *str,
4478 int unset __maybe_unused)
4479 {
4480 int *trace_pgfaults = opt->value;
4481
4482 if (strcmp(str, "all") == 0)
4483 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
4484 else if (strcmp(str, "maj") == 0)
4485 *trace_pgfaults |= TRACE_PFMAJ;
4486 else if (strcmp(str, "min") == 0)
4487 *trace_pgfaults |= TRACE_PFMIN;
4488 else
4489 return -1;
4490
4491 return 0;
4492 }
4493
4494 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
4495 {
4496 struct evsel *evsel;
4497
4498 evlist__for_each_entry(evlist, evsel) {
4499 if (evsel->handler == NULL)
4500 evsel->handler = handler;
4501 }
4502 }
4503
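/*
 * Copy the per-arg formatters from the syscall_fmt table for this syscall
 * name into the evsel's arg fmt array, skipping the leading syscall number
 * field when present.
 */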
4504 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
4505 {
4506 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
4507
4508 if (fmt) {
4509 struct syscall_fmt *scfmt = syscall_fmt__find(name);
4510
4511 if (scfmt) {
4512 int skip = 0;
4513
4514 if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
4515 strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
4516 ++skip;
4517
4518 memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
4519 }
4520 }
4521 }
4522
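/*
 * For each tracepoint evsel without private data: non-syscalls tracepoints
 * get the default arg beautifiers, while syscalls:sys_{enter,exit}_NAME
 * events get their args (or ret) fields and per-syscall arg formatters set up.
 */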
4523 static int evlist__set_syscall_tp_fields(struct evlist *evlist)
4524 {
4525 struct evsel *evsel;
4526
4527 evlist__for_each_entry(evlist, evsel) {
4528 if (evsel->priv || !evsel->tp_format)
4529 continue;
4530
4531 if (strcmp(evsel->tp_format->system, "syscalls")) {
4532 evsel__init_tp_arg_scnprintf(evsel);
4533 continue;
4534 }
4535
4536 if (evsel__init_syscall_tp(evsel))
4537 return -1;
4538
4539 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
4540 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4541
4542 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
4543 return -1;
4544
4545 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
4546 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
4547 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4548
4549 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
4550 return -1;
4551
4552 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
4553 }
4554 }
4555
4556 return 0;
4557 }
4558
4559 /*
4560 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4561 * (raw_syscalls:{sys_{enter,exit}}) + events (tracepoints, HW, SW, etc)) to use
4562 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4563 *
4564 * It'd be better to introduce a parse_options() variant that would return a
4565 * list with the terms it didn't match to an event...
4566 */
4567 static int trace__parse_events_option(const struct option *opt, const char *str,
4568 int unset __maybe_unused)
4569 {
4570 struct trace *trace = (struct trace *)opt->value;
4571 const char *s = str;
4572 char *sep = NULL, *lists[2] = { NULL, NULL, };
4573 int len = strlen(str) + 1, err = -1, list, idx;
4574 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
4575 char group_name[PATH_MAX];
4576 struct syscall_fmt *fmt;
4577
4578 if (strace_groups_dir == NULL)
4579 return -1;
4580
4581 if (*s == '!') {
4582 ++s;
4583 trace->not_ev_qualifier = true;
4584 }
4585
4586 while (1) {
4587 if ((sep = strchr(s, ',')) != NULL)
4588 *sep = '\0';
4589
4590 list = 0;
4591 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4592 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4593 list = 1;
4594 goto do_concat;
4595 }
4596
4597 fmt = syscall_fmt__find_by_alias(s);
4598 if (fmt != NULL) {
4599 list = 1;
4600 s = fmt->name;
4601 } else {
4602 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
4603 if (access(group_name, R_OK) == 0)
4604 list = 1;
4605 }
4606 do_concat:
4607 if (lists[list]) {
4608 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
4609 } else {
4610 lists[list] = malloc(len);
4611 if (lists[list] == NULL)
4612 goto out;
4613 strcpy(lists[list], s);
4614 }
4615
4616 if (!sep)
4617 break;
4618
4619 *sep = ',';
4620 s = sep + 1;
4621 }
4622
4623 if (lists[1] != NULL) {
4624 struct strlist_config slist_config = {
4625 .dirname = strace_groups_dir,
4626 };
4627
4628 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4629 if (trace->ev_qualifier == NULL) {
4630 fputs("Not enough memory to parse event qualifier", trace->output);
4631 goto out;
4632 }
4633
4634 if (trace__validate_ev_qualifier(trace))
4635 goto out;
4636 trace->trace_syscalls = true;
4637 }
4638
4639 err = 0;
4640
4641 if (lists[0]) {
4642 struct option o = {
4643 .value = &trace->evlist,
4644 };
4645 err = parse_events_option(&o, lists[0], 0);
4646 }
4647 out:
4648 if (sep)
4649 *sep = ',';
4650
4651 return err;
4652 }
4653
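/*
 * -G/--cgroup: if events were already specified the cgroup applies only to
 * them, otherwise it becomes the default cgroup for all events.
 */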
4654 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
4655 {
4656 struct trace *trace = opt->value;
4657
4658 if (!list_empty(&trace->evlist->core.entries)) {
4659 struct option o = {
4660 .value = &trace->evlist,
4661 };
4662 return parse_cgroups(&o, str, unset);
4663 }
4664 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4665
4666 return 0;
4667 }
4668
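/* Handle the 'trace.*' variables from .perfconfig. */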
4669 static int trace__config(const char *var, const char *value, void *arg)
4670 {
4671 struct trace *trace = arg;
4672 int err = 0;
4673
4674 if (!strcmp(var, "trace.add_events")) {
4675 trace->perfconfig_events = strdup(value);
4676 if (trace->perfconfig_events == NULL) {
4677 pr_err("Not enough memory for %s\n", "trace.add_events");
4678 return -1;
4679 }
4680 } else if (!strcmp(var, "trace.show_timestamp")) {
4681 trace->show_tstamp = perf_config_bool(var, value);
4682 } else if (!strcmp(var, "trace.show_duration")) {
4683 trace->show_duration = perf_config_bool(var, value);
4684 } else if (!strcmp(var, "trace.show_arg_names")) {
4685 trace->show_arg_names = perf_config_bool(var, value);
4686 if (!trace->show_arg_names)
4687 trace->show_zeros = true;
4688 } else if (!strcmp(var, "trace.show_zeros")) {
4689 bool new_show_zeros = perf_config_bool(var, value);
4690 if (!trace->show_arg_names && !new_show_zeros) {
4691 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4692 goto out;
4693 }
4694 trace->show_zeros = new_show_zeros;
4695 } else if (!strcmp(var, "trace.show_prefix")) {
4696 trace->show_string_prefix = perf_config_bool(var, value);
4697 } else if (!strcmp(var, "trace.no_inherit")) {
4698 trace->opts.no_inherit = perf_config_bool(var, value);
4699 } else if (!strcmp(var, "trace.args_alignment")) {
4700 int args_alignment = 0;
4701 if (perf_config_int(&args_alignment, var, value) == 0)
4702 trace->args_alignment = args_alignment;
4703 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4704 if (strcasecmp(value, "libtraceevent") == 0)
4705 trace->libtraceevent_print = true;
4706 else if (strcasecmp(value, "libbeauty") == 0)
4707 trace->libtraceevent_print = false;
4708 }
4709 out:
4710 return err;
4711 }
4712
4713 int cmd_trace(int argc, const char **argv)
4714 {
4715 const char *trace_usage[] = {
4716 "perf trace [<options>] [<command>]",
4717 "perf trace [<options>] -- <command> [<options>]",
4718 "perf trace record [<options>] [<command>]",
4719 "perf trace record [<options>] -- <command> [<options>]",
4720 NULL
4721 };
4722 struct trace trace = {
4723 .opts = {
4724 .target = {
4725 .uid = UINT_MAX,
4726 .uses_mmap = true,
4727 },
4728 .user_freq = UINT_MAX,
4729 .user_interval = ULLONG_MAX,
4730 .no_buffering = true,
4731 .mmap_pages = UINT_MAX,
4732 },
4733 .output = stderr,
4734 .show_comm = true,
4735 .show_tstamp = true,
4736 .show_duration = true,
4737 .show_arg_names = true,
4738 .args_alignment = 70,
4739 .trace_syscalls = false,
4740 .kernel_syscallchains = false,
4741 .max_stack = UINT_MAX,
4742 .max_events = ULONG_MAX,
4743 };
4744 const char *map_dump_str = NULL;
4745 const char *output_name = NULL;
4746 const struct option trace_options[] = {
4747 OPT_CALLBACK('e', "event", &trace, "event",
4748 "event/syscall selector. use 'perf list' to list available events",
4749 trace__parse_events_option),
4750 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4751 "event filter", parse_filter),
4752 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4753 "show the thread COMM next to its id"),
4754 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4755 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4756 trace__parse_events_option),
4757 OPT_STRING('o', "output", &output_name, "file", "output file name"),
4758 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4759 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4760 "trace events on existing process id"),
4761 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4762 "trace events on existing thread id"),
4763 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4764 "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4765 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4766 "system-wide collection from all CPUs"),
4767 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4768 "list of cpus to monitor"),
4769 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4770 "child tasks do not inherit counters"),
4771 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4772 "number of mmap data pages",
4773 perf_evlist__parse_mmap_pages),
4774 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4775 "user to profile"),
4776 OPT_CALLBACK(0, "duration", &trace, "float",
4777 "show only events with duration > N.M ms",
4778 trace__set_duration),
4779 #ifdef HAVE_LIBBPF_SUPPORT
4780 OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4781 #endif
4782 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4783 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4784 OPT_BOOLEAN('T', "time", &trace.full_time,
4785 "Show full timestamp, not time relative to first start"),
4786 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4787 "Show only syscalls that failed"),
4788 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4789 "Show only syscall summary with statistics"),
4790 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4791 "Show all syscalls and summary with statistics"),
4792 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4793 "Show errno stats per syscall, use with -s or -S"),
4794 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4795 "Trace pagefaults", parse_pagefaults, "maj"),
4796 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4797 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4798 OPT_CALLBACK(0, "call-graph", &trace.opts,
4799 "record_mode[,record_size]", record_callchain_help,
4800 &record_parse_callchain_opt),
4801 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4802 "Use libtraceevent to print the tracepoint arguments."),
4803 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4804 "Show the kernel callchains on the syscall exit path"),
4805 OPT_ULONG(0, "max-events", &trace.max_events,
4806 "Set the maximum number of events to print, exit after that is reached. "),
4807 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4808 "Set the minimum stack depth when parsing the callchain, "
4809 "anything below the specified depth will be ignored."),
4810 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4811 "Set the maximum stack depth when parsing the callchain, "
4812 "anything beyond the specified depth will be ignored. "
4813 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4814 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4815 "Sort batch of events before processing, use if getting out of order events"),
4816 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4817 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
4818 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4819 "per thread proc mmap processing timeout in ms"),
4820 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4821 trace__parse_cgroups),
4822 OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
4823 "ms to wait before starting measurement after program "
4824 "start"),
4825 OPTS_EVSWITCH(&trace.evswitch),
4826 OPT_END()
4827 };
4828 bool __maybe_unused max_stack_user_set = true;
4829 bool mmap_pages_user_set = true;
4830 struct evsel *evsel;
4831 const char * const trace_subcommands[] = { "record", NULL };
4832 int err = -1;
4833 char bf[BUFSIZ];
4834
4835 signal(SIGSEGV, sighandler_dump_stack);
4836 signal(SIGFPE, sighandler_dump_stack);
4837
4838 trace.evlist = evlist__new();
4839 trace.sctbl = syscalltbl__new();
4840
4841 if (trace.evlist == NULL || trace.sctbl == NULL) {
4842 pr_err("Not enough memory to run!\n");
4843 err = -ENOMEM;
4844 goto out;
4845 }
4846
4847 /*
4848 * Parsing .perfconfig may entail creating a BPF event, that may need
4849 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
4850 * is too small. This affects just this process, not touching the
4851 * global setting. If it fails we'll get something in 'perf trace -v'
4852 * to help diagnose the problem.
4853 */
4854 rlimit__bump_memlock();
4855
4856 err = perf_config(trace__config, &trace);
4857 if (err)
4858 goto out;
4859
4860 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4861 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4862
4863 /*
4864 * Here we have already passed thru trace__parse_events_option() and it has
4865 * already figured out if -e syscall_name was used; if not, but --event
4866 * foo:bar was used, then the user is interested _just_ in those, say,
4867 * tracepoint events, not in the strace-like syscall-name-based mode.
4868 *
4869 * This is important because we need to check if strace-like mode is
4870 * needed to decide whether we should filter out the eBPF
4871 * __augmented_syscalls__ code, if it is in the mix, say, via
4872 * .perfconfig trace.add_events.
4873 */
4874 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4875 trace.evlist->core.nr_entries == 0 /* Was --event used? */) {
4876 trace.trace_syscalls = true;
4877 }
4878 /*
4879 * Now that we have --verbose figured out, let's see if we need to parse
4880 * events from .perfconfig, so that if those events fail parsing, say some
4881 * BPF program fails, then we'll be able to use --verbose to see what went
4882 * wrong in more detail.
4883 */
4884 if (trace.perfconfig_events != NULL) {
4885 struct parse_events_error parse_err;
4886
4887 bzero(&parse_err, sizeof(parse_err));
4888 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4889 if (err) {
4890 parse_events_print_error(&parse_err, trace.perfconfig_events);
4891 goto out;
4892 }
4893 }
4894
4895 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4896 usage_with_options_msg(trace_usage, trace_options,
4897 "cgroup monitoring only available in system-wide mode");
4898 }
4899
4900 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4901 if (IS_ERR(evsel)) {
4902 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4903 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4904 goto out;
4905 }
4906
4907 if (evsel) {
4908 trace.syscalls.events.augmented = evsel;
4909
4910 evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4911 if (evsel == NULL) {
4912 pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
4913 goto out;
4914 }
4915
4916 if (evsel->bpf_obj == NULL) {
4917 pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
4918 goto out;
4919 }
4920
4921 trace.bpf_obj = evsel->bpf_obj;
4922
4923 /*
4924 * If we have _just_ the augmenter event but don't have an explicit
4925 * explicit --syscalls, then assume we want all strace-like
4926 * syscalls:
4927 */
4928 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
4929 trace.trace_syscalls = true;
4930 /*
4931 * So, if we have a syscall augmenter, but trace_syscalls, aka
4932 * strace-like syscall tracing, is not set, then we need to throw
4933 * away the augmenter, i.e. all the events that were created
4934 * from that BPF object file.
4935 *
4936 * This is more to fix the current .perfconfig trace.add_events
4937 * style of setting up the strace-like eBPF based syscall point
4938 * payload augmenter.
4939 *
4940 * All this complexity will be avoided by adding an alternative
4941 * to trace.add_events in the form of
4942 * trace.bpf_augmented_syscalls, that will be only parsed if we
4943 * need it.
4944 *
4945 * .perfconfig trace.add_events is still useful if we want, for
4946 * instance, have msr_write.msr in some .perfconfig profile based
4947 * 'perf trace --config determinism.profile' mode, where for some
4948 * particular goal/workload type we want a set of events and
4949 * output mode (with timings, etc) instead of having to add
4950 * all via the command line.
4951 *
4952 * Also --config to specify an alternate .perfconfig file needs
4953 * to be implemented.
4954 */
4955 if (!trace.trace_syscalls) {
4956 trace__delete_augmented_syscalls(&trace);
4957 } else {
4958 trace__set_bpf_map_filtered_pids(&trace);
4959 trace__set_bpf_map_syscalls(&trace);
4960 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4961 }
4962 }
4963
4964 err = bpf__setup_stdout(trace.evlist);
4965 if (err) {
4966 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4967 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
4968 goto out;
4969 }
4970
4971 err = -1;
4972
4973 if (map_dump_str) {
4974 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4975 if (trace.dump.map == NULL) {
4976 pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
4977 goto out;
4978 }
4979 }
4980
4981 if (trace.trace_pgfaults) {
4982 trace.opts.sample_address = true;
4983 trace.opts.sample_time = true;
4984 }
4985
4986 if (trace.opts.mmap_pages == UINT_MAX)
4987 mmap_pages_user_set = false;
4988
4989 if (trace.max_stack == UINT_MAX) {
4990 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4991 max_stack_user_set = false;
4992 }
4993
4994 #ifdef HAVE_DWARF_UNWIND_SUPPORT
4995 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4996 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
4997 }
4998 #endif
4999
5000 if (callchain_param.enabled) {
5001 if (!mmap_pages_user_set && geteuid() == 0)
5002 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5003
5004 symbol_conf.use_callchain = true;
5005 }
5006
5007 if (trace.evlist->core.nr_entries > 0) {
5008 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5009 if (evlist__set_syscall_tp_fields(trace.evlist)) {
5010 perror("failed to set syscalls:* tracepoint fields");
5011 goto out;
5012 }
5013 }
5014
5015 if (trace.sort_events) {
5016 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5017 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5018 }
5019
5020 /*
5021 * If we are augmenting syscalls, then combine what we put in the
5022 * __augmented_syscalls__ BPF map with what is in the
5023 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
5024 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
5025 *
5026 * We'll switch to look at two BPF maps, one for sys_enter and the
5027 * other for sys_exit when we start augmenting the sys_exit paths with
5028 * buffers that are being copied from kernel to userspace, think 'read'
5029 * syscall.
5030 */
5031 if (trace.syscalls.events.augmented) {
5032 evlist__for_each_entry(trace.evlist, evsel) {
5033 bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
5034
5035 if (raw_syscalls_sys_exit) {
5036 trace.raw_augmented_syscalls = true;
5037 goto init_augmented_syscall_tp;
5038 }
5039
5040 if (trace.syscalls.events.augmented->priv == NULL &&
5041 strstr(evsel__name(evsel), "syscalls:sys_enter")) {
5042 struct evsel *augmented = trace.syscalls.events.augmented;
5043 if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
5044 evsel__init_augmented_syscall_tp_args(augmented))
5045 goto out;
5046 /*
5047 * 'augmented' is the __augmented_syscalls__ BPF_OUTPUT event.
5048 * Above we made sure we can get from its payload the tp fields
5049 * that we get from the syscalls:sys_enter tracefs format file.
5050 */
5051 augmented->handler = trace__sys_enter;
5052 /*
5053 * Now we do the same for the *syscalls:sys_enter event so that
5054 * if we handle it directly, i.e. if the BPF prog returns 0 so
5055 * as not to filter it, then we'll handle it just like we would
5056 * for the BPF_OUTPUT one:
5057 */
5058 if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
5059 evsel__init_augmented_syscall_tp_args(evsel))
5060 goto out;
5061 evsel->handler = trace__sys_enter;
5062 }
5063
5064 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
5065 struct syscall_tp *sc;
5066 init_augmented_syscall_tp:
5067 if (evsel__init_augmented_syscall_tp(evsel, evsel))
5068 goto out;
5069 sc = __evsel__syscall_tp(evsel);
5070 /*
5071 * For now with BPF raw_augmented we hook into
5072 * raw_syscalls:sys_enter and there we get all
5073 * 6 syscall args plus the tracepoint common
5074 * fields and the syscall_nr (another long).
5075 * So we check if that is the case and if so
5076 * don't use sc->args_size as the size but
5077 * always the full raw_syscalls:sys_enter
5078 * payload size, which is fixed.
5079 *
5080 * We'll revisit this later to pass
5081 * sc->args_size to the BPF augmenter (now
5082 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
5083 * so that it copies only what we need for each
5084 * syscall, like what happens when we use
5085 * syscalls:sys_enter_NAME, so that we reduce
5086 * the kernel/userspace traffic to just what is
5087 * needed for each syscall.
5088 */
5089 if (trace.raw_augmented_syscalls)
5090 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5091 evsel__init_augmented_syscall_tp_ret(evsel);
5092 evsel->handler = trace__sys_exit;
5093 }
5094 }
5095 }
5096
5097 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
5098 return trace__record(&trace, argc-1, &argv[1]);
5099
5100 /* Using just --errno-summary will trigger --summary */
5101 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5102 trace.summary_only = true;
5103
5104 /* summary_only implies summary option, but don't overwrite summary if set */
5105 if (trace.summary_only)
5106 trace.summary = trace.summary_only;
5107
5108 if (output_name != NULL) {
5109 err = trace__open_output(&trace, output_name);
5110 if (err < 0) {
5111 perror("failed to create output file");
5112 goto out;
5113 }
5114 }
5115
5116 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5117 if (err)
5118 goto out_close;
5119
5120 err = target__validate(&trace.opts.target);
5121 if (err) {
5122 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5123 fprintf(trace.output, "%s", bf);
5124 goto out_close;
5125 }
5126
5127 err = target__parse_uid(&trace.opts.target);
5128 if (err) {
5129 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5130 fprintf(trace.output, "%s", bf);
5131 goto out_close;
5132 }
5133
5134 if (!argc && target__none(&trace.opts.target))
5135 trace.opts.target.system_wide = true;
5136
5137 if (input_name)
5138 err = trace__replay(&trace);
5139 else
5140 err = trace__run(&trace, argc, argv);
5141
5142 out_close:
5143 if (output_name != NULL)
5144 fclose(trace.output);
5145 out:
5146 zfree(&trace.perfconfig_events);
5147 return err;
5148 }
5149