// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
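
/*
 * Typical invocations, shown only as examples and based on the options
 * defined in cmd_inject() below:
 *
 *	perf record -o - -- <workload> | perf inject -b -o perf.data
 *	perf inject --buildid-all -i perf.data -o perf.data.new
 */
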
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <errno.h>
#include <signal.h>
struct perf_inject {
	struct perf_tool tool;
	struct perf_session *session;
	bool build_ids;
	bool build_id_all;
	bool sched_stat;
	bool have_auxtrace;
	bool strip;
	bool jit_mode;
	bool in_place_update;
	bool in_place_update_dry_run;
	bool is_pipe;
	const char *input_name;
	struct perf_data output;
	u64 bytes_written;
	u64 aux_id;
	struct list_head samples;
	struct itrace_synth_opts itrace_synth_opts;
	char event_copy[PERF_SAMPLE_MAX_SIZE];
};

struct event_entry {
	struct list_head node;
	u32 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags);

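/*
 * Write @sz bytes to the output file or pipe and account for them in
 * bytes_written so the data size in the header can be fixed up later.
 */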
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

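/*
 * Repipe an event verbatim: most event types are simply copied from the
 * input stream to the output stream.
 */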
static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

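/*
 * Copy @size bytes from @fd to the output in small chunks; used to
 * forward AUX trace data that follows an auxtrace event in the stream.
 */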
static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

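/*
 * Forward an auxtrace event together with its trace data.  When writing
 * to a file, also record an auxtrace index entry at the current output
 * offset so the data can be located again later.
 */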
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data__fd(session->data),
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

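/*
 * Remove the aux_sample payload from a sample before repiping it, once
 * instruction trace synthesis has consumed it.  The trimmed copy is
 * assembled in inject->event_copy, with the size word that precedes the
 * removed data zeroed.
 */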
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);

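/*
 * Default sample handler: hand the sample to a per-evsel handler if one
 * is installed (sched-stat mode), otherwise mark the DSO the sample
 * hits (so its build-id is kept when the header is written) and copy
 * the sample through, stripping any aux_sample data when itrace
 * synthesis is active.
 */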
static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

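/*
 * Find or create the DSO referenced by a map event.  vdso maps always
 * live on the host, so the copied nsinfo has need_setns cleared to stop
 * lookups from entering the task's mount namespace.
 */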
static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container.  Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nnsi->need_setns = false;
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

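/*
 * For MMAP2 events, inject a build-id event for the mapped DSO.  If the
 * event already carries a build-id (PERF_RECORD_MISC_MMAP_BUILD_ID),
 * the DSO is only marked as hit so no build-id is injected for it.
 */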
static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	int err;

	perf_event__repipe_synth(session->tool, event);
	err = perf_event__process_tracing_data(session, event);

	return err;
}

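/*
 * Read the build-id from the DSO's file, entering the task's mount
 * namespace first so that container paths resolve correctly.
 */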
static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}

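/*
 * Synthesize a build-id event for @dso into the output stream.
 * Anonymous, no-DSO and hugetlbfs mappings are skipped since they have
 * no build-id to read.
 */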
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

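/*
 * Sample handler for -b/--build-ids: resolve the sample IP to a map and
 * inject a build-id event for its DSO the first time it is hit, then
 * repipe the sample itself.
 */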
int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

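/*
 * sched_switch handler for -s/--sched-stat: keep a copy of the sample,
 * keyed by tid, so that a later sched_stat sample for the same task can
 * be re-emitted with this switch context.
 */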
static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

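/*
 * sched_stat_* handler for -s/--sched-stat: find the saved sched_switch
 * event of the task the stat sample refers to and repipe it with the
 * stat sample's period and timestamp.
 */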
static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

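/*
 * --strip mode: discard context switch events and drop the samples of
 * all original evsels, leaving only the itrace-synthesized events in
 * the output.
 */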
static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

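/*
 * Option parser for --vm-time-correlation: enables in-place updating
 * and passes the optional argument string, minus a leading "dry-run"
 * keyword, on to the itrace options.
 */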
static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}

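/*
 * Wire up the tool callbacks according to the requested mode, process
 * all events of the session and, when writing to a file, rewrite the
 * header with the new data offset and size.
 */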
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	struct perf_data *data_out = &inject->output;
	int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = 4096;
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!inject->is_pipe && !inject->in_place_update) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

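/*
 * Entry point for 'perf inject': set up the default repipe tool
 * callbacks, parse the options, open the input and output, and hand
 * off to __cmd_inject().
 */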
int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample = perf_event__repipe_sample,
			.read = perf_event__repipe_sample,
			.mmap = perf_event__repipe,
			.mmap2 = perf_event__repipe,
			.comm = perf_event__repipe,
			.namespaces = perf_event__repipe,
			.cgroup = perf_event__repipe,
			.fork = perf_event__repipe,
			.exit = perf_event__repipe,
			.lost = perf_event__repipe,
			.lost_samples = perf_event__repipe,
			.aux = perf_event__repipe,
			.itrace_start = perf_event__repipe,
			.context_switch = perf_event__repipe,
			.throttle = perf_event__repipe,
			.unthrottle = perf_event__repipe,
			.ksymbol = perf_event__repipe,
			.bpf = perf_event__repipe,
			.text_poke = perf_event__repipe,
			.attr = perf_event__repipe_attr,
			.event_update = perf_event__repipe_event_update,
			.tracing_data = perf_event__repipe_op2_synth,
			.finished_round = perf_event__repipe_oe_synth,
			.build_id = perf_event__repipe_op2_synth,
			.id_index = perf_event__repipe_op2_synth,
			.auxtrace_info = perf_event__repipe_op2_synth,
			.auxtrace_error = perf_event__repipe_op2_synth,
			.time_conv = perf_event__repipe_op2_synth,
			.thread_map = perf_event__repipe_op2_synth,
			.cpu_map = perf_event__repipe_op2_synth,
			.stat_config = perf_event__repipe_op2_synth,
			.stat = perf_event__repipe_op2_synth,
			.stat_round = perf_event__repipe_op2_synth,
			.feature = perf_event__repipe_op2_synth,
			.compressed = perf_event__repipe_op4_synth,
			.auxtrace = perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	bool repipe = true;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else if (perf_data__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	data.path = inject.input_name;
	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
		inject.is_pipe = true;
		/*
		 * Do not repipe header when input is a regular file
		 * since either it can rewrite the header at the end
		 * or write a new pipe header.
		 */
		if (strcmp(inject.input_name, "-"))
			repipe = false;
	}

	inject.session = __perf_session__new(&data, repipe,
					     perf_data__fd(&inject.output),
					     &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	if (!data.is_pipe && inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		ret = perf_event__synthesize_for_pipe(&inject.tool,
						      inject.session,
						      &inject.output,
						      perf_event__repipe);
		if (ret < 0)
			goto out_delete;
	}

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Use ordered events to make sure the mmap records are
		 * processed in the correct order, especially with regard to
		 * jitted code mmaps.  We cannot generate the buildid hit list
		 * and inject the jit mmaps at the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}

	if (inject.sched_stat) {
		inject.tool.ordered_events = true;
	}

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}