// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
#include "cgroup.h"

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static struct dso *machine__kernel_dso(struct machine *machine)
{
	return machine->vmlinux_map->dso;
}

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

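/*
 * Threads are kept in THREADS__TABLE_SIZE rb-tree buckets hashed by tid,
 * each bucket with its own rwsem, dead list and a last_match lookup cache.
 */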
static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

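/*
 * Initialize a machine: set up its kernel maps, dsos and thread tables,
 * and, for guests (pid != HOST_KERNEL_ID), create a "[guest/PID]"
 * placeholder thread. Returns 0 on success, a negative errno otherwise.
 */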
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	maps__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

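/*
 * Like machine__new_host(), but also load kernel symbols from
 * /proc/kallsyms so the returned machine has a usable symbol table.
 */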
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

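/*
 * Drop every dso from the list and rb-tree, releasing this list's
 * reference; remaining users keep each dso alive through their own refs.
 */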
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	maps__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead: at this point, whatever threads were
		 * left in the dead lists better have a reference count taken
		 * by whoever is using them. When those references are dropped
		 * and the count finally hits zero, thread__put() will see that
		 * the thread is not in the dead threads list, will not try to
		 * remove it from there, and will just call thread__delete()
		 * straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

struct machine *machines__find_guest(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__find(machines, pid);

	if (!machine)
		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

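/*
 * A thread's pid may only become known from a later event; when it does,
 * record it and share the thread-group leader's maps, creating the leader
 * thread on demand.
 */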
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->maps)
		leader->maps = maps__new(machine);

	if (!leader->maps)
		goto out_err;

	if (th->maps == leader->maps)
		return;

	if (th->maps) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!maps__empty(th->maps))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		maps__put(th->maps);
	}

	th->maps = maps__get(leader->maps);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize maps separately after the rb tree is
		 * updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_maps to find the thread leader, and that would
		 * screw up the rb tree.
		 */
		if (thread__init_maps(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
struct thread *machine__idle_thread(struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, 0, 0);

	if (!thread || thread__set_comm(thread, "swapper", 0) ||
	    thread__set_namespaces(thread, 0, NULL))
		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);

	return thread;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_cgroup_event(struct machine *machine,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused)
{
	struct cgroup *cgrp;

	if (dump_trace)
		perf_event__fprintf_cgroup(event, stdout);

	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
	if (cgrp == NULL)
		return -ENOMEM;

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

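/*
 * Find or create the dso for a kernel module, under the dsos lock. The
 * returned dso carries an extra reference that the caller must drop with
 * dso__put().
 */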
static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
		dso->kernel = DSO_SPACE__KERNEL;
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

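/*
 * PERF_RECORD_KSYMBOL registration: if no map covers the symbol's address
 * yet, create a dso and map for it (BPF programs and other dynamically
 * allocated kernel text), then insert the symbol into the map's dso.
 */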
static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);

	if (!map) {
		struct dso *dso = dso__new(event->ksymbol.name);

		if (dso) {
			dso->kernel = DSO_SPACE__KERNEL;
			map = map__new2(0, dso);
			dso__put(dso);
		}

		if (!dso || !map)
			return -ENOMEM;

		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
			map->dso->binary_type = DSO_BINARY_TYPE__OOL;
			map->dso->data.file_size = event->ksymbol.len;
			dso__set_loaded(map->dso);
		}

		map->start = event->ksymbol.addr;
		map->end = map->start + event->ksymbol.len;
		maps__insert(&machine->kmaps, map);
		map__put(map);
		dso__set_loaded(dso);

		if (is_bpf_image(event->ksymbol.name)) {
			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
			dso__set_long_name(dso, "", false);
		}
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = maps__find(&machine->kmaps, event->ksymbol.addr);
	if (!map)
		return 0;

	if (map != machine->vmlinux_map)
		maps__remove(&machine->kmaps, map);
	else {
		sym = dso__find_symbol(map->dso, map->map_ip(map, map->start));
		if (sym)
			dso__delete_symbol(map->dso, sym);
	}

	return 0;
}

int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

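/*
 * PERF_RECORD_TEXT_POKE: write the patched instruction bytes into the
 * dso's data cache so that later decoding/disassembly sees the kernel
 * text as it was after the poke.
 */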
int machine__process_text_poke(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (dump_trace)
		perf_event__fprintf_text_poke(event, machine, stdout);

	if (!event->text_poke.new_len)
		return 0;

	if (cpumode != PERF_RECORD_MISC_KERNEL) {
		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
		return 0;
	}

	if (map && map->dso) {
		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
		int ret;

		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		map__load(map);
		ret = dso__data_write_cache_addr(map->dso, map, machine,
						 event->text_poke.addr,
						 new_bytes,
						 event->text_poke.new_len);
		if (ret != event->text_poke.new_len)
			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
				 event->text_poke.addr);
	} else {
		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
			 event->text_poke.addr);
	}

	return 0;
}

static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
					      const char *filename)
{
	struct map *map = NULL;
	struct kmod_path m;
	struct dso *dso;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	maps__insert(&machine->kmaps, map);

	/* Put the map here because maps__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_dso(machine);

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret = fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_SPACE__KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_SPACE__KERNEL_GUEST);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end   = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	maps__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct maps *kmaps = &machine->kmaps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	maps__for_each_entry(kmaps, map) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = maps__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	/* In case of renewing the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	maps__insert(&machine->kmaps, machine->vmlinux_map);
	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	maps__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fix up the end of all
		 * sections.
		 */
		maps__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
	char *long_name;
	struct map *map = maps__find_by_name(maps, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}

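/*
 * Recursively walk a /lib/modules/<version> tree, matching module files
 * to already-known module maps so their dsos get full paths (and, where
 * applicable, compression info).
 */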
static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = maps__set_modules_path_dir(maps, path, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = maps__set_module_path(maps, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__addnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end   = end;
	/*
	 * Be a bit paranoid here: some perf.data files come with
	 * a zero-sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

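/*
 * A map's address range determines its place in the maps rb-tree, so the
 * kernel map must be removed and re-inserted whenever its range changes.
 */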
static void machine__update_kernel_mmap(struct machine *machine,
					u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	maps__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	maps__insert(&machine->kmaps, map);
	map__put(map);
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * We have a real start address now, so re-order the kmaps;
		 * assume the kernel map is the last one in the kmaps.
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(xm->name);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	struct dso *kernel = machine__kernel_dso(machine);

	if (kernel == NULL)
		return -1;

	return machine__create_extra_kernel_map(machine, kernel, xm);
}

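/*
 * Handle a kernel-space mmap: either a module (or other bracketed map),
 * the main kernel text mapping, or an extra map such as an x86_64 entry
 * trampoline.
 */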
static int machine__process_kernel_mmap_event(struct machine *machine,
					      struct extra_kernel_map *xm,
					      struct build_id *bid)
{
	struct map *map;
	enum dso_space_type dso_space;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		dso_space = DSO_SPACE__KERNEL;
	else
		dso_space = DSO_SPACE__KERNEL_GUEST;

	is_kernel_mmap = memcmp(xm->name, machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (xm->name[0] == '/' ||
	    (!is_kernel_mmap && xm->name[0] == '[')) {
		map = machine__addnew_module_map(machine, xm->start,
						 xm->name);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + xm->end - xm->start;

		if (build_id__is_defined(bid))
			dso__set_build_id(map->dso, bid);

	} else if (is_kernel_mmap) {
		const char *symbol_name = (xm->name + strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insisted on passing
			 * the correct cpumode to is_kernel_module, we would
			 * have to record the cpumode when adding this dso to
			 * the linked list.
			 *
			 * However, we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = dso_space;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, xm->start, xm->end);

		if (build_id__is_defined(bid))
			dso__set_build_id(kernel, bid);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (xm->pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							xm->pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
		return machine__process_extra_kernel_map(machine, xm);
	}
	return 0;
out_problem:
	return -1;
}

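/*
 * PERF_RECORD_MMAP2 carries dso identification (maj/min/ino) and,
 * optionally, a build-id. Kernel-space mmaps are routed to the kernel
 * handler above; user-space maps are attached to the owning thread.
 */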
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct build_id __bid, *bid = NULL;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		bid = &__bid;
		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
	}

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		struct extra_kernel_map xm = {
			.start = event->mmap2.start,
			.end   = event->mmap2.start + event->mmap2.len,
			.pgoff = event->mmap2.pgoff,
		};

		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       &dso_id, event->mmap2.prot,
		       event->mmap2.flags, bid,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

machine__process_mmap_event(struct machine * machine,union perf_event * event,struct perf_sample * sample)1814 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1815 struct perf_sample *sample)
1816 {
1817 struct thread *thread;
1818 struct map *map;
1819 u32 prot = 0;
1820 int ret = 0;
1821
1822 if (dump_trace)
1823 perf_event__fprintf_mmap(event, stdout);
1824
1825 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1826 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1827 struct extra_kernel_map xm = {
1828 .start = event->mmap.start,
1829 .end = event->mmap.start + event->mmap.len,
1830 .pgoff = event->mmap.pgoff,
1831 };
1832
1833 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1834 ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1835 if (ret < 0)
1836 goto out_problem;
1837 return 0;
1838 }
1839
1840 thread = machine__findnew_thread(machine, event->mmap.pid,
1841 event->mmap.tid);
1842 if (thread == NULL)
1843 goto out_problem;
1844
1845 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1846 prot = PROT_EXEC;
1847
1848 map = map__new(machine, event->mmap.start,
1849 event->mmap.len, event->mmap.pgoff,
1850 NULL, prot, 0, NULL, event->mmap.filename, thread);
1851
1852 if (map == NULL)
1853 goto out_problem_map;
1854
1855 ret = thread__insert_map(thread, map);
1856 if (ret)
1857 goto out_problem_insert;
1858
1859 thread__put(thread);
1860 map__put(map);
1861 return 0;
1862
1863 out_problem_insert:
1864 map__put(map);
1865 out_problem_map:
1866 thread__put(thread);
1867 out_problem:
1868 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1869 return 0;
1870 }
1871
__machine__remove_thread(struct machine * machine,struct thread * th,bool lock)1872 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1873 {
1874 struct threads *threads = machine__threads(machine, th->tid);
1875
1876 if (threads->last_match == th)
1877 threads__set_last_match(threads, NULL);
1878
1879 if (lock)
1880 down_write(&threads->lock);
1881
1882 BUG_ON(refcount_read(&th->refcnt) == 0);
1883
1884 rb_erase_cached(&th->rb_node, &threads->entries);
1885 RB_CLEAR_NODE(&th->rb_node);
1886 --threads->nr;
1887 /*
1888 * Move it first to the dead_threads list, then drop the reference,
1889 * if this is the last reference, then the thread__delete destructor
1890 * will be called and we will remove it from the dead_threads list.
1891 */
1892 list_add_tail(&th->node, &threads->dead);
1893
1894 /*
1895 	 * The put must be done while the lock is held: if this is the last
1896 	 * reference, removing the thread touches the threads->dead list
1897 	 * head.
1898 */
1899 thread__put(th);
1900
1901 if (lock)
1902 up_write(&threads->lock);
1903 }
1904
1905 void machine__remove_thread(struct machine *machine, struct thread *th)
1906 {
1907 return __machine__remove_thread(machine, th, true);
1908 }
1909
1910 int machine__process_fork_event(struct machine *machine, union perf_event *event,
1911 struct perf_sample *sample)
1912 {
1913 struct thread *thread = machine__find_thread(machine,
1914 event->fork.pid,
1915 event->fork.tid);
1916 struct thread *parent = machine__findnew_thread(machine,
1917 event->fork.ppid,
1918 event->fork.ptid);
1919 bool do_maps_clone = true;
1920 int err = 0;
1921
1922 if (dump_trace)
1923 perf_event__fprintf_task(event, stdout);
1924
1925 /*
1926 * There may be an existing thread that is not actually the parent,
1927 * either because we are processing events out of order, or because the
1928 * (fork) event that would have removed the thread was lost. Assume the
1929 * latter case and continue on as best we can.
1930 */
1931 if (parent->pid_ != (pid_t)event->fork.ppid) {
1932 dump_printf("removing erroneous parent thread %d/%d\n",
1933 parent->pid_, parent->tid);
1934 machine__remove_thread(machine, parent);
1935 thread__put(parent);
1936 parent = machine__findnew_thread(machine, event->fork.ppid,
1937 event->fork.ptid);
1938 }
1939
1940 /* if a thread currently exists for the thread id remove it */
1941 if (thread != NULL) {
1942 machine__remove_thread(machine, thread);
1943 thread__put(thread);
1944 }
1945
1946 thread = machine__findnew_thread(machine, event->fork.pid,
1947 event->fork.tid);
1948 /*
1949 * When synthesizing FORK events, we are trying to create thread
1950 * objects for the already running tasks on the machine.
1951 *
1952 * Normally, for a kernel FORK event, we want to clone the parent's
1953 * maps because that is what the kernel just did.
1954 *
1955 * But when synthesizing, this should not be done. If we do, we end up
1956 * with overlapping maps as we process the synthesized MMAP2 events that
1957 * get delivered shortly thereafter.
1958 *
1959 * Use the FORK event misc flags in an internal way to signal this
1960 * situation, so we can elide the map clone when appropriate.
1961 */
1962 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1963 do_maps_clone = false;
1964
1965 if (thread == NULL || parent == NULL ||
1966 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1967 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1968 err = -1;
1969 }
1970 thread__put(thread);
1971 thread__put(parent);
1972
1973 return err;
1974 }
1975
1976 int machine__process_exit_event(struct machine *machine, union perf_event *event,
1977 struct perf_sample *sample __maybe_unused)
1978 {
1979 struct thread *thread = machine__find_thread(machine,
1980 event->fork.pid,
1981 event->fork.tid);
1982
1983 if (dump_trace)
1984 perf_event__fprintf_task(event, stdout);
1985
1986 if (thread != NULL) {
1987 thread__exited(thread);
1988 thread__put(thread);
1989 }
1990
1991 return 0;
1992 }
1993
1994 int machine__process_event(struct machine *machine, union perf_event *event,
1995 struct perf_sample *sample)
1996 {
1997 int ret;
1998
1999 switch (event->header.type) {
2000 case PERF_RECORD_COMM:
2001 ret = machine__process_comm_event(machine, event, sample); break;
2002 case PERF_RECORD_MMAP:
2003 ret = machine__process_mmap_event(machine, event, sample); break;
2004 case PERF_RECORD_NAMESPACES:
2005 ret = machine__process_namespaces_event(machine, event, sample); break;
2006 case PERF_RECORD_CGROUP:
2007 ret = machine__process_cgroup_event(machine, event, sample); break;
2008 case PERF_RECORD_MMAP2:
2009 ret = machine__process_mmap2_event(machine, event, sample); break;
2010 case PERF_RECORD_FORK:
2011 ret = machine__process_fork_event(machine, event, sample); break;
2012 case PERF_RECORD_EXIT:
2013 ret = machine__process_exit_event(machine, event, sample); break;
2014 case PERF_RECORD_LOST:
2015 ret = machine__process_lost_event(machine, event, sample); break;
2016 case PERF_RECORD_AUX:
2017 ret = machine__process_aux_event(machine, event); break;
2018 case PERF_RECORD_ITRACE_START:
2019 ret = machine__process_itrace_start_event(machine, event); break;
2020 case PERF_RECORD_LOST_SAMPLES:
2021 ret = machine__process_lost_samples_event(machine, event, sample); break;
2022 case PERF_RECORD_SWITCH:
2023 case PERF_RECORD_SWITCH_CPU_WIDE:
2024 ret = machine__process_switch_event(machine, event); break;
2025 case PERF_RECORD_KSYMBOL:
2026 ret = machine__process_ksymbol(machine, event, sample); break;
2027 case PERF_RECORD_BPF_EVENT:
2028 ret = machine__process_bpf(machine, event, sample); break;
2029 case PERF_RECORD_TEXT_POKE:
2030 ret = machine__process_text_poke(machine, event, sample); break;
2031 default:
2032 ret = -1;
2033 break;
2034 }
2035
2036 return ret;
2037 }
2038
2039 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
2040 {
2041 if (!regexec(regex, sym->name, 0, NULL, 0))
2042 return true;
2043 return false;
2044 }
2045
2046 static void ip__resolve_ams(struct thread *thread,
2047 struct addr_map_symbol *ams,
2048 u64 ip)
2049 {
2050 struct addr_location al;
2051
2052 memset(&al, 0, sizeof(al));
2053 /*
2054 * We cannot use the header.misc hint to determine whether a
2055 * branch stack address is user, kernel, guest, hypervisor.
2056 * Branches may straddle the kernel/user/hypervisor boundaries.
2057 	 * Thus, we have to try each cpumode consecutively until we find
2058 	 * a match; otherwise the symbol remains unknown.
2059 */
2060 thread__find_cpumode_addr_location(thread, ip, &al);
2061
2062 ams->addr = ip;
2063 ams->al_addr = al.addr;
2064 ams->ms.maps = al.maps;
2065 ams->ms.sym = al.sym;
2066 ams->ms.map = al.map;
2067 ams->phys_addr = 0;
2068 ams->data_page_size = 0;
2069 }
2070
2071 static void ip__resolve_data(struct thread *thread,
2072 u8 m, struct addr_map_symbol *ams,
2073 u64 addr, u64 phys_addr, u64 daddr_page_size)
2074 {
2075 struct addr_location al;
2076
2077 memset(&al, 0, sizeof(al));
2078
2079 thread__find_symbol(thread, m, addr, &al);
2080
2081 ams->addr = addr;
2082 ams->al_addr = al.addr;
2083 ams->ms.maps = al.maps;
2084 ams->ms.sym = al.sym;
2085 ams->ms.map = al.map;
2086 ams->phys_addr = phys_addr;
2087 ams->data_page_size = daddr_page_size;
2088 }
2089
2090 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2091 struct addr_location *al)
2092 {
2093 struct mem_info *mi = mem_info__new();
2094
2095 if (!mi)
2096 return NULL;
2097
2098 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2099 ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2100 sample->addr, sample->phys_addr,
2101 sample->data_page_size);
2102 mi->data_src.val = sample->data_src;
2103
2104 return mi;
2105 }
2106
2107 static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2108 {
2109 struct map *map = ms->map;
2110 char *srcline = NULL;
2111
2112 if (!map || callchain_param.key == CCKEY_FUNCTION)
2113 return srcline;
2114
2115 srcline = srcline__tree_find(&map->dso->srclines, ip);
2116 if (!srcline) {
2117 bool show_sym = false;
2118 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2119
2120 srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
2121 ms->sym, show_sym, show_addr, ip);
2122 srcline__tree_insert(&map->dso->srclines, ip, srcline);
2123 }
2124
2125 return srcline;
2126 }
2127
2128 struct iterations {
2129 int nr_loop_iter;
2130 u64 cycles;
2131 };
2132
2133 static int add_callchain_ip(struct thread *thread,
2134 struct callchain_cursor *cursor,
2135 struct symbol **parent,
2136 struct addr_location *root_al,
2137 u8 *cpumode,
2138 u64 ip,
2139 bool branch,
2140 struct branch_flags *flags,
2141 struct iterations *iter,
2142 u64 branch_from)
2143 {
2144 struct map_symbol ms;
2145 struct addr_location al;
2146 int nr_loop_iter = 0;
2147 u64 iter_cycles = 0;
2148 const char *srcline = NULL;
2149
2150 al.filtered = 0;
2151 al.sym = NULL;
2152 al.srcline = NULL;
2153 if (!cpumode) {
2154 thread__find_cpumode_addr_location(thread, ip, &al);
2155 } else {
2156 if (ip >= PERF_CONTEXT_MAX) {
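			/*
			 * ips at or above PERF_CONTEXT_MAX are not addresses
			 * but context markers that the kernel interleaves in
			 * the chain to flag a cpumode switch; they live at
			 * the very top of the address space (e.g.
			 * PERF_CONTEXT_KERNEL is (__u64)-128).
			 */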
2157 switch (ip) {
2158 case PERF_CONTEXT_HV:
2159 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2160 break;
2161 case PERF_CONTEXT_KERNEL:
2162 *cpumode = PERF_RECORD_MISC_KERNEL;
2163 break;
2164 case PERF_CONTEXT_USER:
2165 *cpumode = PERF_RECORD_MISC_USER;
2166 break;
2167 default:
2168 pr_debug("invalid callchain context: "
2169 "%"PRId64"\n", (s64) ip);
2170 /*
2171 * It seems the callchain is corrupted.
2172 * Discard all.
2173 */
2174 callchain_cursor_reset(cursor);
2175 return 1;
2176 }
2177 return 0;
2178 }
2179 thread__find_symbol(thread, *cpumode, ip, &al);
2180 }
2181
2182 if (al.sym != NULL) {
2183 if (perf_hpp_list.parent && !*parent &&
2184 symbol__match_regex(al.sym, &parent_regex))
2185 *parent = al.sym;
2186 else if (have_ignore_callees && root_al &&
2187 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2188 /* Treat this symbol as the root,
2189 forgetting its callees. */
2190 *root_al = al;
2191 callchain_cursor_reset(cursor);
2192 }
2193 }
2194
2195 if (symbol_conf.hide_unresolved && al.sym == NULL)
2196 return 0;
2197
2198 if (iter) {
2199 nr_loop_iter = iter->nr_loop_iter;
2200 iter_cycles = iter->cycles;
2201 }
2202
2203 ms.maps = al.maps;
2204 ms.map = al.map;
2205 ms.sym = al.sym;
2206 srcline = callchain_srcline(&ms, al.addr);
2207 return callchain_cursor_append(cursor, ip, &ms,
2208 branch, flags, nr_loop_iter,
2209 iter_cycles, branch_from, srcline);
2210 }
2211
2212 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2213 struct addr_location *al)
2214 {
2215 unsigned int i;
2216 const struct branch_stack *bs = sample->branch_stack;
2217 struct branch_entry *entries = perf_sample__branch_entries(sample);
2218 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2219
2220 if (!bi)
2221 return NULL;
2222
2223 for (i = 0; i < bs->nr; i++) {
2224 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2225 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2226 bi[i].flags = entries[i].flags;
2227 }
2228 return bi;
2229 }
2230
2231 static void save_iterations(struct iterations *iter,
2232 struct branch_entry *be, int nr)
2233 {
2234 int i;
2235
2236 iter->nr_loop_iter++;
2237 iter->cycles = 0;
2238
2239 for (i = 0; i < nr; i++)
2240 iter->cycles += be[i].flags.cycles;
2241 }
2242
2243 #define CHASHSZ 127
2244 #define CHASHBITS 7
2245 #define NO_ENTRY 0xff
2246
2247 #define PERF_MAX_BRANCH_DEPTH 127
2248
2249 /* Remove loops. */
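/*
 * A sketch of the approach: each branch 'from' address is hashed into
 * the small chash table. When a later entry lands on a slot already
 * holding the same 'from', the span between the two occurrences is a
 * loop candidate; if that span repeats verbatim, one iteration is
 * collapsed and its count and cycles are folded into the following
 * entry via save_iterations(). E.g. a stack A B C A B C D shrinks to
 * A B C D, with the removed A B C iteration accounted on D.
 */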
2250 static int remove_loops(struct branch_entry *l, int nr,
2251 struct iterations *iter)
2252 {
2253 int i, j, off;
2254 unsigned char chash[CHASHSZ];
2255
2256 memset(chash, NO_ENTRY, sizeof(chash));
2257
2258 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2259
2260 for (i = 0; i < nr; i++) {
2261 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2262
2263 /* no collision handling for now */
2264 if (chash[h] == NO_ENTRY) {
2265 chash[h] = i;
2266 } else if (l[chash[h]].from == l[i].from) {
2267 bool is_loop = true;
2268 /* check if it is a real loop */
2269 off = 0;
2270 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2271 if (l[j].from != l[i + off].from) {
2272 is_loop = false;
2273 break;
2274 }
2275 if (is_loop) {
2276 j = nr - (i + off);
2277 if (j > 0) {
2278 save_iterations(iter + i + off,
2279 l + i, off);
2280
2281 memmove(iter + i, iter + i + off,
2282 j * sizeof(*iter));
2283
2284 memmove(l + i, l + i + off,
2285 j * sizeof(*l));
2286 }
2287
2288 nr -= off;
2289 }
2290 }
2291 }
2292 return nr;
2293 }
2294
2295 static int lbr_callchain_add_kernel_ip(struct thread *thread,
2296 struct callchain_cursor *cursor,
2297 struct perf_sample *sample,
2298 struct symbol **parent,
2299 struct addr_location *root_al,
2300 u64 branch_from,
2301 bool callee, int end)
2302 {
2303 struct ip_callchain *chain = sample->callchain;
2304 u8 cpumode = PERF_RECORD_MISC_USER;
2305 int err, i;
2306
2307 if (callee) {
2308 for (i = 0; i < end + 1; i++) {
2309 err = add_callchain_ip(thread, cursor, parent,
2310 root_al, &cpumode, chain->ips[i],
2311 false, NULL, NULL, branch_from);
2312 if (err)
2313 return err;
2314 }
2315 return 0;
2316 }
2317
2318 for (i = end; i >= 0; i--) {
2319 err = add_callchain_ip(thread, cursor, parent,
2320 root_al, &cpumode, chain->ips[i],
2321 false, NULL, NULL, branch_from);
2322 if (err)
2323 return err;
2324 }
2325
2326 return 0;
2327 }
2328
2329 static void save_lbr_cursor_node(struct thread *thread,
2330 struct callchain_cursor *cursor,
2331 int idx)
2332 {
2333 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2334
2335 if (!lbr_stitch)
2336 return;
2337
2338 if (cursor->pos == cursor->nr) {
2339 lbr_stitch->prev_lbr_cursor[idx].valid = false;
2340 return;
2341 }
2342
2343 if (!cursor->curr)
2344 cursor->curr = cursor->first;
2345 else
2346 cursor->curr = cursor->curr->next;
2347 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2348 sizeof(struct callchain_cursor_node));
2349
2350 lbr_stitch->prev_lbr_cursor[idx].valid = true;
2351 cursor->pos++;
2352 }
2353
2354 static int lbr_callchain_add_lbr_ip(struct thread *thread,
2355 struct callchain_cursor *cursor,
2356 struct perf_sample *sample,
2357 struct symbol **parent,
2358 struct addr_location *root_al,
2359 u64 *branch_from,
2360 bool callee)
2361 {
2362 struct branch_stack *lbr_stack = sample->branch_stack;
2363 struct branch_entry *entries = perf_sample__branch_entries(sample);
2364 u8 cpumode = PERF_RECORD_MISC_USER;
2365 int lbr_nr = lbr_stack->nr;
2366 struct branch_flags *flags;
2367 int err, i;
2368 u64 ip;
2369
2370 /*
2371 	 * curr and pos are not used while a writing session is open; they
2372 	 * are cleared in callchain_cursor_commit() when the session is
2373 	 * closed. Reuse them here to track the current cursor node.
2374 */
2375 if (thread->lbr_stitch) {
2376 cursor->curr = NULL;
2377 cursor->pos = cursor->nr;
2378 if (cursor->nr) {
2379 cursor->curr = cursor->first;
2380 for (i = 0; i < (int)(cursor->nr - 1); i++)
2381 cursor->curr = cursor->curr->next;
2382 }
2383 }
2384
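	/*
	 * LBR entries are ordered most-recent-first: entries[0].to is the
	 * branch target closest to the sampled ip, and the 'from' addresses
	 * step back through the callers as the index grows.
	 */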
2385 if (callee) {
2386 /* Add LBR ip from first entries.to */
2387 ip = entries[0].to;
2388 flags = &entries[0].flags;
2389 *branch_from = entries[0].from;
2390 err = add_callchain_ip(thread, cursor, parent,
2391 root_al, &cpumode, ip,
2392 true, flags, NULL,
2393 *branch_from);
2394 if (err)
2395 return err;
2396
2397 /*
2398 		 * A cursor node was just appended, so advance the current
2399 		 * cursor node. The node for entry 0 does not need to be
2400 		 * saved, since the whole LBR stack of the previous sample
2401 		 * can never be stitched.
2402 */
2403 if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
2404 if (!cursor->curr)
2405 cursor->curr = cursor->first;
2406 else
2407 cursor->curr = cursor->curr->next;
2408 cursor->pos++;
2409 }
2410
2411 /* Add LBR ip from entries.from one by one. */
2412 for (i = 0; i < lbr_nr; i++) {
2413 ip = entries[i].from;
2414 flags = &entries[i].flags;
2415 err = add_callchain_ip(thread, cursor, parent,
2416 root_al, &cpumode, ip,
2417 true, flags, NULL,
2418 *branch_from);
2419 if (err)
2420 return err;
2421 save_lbr_cursor_node(thread, cursor, i);
2422 }
2423 return 0;
2424 }
2425
2426 /* Add LBR ip from entries.from one by one. */
2427 for (i = lbr_nr - 1; i >= 0; i--) {
2428 ip = entries[i].from;
2429 flags = &entries[i].flags;
2430 err = add_callchain_ip(thread, cursor, parent,
2431 root_al, &cpumode, ip,
2432 true, flags, NULL,
2433 *branch_from);
2434 if (err)
2435 return err;
2436 save_lbr_cursor_node(thread, cursor, i);
2437 }
2438
2439 /* Add LBR ip from first entries.to */
2440 ip = entries[0].to;
2441 flags = &entries[0].flags;
2442 *branch_from = entries[0].from;
2443 err = add_callchain_ip(thread, cursor, parent,
2444 root_al, &cpumode, ip,
2445 true, flags, NULL,
2446 *branch_from);
2447 if (err)
2448 return err;
2449
2450 return 0;
2451 }
2452
2453 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2454 struct callchain_cursor *cursor)
2455 {
2456 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2457 struct callchain_cursor_node *cnode;
2458 struct stitch_list *stitch_node;
2459 int err;
2460
2461 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2462 cnode = &stitch_node->cursor;
2463
2464 err = callchain_cursor_append(cursor, cnode->ip,
2465 &cnode->ms,
2466 cnode->branch,
2467 &cnode->branch_flags,
2468 cnode->nr_loop_iter,
2469 cnode->iter_cycles,
2470 cnode->branch_from,
2471 cnode->srcline);
2472 if (err)
2473 return err;
2474 }
2475 return 0;
2476 }
2477
2478 static struct stitch_list *get_stitch_node(struct thread *thread)
2479 {
2480 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2481 struct stitch_list *stitch_node;
2482
2483 if (!list_empty(&lbr_stitch->free_lists)) {
2484 stitch_node = list_first_entry(&lbr_stitch->free_lists,
2485 struct stitch_list, node);
2486 list_del(&stitch_node->node);
2487
2488 return stitch_node;
2489 }
2490
2491 return malloc(sizeof(struct stitch_list));
2492 }
2493
2494 static bool has_stitched_lbr(struct thread *thread,
2495 struct perf_sample *cur,
2496 struct perf_sample *prev,
2497 unsigned int max_lbr,
2498 bool callee)
2499 {
2500 struct branch_stack *cur_stack = cur->branch_stack;
2501 struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2502 struct branch_stack *prev_stack = prev->branch_stack;
2503 struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2504 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2505 int i, j, nr_identical_branches = 0;
2506 struct stitch_list *stitch_node;
2507 u64 cur_base, distance;
2508
2509 if (!cur_stack || !prev_stack)
2510 return false;
2511
2512 /* Find the physical index of the base-of-stack for current sample. */
2513 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2514
2515 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2516 (max_lbr + prev_stack->hw_idx - cur_base);
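	/*
	 * Worked example with assumed numbers: for max_lbr = 32, a current
	 * stack of nr = 8 with hw_idx = 5 puts the base-of-stack at
	 * physical index cur_base = 32 - 8 + 5 + 1 = 30. A previous sample
	 * with hw_idx = 3 wraps around, so distance = 32 + 3 - 30 = 5 slots
	 * separate the current base from the previous sample's newest entry.
	 */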
2517 /* Previous sample has shorter stack. Nothing can be stitched. */
2518 if (distance + 1 > prev_stack->nr)
2519 return false;
2520
2521 /*
2522 * Check if there are identical LBRs between two samples.
2523 * Identical LBRs must have same from, to and flags values. Also,
2524 * they have to be saved in the same LBR registers (same physical
2525 * index).
2526 *
2527 * Starts from the base-of-stack of current sample.
2528 */
2529 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2530 if ((prev_entries[i].from != cur_entries[j].from) ||
2531 (prev_entries[i].to != cur_entries[j].to) ||
2532 (prev_entries[i].flags.value != cur_entries[j].flags.value))
2533 break;
2534 nr_identical_branches++;
2535 }
2536
2537 if (!nr_identical_branches)
2538 return false;
2539
2540 /*
2541 * Save the LBRs between the base-of-stack of previous sample
2542 * and the base-of-stack of current sample into lbr_stitch->lists.
2543 * These LBRs will be stitched later.
2544 */
2545 for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2546
2547 if (!lbr_stitch->prev_lbr_cursor[i].valid)
2548 continue;
2549
2550 stitch_node = get_stitch_node(thread);
2551 if (!stitch_node)
2552 return false;
2553
2554 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2555 sizeof(struct callchain_cursor_node));
2556
2557 if (callee)
2558 list_add(&stitch_node->node, &lbr_stitch->lists);
2559 else
2560 list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2561 }
2562
2563 return true;
2564 }
2565
2566 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2567 {
2568 if (thread->lbr_stitch)
2569 return true;
2570
2571 thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
2572 if (!thread->lbr_stitch)
2573 goto err;
2574
2575 thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2576 if (!thread->lbr_stitch->prev_lbr_cursor)
2577 goto free_lbr_stitch;
2578
2579 INIT_LIST_HEAD(&thread->lbr_stitch->lists);
2580 INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
2581
2582 return true;
2583
2584 free_lbr_stitch:
2585 zfree(&thread->lbr_stitch);
2586 err:
2587 pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2588 thread->lbr_stitch_enable = false;
2589 return false;
2590 }
2591
2592 /*
2593  * Resolve an LBR callstack chain sample.
2594  * Return:
2595  * 1 when LBR callchain information was resolved successfully,
2596  * 0 when no LBR callchain information is available (the caller should
2597  *   fall back to frame pointers), or a negative error code otherwise.
2598 */
2599 static int resolve_lbr_callchain_sample(struct thread *thread,
2600 struct callchain_cursor *cursor,
2601 struct perf_sample *sample,
2602 struct symbol **parent,
2603 struct addr_location *root_al,
2604 int max_stack,
2605 unsigned int max_lbr)
2606 {
2607 bool callee = (callchain_param.order == ORDER_CALLEE);
2608 struct ip_callchain *chain = sample->callchain;
2609 int chain_nr = min(max_stack, (int)chain->nr), i;
2610 struct lbr_stitch *lbr_stitch;
2611 bool stitched_lbr = false;
2612 u64 branch_from = 0;
2613 int err;
2614
2615 for (i = 0; i < chain_nr; i++) {
2616 if (chain->ips[i] == PERF_CONTEXT_USER)
2617 break;
2618 }
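	/*
	 * i now indexes the first PERF_CONTEXT_USER marker; the ips before
	 * it form the kernel part of the chain, and i is passed as 'end' to
	 * lbr_callchain_add_kernel_ip() below.
	 */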
2619
2620 /* LBR only affects the user callchain */
2621 if (i == chain_nr)
2622 return 0;
2623
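	/*
	 * Try to stitch the trailing LBRs of the previous sample onto this
	 * one. If nothing could be stitched, recycle any queued stitch
	 * nodes onto the free list; either way, remember this sample for
	 * the next round.
	 */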
2624 if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
2625 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2626 lbr_stitch = thread->lbr_stitch;
2627
2628 stitched_lbr = has_stitched_lbr(thread, sample,
2629 &lbr_stitch->prev_sample,
2630 max_lbr, callee);
2631
2632 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2633 list_replace_init(&lbr_stitch->lists,
2634 &lbr_stitch->free_lists);
2635 }
2636 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2637 }
2638
2639 if (callee) {
2640 /* Add kernel ip */
2641 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2642 parent, root_al, branch_from,
2643 true, i);
2644 if (err)
2645 goto error;
2646
2647 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2648 root_al, &branch_from, true);
2649 if (err)
2650 goto error;
2651
2652 if (stitched_lbr) {
2653 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2654 if (err)
2655 goto error;
2656 }
2657
2658 } else {
2659 if (stitched_lbr) {
2660 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2661 if (err)
2662 goto error;
2663 }
2664 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2665 root_al, &branch_from, false);
2666 if (err)
2667 goto error;
2668
2669 /* Add kernel ip */
2670 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2671 parent, root_al, branch_from,
2672 false, i);
2673 if (err)
2674 goto error;
2675 }
2676 return 1;
2677
2678 error:
2679 return (err < 0) ? err : 0;
2680 }
2681
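/*
 * Scan backwards from 'ent' for the most recent context marker
 * (PERF_CONTEXT_*) and replay it through add_callchain_ip(), so that
 * *cpumode reflects the context the preceding ips were sampled in.
 */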
2682 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2683 struct callchain_cursor *cursor,
2684 struct symbol **parent,
2685 struct addr_location *root_al,
2686 u8 *cpumode, int ent)
2687 {
2688 int err = 0;
2689
2690 while (--ent >= 0) {
2691 u64 ip = chain->ips[ent];
2692
2693 if (ip >= PERF_CONTEXT_MAX) {
2694 err = add_callchain_ip(thread, cursor, parent,
2695 root_al, cpumode, ip,
2696 false, NULL, NULL, 0);
2697 break;
2698 }
2699 }
2700 return err;
2701 }
2702
2703 static int thread__resolve_callchain_sample(struct thread *thread,
2704 struct callchain_cursor *cursor,
2705 struct evsel *evsel,
2706 struct perf_sample *sample,
2707 struct symbol **parent,
2708 struct addr_location *root_al,
2709 int max_stack)
2710 {
2711 struct branch_stack *branch = sample->branch_stack;
2712 struct branch_entry *entries = perf_sample__branch_entries(sample);
2713 struct ip_callchain *chain = sample->callchain;
2714 int chain_nr = 0;
2715 u8 cpumode = PERF_RECORD_MISC_USER;
2716 int i, j, err, nr_entries;
2717 int skip_idx = -1;
2718 int first_call = 0;
2719
2720 if (chain)
2721 chain_nr = chain->nr;
2722
2723 if (evsel__has_branch_callstack(evsel)) {
2724 struct perf_env *env = evsel__env(evsel);
2725
2726 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2727 root_al, max_stack,
2728 !env ? 0 : env->max_branches);
2729 if (err)
2730 return (err < 0) ? err : 0;
2731 }
2732
2733 /*
2734 * Based on DWARF debug information, some architectures skip
2735 * a callchain entry saved by the kernel.
2736 */
2737 skip_idx = arch_skip_callchain_idx(thread, chain);
2738
2739 /*
2740 * Add branches to call stack for easier browsing. This gives
2741 * more context for a sample than just the callers.
2742 *
2743 * This uses individual histograms of paths compared to the
2744 * aggregated histograms the normal LBR mode uses.
2745 *
2746 * Limitations for now:
2747 * - No extra filters
2748 * - No annotations (should annotate somehow)
2749 */
2750
2751 if (branch && callchain_param.branch_callstack) {
2752 int nr = min(max_stack, (int)branch->nr);
2753 struct branch_entry be[nr];
2754 struct iterations iter[nr];
2755
2756 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2757 pr_warning("corrupted branch chain. skipping...\n");
2758 goto check_calls;
2759 }
2760
2761 for (i = 0; i < nr; i++) {
2762 if (callchain_param.order == ORDER_CALLEE) {
2763 be[i] = entries[i];
2764
2765 if (chain == NULL)
2766 continue;
2767
2768 /*
2769 * Check for overlap into the callchain.
2770 * The return address is one off compared to
2771 * the branch entry. To adjust for this
2772 * assume the calling instruction is not longer
2773 * than 8 bytes.
2774 */
2775 if (i == skip_idx ||
2776 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2777 first_call++;
2778 else if (be[i].from < chain->ips[first_call] &&
2779 be[i].from >= chain->ips[first_call] - 8)
2780 first_call++;
2781 } else
2782 be[i] = entries[branch->nr - i - 1];
2783 }
2784
2785 memset(iter, 0, sizeof(struct iterations) * nr);
2786 nr = remove_loops(be, nr, iter);
2787
2788 for (i = 0; i < nr; i++) {
2789 err = add_callchain_ip(thread, cursor, parent,
2790 root_al,
2791 NULL, be[i].to,
2792 true, &be[i].flags,
2793 NULL, be[i].from);
2794
2795 if (!err)
2796 err = add_callchain_ip(thread, cursor, parent, root_al,
2797 NULL, be[i].from,
2798 true, &be[i].flags,
2799 &iter[i], 0);
2800 if (err == -EINVAL)
2801 break;
2802 if (err)
2803 return err;
2804 }
2805
2806 if (chain_nr == 0)
2807 return 0;
2808
2809 chain_nr -= nr;
2810 }
2811
2812 check_calls:
2813 if (chain && callchain_param.order != ORDER_CALLEE) {
2814 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2815 &cpumode, chain->nr - first_call);
2816 if (err)
2817 return (err < 0) ? err : 0;
2818 }
2819 for (i = first_call, nr_entries = 0;
2820 i < chain_nr && nr_entries < max_stack; i++) {
2821 u64 ip;
2822
2823 if (callchain_param.order == ORDER_CALLEE)
2824 j = i;
2825 else
2826 j = chain->nr - i - 1;
2827
2828 #ifdef HAVE_SKIP_CALLCHAIN_IDX
2829 if (j == skip_idx)
2830 continue;
2831 #endif
2832 ip = chain->ips[j];
2833 if (ip < PERF_CONTEXT_MAX)
2834 ++nr_entries;
2835 else if (callchain_param.order != ORDER_CALLEE) {
2836 err = find_prev_cpumode(chain, thread, cursor, parent,
2837 root_al, &cpumode, j);
2838 if (err)
2839 return (err < 0) ? err : 0;
2840 continue;
2841 }
2842
2843 err = add_callchain_ip(thread, cursor, parent,
2844 root_al, &cpumode, ip,
2845 false, NULL, NULL, 0);
2846
2847 if (err)
2848 return (err < 0) ? err : 0;
2849 }
2850
2851 return 0;
2852 }
2853
2854 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
2855 {
2856 struct symbol *sym = ms->sym;
2857 struct map *map = ms->map;
2858 struct inline_node *inline_node;
2859 struct inline_list *ilist;
2860 u64 addr;
2861 int ret = 1;
2862
2863 if (!symbol_conf.inline_name || !map || !sym)
2864 return ret;
2865
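	/*
	 * Map ip to a dso-relative address, then to the objdump-style
	 * address that keys the per-dso inlined_nodes tree.
	 */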
2866 addr = map__map_ip(map, ip);
2867 addr = map__rip_2objdump(map, addr);
2868
2869 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2870 if (!inline_node) {
2871 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2872 if (!inline_node)
2873 return ret;
2874 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2875 }
2876
2877 list_for_each_entry(ilist, &inline_node->val, list) {
2878 struct map_symbol ilist_ms = {
2879 .maps = ms->maps,
2880 .map = map,
2881 .sym = ilist->symbol,
2882 };
2883 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
2884 NULL, 0, 0, 0, ilist->srcline);
2885
2886 if (ret != 0)
2887 return ret;
2888 }
2889
2890 return ret;
2891 }
2892
2893 static int unwind_entry(struct unwind_entry *entry, void *arg)
2894 {
2895 struct callchain_cursor *cursor = arg;
2896 const char *srcline = NULL;
2897 u64 addr = entry->ip;
2898
2899 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
2900 return 0;
2901
2902 if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
2903 return 0;
2904
2905 /*
2906 * Convert entry->ip from a virtual address to an offset in
2907 * its corresponding binary.
2908 */
2909 if (entry->ms.map)
2910 addr = map__map_ip(entry->ms.map, entry->ip);
2911
2912 srcline = callchain_srcline(&entry->ms, addr);
2913 return callchain_cursor_append(cursor, entry->ip, &entry->ms,
2914 false, NULL, 0, 0, 0, srcline);
2915 }
2916
2917 static int thread__resolve_callchain_unwind(struct thread *thread,
2918 struct callchain_cursor *cursor,
2919 struct evsel *evsel,
2920 struct perf_sample *sample,
2921 int max_stack)
2922 {
2923 /* Can we do dwarf post unwind? */
2924 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2925 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2926 return 0;
2927
2928 /* Bail out if nothing was captured. */
2929 if ((!sample->user_regs.regs) ||
2930 (!sample->user_stack.size))
2931 return 0;
2932
2933 return unwind__get_entries(unwind_entry, cursor,
2934 thread, sample, max_stack);
2935 }
2936
2937 int thread__resolve_callchain(struct thread *thread,
2938 struct callchain_cursor *cursor,
2939 struct evsel *evsel,
2940 struct perf_sample *sample,
2941 struct symbol **parent,
2942 struct addr_location *root_al,
2943 int max_stack)
2944 {
2945 int ret = 0;
2946
2947 callchain_cursor_reset(cursor);
2948
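	/*
	 * Append the two sources in display order: for ORDER_CALLEE the
	 * sampled chain (leaf first) is resolved before the DWARF unwind,
	 * and the other way around for caller order.
	 */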
2949 if (callchain_param.order == ORDER_CALLEE) {
2950 ret = thread__resolve_callchain_sample(thread, cursor,
2951 evsel, sample,
2952 parent, root_al,
2953 max_stack);
2954 if (ret)
2955 return ret;
2956 ret = thread__resolve_callchain_unwind(thread, cursor,
2957 evsel, sample,
2958 max_stack);
2959 } else {
2960 ret = thread__resolve_callchain_unwind(thread, cursor,
2961 evsel, sample,
2962 max_stack);
2963 if (ret)
2964 return ret;
2965 ret = thread__resolve_callchain_sample(thread, cursor,
2966 evsel, sample,
2967 parent, root_al,
2968 max_stack);
2969 }
2970
2971 return ret;
2972 }
2973
2974 int machine__for_each_thread(struct machine *machine,
2975 int (*fn)(struct thread *thread, void *p),
2976 void *priv)
2977 {
2978 struct threads *threads;
2979 struct rb_node *nd;
2980 struct thread *thread;
2981 int rc = 0;
2982 int i;
2983
2984 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2985 threads = &machine->threads[i];
2986 for (nd = rb_first_cached(&threads->entries); nd;
2987 nd = rb_next(nd)) {
2988 thread = rb_entry(nd, struct thread, rb_node);
2989 rc = fn(thread, priv);
2990 if (rc != 0)
2991 return rc;
2992 }
2993
2994 list_for_each_entry(thread, &threads->dead, node) {
2995 rc = fn(thread, priv);
2996 if (rc != 0)
2997 return rc;
2998 }
2999 }
3000 return rc;
3001 }
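
/*
 * A minimal usage sketch (count_thread is a hypothetical callback):
 * the priv pointer threads state through to the callback, and a
 * non-zero return stops the walk.
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */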
3002
3003 int machines__for_each_thread(struct machines *machines,
3004 int (*fn)(struct thread *thread, void *p),
3005 void *priv)
3006 {
3007 struct rb_node *nd;
3008 int rc = 0;
3009
3010 rc = machine__for_each_thread(&machines->host, fn, priv);
3011 if (rc != 0)
3012 return rc;
3013
3014 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3015 struct machine *machine = rb_entry(nd, struct machine, rb_node);
3016
3017 rc = machine__for_each_thread(machine, fn, priv);
3018 if (rc != 0)
3019 return rc;
3020 }
3021 return rc;
3022 }
3023
3024 pid_t machine__get_current_tid(struct machine *machine, int cpu)
3025 {
3026 int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
3027
3028 if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
3029 return -1;
3030
3031 return machine->current_tid[cpu];
3032 }
3033
3034 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3035 pid_t tid)
3036 {
3037 struct thread *thread;
3038 int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
3039
3040 if (cpu < 0)
3041 return -EINVAL;
3042
3043 if (!machine->current_tid) {
3044 int i;
3045
3046 machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
3047 if (!machine->current_tid)
3048 return -ENOMEM;
3049 for (i = 0; i < nr_cpus; i++)
3050 machine->current_tid[i] = -1;
3051 }
3052
3053 if (cpu >= nr_cpus) {
3054 pr_err("Requested CPU %d too large. ", cpu);
3055 pr_err("Consider raising MAX_NR_CPUS\n");
3056 return -EINVAL;
3057 }
3058
3059 machine->current_tid[cpu] = tid;
3060
3061 thread = machine__findnew_thread(machine, pid, tid);
3062 if (!thread)
3063 return -ENOMEM;
3064
3065 thread->cpu = cpu;
3066 thread__put(thread);
3067
3068 return 0;
3069 }
3070
3071 /*
3072 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
3073 * normalized arch is needed.
3074 */
3075 bool machine__is(struct machine *machine, const char *arch)
3076 {
3077 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3078 }
3079
3080 int machine__nr_cpus_avail(struct machine *machine)
3081 {
3082 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3083 }
3084
3085 int machine__get_kernel_start(struct machine *machine)
3086 {
3087 struct map *map = machine__kernel_map(machine);
3088 int err = 0;
3089
3090 /*
3091 * The only addresses above 2^63 are kernel addresses of a 64-bit
3092 * kernel. Note that addresses are unsigned so that on a 32-bit system
3093 * all addresses including kernel addresses are less than 2^32. In
3094 * that case (32-bit system), if the kernel mapping is unknown, all
3095 * addresses will be assumed to be in user space - see
3096 * machine__kernel_ip().
3097 */
3098 machine->kernel_start = 1ULL << 63;
3099 if (map) {
3100 err = map__load(map);
3101 /*
3102 * On x86_64, PTI entry trampolines are less than the
3103 * start of kernel text, but still above 2^63. So leave
3104 * kernel_start = 1ULL << 63 for x86_64.
3105 */
3106 if (!err && !machine__is(machine, "x86_64"))
3107 machine->kernel_start = map->start;
3108 }
3109 return err;
3110 }
3111
3112 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3113 {
3114 u8 addr_cpumode = cpumode;
3115 bool kernel_ip;
3116
3117 if (!machine->single_address_space)
3118 goto out;
3119
3120 kernel_ip = machine__kernel_ip(machine, addr);
3121 switch (cpumode) {
3122 case PERF_RECORD_MISC_KERNEL:
3123 case PERF_RECORD_MISC_USER:
3124 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3125 PERF_RECORD_MISC_USER;
3126 break;
3127 case PERF_RECORD_MISC_GUEST_KERNEL:
3128 case PERF_RECORD_MISC_GUEST_USER:
3129 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3130 PERF_RECORD_MISC_GUEST_USER;
3131 break;
3132 default:
3133 break;
3134 }
3135 out:
3136 return addr_cpumode;
3137 }
3138
3139 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3140 {
3141 return dsos__findnew_id(&machine->dsos, filename, id);
3142 }
3143
3144 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3145 {
3146 return machine__findnew_dso_id(machine, filename, NULL);
3147 }
3148
3149 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3150 {
3151 struct machine *machine = vmachine;
3152 struct map *map;
3153 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3154
3155 if (sym == NULL)
3156 return NULL;
3157
3158 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
3159 *addrp = map->unmap_ip(map, sym->start);
3160 return sym->name;
3161 }
3162
3163 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3164 {
3165 struct dso *pos;
3166 int err = 0;
3167
3168 list_for_each_entry(pos, &machine->dsos.head, node) {
3169 if (fn(pos, machine, priv))
3170 err = -1;
3171 }
3172 return err;
3173 }
3174