1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * auxtrace.c: AUX area trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
5 */
6
7 #include <inttypes.h>
8 #include <sys/types.h>
9 #include <sys/mman.h>
10 #include <stdbool.h>
11 #include <string.h>
12 #include <limits.h>
13 #include <errno.h>
14
15 #include <linux/kernel.h>
16 #include <linux/perf_event.h>
17 #include <linux/types.h>
18 #include <linux/bitops.h>
19 #include <linux/log2.h>
20 #include <linux/string.h>
21 #include <linux/time64.h>
22
23 #include <sys/param.h>
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <linux/list.h>
27 #include <linux/zalloc.h>
28
29 #include "evlist.h"
30 #include "dso.h"
31 #include "map.h"
32 #include "pmu.h"
33 #include "evsel.h"
34 #include "evsel_config.h"
35 #include "symbol.h"
36 #include "util/perf_api_probe.h"
37 #include "util/synthetic-events.h"
38 #include "thread_map.h"
39 #include "asm/bug.h"
40 #include "auxtrace.h"
41
42 #include <linux/hash.h>
43
44 #include "event.h"
45 #include "record.h"
46 #include "session.h"
47 #include "debug.h"
48 #include <subcmd/parse-options.h>
49
50 #include "cs-etm.h"
51 #include "intel-pt.h"
52 #include "intel-bts.h"
53 #include "arm-spe.h"
54 #include "s390-cpumsf.h"
55 #include "util/mmap.h"
56
57 #include <linux/ctype.h>
58 #include "symbol/kallsyms.h"
59 #include <internal/lib.h>
60
61 /*
62 * Make a group from 'leader' to 'last', requiring that the events were not
63 * already grouped to a different leader.
64 */
65 static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
66 {
67 struct evsel *evsel;
68 bool grp;
69
70 if (!evsel__is_group_leader(leader))
71 return -EINVAL;
72
73 grp = false;
74 evlist__for_each_entry(evlist, evsel) {
75 if (grp) {
76 if (!(evsel__leader(evsel) == leader ||
77 (evsel__leader(evsel) == evsel &&
78 evsel->core.nr_members <= 1)))
79 return -EINVAL;
80 } else if (evsel == leader) {
81 grp = true;
82 }
83 if (evsel == last)
84 break;
85 }
86
87 grp = false;
88 evlist__for_each_entry(evlist, evsel) {
89 if (grp) {
90 if (!evsel__has_leader(evsel, leader)) {
91 evsel__set_leader(evsel, leader);
92 if (leader->core.nr_members < 1)
93 leader->core.nr_members = 1;
94 leader->core.nr_members += 1;
95 }
96 } else if (evsel == leader) {
97 grp = true;
98 }
99 if (evsel == last)
100 break;
101 }
102
103 return 0;
104 }
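
/*
 * Illustrative sketch (not in the original source): a typical caller folds an
 * event that samples AUX data into the group led by the AUX area event, as
 * auxtrace_parse_sample_options() does further down in this file:
 *
 *	struct evsel *aux_evsel;	// AUX area event, e.g. intel_pt//
 *	struct evsel *evsel;		// event with attr.aux_sample_size set
 *
 *	if (aux_evsel && evsel->core.attr.aux_sample_size)
 *		evlist__regroup(evlist, aux_evsel, evsel);
 *
 * The first pass above only validates that nothing between 'leader' and
 * 'last' already belongs to a different group; the second pass re-parents
 * those events and bumps the leader's nr_members accordingly.
 */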
105
106 static bool auxtrace__dont_decode(struct perf_session *session)
107 {
108 return !session->itrace_synth_opts ||
109 session->itrace_synth_opts->dont_decode;
110 }
111
112 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
113 struct auxtrace_mmap_params *mp,
114 void *userpg, int fd)
115 {
116 struct perf_event_mmap_page *pc = userpg;
117
118 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
119
120 mm->userpg = userpg;
121 mm->mask = mp->mask;
122 mm->len = mp->len;
123 mm->prev = 0;
124 mm->idx = mp->idx;
125 mm->tid = mp->tid;
126 mm->cpu = mp->cpu;
127
128 if (!mp->len) {
129 mm->base = NULL;
130 return 0;
131 }
132
133 pc->aux_offset = mp->offset;
134 pc->aux_size = mp->len;
135
136 mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
137 if (mm->base == MAP_FAILED) {
138 pr_debug2("failed to mmap AUX area\n");
139 mm->base = NULL;
140 return -1;
141 }
142
143 return 0;
144 }
145
146 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
147 {
148 if (mm->base) {
149 munmap(mm->base, mm->len);
150 mm->base = NULL;
151 }
152 }
153
154 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
155 off_t auxtrace_offset,
156 unsigned int auxtrace_pages,
157 bool auxtrace_overwrite)
158 {
159 if (auxtrace_pages) {
160 mp->offset = auxtrace_offset;
161 mp->len = auxtrace_pages * (size_t)page_size;
162 mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
163 mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
164 pr_debug2("AUX area mmap length %zu\n", mp->len);
165 } else {
166 mp->len = 0;
167 }
168 }
169
170 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
171 struct evlist *evlist, int idx,
172 bool per_cpu)
173 {
174 mp->idx = idx;
175
176 if (per_cpu) {
177 mp->cpu = evlist->core.cpus->map[idx];
178 if (evlist->core.threads)
179 mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
180 else
181 mp->tid = -1;
182 } else {
183 mp->cpu = -1;
184 mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
185 }
186 }
187
188 #define AUXTRACE_INIT_NR_QUEUES 32
189
190 static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
191 {
192 struct auxtrace_queue *queue_array;
193 unsigned int max_nr_queues, i;
194
195 max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
196 if (nr_queues > max_nr_queues)
197 return NULL;
198
199 queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
200 if (!queue_array)
201 return NULL;
202
203 for (i = 0; i < nr_queues; i++) {
204 INIT_LIST_HEAD(&queue_array[i].head);
205 queue_array[i].priv = NULL;
206 }
207
208 return queue_array;
209 }
210
211 int auxtrace_queues__init(struct auxtrace_queues *queues)
212 {
213 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
214 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
215 if (!queues->queue_array)
216 return -ENOMEM;
217 return 0;
218 }
219
220 static int auxtrace_queues__grow(struct auxtrace_queues *queues,
221 unsigned int new_nr_queues)
222 {
223 unsigned int nr_queues = queues->nr_queues;
224 struct auxtrace_queue *queue_array;
225 unsigned int i;
226
227 if (!nr_queues)
228 nr_queues = AUXTRACE_INIT_NR_QUEUES;
229
230 while (nr_queues && nr_queues < new_nr_queues)
231 nr_queues <<= 1;
232
233 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
234 return -EINVAL;
235
236 queue_array = auxtrace_alloc_queue_array(nr_queues);
237 if (!queue_array)
238 return -ENOMEM;
239
240 for (i = 0; i < queues->nr_queues; i++) {
241 list_splice_tail(&queues->queue_array[i].head,
242 &queue_array[i].head);
243 queue_array[i].tid = queues->queue_array[i].tid;
244 queue_array[i].cpu = queues->queue_array[i].cpu;
245 queue_array[i].set = queues->queue_array[i].set;
246 queue_array[i].priv = queues->queue_array[i].priv;
247 }
248
249 queues->nr_queues = nr_queues;
250 queues->queue_array = queue_array;
251
252 return 0;
253 }
254
255 static void *auxtrace_copy_data(u64 size, struct perf_session *session)
256 {
257 int fd = perf_data__fd(session->data);
258 void *p;
259 ssize_t ret;
260
261 if (size > SSIZE_MAX)
262 return NULL;
263
264 p = malloc(size);
265 if (!p)
266 return NULL;
267
268 ret = readn(fd, p, size);
269 if (ret != (ssize_t)size) {
270 free(p);
271 return NULL;
272 }
273
274 return p;
275 }
276
277 static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
278 unsigned int idx,
279 struct auxtrace_buffer *buffer)
280 {
281 struct auxtrace_queue *queue;
282 int err;
283
284 if (idx >= queues->nr_queues) {
285 err = auxtrace_queues__grow(queues, idx + 1);
286 if (err)
287 return err;
288 }
289
290 queue = &queues->queue_array[idx];
291
292 if (!queue->set) {
293 queue->set = true;
294 queue->tid = buffer->tid;
295 queue->cpu = buffer->cpu;
296 }
297
298 buffer->buffer_nr = queues->next_buffer_nr++;
299
300 list_add_tail(&buffer->list, &queue->head);
301
302 queues->new_data = true;
303 queues->populated = true;
304
305 return 0;
306 }
307
308 /* Limit buffers to 32MiB on 32-bit */
309 #define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
310
311 static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
312 unsigned int idx,
313 struct auxtrace_buffer *buffer)
314 {
315 u64 sz = buffer->size;
316 bool consecutive = false;
317 struct auxtrace_buffer *b;
318 int err;
319
320 while (sz > BUFFER_LIMIT_FOR_32_BIT) {
321 b = memdup(buffer, sizeof(struct auxtrace_buffer));
322 if (!b)
323 return -ENOMEM;
324 b->size = BUFFER_LIMIT_FOR_32_BIT;
325 b->consecutive = consecutive;
326 err = auxtrace_queues__queue_buffer(queues, idx, b);
327 if (err) {
328 auxtrace_buffer__free(b);
329 return err;
330 }
331 buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
332 sz -= BUFFER_LIMIT_FOR_32_BIT;
333 consecutive = true;
334 }
335
336 buffer->size = sz;
337 buffer->consecutive = consecutive;
338
339 return 0;
340 }
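
/*
 * Worked example (illustrative, not in the original source): on a 32-bit
 * tool, a single 100MiB AUX area buffer is queued as three 32MiB pieces
 * followed by the original buffer trimmed to the remaining 4MiB. Every piece
 * after the first has 'consecutive' set, and data_offset advances by
 * BUFFER_LIMIT_FOR_32_BIT for each piece split off, so decoders can treat
 * the pieces as one contiguous stream.
 */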
341
342 static bool filter_cpu(struct perf_session *session, int cpu)
343 {
344 unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
345
346 return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
347 }
348
349 static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
350 struct perf_session *session,
351 unsigned int idx,
352 struct auxtrace_buffer *buffer,
353 struct auxtrace_buffer **buffer_ptr)
354 {
355 int err = -ENOMEM;
356
357 if (filter_cpu(session, buffer->cpu))
358 return 0;
359
360 buffer = memdup(buffer, sizeof(*buffer));
361 if (!buffer)
362 return -ENOMEM;
363
364 if (session->one_mmap) {
365 buffer->data = buffer->data_offset - session->one_mmap_offset +
366 session->one_mmap_addr;
367 } else if (perf_data__is_pipe(session->data)) {
368 buffer->data = auxtrace_copy_data(buffer->size, session);
369 if (!buffer->data)
370 goto out_free;
371 buffer->data_needs_freeing = true;
372 } else if (BITS_PER_LONG == 32 &&
373 buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
374 err = auxtrace_queues__split_buffer(queues, idx, buffer);
375 if (err)
376 goto out_free;
377 }
378
379 err = auxtrace_queues__queue_buffer(queues, idx, buffer);
380 if (err)
381 goto out_free;
382
383 /* FIXME: Doesn't work for split buffer */
384 if (buffer_ptr)
385 *buffer_ptr = buffer;
386
387 return 0;
388
389 out_free:
390 auxtrace_buffer__free(buffer);
391 return err;
392 }
393
394 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
395 struct perf_session *session,
396 union perf_event *event, off_t data_offset,
397 struct auxtrace_buffer **buffer_ptr)
398 {
399 struct auxtrace_buffer buffer = {
400 .pid = -1,
401 .tid = event->auxtrace.tid,
402 .cpu = event->auxtrace.cpu,
403 .data_offset = data_offset,
404 .offset = event->auxtrace.offset,
405 .reference = event->auxtrace.reference,
406 .size = event->auxtrace.size,
407 };
408 unsigned int idx = event->auxtrace.idx;
409
410 return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
411 buffer_ptr);
412 }
413
414 static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
415 struct perf_session *session,
416 off_t file_offset, size_t sz)
417 {
418 union perf_event *event;
419 int err;
420 char buf[PERF_SAMPLE_MAX_SIZE];
421
422 err = perf_session__peek_event(session, file_offset, buf,
423 PERF_SAMPLE_MAX_SIZE, &event, NULL);
424 if (err)
425 return err;
426
427 if (event->header.type == PERF_RECORD_AUXTRACE) {
428 if (event->header.size < sizeof(struct perf_record_auxtrace) ||
429 event->header.size != sz) {
430 err = -EINVAL;
431 goto out;
432 }
433 file_offset += event->header.size;
434 err = auxtrace_queues__add_event(queues, session, event,
435 file_offset, NULL);
436 }
437 out:
438 return err;
439 }
440
441 void auxtrace_queues__free(struct auxtrace_queues *queues)
442 {
443 unsigned int i;
444
445 for (i = 0; i < queues->nr_queues; i++) {
446 while (!list_empty(&queues->queue_array[i].head)) {
447 struct auxtrace_buffer *buffer;
448
449 buffer = list_entry(queues->queue_array[i].head.next,
450 struct auxtrace_buffer, list);
451 list_del_init(&buffer->list);
452 auxtrace_buffer__free(buffer);
453 }
454 }
455
456 zfree(&queues->queue_array);
457 queues->nr_queues = 0;
458 }
459
460 static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
461 unsigned int pos, unsigned int queue_nr,
462 u64 ordinal)
463 {
464 unsigned int parent;
465
466 while (pos) {
467 parent = (pos - 1) >> 1;
468 if (heap_array[parent].ordinal <= ordinal)
469 break;
470 heap_array[pos] = heap_array[parent];
471 pos = parent;
472 }
473 heap_array[pos].queue_nr = queue_nr;
474 heap_array[pos].ordinal = ordinal;
475 }
476
477 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
478 u64 ordinal)
479 {
480 struct auxtrace_heap_item *heap_array;
481
482 if (queue_nr >= heap->heap_sz) {
483 unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
484
485 while (heap_sz <= queue_nr)
486 heap_sz <<= 1;
487 heap_array = realloc(heap->heap_array,
488 heap_sz * sizeof(struct auxtrace_heap_item));
489 if (!heap_array)
490 return -ENOMEM;
491 heap->heap_array = heap_array;
492 heap->heap_sz = heap_sz;
493 }
494
495 auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
496
497 return 0;
498 }
499
500 void auxtrace_heap__free(struct auxtrace_heap *heap)
501 {
502 zfree(&heap->heap_array);
503 heap->heap_cnt = 0;
504 heap->heap_sz = 0;
505 }
506
507 void auxtrace_heap__pop(struct auxtrace_heap *heap)
508 {
509 unsigned int pos, last, heap_cnt = heap->heap_cnt;
510 struct auxtrace_heap_item *heap_array;
511
512 if (!heap_cnt)
513 return;
514
515 heap->heap_cnt -= 1;
516
517 heap_array = heap->heap_array;
518
519 pos = 0;
520 while (1) {
521 unsigned int left, right;
522
523 left = (pos << 1) + 1;
524 if (left >= heap_cnt)
525 break;
526 right = left + 1;
527 if (right >= heap_cnt) {
528 heap_array[pos] = heap_array[left];
529 return;
530 }
531 if (heap_array[left].ordinal < heap_array[right].ordinal) {
532 heap_array[pos] = heap_array[left];
533 pos = left;
534 } else {
535 heap_array[pos] = heap_array[right];
536 pos = right;
537 }
538 }
539
540 last = heap_cnt - 1;
541 auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
542 heap_array[last].ordinal);
543 }
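
/*
 * Illustrative sketch (not in the original source): hardware trace decoders
 * use this min-heap to process queues in timestamp order. Each queue is added
 * with the timestamp of its next data as the ordinal, then the queue with the
 * smallest ordinal is taken from the root of the heap:
 *
 *	auxtrace_heap__add(&heap, queue_nr, timestamp);
 *	...
 *	queue_nr = heap.heap_array[0].queue_nr;	// smallest ordinal
 *	auxtrace_heap__pop(&heap);
 *	// decode queue_nr up to its next timestamp, then re-add it
 */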
544
545 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
546 struct evlist *evlist)
547 {
548 if (itr)
549 return itr->info_priv_size(itr, evlist);
550 return 0;
551 }
552
553 static int auxtrace_not_supported(void)
554 {
555 pr_err("AUX area tracing is not supported on this architecture\n");
556 return -EINVAL;
557 }
558
559 int auxtrace_record__info_fill(struct auxtrace_record *itr,
560 struct perf_session *session,
561 struct perf_record_auxtrace_info *auxtrace_info,
562 size_t priv_size)
563 {
564 if (itr)
565 return itr->info_fill(itr, session, auxtrace_info, priv_size);
566 return auxtrace_not_supported();
567 }
568
569 void auxtrace_record__free(struct auxtrace_record *itr)
570 {
571 if (itr)
572 itr->free(itr);
573 }
574
575 int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
576 {
577 if (itr && itr->snapshot_start)
578 return itr->snapshot_start(itr);
579 return 0;
580 }
581
582 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
583 {
584 if (!on_exit && itr && itr->snapshot_finish)
585 return itr->snapshot_finish(itr);
586 return 0;
587 }
588
589 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
590 struct auxtrace_mmap *mm,
591 unsigned char *data, u64 *head, u64 *old)
592 {
593 if (itr && itr->find_snapshot)
594 return itr->find_snapshot(itr, idx, mm, data, head, old);
595 return 0;
596 }
597
598 int auxtrace_record__options(struct auxtrace_record *itr,
599 struct evlist *evlist,
600 struct record_opts *opts)
601 {
602 if (itr) {
603 itr->evlist = evlist;
604 return itr->recording_options(itr, evlist, opts);
605 }
606 return 0;
607 }
608
609 u64 auxtrace_record__reference(struct auxtrace_record *itr)
610 {
611 if (itr)
612 return itr->reference(itr);
613 return 0;
614 }
615
616 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
617 struct record_opts *opts, const char *str)
618 {
619 if (!str)
620 return 0;
621
622 /* PMU-agnostic options */
623 switch (*str) {
624 case 'e':
625 opts->auxtrace_snapshot_on_exit = true;
626 str++;
627 break;
628 default:
629 break;
630 }
631
632 if (itr && itr->parse_snapshot_options)
633 return itr->parse_snapshot_options(itr, opts, str);
634
635 pr_err("No AUX area tracing to snapshot\n");
636 return -EINVAL;
637 }
638
639 int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
640 {
641 struct evsel *evsel;
642
643 if (!itr->evlist || !itr->pmu)
644 return -EINVAL;
645
646 evlist__for_each_entry(itr->evlist, evsel) {
647 if (evsel->core.attr.type == itr->pmu->type) {
648 if (evsel->disabled)
649 return 0;
650 return evlist__enable_event_idx(itr->evlist, evsel, idx);
651 }
652 }
653 return -EINVAL;
654 }
655
656 /*
657 * Event record size is 16-bit which results in a maximum size of about 64KiB.
658 * Allow about 4KiB for the rest of the sample record, to give a maximum
659 * AUX area sample size of 60KiB.
660 */
661 #define MAX_AUX_SAMPLE_SIZE (60 * 1024)
662
663 /* Arbitrary default size if no other default provided */
664 #define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
665
666 static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
667 struct record_opts *opts)
668 {
669 struct evsel *evsel;
670 bool has_aux_leader = false;
671 u32 sz;
672
673 evlist__for_each_entry(evlist, evsel) {
674 sz = evsel->core.attr.aux_sample_size;
675 if (evsel__is_group_leader(evsel)) {
676 has_aux_leader = evsel__is_aux_event(evsel);
677 if (sz) {
678 if (has_aux_leader)
679 pr_err("Cannot add AUX area sampling to an AUX area event\n");
680 else
681 pr_err("Cannot add AUX area sampling to a group leader\n");
682 return -EINVAL;
683 }
684 }
685 if (sz > MAX_AUX_SAMPLE_SIZE) {
686 pr_err("AUX area sample size %u too big, max. %d\n",
687 sz, MAX_AUX_SAMPLE_SIZE);
688 return -EINVAL;
689 }
690 if (sz) {
691 if (!has_aux_leader) {
692 pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
693 return -EINVAL;
694 }
695 evsel__set_sample_bit(evsel, AUX);
696 opts->auxtrace_sample_mode = true;
697 } else {
698 evsel__reset_sample_bit(evsel, AUX);
699 }
700 }
701
702 if (!opts->auxtrace_sample_mode) {
703 pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
704 return -EINVAL;
705 }
706
707 if (!perf_can_aux_sample()) {
708 pr_err("AUX area sampling is not supported by kernel\n");
709 return -EINVAL;
710 }
711
712 return 0;
713 }
714
715 int auxtrace_parse_sample_options(struct auxtrace_record *itr,
716 struct evlist *evlist,
717 struct record_opts *opts, const char *str)
718 {
719 struct evsel_config_term *term;
720 struct evsel *aux_evsel;
721 bool has_aux_sample_size = false;
722 bool has_aux_leader = false;
723 struct evsel *evsel;
724 char *endptr;
725 unsigned long sz;
726
727 if (!str)
728 goto no_opt;
729
730 if (!itr) {
731 pr_err("No AUX area event to sample\n");
732 return -EINVAL;
733 }
734
735 sz = strtoul(str, &endptr, 0);
736 if (*endptr || sz > UINT_MAX) {
737 pr_err("Bad AUX area sampling option: '%s'\n", str);
738 return -EINVAL;
739 }
740
741 if (!sz)
742 sz = itr->default_aux_sample_size;
743
744 if (!sz)
745 sz = DEFAULT_AUX_SAMPLE_SIZE;
746
747 /* Set aux_sample_size based on --aux-sample option */
748 evlist__for_each_entry(evlist, evsel) {
749 if (evsel__is_group_leader(evsel)) {
750 has_aux_leader = evsel__is_aux_event(evsel);
751 } else if (has_aux_leader) {
752 evsel->core.attr.aux_sample_size = sz;
753 }
754 }
755 no_opt:
756 aux_evsel = NULL;
757 /* Override with aux_sample_size from config term */
758 evlist__for_each_entry(evlist, evsel) {
759 if (evsel__is_aux_event(evsel))
760 aux_evsel = evsel;
761 term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
762 if (term) {
763 has_aux_sample_size = true;
764 evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
765 /* If possible, group with the AUX event */
766 if (aux_evsel && evsel->core.attr.aux_sample_size)
767 evlist__regroup(evlist, aux_evsel, evsel);
768 }
769 }
770
771 if (!str && !has_aux_sample_size)
772 return 0;
773
774 if (!itr) {
775 pr_err("No AUX area event to sample\n");
776 return -EINVAL;
777 }
778
779 return auxtrace_validate_aux_sample_size(evlist, opts);
780 }
781
782 void auxtrace_regroup_aux_output(struct evlist *evlist)
783 {
784 struct evsel *evsel, *aux_evsel = NULL;
785 struct evsel_config_term *term;
786
787 evlist__for_each_entry(evlist, evsel) {
788 if (evsel__is_aux_event(evsel))
789 aux_evsel = evsel;
790 term = evsel__get_config_term(evsel, AUX_OUTPUT);
791 /* If possible, group with the AUX event */
792 if (term && aux_evsel)
793 evlist__regroup(evlist, aux_evsel, evsel);
794 }
795 }
796
797 struct auxtrace_record *__weak
798 auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
799 {
800 *err = 0;
801 return NULL;
802 }
803
804 static int auxtrace_index__alloc(struct list_head *head)
805 {
806 struct auxtrace_index *auxtrace_index;
807
808 auxtrace_index = malloc(sizeof(struct auxtrace_index));
809 if (!auxtrace_index)
810 return -ENOMEM;
811
812 auxtrace_index->nr = 0;
813 INIT_LIST_HEAD(&auxtrace_index->list);
814
815 list_add_tail(&auxtrace_index->list, head);
816
817 return 0;
818 }
819
820 void auxtrace_index__free(struct list_head *head)
821 {
822 struct auxtrace_index *auxtrace_index, *n;
823
824 list_for_each_entry_safe(auxtrace_index, n, head, list) {
825 list_del_init(&auxtrace_index->list);
826 free(auxtrace_index);
827 }
828 }
829
830 static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
831 {
832 struct auxtrace_index *auxtrace_index;
833 int err;
834
835 if (list_empty(head)) {
836 err = auxtrace_index__alloc(head);
837 if (err)
838 return NULL;
839 }
840
841 auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
842
843 if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
844 err = auxtrace_index__alloc(head);
845 if (err)
846 return NULL;
847 auxtrace_index = list_entry(head->prev, struct auxtrace_index,
848 list);
849 }
850
851 return auxtrace_index;
852 }
853
854 int auxtrace_index__auxtrace_event(struct list_head *head,
855 union perf_event *event, off_t file_offset)
856 {
857 struct auxtrace_index *auxtrace_index;
858 size_t nr;
859
860 auxtrace_index = auxtrace_index__last(head);
861 if (!auxtrace_index)
862 return -ENOMEM;
863
864 nr = auxtrace_index->nr;
865 auxtrace_index->entries[nr].file_offset = file_offset;
866 auxtrace_index->entries[nr].sz = event->header.size;
867 auxtrace_index->nr += 1;
868
869 return 0;
870 }
871
872 static int auxtrace_index__do_write(int fd,
873 struct auxtrace_index *auxtrace_index)
874 {
875 struct auxtrace_index_entry ent;
876 size_t i;
877
878 for (i = 0; i < auxtrace_index->nr; i++) {
879 ent.file_offset = auxtrace_index->entries[i].file_offset;
880 ent.sz = auxtrace_index->entries[i].sz;
881 if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
882 return -errno;
883 }
884 return 0;
885 }
886
887 int auxtrace_index__write(int fd, struct list_head *head)
888 {
889 struct auxtrace_index *auxtrace_index;
890 u64 total = 0;
891 int err;
892
893 list_for_each_entry(auxtrace_index, head, list)
894 total += auxtrace_index->nr;
895
896 if (writen(fd, &total, sizeof(total)) != sizeof(total))
897 return -errno;
898
899 list_for_each_entry(auxtrace_index, head, list) {
900 err = auxtrace_index__do_write(fd, auxtrace_index);
901 if (err)
902 return err;
903 }
904
905 return 0;
906 }
907
908 static int auxtrace_index__process_entry(int fd, struct list_head *head,
909 bool needs_swap)
910 {
911 struct auxtrace_index *auxtrace_index;
912 struct auxtrace_index_entry ent;
913 size_t nr;
914
915 if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
916 return -1;
917
918 auxtrace_index = auxtrace_index__last(head);
919 if (!auxtrace_index)
920 return -1;
921
922 nr = auxtrace_index->nr;
923 if (needs_swap) {
924 auxtrace_index->entries[nr].file_offset =
925 bswap_64(ent.file_offset);
926 auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
927 } else {
928 auxtrace_index->entries[nr].file_offset = ent.file_offset;
929 auxtrace_index->entries[nr].sz = ent.sz;
930 }
931
932 auxtrace_index->nr = nr + 1;
933
934 return 0;
935 }
936
937 int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
938 bool needs_swap)
939 {
940 struct list_head *head = &session->auxtrace_index;
941 u64 nr;
942
943 if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
944 return -1;
945
946 if (needs_swap)
947 nr = bswap_64(nr);
948
949 if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
950 return -1;
951
952 while (nr--) {
953 int err;
954
955 err = auxtrace_index__process_entry(fd, head, needs_swap);
956 if (err)
957 return -1;
958 }
959
960 return 0;
961 }
962
963 static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
964 struct perf_session *session,
965 struct auxtrace_index_entry *ent)
966 {
967 return auxtrace_queues__add_indexed_event(queues, session,
968 ent->file_offset, ent->sz);
969 }
970
971 int auxtrace_queues__process_index(struct auxtrace_queues *queues,
972 struct perf_session *session)
973 {
974 struct auxtrace_index *auxtrace_index;
975 struct auxtrace_index_entry *ent;
976 size_t i;
977 int err;
978
979 if (auxtrace__dont_decode(session))
980 return 0;
981
982 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
983 for (i = 0; i < auxtrace_index->nr; i++) {
984 ent = &auxtrace_index->entries[i];
985 err = auxtrace_queues__process_index_entry(queues,
986 session,
987 ent);
988 if (err)
989 return err;
990 }
991 }
992 return 0;
993 }
994
995 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
996 struct auxtrace_buffer *buffer)
997 {
998 if (buffer) {
999 if (list_is_last(&buffer->list, &queue->head))
1000 return NULL;
1001 return list_entry(buffer->list.next, struct auxtrace_buffer,
1002 list);
1003 } else {
1004 if (list_empty(&queue->head))
1005 return NULL;
1006 return list_entry(queue->head.next, struct auxtrace_buffer,
1007 list);
1008 }
1009 }
1010
1011 struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
1012 struct perf_sample *sample,
1013 struct perf_session *session)
1014 {
1015 struct perf_sample_id *sid;
1016 unsigned int idx;
1017 u64 id;
1018
1019 id = sample->id;
1020 if (!id)
1021 return NULL;
1022
1023 sid = evlist__id2sid(session->evlist, id);
1024 if (!sid)
1025 return NULL;
1026
1027 idx = sid->idx;
1028
1029 if (idx >= queues->nr_queues)
1030 return NULL;
1031
1032 return &queues->queue_array[idx];
1033 }
1034
1035 int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
1036 struct perf_session *session,
1037 struct perf_sample *sample, u64 data_offset,
1038 u64 reference)
1039 {
1040 struct auxtrace_buffer buffer = {
1041 .pid = -1,
1042 .data_offset = data_offset,
1043 .reference = reference,
1044 .size = sample->aux_sample.size,
1045 };
1046 struct perf_sample_id *sid;
1047 u64 id = sample->id;
1048 unsigned int idx;
1049
1050 if (!id)
1051 return -EINVAL;
1052
1053 sid = evlist__id2sid(session->evlist, id);
1054 if (!sid)
1055 return -ENOENT;
1056
1057 idx = sid->idx;
1058 buffer.tid = sid->tid;
1059 buffer.cpu = sid->cpu;
1060
1061 return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
1062 }
1063
1064 struct queue_data {
1065 bool samples;
1066 bool events;
1067 };
1068
1069 static int auxtrace_queue_data_cb(struct perf_session *session,
1070 union perf_event *event, u64 offset,
1071 void *data)
1072 {
1073 struct queue_data *qd = data;
1074 struct perf_sample sample;
1075 int err;
1076
1077 if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
1078 if (event->header.size < sizeof(struct perf_record_auxtrace))
1079 return -EINVAL;
1080 offset += event->header.size;
1081 return session->auxtrace->queue_data(session, NULL, event,
1082 offset);
1083 }
1084
1085 if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
1086 return 0;
1087
1088 err = evlist__parse_sample(session->evlist, event, &sample);
1089 if (err)
1090 return err;
1091
1092 if (!sample.aux_sample.size)
1093 return 0;
1094
1095 offset += sample.aux_sample.data - (void *)event;
1096
1097 return session->auxtrace->queue_data(session, &sample, NULL, offset);
1098 }
1099
1100 int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
1101 {
1102 struct queue_data qd = {
1103 .samples = samples,
1104 .events = events,
1105 };
1106
1107 if (auxtrace__dont_decode(session))
1108 return 0;
1109
1110 if (!session->auxtrace || !session->auxtrace->queue_data)
1111 return -EINVAL;
1112
1113 return perf_session__peek_events(session, session->header.data_offset,
1114 session->header.data_size,
1115 auxtrace_queue_data_cb, &qd);
1116 }
1117
1118 void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
1119 {
1120 int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
1121 size_t adj = buffer->data_offset & (page_size - 1);
1122 size_t size = buffer->size + adj;
1123 off_t file_offset = buffer->data_offset - adj;
1124 void *addr;
1125
1126 if (buffer->data)
1127 return buffer->data;
1128
1129 addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
1130 if (addr == MAP_FAILED)
1131 return NULL;
1132
1133 buffer->mmap_addr = addr;
1134 buffer->mmap_size = size;
1135
1136 buffer->data = addr + adj;
1137
1138 return buffer->data;
1139 }
1140
1141 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
1142 {
1143 if (!buffer->data || !buffer->mmap_addr)
1144 return;
1145 munmap(buffer->mmap_addr, buffer->mmap_size);
1146 buffer->mmap_addr = NULL;
1147 buffer->mmap_size = 0;
1148 buffer->data = NULL;
1149 buffer->use_data = NULL;
1150 }
1151
1152 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
1153 {
1154 auxtrace_buffer__put_data(buffer);
1155 if (buffer->data_needs_freeing) {
1156 buffer->data_needs_freeing = false;
1157 zfree(&buffer->data);
1158 buffer->use_data = NULL;
1159 buffer->size = 0;
1160 }
1161 }
1162
1163 void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
1164 {
1165 auxtrace_buffer__drop_data(buffer);
1166 free(buffer);
1167 }
1168
1169 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
1170 int code, int cpu, pid_t pid, pid_t tid, u64 ip,
1171 const char *msg, u64 timestamp)
1172 {
1173 size_t size;
1174
1175 memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));
1176
1177 auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
1178 auxtrace_error->type = type;
1179 auxtrace_error->code = code;
1180 auxtrace_error->cpu = cpu;
1181 auxtrace_error->pid = pid;
1182 auxtrace_error->tid = tid;
1183 auxtrace_error->fmt = 1;
1184 auxtrace_error->ip = ip;
1185 auxtrace_error->time = timestamp;
1186 strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
1187
1188 size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
1189 strlen(auxtrace_error->msg) + 1;
1190 auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
1191 }
1192
1193 int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
1194 struct perf_tool *tool,
1195 struct perf_session *session,
1196 perf_event__handler_t process)
1197 {
1198 union perf_event *ev;
1199 size_t priv_size;
1200 int err;
1201
1202 pr_debug2("Synthesizing auxtrace information\n");
1203 priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
1204 ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
1205 if (!ev)
1206 return -ENOMEM;
1207
1208 ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
1209 ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
1210 priv_size;
1211 err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
1212 priv_size);
1213 if (err)
1214 goto out_free;
1215
1216 err = process(tool, ev, NULL, NULL);
1217 out_free:
1218 free(ev);
1219 return err;
1220 }
1221
1222 static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
1223 {
1224 struct evsel *new_leader = NULL;
1225 struct evsel *evsel;
1226
1227 /* Find new leader for the group */
1228 evlist__for_each_entry(evlist, evsel) {
1229 if (!evsel__has_leader(evsel, leader) || evsel == leader)
1230 continue;
1231 if (!new_leader)
1232 new_leader = evsel;
1233 evsel__set_leader(evsel, new_leader);
1234 }
1235
1236 /* Update group information */
1237 if (new_leader) {
1238 zfree(&new_leader->group_name);
1239 new_leader->group_name = leader->group_name;
1240 leader->group_name = NULL;
1241
1242 new_leader->core.nr_members = leader->core.nr_members - 1;
1243 leader->core.nr_members = 1;
1244 }
1245 }
1246
1247 static void unleader_auxtrace(struct perf_session *session)
1248 {
1249 struct evsel *evsel;
1250
1251 evlist__for_each_entry(session->evlist, evsel) {
1252 if (auxtrace__evsel_is_auxtrace(session, evsel) &&
1253 evsel__is_group_leader(evsel)) {
1254 unleader_evsel(session->evlist, evsel);
1255 }
1256 }
1257 }
1258
1259 int perf_event__process_auxtrace_info(struct perf_session *session,
1260 union perf_event *event)
1261 {
1262 enum auxtrace_type type = event->auxtrace_info.type;
1263 int err;
1264
1265 if (dump_trace)
1266 fprintf(stdout, " type: %u\n", type);
1267
1268 switch (type) {
1269 case PERF_AUXTRACE_INTEL_PT:
1270 err = intel_pt_process_auxtrace_info(event, session);
1271 break;
1272 case PERF_AUXTRACE_INTEL_BTS:
1273 err = intel_bts_process_auxtrace_info(event, session);
1274 break;
1275 case PERF_AUXTRACE_ARM_SPE:
1276 err = arm_spe_process_auxtrace_info(event, session);
1277 break;
1278 case PERF_AUXTRACE_CS_ETM:
1279 err = cs_etm__process_auxtrace_info(event, session);
1280 break;
1281 case PERF_AUXTRACE_S390_CPUMSF:
1282 err = s390_cpumsf_process_auxtrace_info(event, session);
1283 break;
1284 case PERF_AUXTRACE_UNKNOWN:
1285 default:
1286 return -EINVAL;
1287 }
1288
1289 if (err)
1290 return err;
1291
1292 unleader_auxtrace(session);
1293
1294 return 0;
1295 }
1296
1297 s64 perf_event__process_auxtrace(struct perf_session *session,
1298 union perf_event *event)
1299 {
1300 s64 err;
1301
1302 if (dump_trace)
1303 fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
1304 event->auxtrace.size, event->auxtrace.offset,
1305 event->auxtrace.reference, event->auxtrace.idx,
1306 event->auxtrace.tid, event->auxtrace.cpu);
1307
1308 if (auxtrace__dont_decode(session))
1309 return event->auxtrace.size;
1310
1311 if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
1312 return -EINVAL;
1313
1314 err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
1315 if (err < 0)
1316 return err;
1317
1318 return event->auxtrace.size;
1319 }
1320
1321 #define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS
1322 #define PERF_ITRACE_DEFAULT_PERIOD 100000
1323 #define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16
1324 #define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024
1325 #define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64
1326 #define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024
1327
1328 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
1329 bool no_sample)
1330 {
1331 synth_opts->branches = true;
1332 synth_opts->transactions = true;
1333 synth_opts->ptwrites = true;
1334 synth_opts->pwr_events = true;
1335 synth_opts->other_events = true;
1336 synth_opts->errors = true;
1337 synth_opts->flc = true;
1338 synth_opts->llc = true;
1339 synth_opts->tlb = true;
1340 synth_opts->mem = true;
1341 synth_opts->remote_access = true;
1342
1343 if (no_sample) {
1344 synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
1345 synth_opts->period = 1;
1346 synth_opts->calls = true;
1347 } else {
1348 synth_opts->instructions = true;
1349 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1350 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1351 }
1352 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1353 synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1354 synth_opts->initial_skip = 0;
1355 }
1356
1357 static int get_flag(const char **ptr, unsigned int *flags)
1358 {
1359 while (1) {
1360 char c = **ptr;
1361
1362 if (c >= 'a' && c <= 'z') {
1363 *flags |= 1 << (c - 'a');
1364 ++*ptr;
1365 return 0;
1366 } else if (c == ' ') {
1367 ++*ptr;
1368 continue;
1369 } else {
1370 return -1;
1371 }
1372 }
1373 }
1374
1375 static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
1376 {
1377 while (1) {
1378 switch (**ptr) {
1379 case '+':
1380 ++*ptr;
1381 if (get_flag(ptr, plus_flags))
1382 return -1;
1383 break;
1384 case '-':
1385 ++*ptr;
1386 if (get_flag(ptr, minus_flags))
1387 return -1;
1388 break;
1389 case ' ':
1390 ++*ptr;
1391 break;
1392 default:
1393 return 0;
1394 }
1395 }
1396 }
1397
1398 /*
1399 * Please check tools/perf/Documentation/perf-script.txt for information
1400 * about the options parsed here, which is introduced after this cset,
1401 * when support in 'perf script' for these options is introduced.
1402 */
1403 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
1404 const char *str, int unset)
1405 {
1406 const char *p;
1407 char *endptr;
1408 bool period_type_set = false;
1409 bool period_set = false;
1410
1411 synth_opts->set = true;
1412
1413 if (unset) {
1414 synth_opts->dont_decode = true;
1415 return 0;
1416 }
1417
1418 if (!str) {
1419 itrace_synth_opts__set_default(synth_opts,
1420 synth_opts->default_no_sample);
1421 return 0;
1422 }
1423
1424 for (p = str; *p;) {
1425 switch (*p++) {
1426 case 'i':
1427 synth_opts->instructions = true;
1428 while (*p == ' ' || *p == ',')
1429 p += 1;
1430 if (isdigit(*p)) {
1431 synth_opts->period = strtoull(p, &endptr, 10);
1432 period_set = true;
1433 p = endptr;
1434 while (*p == ' ' || *p == ',')
1435 p += 1;
1436 switch (*p++) {
1437 case 'i':
1438 synth_opts->period_type =
1439 PERF_ITRACE_PERIOD_INSTRUCTIONS;
1440 period_type_set = true;
1441 break;
1442 case 't':
1443 synth_opts->period_type =
1444 PERF_ITRACE_PERIOD_TICKS;
1445 period_type_set = true;
1446 break;
1447 case 'm':
1448 synth_opts->period *= 1000;
1449 /* Fall through */
1450 case 'u':
1451 synth_opts->period *= 1000;
1452 /* Fall through */
1453 case 'n':
1454 if (*p++ != 's')
1455 goto out_err;
1456 synth_opts->period_type =
1457 PERF_ITRACE_PERIOD_NANOSECS;
1458 period_type_set = true;
1459 break;
1460 case '\0':
1461 goto out;
1462 default:
1463 goto out_err;
1464 }
1465 }
1466 break;
1467 case 'b':
1468 synth_opts->branches = true;
1469 break;
1470 case 'x':
1471 synth_opts->transactions = true;
1472 break;
1473 case 'w':
1474 synth_opts->ptwrites = true;
1475 break;
1476 case 'p':
1477 synth_opts->pwr_events = true;
1478 break;
1479 case 'o':
1480 synth_opts->other_events = true;
1481 break;
1482 case 'e':
1483 synth_opts->errors = true;
1484 if (get_flags(&p, &synth_opts->error_plus_flags,
1485 &synth_opts->error_minus_flags))
1486 goto out_err;
1487 break;
1488 case 'd':
1489 synth_opts->log = true;
1490 if (get_flags(&p, &synth_opts->log_plus_flags,
1491 &synth_opts->log_minus_flags))
1492 goto out_err;
1493 break;
1494 case 'c':
1495 synth_opts->branches = true;
1496 synth_opts->calls = true;
1497 break;
1498 case 'r':
1499 synth_opts->branches = true;
1500 synth_opts->returns = true;
1501 break;
1502 case 'G':
1503 case 'g':
1504 if (p[-1] == 'G')
1505 synth_opts->add_callchain = true;
1506 else
1507 synth_opts->callchain = true;
1508 synth_opts->callchain_sz =
1509 PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1510 while (*p == ' ' || *p == ',')
1511 p += 1;
1512 if (isdigit(*p)) {
1513 unsigned int val;
1514
1515 val = strtoul(p, &endptr, 10);
1516 p = endptr;
1517 if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
1518 goto out_err;
1519 synth_opts->callchain_sz = val;
1520 }
1521 break;
1522 case 'L':
1523 case 'l':
1524 if (p[-1] == 'L')
1525 synth_opts->add_last_branch = true;
1526 else
1527 synth_opts->last_branch = true;
1528 synth_opts->last_branch_sz =
1529 PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1530 while (*p == ' ' || *p == ',')
1531 p += 1;
1532 if (isdigit(*p)) {
1533 unsigned int val;
1534
1535 val = strtoul(p, &endptr, 10);
1536 p = endptr;
1537 if (!val ||
1538 val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
1539 goto out_err;
1540 synth_opts->last_branch_sz = val;
1541 }
1542 break;
1543 case 's':
1544 synth_opts->initial_skip = strtoul(p, &endptr, 10);
1545 if (p == endptr)
1546 goto out_err;
1547 p = endptr;
1548 break;
1549 case 'f':
1550 synth_opts->flc = true;
1551 break;
1552 case 'm':
1553 synth_opts->llc = true;
1554 break;
1555 case 't':
1556 synth_opts->tlb = true;
1557 break;
1558 case 'a':
1559 synth_opts->remote_access = true;
1560 break;
1561 case 'M':
1562 synth_opts->mem = true;
1563 break;
1564 case 'q':
1565 synth_opts->quick += 1;
1566 break;
1567 case 'Z':
1568 synth_opts->timeless_decoding = true;
1569 break;
1570 case ' ':
1571 case ',':
1572 break;
1573 default:
1574 goto out_err;
1575 }
1576 }
1577 out:
1578 if (synth_opts->instructions) {
1579 if (!period_type_set)
1580 synth_opts->period_type =
1581 PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1582 if (!period_set)
1583 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1584 }
1585
1586 return 0;
1587
1588 out_err:
1589 pr_err("Bad Instruction Tracing options '%s'\n", str);
1590 return -EINVAL;
1591 }
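
/*
 * Illustrative examples (not in the original source) of option strings
 * accepted by the parser above; see tools/perf/Documentation/perf-script.txt
 * for the authoritative description:
 *
 *	"i100us"	synthesize instruction events every 100 microseconds
 *	"be"		synthesize branch events and error events
 *	"cg32"		synthesize calls, with call chains up to 32 entries
 */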
1592
1593 int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
1594 {
1595 return itrace_do_parse_synth_opts(opt->value, str, unset);
1596 }
1597
1598 static const char * const auxtrace_error_type_name[] = {
1599 [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
1600 };
1601
1602 static const char *auxtrace_error_name(int type)
1603 {
1604 const char *error_type_name = NULL;
1605
1606 if (type < PERF_AUXTRACE_ERROR_MAX)
1607 error_type_name = auxtrace_error_type_name[type];
1608 if (!error_type_name)
1609 error_type_name = "unknown AUX";
1610 return error_type_name;
1611 }
1612
1613 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
1614 {
1615 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1616 unsigned long long nsecs = e->time;
1617 const char *msg = e->msg;
1618 int ret;
1619
1620 ret = fprintf(fp, " %s error type %u",
1621 auxtrace_error_name(e->type), e->type);
1622
1623 if (e->fmt && nsecs) {
1624 unsigned long secs = nsecs / NSEC_PER_SEC;
1625
1626 nsecs -= secs * NSEC_PER_SEC;
1627 ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
1628 } else {
1629 ret += fprintf(fp, " time 0");
1630 }
1631
1632 if (!e->fmt)
1633 msg = (const char *)&e->time;
1634
1635 ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
1636 e->cpu, e->pid, e->tid, e->ip, e->code, msg);
1637 return ret;
1638 }
1639
1640 void perf_session__auxtrace_error_inc(struct perf_session *session,
1641 union perf_event *event)
1642 {
1643 struct perf_record_auxtrace_error *e = &event->auxtrace_error;
1644
1645 if (e->type < PERF_AUXTRACE_ERROR_MAX)
1646 session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
1647 }
1648
1649 void events_stats__auxtrace_error_warn(const struct events_stats *stats)
1650 {
1651 int i;
1652
1653 for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
1654 if (!stats->nr_auxtrace_errors[i])
1655 continue;
1656 ui__warning("%u %s errors\n",
1657 stats->nr_auxtrace_errors[i],
1658 auxtrace_error_name(i));
1659 }
1660 }
1661
1662 int perf_event__process_auxtrace_error(struct perf_session *session,
1663 union perf_event *event)
1664 {
1665 if (auxtrace__dont_decode(session))
1666 return 0;
1667
1668 perf_event__fprintf_auxtrace_error(event, stdout);
1669 return 0;
1670 }
1671
1672 /*
1673 * In compat mode the kernel runs 64-bit while the perf tool runs 32-bit, so
1674 * the 32-bit perf tool cannot access a 64-bit value atomically, which can
1675 * lead to the issues caused by the sequence below on multiple CPUs: when the
1676 * perf tool either loads or stores a 64-bit value, on some architectures the
1677 * operation is split into two instructions, one accessing the low 32 bits
1678 * and the other the high 32 bits; between those two user operations the
1679 * kernel has a chance to access the 64-bit value, which leads to unexpected
1680 * load values.
1681 *
1682 * kernel (64-bit) user (32-bit)
1683 *
1684 * if (LOAD ->aux_tail) { --, LOAD ->aux_head_lo
1685 * STORE $aux_data | ,--->
1686 * FLUSH $aux_data | | LOAD ->aux_head_hi
1687 * STORE ->aux_head --|-------` smp_rmb()
1688 * } | LOAD $data
1689 * | smp_mb()
1690 * | STORE ->aux_tail_lo
1691 * `----------->
1692 * STORE ->aux_tail_hi
1693 *
1694 * For this reason, it is impossible for the perf tool to work correctly when
1695 * the AUX head or tail is bigger than 4GB (i.e. wider than 32 bits); and we
1696 * cannot simply limit the AUX ring buffer to less than 4GB either, because
1697 * the pointers increase monotonically, so whatever the buffer size is, the
1698 * head and tail can eventually grow beyond 4GB and carry into the
1699 * high 32 bits.
1700 *
1701 * To mitigate the issues and improve the user experience, we can allow the
1702 * perf tool to work under certain conditions and bail out with an error if
1703 * an overflow is detected that cannot be handled.
1704 *
1705 * For reading the AUX head, the value is read three times and the high
1706 * 4 bytes of the first and last reads are compared; if the kernel has not
1707 * changed the high 4 bytes during the user read sequence, it is safe to use
1708 * the second value.
1709 *
1710 * When compat_auxtrace_mmap__write_tail() detects a carry into the high
1711 * 32 bits, the 64-bit write would need two store operations in user space
1712 * and its atomicity cannot be guaranteed, so '-1' is returned in this case
1713 * to tell the caller that an overflow error has happened.
1714 */
1715 u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
1716 {
1717 struct perf_event_mmap_page *pc = mm->userpg;
1718 u64 first, second, last;
1719 u64 mask = (u64)(UINT32_MAX) << 32;
1720
1721 do {
1722 first = READ_ONCE(pc->aux_head);
1723 /* Ensure all reads are done after we read the head */
1724 smp_rmb();
1725 second = READ_ONCE(pc->aux_head);
1726 /* Ensure all reads are done after we read the head */
1727 smp_rmb();
1728 last = READ_ONCE(pc->aux_head);
1729 } while ((first & mask) != (last & mask));
1730
1731 return second;
1732 }
1733
1734 int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
1735 {
1736 struct perf_event_mmap_page *pc = mm->userpg;
1737 u64 mask = (u64)(UINT32_MAX) << 32;
1738
1739 if (tail & mask)
1740 return -1;
1741
1742 /* Ensure all reads are done before we write the tail out */
1743 smp_mb();
1744 WRITE_ONCE(pc->aux_tail, tail);
1745 return 0;
1746 }
1747
1748 static int __auxtrace_mmap__read(struct mmap *map,
1749 struct auxtrace_record *itr,
1750 struct perf_tool *tool, process_auxtrace_t fn,
1751 bool snapshot, size_t snapshot_size)
1752 {
1753 struct auxtrace_mmap *mm = &map->auxtrace_mmap;
1754 u64 head, old = mm->prev, offset, ref;
1755 unsigned char *data = mm->base;
1756 size_t size, head_off, old_off, len1, len2, padding;
1757 union perf_event ev;
1758 void *data1, *data2;
1759 int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));
1760
1761 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
1762
1763 if (snapshot &&
1764 auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
1765 return -1;
1766
1767 if (old == head)
1768 return 0;
1769
1770 pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
1771 mm->idx, old, head, head - old);
1772
1773 if (mm->mask) {
1774 head_off = head & mm->mask;
1775 old_off = old & mm->mask;
1776 } else {
1777 head_off = head % mm->len;
1778 old_off = old % mm->len;
1779 }
1780
1781 if (head_off > old_off)
1782 size = head_off - old_off;
1783 else
1784 size = mm->len - (old_off - head_off);
1785
1786 if (snapshot && size > snapshot_size)
1787 size = snapshot_size;
1788
1789 ref = auxtrace_record__reference(itr);
1790
1791 if (head > old || size <= head || mm->mask) {
1792 offset = head - size;
1793 } else {
1794 /*
1795 * When the buffer size is not a power of 2, 'head' wraps at the
1796 * highest multiple of the buffer size, so we have to subtract
1797 * the remainder here.
1798 */
1799 u64 rem = (0ULL - mm->len) % mm->len;
1800
1801 offset = head - size - rem;
1802 }
1803
1804 if (size > head_off) {
1805 len1 = size - head_off;
1806 data1 = &data[mm->len - len1];
1807 len2 = head_off;
1808 data2 = &data[0];
1809 } else {
1810 len1 = size;
1811 data1 = &data[head_off - len1];
1812 len2 = 0;
1813 data2 = NULL;
1814 }
1815
1816 if (itr->alignment) {
1817 unsigned int unwanted = len1 % itr->alignment;
1818
1819 len1 -= unwanted;
1820 size -= unwanted;
1821 }
1822
1823 /* padding must be written by fn() e.g. record__process_auxtrace() */
1824 padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
1825 if (padding)
1826 padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
1827
1828 memset(&ev, 0, sizeof(ev));
1829 ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
1830 ev.auxtrace.header.size = sizeof(ev.auxtrace);
1831 ev.auxtrace.size = size + padding;
1832 ev.auxtrace.offset = offset;
1833 ev.auxtrace.reference = ref;
1834 ev.auxtrace.idx = mm->idx;
1835 ev.auxtrace.tid = mm->tid;
1836 ev.auxtrace.cpu = mm->cpu;
1837
1838 if (fn(tool, map, &ev, data1, len1, data2, len2))
1839 return -1;
1840
1841 mm->prev = head;
1842
1843 if (!snapshot) {
1844 int err;
1845
1846 err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
1847 if (err < 0)
1848 return err;
1849
1850 if (itr->read_finish) {
1851 err = itr->read_finish(itr, mm->idx);
1852 if (err < 0)
1853 return err;
1854 }
1855 }
1856
1857 return 1;
1858 }
1859
1860 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
1861 struct perf_tool *tool, process_auxtrace_t fn)
1862 {
1863 return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
1864 }
1865
1866 int auxtrace_mmap__read_snapshot(struct mmap *map,
1867 struct auxtrace_record *itr,
1868 struct perf_tool *tool, process_auxtrace_t fn,
1869 size_t snapshot_size)
1870 {
1871 return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
1872 }
1873
1874 /**
1875 * struct auxtrace_cache - hash table to implement a cache
1876 * @hashtable: the hashtable
1877 * @sz: hashtable size (number of hlists)
1878 * @entry_size: size of an entry
1879 * @limit: limit the number of entries to this maximum; when reached the cache
1880 * is dropped and caching begins again with an empty cache
1881 * @cnt: current number of entries
1882 * @bits: hashtable size (@sz = 2^@bits)
1883 */
1884 struct auxtrace_cache {
1885 struct hlist_head *hashtable;
1886 size_t sz;
1887 size_t entry_size;
1888 size_t limit;
1889 size_t cnt;
1890 unsigned int bits;
1891 };
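
/*
 * Illustrative usage sketch (not in the original source). The entry type and
 * key below are hypothetical; real users such as the Intel PT decoder embed
 * struct auxtrace_cache_entry at the start of their own entry type:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 insn_cnt;
 *	};
 *
 *	struct auxtrace_cache *c = auxtrace_cache__new(10, sizeof(struct my_entry), 200);
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	if (e) {
 *		e->insn_cnt = 1;
 *		auxtrace_cache__add(c, key, &e->entry);
 *	}
 *	e = auxtrace_cache__lookup(c, key);
 *	...
 *	auxtrace_cache__free(c);
 */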
1892
1893 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
1894 unsigned int limit_percent)
1895 {
1896 struct auxtrace_cache *c;
1897 struct hlist_head *ht;
1898 size_t sz, i;
1899
1900 c = zalloc(sizeof(struct auxtrace_cache));
1901 if (!c)
1902 return NULL;
1903
1904 sz = 1UL << bits;
1905
1906 ht = calloc(sz, sizeof(struct hlist_head));
1907 if (!ht)
1908 goto out_free;
1909
1910 for (i = 0; i < sz; i++)
1911 INIT_HLIST_HEAD(&ht[i]);
1912
1913 c->hashtable = ht;
1914 c->sz = sz;
1915 c->entry_size = entry_size;
1916 c->limit = (c->sz * limit_percent) / 100;
1917 c->bits = bits;
1918
1919 return c;
1920
1921 out_free:
1922 free(c);
1923 return NULL;
1924 }
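/*
 * Sizing example: auxtrace_cache__new(10, entry_size, 200) creates
 * 2^10 == 1024 hash lists and sets the drop threshold to
 * (1024 * 200) / 100 == 2048 cached entries.
 */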
1925
auxtrace_cache__drop(struct auxtrace_cache * c)1926 static void auxtrace_cache__drop(struct auxtrace_cache *c)
1927 {
1928 struct auxtrace_cache_entry *entry;
1929 struct hlist_node *tmp;
1930 size_t i;
1931
1932 if (!c)
1933 return;
1934
1935 for (i = 0; i < c->sz; i++) {
1936 hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
1937 hlist_del(&entry->hash);
1938 auxtrace_cache__free_entry(c, entry);
1939 }
1940 }
1941
1942 c->cnt = 0;
1943 }
1944
auxtrace_cache__free(struct auxtrace_cache * c)1945 void auxtrace_cache__free(struct auxtrace_cache *c)
1946 {
1947 if (!c)
1948 return;
1949
1950 auxtrace_cache__drop(c);
1951 zfree(&c->hashtable);
1952 free(c);
1953 }
1954
auxtrace_cache__alloc_entry(struct auxtrace_cache * c)1955 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
1956 {
1957 return malloc(c->entry_size);
1958 }
1959
auxtrace_cache__free_entry(struct auxtrace_cache * c __maybe_unused,void * entry)1960 void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
1961 void *entry)
1962 {
1963 free(entry);
1964 }
1965
auxtrace_cache__add(struct auxtrace_cache * c,u32 key,struct auxtrace_cache_entry * entry)1966 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
1967 struct auxtrace_cache_entry *entry)
1968 {
1969 if (c->limit && ++c->cnt > c->limit)
1970 auxtrace_cache__drop(c);
1971
1972 entry->key = key;
1973 hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
1974
1975 return 0;
1976 }
1977
auxtrace_cache__rm(struct auxtrace_cache * c,u32 key)1978 static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
1979 u32 key)
1980 {
1981 struct auxtrace_cache_entry *entry;
1982 struct hlist_head *hlist;
1983 struct hlist_node *n;
1984
1985 if (!c)
1986 return NULL;
1987
1988 hlist = &c->hashtable[hash_32(key, c->bits)];
1989 hlist_for_each_entry_safe(entry, n, hlist, hash) {
1990 if (entry->key == key) {
1991 hlist_del(&entry->hash);
1992 return entry;
1993 }
1994 }
1995
1996 return NULL;
1997 }
1998
auxtrace_cache__remove(struct auxtrace_cache * c,u32 key)1999 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
2000 {
2001 struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
2002
2003 auxtrace_cache__free_entry(c, entry);
2004 }
2005
auxtrace_cache__lookup(struct auxtrace_cache * c,u32 key)2006 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
2007 {
2008 struct auxtrace_cache_entry *entry;
2009 struct hlist_head *hlist;
2010
2011 if (!c)
2012 return NULL;
2013
2014 hlist = &c->hashtable[hash_32(key, c->bits)];
2015 hlist_for_each_entry(entry, hlist, hash) {
2016 if (entry->key == key)
2017 return entry;
2018 }
2019
2020 return NULL;
2021 }
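/*
 * Minimal usage sketch of the cache API above; the entry layout, key and
 * payload are hypothetical.  Real users such as the Intel PT decoder embed
 * struct auxtrace_cache_entry as the first member of their own entry type so
 * that the hash node and key travel with the cached data.
 */
struct example_cache_entry {
	struct auxtrace_cache_entry entry;	/* must be first */
	u64 payload;
};

static void __maybe_unused auxtrace_cache__example(void)
{
	struct auxtrace_cache *c;
	struct example_cache_entry *e;

	c = auxtrace_cache__new(10, sizeof(struct example_cache_entry), 200);
	if (!c)
		return;

	e = auxtrace_cache__alloc_entry(c);
	if (e) {
		e->payload = 42;
		auxtrace_cache__add(c, 0x1234, &e->entry);
	}

	e = auxtrace_cache__lookup(c, 0x1234);		/* the entry above, or NULL */
	if (e)
		auxtrace_cache__remove(c, 0x1234);	/* unlinks and frees it */

	auxtrace_cache__free(c);			/* drops anything still cached */
}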
2022
addr_filter__free_str(struct addr_filter * filt)2023 static void addr_filter__free_str(struct addr_filter *filt)
2024 {
2025 zfree(&filt->str);
2026 filt->action = NULL;
2027 filt->sym_from = NULL;
2028 filt->sym_to = NULL;
2029 filt->filename = NULL;
2030 }
2031
addr_filter__new(void)2032 static struct addr_filter *addr_filter__new(void)
2033 {
2034 struct addr_filter *filt = zalloc(sizeof(*filt));
2035
2036 if (filt)
2037 INIT_LIST_HEAD(&filt->list);
2038
2039 return filt;
2040 }
2041
addr_filter__free(struct addr_filter * filt)2042 static void addr_filter__free(struct addr_filter *filt)
2043 {
2044 if (filt)
2045 addr_filter__free_str(filt);
2046 free(filt);
2047 }
2048
addr_filters__add(struct addr_filters * filts,struct addr_filter * filt)2049 static void addr_filters__add(struct addr_filters *filts,
2050 struct addr_filter *filt)
2051 {
2052 list_add_tail(&filt->list, &filts->head);
2053 filts->cnt += 1;
2054 }
2055
addr_filters__del(struct addr_filters * filts,struct addr_filter * filt)2056 static void addr_filters__del(struct addr_filters *filts,
2057 struct addr_filter *filt)
2058 {
2059 list_del_init(&filt->list);
2060 filts->cnt -= 1;
2061 }
2062
addr_filters__init(struct addr_filters * filts)2063 void addr_filters__init(struct addr_filters *filts)
2064 {
2065 INIT_LIST_HEAD(&filts->head);
2066 filts->cnt = 0;
2067 }
2068
addr_filters__exit(struct addr_filters * filts)2069 void addr_filters__exit(struct addr_filters *filts)
2070 {
2071 struct addr_filter *filt, *n;
2072
2073 list_for_each_entry_safe(filt, n, &filts->head, list) {
2074 addr_filters__del(filts, filt);
2075 addr_filter__free(filt);
2076 }
2077 }
2078
parse_num_or_str(char ** inp,u64 * num,const char ** str,const char * str_delim)2079 static int parse_num_or_str(char **inp, u64 *num, const char **str,
2080 const char *str_delim)
2081 {
2082 *inp += strspn(*inp, " ");
2083
2084 if (isdigit(**inp)) {
2085 char *endptr;
2086
2087 if (!num)
2088 return -EINVAL;
2089 errno = 0;
2090 *num = strtoull(*inp, &endptr, 0);
2091 if (errno)
2092 return -errno;
2093 if (endptr == *inp)
2094 return -EINVAL;
2095 *inp = endptr;
2096 } else {
2097 size_t n;
2098
2099 if (!str)
2100 return -EINVAL;
2101 *inp += strspn(*inp, " ");
2102 *str = *inp;
2103 n = strcspn(*inp, str_delim);
2104 if (!n)
2105 return -EINVAL;
2106 *inp += n;
2107 if (**inp) {
2108 **inp = '\0';
2109 *inp += 1;
2110 }
2111 }
2112 return 0;
2113 }
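/*
 * Example of the dual behaviour above: given "0x1000 foo", the number branch
 * returns *num == 0x1000 and leaves *inp at " foo"; given "main / 0x100"
 * with str_delim " ", the string branch points *str at "main",
 * NUL-terminates it at the first space and advances *inp past the delimiter.
 */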
2114
parse_action(struct addr_filter * filt)2115 static int parse_action(struct addr_filter *filt)
2116 {
2117 if (!strcmp(filt->action, "filter")) {
2118 filt->start = true;
2119 filt->range = true;
2120 } else if (!strcmp(filt->action, "start")) {
2121 filt->start = true;
2122 } else if (!strcmp(filt->action, "stop")) {
2123 filt->start = false;
2124 } else if (!strcmp(filt->action, "tracestop")) {
2125 filt->start = false;
2126 filt->range = true;
2127 filt->action += 5; /* Change 'tracestop' to 'stop' */
2128 } else {
2129 return -EINVAL;
2130 }
2131 return 0;
2132 }
2133
parse_sym_idx(char ** inp,int * idx)2134 static int parse_sym_idx(char **inp, int *idx)
2135 {
2136 *idx = -1;
2137
2138 *inp += strspn(*inp, " ");
2139
2140 if (**inp != '#')
2141 return 0;
2142
2143 *inp += 1;
2144
2145 if (**inp == 'g' || **inp == 'G') {
2146 *inp += 1;
2147 *idx = 0;
2148 } else {
2149 unsigned long num;
2150 char *endptr;
2151
2152 errno = 0;
2153 num = strtoul(*inp, &endptr, 0);
2154 if (errno)
2155 return -errno;
2156 if (endptr == *inp || num > INT_MAX)
2157 return -EINVAL;
2158 *inp = endptr;
2159 *idx = num;
2160 }
2161
2162 return 0;
2163 }
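/*
 * Example: after a symbol name, "#2" selects the second matching symbol
 * (*idx == 2), while "#g", "#G" or "#0" select the global one (*idx == 0);
 * with no '#' suffix *idx stays -1 and any match is accepted.
 */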
2164
parse_addr_size(char ** inp,u64 * num,const char ** str,int * idx)2165 static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
2166 {
2167 int err = parse_num_or_str(inp, num, str, " ");
2168
2169 if (!err && *str)
2170 err = parse_sym_idx(inp, idx);
2171
2172 return err;
2173 }
2174
parse_one_filter(struct addr_filter * filt,const char ** filter_inp)2175 static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
2176 {
2177 char *fstr;
2178 int err;
2179
2180 filt->str = fstr = strdup(*filter_inp);
2181 if (!fstr)
2182 return -ENOMEM;
2183
2184 err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
2185 if (err)
2186 goto out_err;
2187
2188 err = parse_action(filt);
2189 if (err)
2190 goto out_err;
2191
2192 err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
2193 &filt->sym_from_idx);
2194 if (err)
2195 goto out_err;
2196
2197 fstr += strspn(fstr, " ");
2198
2199 if (*fstr == '/') {
2200 fstr += 1;
2201 err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
2202 &filt->sym_to_idx);
2203 if (err)
2204 goto out_err;
2205 filt->range = true;
2206 }
2207
2208 fstr += strspn(fstr, " ");
2209
2210 if (*fstr == '@') {
2211 fstr += 1;
2212 err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
2213 if (err)
2214 goto out_err;
2215 }
2216
2217 fstr += strspn(fstr, " ,");
2218
2219 *filter_inp += fstr - filt->str;
2220
2221 return 0;
2222
2223 out_err:
2224 addr_filter__free_str(filt);
2225
2226 return err;
2227 }
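/*
 * Worked example (hypothetical values): parsing the bare filter
 * "filter main / 0x100 @ /usr/bin/ls" yields action "filter" (start + range),
 * sym_from "main" with sym_from_idx -1, size 0x100 and filename
 * "/usr/bin/ls"; filt->addr is filled in later, when the symbol is resolved
 * by addr_filter__resolve_syms().
 */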
2228
addr_filters__parse_bare_filter(struct addr_filters * filts,const char * filter)2229 int addr_filters__parse_bare_filter(struct addr_filters *filts,
2230 const char *filter)
2231 {
2232 struct addr_filter *filt;
2233 const char *fstr = filter;
2234 int err;
2235
2236 while (*fstr) {
2237 filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
2238 err = parse_one_filter(filt, &fstr);
2239 if (err) {
2240 addr_filter__free(filt);
2241 addr_filters__exit(filts);
2242 return err;
2243 }
2244 addr_filters__add(filts, filt);
2245 }
2246
2247 return 0;
2248 }
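/*
 * Usage sketch (symbol names are hypothetical): parse a bare filter string
 * into a list of struct addr_filter and walk the result.  Resolving symbols
 * to addresses is a separate step, see addr_filter__resolve_syms() below.
 */
static void __maybe_unused addr_filters__example(void)
{
	struct addr_filters filts;
	struct addr_filter *filt;

	addr_filters__init(&filts);

	/* Two filters, separated by a space: "start func_a" and "stop func_b" */
	if (!addr_filters__parse_bare_filter(&filts, "start func_a stop func_b")) {
		list_for_each_entry(filt, &filts.head, list)
			pr_debug("action=%s sym=%s\n", filt->action, filt->sym_from);
	}

	addr_filters__exit(&filts);
}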
2249
2250 struct sym_args {
2251 const char *name;
2252 u64 start;
2253 u64 size;
2254 int idx;
2255 int cnt;
2256 bool started;
2257 bool global;
2258 bool selected;
2259 bool duplicate;
2260 bool near;
2261 };
2262
kern_sym_match(struct sym_args * args,const char * name,char type)2263 static bool kern_sym_match(struct sym_args *args, const char *name, char type)
2264 {
2265 /* A function with the same name, and global or the n'th found or any */
2266 return kallsyms__is_function(type) &&
2267 !strcmp(name, args->name) &&
2268 ((args->global && isupper(type)) ||
2269 (args->selected && ++(args->cnt) == args->idx) ||
2270 (!args->global && !args->selected));
2271 }
2272
find_kern_sym_cb(void * arg,const char * name,char type,u64 start)2273 static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
2274 {
2275 struct sym_args *args = arg;
2276
2277 if (args->started) {
2278 if (!args->size)
2279 args->size = start - args->start;
2280 if (args->selected) {
2281 if (args->size)
2282 return 1;
2283 } else if (kern_sym_match(args, name, type)) {
2284 args->duplicate = true;
2285 return 1;
2286 }
2287 } else if (kern_sym_match(args, name, type)) {
2288 args->started = true;
2289 args->start = start;
2290 }
2291
2292 return 0;
2293 }
2294
print_kern_sym_cb(void * arg,const char * name,char type,u64 start)2295 static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
2296 {
2297 struct sym_args *args = arg;
2298
2299 if (kern_sym_match(args, name, type)) {
2300 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
2301 ++args->cnt, start, type, name);
2302 args->near = true;
2303 } else if (args->near) {
2304 args->near = false;
2305 pr_err("\t\twhich is near\t\t%s\n", name);
2306 }
2307
2308 return 0;
2309 }
2310
sym_not_found_error(const char * sym_name,int idx)2311 static int sym_not_found_error(const char *sym_name, int idx)
2312 {
2313 if (idx > 0) {
2314 pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
2315 idx, sym_name);
2316 } else if (!idx) {
2317 pr_err("Global symbol '%s' not found.\n", sym_name);
2318 } else {
2319 pr_err("Symbol '%s' not found.\n", sym_name);
2320 }
2321 pr_err("Note that symbols must be functions.\n");
2322
2323 return -EINVAL;
2324 }
2325
find_kern_sym(const char * sym_name,u64 * start,u64 * size,int idx)2326 static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
2327 {
2328 struct sym_args args = {
2329 .name = sym_name,
2330 .idx = idx,
2331 .global = !idx,
2332 .selected = idx > 0,
2333 };
2334 int err;
2335
2336 *start = 0;
2337 *size = 0;
2338
2339 err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
2340 if (err < 0) {
2341 pr_err("Failed to parse /proc/kallsyms\n");
2342 return err;
2343 }
2344
2345 if (args.duplicate) {
2346 pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
2347 args.cnt = 0;
2348 kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
2349 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
2350 sym_name);
2351 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
2352 return -EINVAL;
2353 }
2354
2355 if (!args.started) {
2356 pr_err("Kernel symbol lookup: ");
2357 return sym_not_found_error(sym_name, idx);
2358 }
2359
2360 *start = args.start;
2361 *size = args.size;
2362
2363 return 0;
2364 }
2365
find_entire_kern_cb(void * arg,const char * name __maybe_unused,char type,u64 start)2366 static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
2367 char type, u64 start)
2368 {
2369 struct sym_args *args = arg;
2370
2371 if (!kallsyms__is_function(type))
2372 return 0;
2373
2374 if (!args->started) {
2375 args->started = true;
2376 args->start = start;
2377 }
2378 /* Don't know exactly where the kernel ends, so we add a page */
2379 args->size = round_up(start, page_size) + page_size - args->start;
2380
2381 return 0;
2382 }
2383
addr_filter__entire_kernel(struct addr_filter * filt)2384 static int addr_filter__entire_kernel(struct addr_filter *filt)
2385 {
2386 struct sym_args args = { .started = false };
2387 int err;
2388
2389 err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
2390 if (err < 0 || !args.started) {
2391 pr_err("Failed to parse /proc/kallsyms\n");
2392 return err;
2393 }
2394
2395 filt->addr = args.start;
2396 filt->size = args.size;
2397
2398 return 0;
2399 }
2400
check_end_after_start(struct addr_filter * filt,u64 start,u64 size)2401 static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
2402 {
2403 if (start + size >= filt->addr)
2404 return 0;
2405
2406 if (filt->sym_from) {
2407 pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
2408 filt->sym_to, start, filt->sym_from, filt->addr);
2409 } else {
2410 pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
2411 filt->sym_to, start, filt->addr);
2412 }
2413
2414 return -EINVAL;
2415 }
2416
addr_filter__resolve_kernel_syms(struct addr_filter * filt)2417 static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
2418 {
2419 bool no_size = false;
2420 u64 start, size;
2421 int err;
2422
2423 if (symbol_conf.kptr_restrict) {
2424 pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
2425 return -EINVAL;
2426 }
2427
2428 if (filt->sym_from && !strcmp(filt->sym_from, "*"))
2429 return addr_filter__entire_kernel(filt);
2430
2431 if (filt->sym_from) {
2432 err = find_kern_sym(filt->sym_from, &start, &size,
2433 filt->sym_from_idx);
2434 if (err)
2435 return err;
2436 filt->addr = start;
2437 if (filt->range && !filt->size && !filt->sym_to) {
2438 filt->size = size;
2439 no_size = !size;
2440 }
2441 }
2442
2443 if (filt->sym_to) {
2444 err = find_kern_sym(filt->sym_to, &start, &size,
2445 filt->sym_to_idx);
2446 if (err)
2447 return err;
2448
2449 err = check_end_after_start(filt, start, size);
2450 if (err)
2451 return err;
2452 filt->size = start + size - filt->addr;
2453 no_size = !size;
2454 }
2455
2456 /* The very last symbol in kallsyms does not imply a particular size */
2457 if (no_size) {
2458 pr_err("Cannot determine size of symbol '%s'\n",
2459 filt->sym_to ? filt->sym_to : filt->sym_from);
2460 return -EINVAL;
2461 }
2462
2463 return 0;
2464 }
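/*
 * Example (hypothetical symbol): for the bare filter "filter my_func" this
 * resolves my_func via /proc/kallsyms, setting filt->addr to the symbol
 * start and filt->size to its length; the special name "*" instead selects
 * the whole kernel text via addr_filter__entire_kernel().
 */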
2465
load_dso(const char * name)2466 static struct dso *load_dso(const char *name)
2467 {
2468 struct map *map;
2469 struct dso *dso;
2470
2471 map = dso__new_map(name);
2472 if (!map)
2473 return NULL;
2474
2475 if (map__load(map) < 0)
2476 pr_err("File '%s' not found or has no symbols.\n", name);
2477
2478 dso = dso__get(map->dso);
2479
2480 map__put(map);
2481
2482 return dso;
2483 }
2484
dso_sym_match(struct symbol * sym,const char * name,int * cnt,int idx)2485 static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
2486 int idx)
2487 {
2488 /* Same name, and global or the n'th found or any */
2489 return !arch__compare_symbol_names(name, sym->name) &&
2490 ((!idx && sym->binding == STB_GLOBAL) ||
2491 (idx > 0 && ++*cnt == idx) ||
2492 idx < 0);
2493 }
2494
print_duplicate_syms(struct dso * dso,const char * sym_name)2495 static void print_duplicate_syms(struct dso *dso, const char *sym_name)
2496 {
2497 struct symbol *sym;
2498 bool near = false;
2499 int cnt = 0;
2500
2501 pr_err("Multiple symbols with name '%s'\n", sym_name);
2502
2503 sym = dso__first_symbol(dso);
2504 while (sym) {
2505 if (dso_sym_match(sym, sym_name, &cnt, -1)) {
2506 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
2507 ++cnt, sym->start,
2508 sym->binding == STB_GLOBAL ? 'g' :
2509 sym->binding == STB_LOCAL ? 'l' : 'w',
2510 sym->name);
2511 near = true;
2512 } else if (near) {
2513 near = false;
2514 pr_err("\t\twhich is near\t\t%s\n", sym->name);
2515 }
2516 sym = dso__next_symbol(sym);
2517 }
2518
2519 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
2520 sym_name);
2521 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
2522 }
2523
find_dso_sym(struct dso * dso,const char * sym_name,u64 * start,u64 * size,int idx)2524 static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
2525 u64 *size, int idx)
2526 {
2527 struct symbol *sym;
2528 int cnt = 0;
2529
2530 *start = 0;
2531 *size = 0;
2532
2533 sym = dso__first_symbol(dso);
2534 while (sym) {
2535 if (*start) {
2536 if (!*size)
2537 *size = sym->start - *start;
2538 if (idx > 0) {
2539 if (*size)
2540 return 1;
2541 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
2542 print_duplicate_syms(dso, sym_name);
2543 return -EINVAL;
2544 }
2545 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
2546 *start = sym->start;
2547 *size = sym->end - sym->start;
2548 }
2549 sym = dso__next_symbol(sym);
2550 }
2551
2552 if (!*start)
2553 return sym_not_found_error(sym_name, idx);
2554
2555 return 0;
2556 }
2557
addr_filter__entire_dso(struct addr_filter * filt,struct dso * dso)2558 static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
2559 {
2560 if (dso__data_file_size(dso, NULL)) {
2561 pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
2562 filt->filename);
2563 return -EINVAL;
2564 }
2565
2566 filt->addr = 0;
2567 filt->size = dso->data.file_size;
2568
2569 return 0;
2570 }
2571
addr_filter__resolve_syms(struct addr_filter * filt)2572 static int addr_filter__resolve_syms(struct addr_filter *filt)
2573 {
2574 u64 start, size;
2575 struct dso *dso;
2576 int err = 0;
2577
2578 if (!filt->sym_from && !filt->sym_to)
2579 return 0;
2580
2581 if (!filt->filename)
2582 return addr_filter__resolve_kernel_syms(filt);
2583
2584 dso = load_dso(filt->filename);
2585 if (!dso) {
2586 pr_err("Failed to load symbols from: %s\n", filt->filename);
2587 return -EINVAL;
2588 }
2589
2590 if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
2591 err = addr_filter__entire_dso(filt, dso);
2592 goto put_dso;
2593 }
2594
2595 if (filt->sym_from) {
2596 err = find_dso_sym(dso, filt->sym_from, &start, &size,
2597 filt->sym_from_idx);
2598 if (err)
2599 goto put_dso;
2600 filt->addr = start;
2601 if (filt->range && !filt->size && !filt->sym_to)
2602 filt->size = size;
2603 }
2604
2605 if (filt->sym_to) {
2606 err = find_dso_sym(dso, filt->sym_to, &start, &size,
2607 filt->sym_to_idx);
2608 if (err)
2609 goto put_dso;
2610
2611 err = check_end_after_start(filt, start, size);
2612 if (err)
2613 goto put_dso;
2614
2615 filt->size = start + size - filt->addr;
2616 }
2617
2618 put_dso:
2619 dso__put(dso);
2620
2621 return err;
2622 }
2623
addr_filter__to_str(struct addr_filter * filt)2624 static char *addr_filter__to_str(struct addr_filter *filt)
2625 {
2626 char filename_buf[PATH_MAX];
2627 const char *at = "";
2628 const char *fn = "";
2629 char *filter;
2630 int err;
2631
2632 if (filt->filename) {
2633 at = "@";
2634 fn = realpath(filt->filename, filename_buf);
2635 if (!fn)
2636 return NULL;
2637 }
2638
2639 if (filt->range) {
2640 err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
2641 filt->action, filt->addr, filt->size, at, fn);
2642 } else {
2643 err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
2644 filt->action, filt->addr, at, fn);
2645 }
2646
2647 return err < 0 ? NULL : filter;
2648 }
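/*
 * Examples of the strings produced above (numbers are hypothetical): a
 * resolved range filter becomes e.g. "filter 0x404570/0x320@/usr/bin/ls", a
 * single-address kernel filter becomes e.g. "start 0xffffffff81234560".
 * This is the form appended to the event's filter string by
 * evsel__append_addr_filter() in parse_addr_filter() below.
 */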
2649
parse_addr_filter(struct evsel * evsel,const char * filter,int max_nr)2650 static int parse_addr_filter(struct evsel *evsel, const char *filter,
2651 int max_nr)
2652 {
2653 struct addr_filters filts;
2654 struct addr_filter *filt;
2655 int err;
2656
2657 addr_filters__init(&filts);
2658
2659 err = addr_filters__parse_bare_filter(&filts, filter);
2660 if (err)
2661 goto out_exit;
2662
2663 if (filts.cnt > max_nr) {
2664 pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
2665 filts.cnt, max_nr);
2666 err = -EINVAL;
2667 goto out_exit;
2668 }
2669
2670 list_for_each_entry(filt, &filts.head, list) {
2671 char *new_filter;
2672
2673 err = addr_filter__resolve_syms(filt);
2674 if (err)
2675 goto out_exit;
2676
2677 new_filter = addr_filter__to_str(filt);
2678 if (!new_filter) {
2679 err = -ENOMEM;
2680 goto out_exit;
2681 }
2682
2683 if (evsel__append_addr_filter(evsel, new_filter)) {
2684 err = -ENOMEM;
2685 goto out_exit;
2686 }
2687 }
2688
2689 out_exit:
2690 addr_filters__exit(&filts);
2691
2692 if (err) {
2693 pr_err("Failed to parse address filter: '%s'\n", filter);
2694 pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
2695 pr_err("Where multiple filters are separated by space or comma.\n");
2696 }
2697
2698 return err;
2699 }
2700
evsel__nr_addr_filter(struct evsel * evsel)2701 static int evsel__nr_addr_filter(struct evsel *evsel)
2702 {
2703 struct perf_pmu *pmu = evsel__find_pmu(evsel);
2704 int nr_addr_filters = 0;
2705
2706 if (!pmu)
2707 return 0;
2708
2709 perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
2710
2711 return nr_addr_filters;
2712 }
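/*
 * The limit comes from the PMU's sysfs directory, e.g.
 * /sys/bus/event_source/devices/intel_pt/nr_addr_filters; a PMU without
 * address filter support leaves the count at 0 and is skipped by
 * auxtrace_parse_filters() below.
 */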
2713
auxtrace_parse_filters(struct evlist * evlist)2714 int auxtrace_parse_filters(struct evlist *evlist)
2715 {
2716 struct evsel *evsel;
2717 char *filter;
2718 int err, max_nr;
2719
2720 evlist__for_each_entry(evlist, evsel) {
2721 filter = evsel->filter;
2722 max_nr = evsel__nr_addr_filter(evsel);
2723 if (!filter || !max_nr)
2724 continue;
2725 evsel->filter = NULL;
2726 err = parse_addr_filter(evsel, filter, max_nr);
2727 free(filter);
2728 if (err)
2729 return err;
2730 pr_debug("Address filter: %s\n", evsel->filter);
2731 }
2732
2733 return 0;
2734 }
2735
auxtrace__process_event(struct perf_session * session,union perf_event * event,struct perf_sample * sample,struct perf_tool * tool)2736 int auxtrace__process_event(struct perf_session *session, union perf_event *event,
2737 struct perf_sample *sample, struct perf_tool *tool)
2738 {
2739 if (!session->auxtrace)
2740 return 0;
2741
2742 return session->auxtrace->process_event(session, event, sample, tool);
2743 }
2744
auxtrace__dump_auxtrace_sample(struct perf_session * session,struct perf_sample * sample)2745 void auxtrace__dump_auxtrace_sample(struct perf_session *session,
2746 struct perf_sample *sample)
2747 {
2748 if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
2749 auxtrace__dont_decode(session))
2750 return;
2751
2752 session->auxtrace->dump_auxtrace_sample(session, sample);
2753 }
2754
auxtrace__flush_events(struct perf_session * session,struct perf_tool * tool)2755 int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
2756 {
2757 if (!session->auxtrace)
2758 return 0;
2759
2760 return session->auxtrace->flush_events(session, tool);
2761 }
2762
auxtrace__free_events(struct perf_session * session)2763 void auxtrace__free_events(struct perf_session *session)
2764 {
2765 if (!session->auxtrace)
2766 return;
2767
2768 return session->auxtrace->free_events(session);
2769 }
2770
auxtrace__free(struct perf_session * session)2771 void auxtrace__free(struct perf_session *session)
2772 {
2773 if (!session->auxtrace)
2774 return;
2775
2776 return session->auxtrace->free(session);
2777 }
2778
auxtrace__evsel_is_auxtrace(struct perf_session * session,struct evsel * evsel)2779 bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
2780 struct evsel *evsel)
2781 {
2782 if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
2783 return false;
2784
2785 return session->auxtrace->evsel_is_auxtrace(session, evsel);
2786 }
2787