/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @add_last_branch: add branch context to existing event records
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
 * @vm_time_correlation: perform VM Time Correlation
 * @vm_tm_corr_dry_run: VM Time Correlation dry-run
 * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 */
struct itrace_synth_opts {
	bool set;
	bool default_no_sample;
	bool inject;
	bool instructions;
	bool branches;
	bool transactions;
	bool ptwrites;
	bool pwr_events;
	bool other_events;
	bool errors;
	bool dont_decode;
	bool log;
	bool calls;
	bool returns;
	bool callchain;
	bool add_callchain;
	bool thread_stack;
	bool last_branch;
	bool add_last_branch;
	bool flc;
	bool llc;
	bool tlb;
	bool remote_access;
	bool mem;
	bool timeless_decoding;
	bool vm_time_correlation;
	bool vm_tm_corr_dry_run;
	char *vm_tm_corr_args;
	unsigned int callchain_sz;
	unsigned int last_branch_sz;
	unsigned long long period;
	enum itrace_period_type period_type;
	unsigned long initial_skip;
	unsigned long *cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int range_num;
	unsigned int error_plus_flags;
	unsigned int error_minus_flags;
	unsigned int log_plus_flags;
	unsigned int log_minus_flags;
	unsigned int quick;
};
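
/*
 * Illustrative sketch (not part of the upstream header): one way a tool might
 * populate these options before decoding, assuming it wants the documented
 * defaults plus a 100-nanosecond 'instructions' period.
 *
 *	struct itrace_synth_opts opts = { .set = true };
 *
 *	itrace_synth_opts__set_default(&opts, false);
 *	opts.period = 100;
 *	opts.period_type = PERF_ITRACE_PERIOD_NANOSECS;
 */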

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64 file_offset;
	u64 sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head list;
	size_t nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether @evsel is an AUX area tracing event
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};
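
/*
 * Illustrative sketch (not part of the upstream header): a decoder typically
 * embeds struct auxtrace in its own state, fills in the callbacks it needs,
 * and points the session at it while handling PERF_RECORD_AUXTRACE_INFO.
 * The names my_decoder, my_process_event, etc. are hypothetical.
 *
 *	struct my_decoder {
 *		struct auxtrace auxtrace;
 *		...
 *	};
 *
 *	decoder->auxtrace.process_event = my_process_event;
 *	decoder->auxtrace.process_auxtrace_event = my_process_auxtrace_event;
 *	decoder->auxtrace.flush_events = my_flush_events;
 *	decoder->auxtrace.free_events = my_free_events;
 *	decoder->auxtrace.free = my_free;
 *	session->auxtrace = &decoder->auxtrace;
 */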

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head list;
	size_t size;
	pid_t pid;
	pid_t tid;
	int cpu;
	void *data;
	off_t data_offset;
	void *mmap_addr;
	size_t mmap_size;
	bool data_needs_freeing;
	bool consecutive;
	u64 offset;
	u64 reference;
	u64 buffer_nr;
	size_t use_size;
	void *use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head head;
	pid_t tid;
	int cpu;
	bool set;
	void *priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue *queue_array;
	unsigned int nr_queues;
	bool new_data;
	bool populated;
	u64 next_buffer_nr;
};
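
/*
 * Illustrative sketch (not part of the upstream header): the usual flow is to
 * initialize the queues once, add each PERF_RECORD_AUXTRACE event as it is
 * seen, and free the queues when the session is torn down.  Error handling is
 * omitted.
 *
 *	struct auxtrace_queues queues;
 *	struct auxtrace_buffer *buffer;
 *
 *	err = auxtrace_queues__init(&queues);
 *	...
 *	err = auxtrace_queues__add_event(&queues, session, event,
 *					 data_offset, &buffer);
 *	...
 *	auxtrace_queues__free(&queues);
 */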

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int queue_nr;
	u64 ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item *heap_array;
	unsigned int heap_cnt;
	unsigned int heap_sz;
};
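
/*
 * Illustrative sketch (not part of the upstream header): decoders use the
 * heap to interleave queues in timestamp order.  Each queue is added with the
 * timestamp of its next data, the lowest entry is popped and decoded up to
 * its next timestamp, and then the queue is re-added.  Error handling is
 * omitted.
 *
 *	err = auxtrace_heap__add(&heap, queue_nr, timestamp);
 *	...
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		... decode queue_nr, then auxtrace_heap__add() it again ...
 *	}
 */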

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void *base;
	void *userpg;
	size_t mask;
	size_t len;
	u64 prev;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t mask;
	off_t offset;
	size_t len;
	int prot;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};
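
/*
 * Illustrative sketch (not part of the upstream header): an architecture's
 * recording support typically embeds struct auxtrace_record, fills in the
 * callbacks it supports, and returns the embedded struct from its init
 * routine, which auxtrace_record__init() dispatches to.  The names
 * my_recording, my_recording_init and the my_* callbacks are hypothetical.
 *
 *	struct my_recording {
 *		struct auxtrace_record itr;
 *		struct perf_pmu *pmu;
 *	};
 *
 *	struct auxtrace_record *my_recording_init(int *err)
 *	{
 *		struct my_recording *rec = zalloc(sizeof(*rec));
 *
 *		if (!rec) {
 *			*err = -ENOMEM;
 *			return NULL;
 *		}
 *		rec->itr.recording_options = my_recording_options;
 *		rec->itr.info_priv_size = my_info_priv_size;
 *		rec->itr.info_fill = my_info_fill;
 *		rec->itr.free = my_free;
 *		return &rec->itr;
 *	}
 */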

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head list;
	bool range;
	bool start;
	const char *action;
	const char *sym_from;
	const char *sym_to;
	int sym_from_idx;
	int sym_to_idx;
	u64 addr;
	u64 size;
	const char *filename;
	char *str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head head;
	int cnt;
};
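
/*
 * Illustrative sketch (not part of the upstream header): parsing a bare
 * address filter of the kind accepted by 'perf record --filter', e.g.
 * "filter main @ /usr/bin/ls".  The path is only an example and error
 * handling is omitted.
 *
 *	struct addr_filters filts;
 *
 *	addr_filters__init(&filts);
 *	err = addr_filters__parse_bare_filter(&filts,
 *					      "filter main @ /usr/bin/ls");
 *	...
 *	addr_filters__exit(&filts);
 */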

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
					   int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__read_head(mm);
#endif
	head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}

static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
					    int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
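
/*
 * Illustrative sketch (not part of the upstream header): the reader side of
 * the AUX ring buffer pairs the two helpers above, copying out the data that
 * arrived between the previously seen head and the new one before advancing
 * the tail.  The copy step is elided.
 *
 *	u64 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
 *
 *	if (head != mm->prev) {
 *		... copy [mm->prev, head) out of mm->base, modulo mm->len ...
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
 *	}
 */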

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);

static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
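
/*
 * Illustrative sketch (not part of the upstream header): walking the buffers
 * of one queue, mapping each one in turn and releasing it when done.  Error
 * handling and the actual decode step are elided.
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		... decode buffer->use_data ?: data,
 *		    buffer->use_size ?: buffer->size bytes ...
 *		auxtrace_buffer__put_data(buffer);
 *	}
 */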

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
void auxtrace_regroup_aux_output(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel);

#define ITRACE_HELP \
" i[period]: synthesize instructions events\n" \
" b: synthesize branches events (branch misses for Arm SPE)\n" \
" c: synthesize branches events (calls only)\n" \
" r: synthesize branches events (returns only)\n" \
" x: synthesize transactions events\n" \
" w: synthesize ptwrite events\n" \
" p: synthesize power events\n" \
" o: synthesize other events recorded due to the use\n" \
" of aux-output (refer to perf record)\n" \
" e[flags]: synthesize error events\n" \
" each flag must be preceded by + or -\n" \
" error flags are: o (overflow)\n" \
" l (data lost)\n" \
" d[flags]: create a debug log\n" \
" each flag must be preceded by + or -\n" \
" log flags are: a (all perf events)\n" \
" f: synthesize first level cache events\n" \
" m: synthesize last level cache events\n" \
" t: synthesize TLB events\n" \
" a: synthesize remote access events\n" \
" g[len]: synthesize a call chain (use with i or x)\n" \
" G[len]: synthesize a call chain on existing event records\n" \
" l[len]: synthesize last branch entries (use with i or x)\n" \
" L[len]: synthesize last branch entries on existing event records\n" \
" sNUMBER: skip initial number of events\n" \
" q: quicker (less detailed) decoding\n" \
" PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
" concatenate multiple options. Default is ibxwpe or cewp\n"

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
				 union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
				       union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
			       const char *str __maybe_unused, int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif
878