/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
					*symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
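
/*
 * For example, with the unsigned short 'type' above this works out to
 * (1 << 16) - 1 == 65535 possible event type IDs.
 */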

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; such routines may sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* true when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node	node;
	struct list_head	list;
	int			type;
	struct trace_event_functions *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);
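
/*
 * A sketch of the usual behaviour of the helper above (an assumption based
 * on the enum comments, not a definition): it maps the trace_seq state to a
 * return value roughly as
 *
 *	return trace_seq_has_overflowed(s) ?
 *		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
 */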

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned short type,
				  unsigned long flags,
				  int pc);
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct trace_event_call *);
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);

struct trace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
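
/*
 * Typical usage sketch for the reserve/commit pair above (illustrative
 * only; 'fbuffer', 'trace_file', 'entry' and 'extra_len' are hypothetical
 * locals of an event probe):
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry) + extra_len);
 *	if (!entry)
 *		return;
 *	... fill in the event-specific fields of *entry ...
 *	trace_event_buffer_commit(&fbuffer);
 */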

enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT	  - Event is a tracepoint
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;
	/*
	 * bit 0:		filter_active
	 * bit 1:		allow trace by non root (cap any)
	 * bit 2:		failed to apply filter
	 * bit 3:		trace internal event (do not enable)
	 * bit 4:		Event was enabled by module
	 * bit 5:		use call filter rather than file filter
	 * bit 6:		Event is a tracepoint
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif
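
/*
 * Usage sketch (illustrative, assuming a perf probe context with a 'ctx'
 * argument): the cheap unlocked check above gates the BPF program
 * invocation, e.g.
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, ctx))
 *		return;
 */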

static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

struct trace_array;
struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
};

/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		     tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 *  WAS_ENABLED	  - Set when enabled to know to clear trace on module removal
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
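
/*
 * Illustrative sketch (hypothetical event 'foo_bar'; 'p_event' is the
 * parameter named in the macro above): only allow perf to open the event
 * as a sampling event:
 *
 *	__TRACE_EVENT_PERF_PERM(foo_bar,
 *		is_sampling_event(p_event) ? 0 : -EPERM);
 */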

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}
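
/*
 * Typical call pattern (an illustrative sketch, with 'trace_file' as a
 * hypothetical struct trace_event_file pointer in an event probe):
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 *	... reserve, fill and commit the ring buffer entry ...
 */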

#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr)
{
	return -EOPNOTSUPP;
}
#endif

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_COMM,
	FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)1)
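
/* For example, is_signed_type(int) is true while is_signed_type(size_t) is not. */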

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);

/*
 * The double __builtin_constant_p is needed because gcc will give us an
 * error if we try to initialize the static variable with fmt when fmt is
 * not a constant, even though the outer if statement would be optimized
 * away.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
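
/*
 * Illustrative call (hypothetical call site; _THIS_IP_ is the kernel's
 * current-instruction-pointer helper):
 *
 *	event_trace_printk(_THIS_IP_, "x=%d\n", x);
 */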

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **symbol,
			       u64 *probe_offset, u64 *probe_addr,
			       bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int perf_uprobe_init(struct perf_event *event,
			    unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **filename,
			       u64 *probe_offset, bool perf_type_tracepoint);
#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

#endif

#endif /* _LINUX_TRACE_EVENT_H */