1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * uprobes-based tracing events
4 *
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "trace_uprobe: " fmt
9
10 #include <linux/security.h>
11 #include <linux/ctype.h>
12 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/uprobes.h>
15 #include <linux/namei.h>
16 #include <linux/string.h>
17 #include <linux/rculist.h>
18
19 #include "trace_dynevent.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
22
23 #define UPROBE_EVENT_SYSTEM "uprobes"
24
25 struct uprobe_trace_entry_head {
26 struct trace_entry ent;
27 unsigned long vaddr[];
28 };
29
30 #define SIZEOF_TRACE_ENTRY(is_return) \
31 (sizeof(struct uprobe_trace_entry_head) + \
32 sizeof(unsigned long) * (is_return ? 2 : 1))
33
34 #define DATAOF_TRACE_ENTRY(entry, is_return) \
35 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
36
37 static int trace_uprobe_create(int argc, const char **argv);
38 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
39 static int trace_uprobe_release(struct dyn_event *ev);
40 static bool trace_uprobe_is_busy(struct dyn_event *ev);
41 static bool trace_uprobe_match(const char *system, const char *event,
42 int argc, const char **argv, struct dyn_event *ev);
43
44 static struct dyn_event_operations trace_uprobe_ops = {
45 .create = trace_uprobe_create,
46 .show = trace_uprobe_show,
47 .is_busy = trace_uprobe_is_busy,
48 .free = trace_uprobe_release,
49 .match = trace_uprobe_match,
50 };
51
52 /*
53 * uprobe event core functions
54 */
55 struct trace_uprobe {
56 struct dyn_event devent;
57 struct uprobe_consumer consumer;
58 struct path path;
59 struct inode *inode;
60 char *filename;
61 unsigned long offset;
62 unsigned long ref_ctr_offset;
63 unsigned long nhit;
64 struct trace_probe tp;
65 };
66
static bool is_trace_uprobe(struct dyn_event *ev)
68 {
69 return ev->ops == &trace_uprobe_ops;
70 }
71
static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
73 {
74 return container_of(ev, struct trace_uprobe, devent);
75 }
76
77 /**
78 * for_each_trace_uprobe - iterate over the trace_uprobe list
79 * @pos: the struct trace_uprobe * for each entry
80 * @dpos: the struct dyn_event * to use as a loop cursor
81 */
82 #define for_each_trace_uprobe(pos, dpos) \
83 for_each_dyn_event(dpos) \
84 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
85
86 #define SIZEOF_TRACE_UPROBE(n) \
87 (offsetof(struct trace_uprobe, tp.args) + \
88 (sizeof(struct probe_arg) * (n)))
89
90 static int register_uprobe_event(struct trace_uprobe *tu);
91 static int unregister_uprobe_event(struct trace_uprobe *tu);
92
93 struct uprobe_dispatch_data {
94 struct trace_uprobe *tu;
95 unsigned long bp_addr;
96 };
97
98 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
99 static int uretprobe_dispatcher(struct uprobe_consumer *con,
100 unsigned long func, struct pt_regs *regs);
101
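/*
 * Compute the address of the n-th word on the user stack, honouring the
 * direction in which the stack grows.
 */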
102 #ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
104 {
105 return addr - (n * sizeof(long));
106 }
107 #else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
109 {
110 return addr + (n * sizeof(long));
111 }
112 #endif
113
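/* Read the n-th word from the user stack; returns 0 if the copy faults. */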
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
115 {
116 unsigned long ret;
117 unsigned long addr = user_stack_pointer(regs);
118
119 addr = adjust_stack_addr(addr, n);
120
121 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
122 return 0;
123
124 return ret;
125 }
126
127 /*
128 * Uprobes-specific fetch functions
129 */
130 static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
132 {
133 void __user *vaddr = (void __force __user *)src;
134
135 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
136 }
137
138 static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
140 {
141 return probe_mem_read(dest, src, size);
142 }
143
144 /*
145 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
146 * length and relative data location.
147 */
148 static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
150 {
151 long ret;
152 u32 loc = *(u32 *)dest;
153 int maxlen = get_loc_len(loc);
154 u8 *dst = get_loc_data(dest, base);
155 void __user *src = (void __force __user *) addr;
156
157 if (unlikely(!maxlen))
158 return -ENOMEM;
159
160 if (addr == FETCH_TOKEN_COMM)
161 ret = strlcpy(dst, current->comm, maxlen);
162 else
163 ret = strncpy_from_user(dst, src, maxlen);
164 if (ret >= 0) {
165 if (ret == maxlen)
166 dst[ret - 1] = '\0';
167 else
168 /*
169 * Include the terminating null byte. In this case it
170 * was copied by strncpy_from_user but not accounted
171 * for in ret.
172 */
173 ret++;
174 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
175 }
176
177 return ret;
178 }
179
180 static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
182 {
183 return fetch_store_string(addr, dest, base);
184 }
185
/* Return the length of the string, including the terminating null byte */
187 static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
189 {
190 int len;
191 void __user *vaddr = (void __force __user *) addr;
192
193 if (addr == FETCH_TOKEN_COMM)
194 len = strlen(current->comm) + 1;
195 else
196 len = strnlen_user(vaddr, MAX_STRING_SIZE);
197
198 return (len > MAX_STRING_SIZE) ? 0 : len;
199 }
200
201 static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
203 {
204 return fetch_store_strlen(addr);
205 }
206
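/*
 * Convert a file offset into the virtual address where the file is mapped,
 * using the breakpoint address stashed by the dispatcher in current->utask.
 */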
static unsigned long translate_user_vaddr(unsigned long file_offset)
208 {
209 unsigned long base_addr;
210 struct uprobe_dispatch_data *udd;
211
212 udd = (void *) current->utask->vaddr;
213
214 base_addr = udd->bp_addr - udd->tu->offset;
215 return base_addr + file_offset;
216 }
217
218 /* Note that we don't verify it, since the code does not come from user space */
219 static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
221 void *base)
222 {
223 unsigned long val;
224
225 /* 1st stage: get value from context */
226 switch (code->op) {
227 case FETCH_OP_REG:
228 val = regs_get_register(regs, code->param);
229 break;
230 case FETCH_OP_STACK:
231 val = get_user_stack_nth(regs, code->param);
232 break;
233 case FETCH_OP_STACKP:
234 val = user_stack_pointer(regs);
235 break;
236 case FETCH_OP_RETVAL:
237 val = regs_return_value(regs);
238 break;
239 case FETCH_OP_IMM:
240 val = code->immediate;
241 break;
242 case FETCH_OP_COMM:
243 val = FETCH_TOKEN_COMM;
244 break;
245 case FETCH_OP_DATA:
246 val = (unsigned long)code->data;
247 break;
248 case FETCH_OP_FOFFS:
249 val = translate_user_vaddr(code->immediate);
250 break;
251 default:
252 return -EILSEQ;
253 }
254 code++;
255
256 return process_fetch_insn_bottom(code, val, dest, base);
257 }
NOKPROBE_SYMBOL(process_fetch_insn)
259
260 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
261 {
262 rwlock_init(&filter->rwlock);
263 filter->nr_systemwide = 0;
264 INIT_LIST_HEAD(&filter->perf_events);
265 }
266
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
268 {
269 return !filter->nr_systemwide && list_empty(&filter->perf_events);
270 }
271
static inline bool is_ret_probe(struct trace_uprobe *tu)
273 {
274 return tu->consumer.ret_handler != NULL;
275 }
276
static bool trace_uprobe_is_busy(struct dyn_event *ev)
278 {
279 struct trace_uprobe *tu = to_trace_uprobe(ev);
280
281 return trace_probe_is_enabled(&tu->tp);
282 }
283
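/*
 * Check whether the given command arguments match this trace_uprobe:
 * the PATH:OFFSET[(REF)] part first, then the remaining probe arguments.
 */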
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
285 int argc, const char **argv)
286 {
287 char buf[MAX_ARGSTR_LEN + 1];
288 int len;
289
290 if (!argc)
291 return true;
292
293 len = strlen(tu->filename);
294 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
295 return false;
296
297 if (tu->ref_ctr_offset == 0)
298 snprintf(buf, sizeof(buf), "0x%0*lx",
299 (int)(sizeof(void *) * 2), tu->offset);
300 else
301 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
302 (int)(sizeof(void *) * 2), tu->offset,
303 tu->ref_ctr_offset);
304 if (strcmp(buf, &argv[0][len + 1]))
305 return false;
306
307 argc--; argv++;
308
309 return trace_probe_match_command_args(&tu->tp, argc, argv);
310 }
311
static bool trace_uprobe_match(const char *system, const char *event,
313 int argc, const char **argv, struct dyn_event *ev)
314 {
315 struct trace_uprobe *tu = to_trace_uprobe(ev);
316
317 return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
318 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
319 trace_uprobe_match_command_head(tu, argc, argv);
320 }
321
322 static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
324 {
325 struct trace_probe *tp;
326
327 tp = trace_probe_primary_from_call(call);
328 if (WARN_ON_ONCE(!tp))
329 return NULL;
330
331 return container_of(tp, struct trace_uprobe, tp);
332 }
333
334 /*
335 * Allocate new trace_uprobe and initialize it (including uprobes).
336 */
337 static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
339 {
340 struct trace_uprobe *tu;
341 int ret;
342
343 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
344 if (!tu)
345 return ERR_PTR(-ENOMEM);
346
347 ret = trace_probe_init(&tu->tp, event, group, true);
348 if (ret < 0)
349 goto error;
350
351 dyn_event_init(&tu->devent, &trace_uprobe_ops);
352 tu->consumer.handler = uprobe_dispatcher;
353 if (is_ret)
354 tu->consumer.ret_handler = uretprobe_dispatcher;
355 init_trace_uprobe_filter(tu->tp.event->filter);
356 return tu;
357
358 error:
359 kfree(tu);
360
361 return ERR_PTR(ret);
362 }
363
static void free_trace_uprobe(struct trace_uprobe *tu)
365 {
366 if (!tu)
367 return;
368
369 path_put(&tu->path);
370 trace_probe_cleanup(&tu->tp);
371 kfree(tu->filename);
372 kfree(tu);
373 }
374
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
376 {
377 struct dyn_event *pos;
378 struct trace_uprobe *tu;
379
380 for_each_trace_uprobe(tu, pos)
381 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
382 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
383 return tu;
384
385 return NULL;
386 }
387
388 /* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
390 {
391 int ret;
392
393 if (trace_probe_has_sibling(&tu->tp))
394 goto unreg;
395
396 ret = unregister_uprobe_event(tu);
397 if (ret)
398 return ret;
399
400 unreg:
401 dyn_event_remove(&tu->devent);
402 trace_probe_unlink(&tu->tp);
403 free_trace_uprobe(tu);
404 return 0;
405 }
406
static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
408 struct trace_uprobe *comp)
409 {
410 struct trace_probe_event *tpe = orig->tp.event;
411 struct trace_probe *pos;
412 struct inode *comp_inode = d_real_inode(comp->path.dentry);
413 int i;
414
415 list_for_each_entry(pos, &tpe->probes, list) {
416 orig = container_of(pos, struct trace_uprobe, tp);
417 if (comp_inode != d_real_inode(orig->path.dentry) ||
418 comp->offset != orig->offset)
419 continue;
420
421 /*
422 * trace_probe_compare_arg_type() ensured that nr_args and
423 * each argument name and type are same. Let's compare comm.
424 */
425 for (i = 0; i < orig->tp.nr_args; i++) {
426 if (strcmp(orig->tp.args[i].comm,
427 comp->tp.args[i].comm))
428 break;
429 }
430
431 if (i == orig->tp.nr_args)
432 return true;
433 }
434
435 return false;
436 }
437
static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
439 {
440 int ret;
441
442 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
443 if (ret) {
/* Note that arguments start at index 2 */
445 trace_probe_log_set_index(ret + 1);
446 trace_probe_log_err(0, DIFF_ARG_TYPE);
447 return -EEXIST;
448 }
449 if (trace_uprobe_has_same_uprobe(to, tu)) {
450 trace_probe_log_set_index(0);
451 trace_probe_log_err(0, SAME_PROBE);
452 return -EEXIST;
453 }
454
455 /* Append to existing event */
456 ret = trace_probe_append(&tu->tp, &to->tp);
457 if (!ret)
458 dyn_event_add(&tu->devent);
459
460 return ret;
461 }
462
/*
 * A uprobe with multiple reference counters is not allowed: if the inode
 * and offset match, the reference counter offset *must* match as well.
 * There is one exception: if the user is replacing an old trace_uprobe
 * with a new one (same group/event), we allow the same uprobe with a new
 * reference counter as long as the new one does not conflict with any
 * other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
473 {
474 struct dyn_event *pos;
475 struct trace_uprobe *tmp;
476 struct inode *new_inode = d_real_inode(new->path.dentry);
477
478 for_each_trace_uprobe(tmp, pos) {
479 if (new_inode == d_real_inode(tmp->path.dentry) &&
480 new->offset == tmp->offset &&
481 new->ref_ctr_offset != tmp->ref_ctr_offset) {
482 pr_warn("Reference counter offset mismatch.");
483 return -EINVAL;
484 }
485 }
486 return 0;
487 }
488
489 /* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
491 {
492 struct trace_uprobe *old_tu;
493 int ret;
494
495 mutex_lock(&event_mutex);
496
497 ret = validate_ref_ctr_offset(tu);
498 if (ret)
499 goto end;
500
501 /* register as an event */
502 old_tu = find_probe_event(trace_probe_name(&tu->tp),
503 trace_probe_group_name(&tu->tp));
504 if (old_tu) {
505 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
506 trace_probe_log_set_index(0);
507 trace_probe_log_err(0, DIFF_PROBE_TYPE);
508 ret = -EEXIST;
509 } else {
510 ret = append_trace_uprobe(tu, old_tu);
511 }
512 goto end;
513 }
514
515 ret = register_uprobe_event(tu);
516 if (ret) {
517 pr_warn("Failed to register probe event(%d)\n", ret);
518 goto end;
519 }
520
521 dyn_event_add(&tu->devent);
522
523 end:
524 mutex_unlock(&event_mutex);
525
526 return ret;
527 }
528
529 /*
530 * Argument syntax:
531 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
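 * - e.g. "p:my_event /bin/bash:0x4245c0" (hypothetical offset into the binary)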
532 */
static int trace_uprobe_create(int argc, const char **argv)
534 {
535 struct trace_uprobe *tu;
536 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
537 char *arg, *filename, *rctr, *rctr_end, *tmp;
538 char buf[MAX_EVENT_NAME_LEN];
539 struct path path;
540 unsigned long offset, ref_ctr_offset;
541 bool is_return = false;
542 int i, ret;
543
544 ret = 0;
545 ref_ctr_offset = 0;
546
547 switch (argv[0][0]) {
548 case 'r':
549 is_return = true;
550 break;
551 case 'p':
552 break;
553 default:
554 return -ECANCELED;
555 }
556
557 if (argc < 2)
558 return -ECANCELED;
559
560 if (argv[0][1] == ':')
561 event = &argv[0][2];
562
563 if (!strchr(argv[1], '/'))
564 return -ECANCELED;
565
566 filename = kstrdup(argv[1], GFP_KERNEL);
567 if (!filename)
568 return -ENOMEM;
569
570 /* Find the last occurrence, in case the path contains ':' too. */
571 arg = strrchr(filename, ':');
572 if (!arg || !isdigit(arg[1])) {
573 kfree(filename);
574 return -ECANCELED;
575 }
576
577 trace_probe_log_init("trace_uprobe", argc, argv);
578 trace_probe_log_set_index(1); /* filename is the 2nd argument */
579
580 *arg++ = '\0';
581 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
582 if (ret) {
583 trace_probe_log_err(0, FILE_NOT_FOUND);
584 kfree(filename);
585 trace_probe_log_clear();
586 return ret;
587 }
588 if (!d_is_reg(path.dentry)) {
589 trace_probe_log_err(0, NO_REGULAR_FILE);
590 ret = -EINVAL;
591 goto fail_address_parse;
592 }
593
594 /* Parse reference counter offset if specified. */
595 rctr = strchr(arg, '(');
596 if (rctr) {
597 rctr_end = strchr(rctr, ')');
598 if (!rctr_end) {
599 ret = -EINVAL;
600 rctr_end = rctr + strlen(rctr);
601 trace_probe_log_err(rctr_end - filename,
602 REFCNT_OPEN_BRACE);
603 goto fail_address_parse;
604 } else if (rctr_end[1] != '\0') {
605 ret = -EINVAL;
606 trace_probe_log_err(rctr_end + 1 - filename,
607 BAD_REFCNT_SUFFIX);
608 goto fail_address_parse;
609 }
610
611 *rctr++ = '\0';
612 *rctr_end = '\0';
613 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
614 if (ret) {
615 trace_probe_log_err(rctr - filename, BAD_REFCNT);
616 goto fail_address_parse;
617 }
618 }
619
620 /* Check if there is %return suffix */
621 tmp = strchr(arg, '%');
622 if (tmp) {
623 if (!strcmp(tmp, "%return")) {
624 *tmp = '\0';
625 is_return = true;
626 } else {
627 trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
628 ret = -EINVAL;
629 goto fail_address_parse;
630 }
631 }
632
633 /* Parse uprobe offset. */
634 ret = kstrtoul(arg, 0, &offset);
635 if (ret) {
636 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
637 goto fail_address_parse;
638 }
639
640 /* setup a probe */
641 trace_probe_log_set_index(0);
642 if (event) {
643 ret = traceprobe_parse_event_name(&event, &group, buf,
644 event - argv[0]);
645 if (ret)
646 goto fail_address_parse;
647 } else {
648 char *tail;
649 char *ptr;
650
651 tail = kstrdup(kbasename(filename), GFP_KERNEL);
652 if (!tail) {
653 ret = -ENOMEM;
654 goto fail_address_parse;
655 }
656
657 ptr = strpbrk(tail, ".-_");
658 if (ptr)
659 *ptr = '\0';
660
661 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
662 event = buf;
663 kfree(tail);
664 }
665
666 argc -= 2;
667 argv += 2;
668
669 tu = alloc_trace_uprobe(group, event, argc, is_return);
670 if (IS_ERR(tu)) {
671 ret = PTR_ERR(tu);
672 /* This must return -ENOMEM otherwise there is a bug */
673 WARN_ON_ONCE(ret != -ENOMEM);
674 goto fail_address_parse;
675 }
676 tu->offset = offset;
677 tu->ref_ctr_offset = ref_ctr_offset;
678 tu->path = path;
679 tu->filename = filename;
680
681 /* parse arguments */
682 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
683 tmp = kstrdup(argv[i], GFP_KERNEL);
684 if (!tmp) {
685 ret = -ENOMEM;
686 goto error;
687 }
688
689 trace_probe_log_set_index(i + 2);
690 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
691 is_return ? TPARG_FL_RETURN : 0);
692 kfree(tmp);
693 if (ret)
694 goto error;
695 }
696
697 ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
698 if (ret < 0)
699 goto error;
700
701 ret = register_trace_uprobe(tu);
702 if (!ret)
703 goto out;
704
705 error:
706 free_trace_uprobe(tu);
707 out:
708 trace_probe_log_clear();
709 return ret;
710
711 fail_address_parse:
712 trace_probe_log_clear();
713 path_put(&path);
714 kfree(filename);
715
716 return ret;
717 }
718
static int create_or_delete_trace_uprobe(int argc, char **argv)
720 {
721 int ret;
722
723 if (argv[0][0] == '-')
724 return dyn_event_release(argc, argv, &trace_uprobe_ops);
725
726 ret = trace_uprobe_create(argc, (const char **)argv);
727 return ret == -ECANCELED ? -EINVAL : ret;
728 }
729
static int trace_uprobe_release(struct dyn_event *ev)
731 {
732 struct trace_uprobe *tu = to_trace_uprobe(ev);
733
734 return unregister_trace_uprobe(tu);
735 }
736
737 /* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
739 {
740 struct trace_uprobe *tu = to_trace_uprobe(ev);
741 char c = is_ret_probe(tu) ? 'r' : 'p';
742 int i;
743
744 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
745 trace_probe_name(&tu->tp), tu->filename,
746 (int)(sizeof(void *) * 2), tu->offset);
747
748 if (tu->ref_ctr_offset)
749 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
750
751 for (i = 0; i < tu->tp.nr_args; i++)
752 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
753
754 seq_putc(m, '\n');
755 return 0;
756 }
757
static int probes_seq_show(struct seq_file *m, void *v)
759 {
760 struct dyn_event *ev = v;
761
762 if (!is_trace_uprobe(ev))
763 return 0;
764
765 return trace_uprobe_show(m, ev);
766 }
767
768 static const struct seq_operations probes_seq_op = {
769 .start = dyn_event_seq_start,
770 .next = dyn_event_seq_next,
771 .stop = dyn_event_seq_stop,
772 .show = probes_seq_show
773 };
774
static int probes_open(struct inode *inode, struct file *file)
776 {
777 int ret;
778
779 ret = security_locked_down(LOCKDOWN_TRACEFS);
780 if (ret)
781 return ret;
782
783 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
784 ret = dyn_events_release_all(&trace_uprobe_ops);
785 if (ret)
786 return ret;
787 }
788
789 return seq_open(file, &probes_seq_op);
790 }
791
static ssize_t probes_write(struct file *file, const char __user *buffer,
793 size_t count, loff_t *ppos)
794 {
795 return trace_parse_run_command(file, buffer, count, ppos,
796 create_or_delete_trace_uprobe);
797 }
798
799 static const struct file_operations uprobe_events_ops = {
800 .owner = THIS_MODULE,
801 .open = probes_open,
802 .read = seq_read,
803 .llseek = seq_lseek,
804 .release = seq_release,
805 .write = probes_write,
806 };
807
808 /* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
810 {
811 struct dyn_event *ev = v;
812 struct trace_uprobe *tu;
813
814 if (!is_trace_uprobe(ev))
815 return 0;
816
817 tu = to_trace_uprobe(ev);
818 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
819 trace_probe_name(&tu->tp), tu->nhit);
820 return 0;
821 }
822
823 static const struct seq_operations profile_seq_op = {
824 .start = dyn_event_seq_start,
825 .next = dyn_event_seq_next,
826 .stop = dyn_event_seq_stop,
827 .show = probes_profile_seq_show
828 };
829
static int profile_open(struct inode *inode, struct file *file)
831 {
832 int ret;
833
834 ret = security_locked_down(LOCKDOWN_TRACEFS);
835 if (ret)
836 return ret;
837
838 return seq_open(file, &profile_seq_op);
839 }
840
841 static const struct file_operations uprobe_profile_ops = {
842 .owner = THIS_MODULE,
843 .open = profile_open,
844 .read = seq_read,
845 .llseek = seq_lseek,
846 .release = seq_release,
847 };
848
849 struct uprobe_cpu_buffer {
850 struct mutex mutex;
851 void *buf;
852 };
853 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
854 static int uprobe_buffer_refcnt;
855
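/* Allocate one page per possible CPU to stage fetched probe arguments. */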
static int uprobe_buffer_init(void)
857 {
858 int cpu, err_cpu;
859
860 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
861 if (uprobe_cpu_buffer == NULL)
862 return -ENOMEM;
863
864 for_each_possible_cpu(cpu) {
865 struct page *p = alloc_pages_node(cpu_to_node(cpu),
866 GFP_KERNEL, 0);
867 if (p == NULL) {
868 err_cpu = cpu;
869 goto err;
870 }
871 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
872 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
873 }
874
875 return 0;
876
877 err:
878 for_each_possible_cpu(cpu) {
879 if (cpu == err_cpu)
880 break;
881 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
882 }
883
884 free_percpu(uprobe_cpu_buffer);
885 return -ENOMEM;
886 }
887
static int uprobe_buffer_enable(void)
889 {
890 int ret = 0;
891
892 BUG_ON(!mutex_is_locked(&event_mutex));
893
894 if (uprobe_buffer_refcnt++ == 0) {
895 ret = uprobe_buffer_init();
896 if (ret < 0)
897 uprobe_buffer_refcnt--;
898 }
899
900 return ret;
901 }
902
static void uprobe_buffer_disable(void)
904 {
905 int cpu;
906
907 BUG_ON(!mutex_is_locked(&event_mutex));
908
909 if (--uprobe_buffer_refcnt == 0) {
910 for_each_possible_cpu(cpu)
911 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
912 cpu)->buf);
913
914 free_percpu(uprobe_cpu_buffer);
915 uprobe_cpu_buffer = NULL;
916 }
917 }
918
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
920 {
921 struct uprobe_cpu_buffer *ucb;
922 int cpu;
923
924 cpu = raw_smp_processor_id();
925 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
926
927 /*
928 * Use per-cpu buffers for fastest access, but we might migrate
929 * so the mutex makes sure we have sole access to it.
930 */
931 mutex_lock(&ucb->mutex);
932
933 return ucb;
934 }
935
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
937 {
938 mutex_unlock(&ucb->mutex);
939 }
940
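/*
 * Write one uprobe (or uretprobe) hit, together with its fetched arguments,
 * into the ring buffer of the given trace event file.
 */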
static void __uprobe_trace_func(struct trace_uprobe *tu,
942 unsigned long func, struct pt_regs *regs,
943 struct uprobe_cpu_buffer *ucb, int dsize,
944 struct trace_event_file *trace_file)
945 {
946 struct uprobe_trace_entry_head *entry;
947 struct trace_buffer *buffer;
948 struct ring_buffer_event *event;
949 void *data;
950 int size, esize;
951 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
952
953 WARN_ON(call != trace_file->event_call);
954
955 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
956 return;
957
958 if (trace_trigger_soft_disabled(trace_file))
959 return;
960
961 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
962 size = esize + tu->tp.size + dsize;
963 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
964 call->event.type, size, 0, 0);
965 if (!event)
966 return;
967
968 entry = ring_buffer_event_data(event);
969 if (is_ret_probe(tu)) {
970 entry->vaddr[0] = func;
971 entry->vaddr[1] = instruction_pointer(regs);
972 data = DATAOF_TRACE_ENTRY(entry, true);
973 } else {
974 entry->vaddr[0] = instruction_pointer(regs);
975 data = DATAOF_TRACE_ENTRY(entry, false);
976 }
977
978 memcpy(data, ucb->buf, tu->tp.size + dsize);
979
980 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
981 }
982
983 /* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
985 struct uprobe_cpu_buffer *ucb, int dsize)
986 {
987 struct event_file_link *link;
988
989 if (is_ret_probe(tu))
990 return 0;
991
992 rcu_read_lock();
993 trace_probe_for_each_link_rcu(link, &tu->tp)
994 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
995 rcu_read_unlock();
996
997 return 0;
998 }
999
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1001 struct pt_regs *regs,
1002 struct uprobe_cpu_buffer *ucb, int dsize)
1003 {
1004 struct event_file_link *link;
1005
1006 rcu_read_lock();
1007 trace_probe_for_each_link_rcu(link, &tu->tp)
1008 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1009 rcu_read_unlock();
1010 }
1011
1012 /* Event entry printers */
1013 static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1015 {
1016 struct uprobe_trace_entry_head *entry;
1017 struct trace_seq *s = &iter->seq;
1018 struct trace_uprobe *tu;
1019 u8 *data;
1020
1021 entry = (struct uprobe_trace_entry_head *)iter->ent;
1022 tu = trace_uprobe_primary_from_call(
1023 container_of(event, struct trace_event_call, event));
1024 if (unlikely(!tu))
1025 goto out;
1026
1027 if (is_ret_probe(tu)) {
1028 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1029 trace_probe_name(&tu->tp),
1030 entry->vaddr[1], entry->vaddr[0]);
1031 data = DATAOF_TRACE_ENTRY(entry, true);
1032 } else {
1033 trace_seq_printf(s, "%s: (0x%lx)",
1034 trace_probe_name(&tu->tp),
1035 entry->vaddr[0]);
1036 data = DATAOF_TRACE_ENTRY(entry, false);
1037 }
1038
1039 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1040 goto out;
1041
1042 trace_seq_putc(s, '\n');
1043
1044 out:
1045 return trace_handle_return(s);
1046 }
1047
1048 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1049 enum uprobe_filter_ctx ctx,
1050 struct mm_struct *mm);
1051
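/*
 * Resolve the inode and register the uprobe for this event, using the
 * reference counter offset if one was given.
 */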
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1053 {
1054 int ret;
1055
1056 tu->consumer.filter = filter;
1057 tu->inode = d_real_inode(tu->path.dentry);
1058
1059 if (tu->ref_ctr_offset)
1060 ret = uprobe_register_refctr(tu->inode, tu->offset,
1061 tu->ref_ctr_offset, &tu->consumer);
1062 else
1063 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1064
1065 if (ret)
1066 tu->inode = NULL;
1067
1068 return ret;
1069 }
1070
static void __probe_event_disable(struct trace_probe *tp)
1072 {
1073 struct trace_probe *pos;
1074 struct trace_uprobe *tu;
1075
1076 tu = container_of(tp, struct trace_uprobe, tp);
1077 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1078
1079 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1080 tu = container_of(pos, struct trace_uprobe, tp);
1081 if (!tu->inode)
1082 continue;
1083
1084 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1085 tu->inode = NULL;
1086 }
1087 }
1088
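/*
 * Enable trace or perf collection for this probe event: update the flags,
 * set up the per-cpu buffers and register every sibling uprobe.
 */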
static int probe_event_enable(struct trace_event_call *call,
1090 struct trace_event_file *file, filter_func_t filter)
1091 {
1092 struct trace_probe *pos, *tp;
1093 struct trace_uprobe *tu;
1094 bool enabled;
1095 int ret;
1096
1097 tp = trace_probe_primary_from_call(call);
1098 if (WARN_ON_ONCE(!tp))
1099 return -ENODEV;
1100 enabled = trace_probe_is_enabled(tp);
1101
1102 /* This may also change "enabled" state */
1103 if (file) {
1104 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1105 return -EINTR;
1106
1107 ret = trace_probe_add_file(tp, file);
1108 if (ret < 0)
1109 return ret;
1110 } else {
1111 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1112 return -EINTR;
1113
1114 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1115 }
1116
1117 tu = container_of(tp, struct trace_uprobe, tp);
1118 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1119
1120 if (enabled)
1121 return 0;
1122
1123 ret = uprobe_buffer_enable();
1124 if (ret)
1125 goto err_flags;
1126
1127 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1128 tu = container_of(pos, struct trace_uprobe, tp);
1129 ret = trace_uprobe_enable(tu, filter);
1130 if (ret) {
1131 __probe_event_disable(tp);
1132 goto err_buffer;
1133 }
1134 }
1135
1136 return 0;
1137
1138 err_buffer:
1139 uprobe_buffer_disable();
1140
1141 err_flags:
1142 if (file)
1143 trace_probe_remove_file(tp, file);
1144 else
1145 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1146
1147 return ret;
1148 }
1149
static void probe_event_disable(struct trace_event_call *call,
1151 struct trace_event_file *file)
1152 {
1153 struct trace_probe *tp;
1154
1155 tp = trace_probe_primary_from_call(call);
1156 if (WARN_ON_ONCE(!tp))
1157 return;
1158
1159 if (!trace_probe_is_enabled(tp))
1160 return;
1161
1162 if (file) {
1163 if (trace_probe_remove_file(tp, file) < 0)
1164 return;
1165
1166 if (trace_probe_is_enabled(tp))
1167 return;
1168 } else
1169 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1170
1171 __probe_event_disable(tp);
1172 uprobe_buffer_disable();
1173 }
1174
static int uprobe_event_define_fields(struct trace_event_call *event_call)
1176 {
1177 int ret, size;
1178 struct uprobe_trace_entry_head field;
1179 struct trace_uprobe *tu;
1180
1181 tu = trace_uprobe_primary_from_call(event_call);
1182 if (unlikely(!tu))
1183 return -ENODEV;
1184
1185 if (is_ret_probe(tu)) {
1186 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1187 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1188 size = SIZEOF_TRACE_ENTRY(true);
1189 } else {
1190 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1191 size = SIZEOF_TRACE_ENTRY(false);
1192 }
1193
1194 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1195 }
1196
1197 #ifdef CONFIG_PERF_EVENTS
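/* True if a system-wide event or any attached perf event targets this mm. */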
1198 static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1200 {
1201 struct perf_event *event;
1202
1203 if (filter->nr_systemwide)
1204 return true;
1205
1206 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1207 if (event->hw.target->mm == mm)
1208 return true;
1209 }
1210
1211 return false;
1212 }
1213
1214 static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1216 struct perf_event *event)
1217 {
1218 return __uprobe_perf_filter(filter, event->hw.target->mm);
1219 }
1220
static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1222 struct perf_event *event)
1223 {
1224 bool done;
1225
1226 write_lock(&filter->rwlock);
1227 if (event->hw.target) {
1228 list_del(&event->hw.tp_list);
1229 done = filter->nr_systemwide ||
1230 (event->hw.target->flags & PF_EXITING) ||
1231 trace_uprobe_filter_event(filter, event);
1232 } else {
1233 filter->nr_systemwide--;
1234 done = filter->nr_systemwide;
1235 }
1236 write_unlock(&filter->rwlock);
1237
1238 return done;
1239 }
1240
1241 /* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1243 struct perf_event *event)
1244 {
1245 bool done;
1246
1247 write_lock(&filter->rwlock);
1248 if (event->hw.target) {
1249 /*
1250 * event->parent != NULL means copy_process(), we can avoid
1251 * uprobe_apply(). current->mm must be probed and we can rely
1252 * on dup_mmap() which preserves the already installed bp's.
1253 *
1254 * attr.enable_on_exec means that exec/mmap will install the
1255 * breakpoints we need.
1256 */
1257 done = filter->nr_systemwide ||
1258 event->parent || event->attr.enable_on_exec ||
1259 trace_uprobe_filter_event(filter, event);
1260 list_add(&event->hw.tp_list, &filter->perf_events);
1261 } else {
1262 done = filter->nr_systemwide;
1263 filter->nr_systemwide++;
1264 }
1265 write_unlock(&filter->rwlock);
1266
1267 return done;
1268 }
1269
static int uprobe_perf_close(struct trace_event_call *call,
1271 struct perf_event *event)
1272 {
1273 struct trace_probe *pos, *tp;
1274 struct trace_uprobe *tu;
1275 int ret = 0;
1276
1277 tp = trace_probe_primary_from_call(call);
1278 if (WARN_ON_ONCE(!tp))
1279 return -ENODEV;
1280
1281 tu = container_of(tp, struct trace_uprobe, tp);
1282 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1283 return 0;
1284
1285 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1286 tu = container_of(pos, struct trace_uprobe, tp);
1287 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1288 if (ret)
1289 break;
1290 }
1291
1292 return ret;
1293 }
1294
static int uprobe_perf_open(struct trace_event_call *call,
1296 struct perf_event *event)
1297 {
1298 struct trace_probe *pos, *tp;
1299 struct trace_uprobe *tu;
1300 int err = 0;
1301
1302 tp = trace_probe_primary_from_call(call);
1303 if (WARN_ON_ONCE(!tp))
1304 return -ENODEV;
1305
1306 tu = container_of(tp, struct trace_uprobe, tp);
1307 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1308 return 0;
1309
1310 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1311 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1312 if (err) {
1313 uprobe_perf_close(call, event);
1314 break;
1315 }
1316 }
1317
1318 return err;
1319 }
1320
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1322 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1323 {
1324 struct trace_uprobe_filter *filter;
1325 struct trace_uprobe *tu;
1326 int ret;
1327
1328 tu = container_of(uc, struct trace_uprobe, consumer);
1329 filter = tu->tp.event->filter;
1330
1331 read_lock(&filter->rwlock);
1332 ret = __uprobe_perf_filter(filter, mm);
1333 read_unlock(&filter->rwlock);
1334
1335 return ret;
1336 }
1337
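/*
 * Emit one perf sample for this hit, after giving any attached BPF program
 * a chance to filter it out.
 */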
static void __uprobe_perf_func(struct trace_uprobe *tu,
1339 unsigned long func, struct pt_regs *regs,
1340 struct uprobe_cpu_buffer *ucb, int dsize)
1341 {
1342 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1343 struct uprobe_trace_entry_head *entry;
1344 struct hlist_head *head;
1345 void *data;
1346 int size, esize;
1347 int rctx;
1348
1349 if (bpf_prog_array_valid(call)) {
1350 u32 ret;
1351
1352 preempt_disable();
1353 ret = trace_call_bpf(call, regs);
1354 preempt_enable();
1355 if (!ret)
1356 return;
1357 }
1358
1359 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1360
1361 size = esize + tu->tp.size + dsize;
1362 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1363 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1364 return;
1365
1366 preempt_disable();
1367 head = this_cpu_ptr(call->perf_events);
1368 if (hlist_empty(head))
1369 goto out;
1370
1371 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1372 if (!entry)
1373 goto out;
1374
1375 if (is_ret_probe(tu)) {
1376 entry->vaddr[0] = func;
1377 entry->vaddr[1] = instruction_pointer(regs);
1378 data = DATAOF_TRACE_ENTRY(entry, true);
1379 } else {
1380 entry->vaddr[0] = instruction_pointer(regs);
1381 data = DATAOF_TRACE_ENTRY(entry, false);
1382 }
1383
1384 memcpy(data, ucb->buf, tu->tp.size + dsize);
1385
1386 if (size - esize > tu->tp.size + dsize) {
1387 int len = tu->tp.size + dsize;
1388
1389 memset(data + len, 0, size - esize - len);
1390 }
1391
1392 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1393 head, NULL);
1394 out:
1395 preempt_enable();
1396 }
1397
1398 /* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1400 struct uprobe_cpu_buffer *ucb, int dsize)
1401 {
1402 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1403 return UPROBE_HANDLER_REMOVE;
1404
1405 if (!is_ret_probe(tu))
1406 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1407 return 0;
1408 }
1409
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1411 struct pt_regs *regs,
1412 struct uprobe_cpu_buffer *ucb, int dsize)
1413 {
1414 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1415 }
1416
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1418 const char **filename, u64 *probe_offset,
1419 bool perf_type_tracepoint)
1420 {
1421 const char *pevent = trace_event_name(event->tp_event);
1422 const char *group = event->tp_event->class->system;
1423 struct trace_uprobe *tu;
1424
1425 if (perf_type_tracepoint)
1426 tu = find_probe_event(pevent, group);
1427 else
1428 tu = trace_uprobe_primary_from_call(event->tp_event);
1429 if (!tu)
1430 return -EINVAL;
1431
1432 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1433 : BPF_FD_TYPE_UPROBE;
1434 *filename = tu->filename;
1435 *probe_offset = tu->offset;
1436 return 0;
1437 }
1438 #endif /* CONFIG_PERF_EVENTS */
1439
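/* Registration callback for both ftrace and perf, multiplexed on 'type'. */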
1440 static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1442 void *data)
1443 {
1444 struct trace_event_file *file = data;
1445
1446 switch (type) {
1447 case TRACE_REG_REGISTER:
1448 return probe_event_enable(event, file, NULL);
1449
1450 case TRACE_REG_UNREGISTER:
1451 probe_event_disable(event, file);
1452 return 0;
1453
1454 #ifdef CONFIG_PERF_EVENTS
1455 case TRACE_REG_PERF_REGISTER:
1456 return probe_event_enable(event, NULL, uprobe_perf_filter);
1457
1458 case TRACE_REG_PERF_UNREGISTER:
1459 probe_event_disable(event, NULL);
1460 return 0;
1461
1462 case TRACE_REG_PERF_OPEN:
1463 return uprobe_perf_open(event, data);
1464
1465 case TRACE_REG_PERF_CLOSE:
1466 return uprobe_perf_close(event, data);
1467
1468 #endif
1469 default:
1470 return 0;
1471 }
1472 }
1473
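/* Breakpoint handler: fetch the probe arguments once, then fan out to ftrace and/or perf. */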
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1475 {
1476 struct trace_uprobe *tu;
1477 struct uprobe_dispatch_data udd;
1478 struct uprobe_cpu_buffer *ucb;
1479 int dsize, esize;
1480 int ret = 0;
1481
1482
1483 tu = container_of(con, struct trace_uprobe, consumer);
1484 tu->nhit++;
1485
1486 udd.tu = tu;
1487 udd.bp_addr = instruction_pointer(regs);
1488
1489 current->utask->vaddr = (unsigned long) &udd;
1490
1491 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1492 return 0;
1493
1494 dsize = __get_data_size(&tu->tp, regs);
1495 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1496
1497 ucb = uprobe_buffer_get();
1498 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1499
1500 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1501 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1502
1503 #ifdef CONFIG_PERF_EVENTS
1504 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1505 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1506 #endif
1507 uprobe_buffer_put(ucb);
1508 return ret;
1509 }
1510
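/* Return-probe handler: like uprobe_dispatcher(), but also records the called function address. */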
static int uretprobe_dispatcher(struct uprobe_consumer *con,
1512 unsigned long func, struct pt_regs *regs)
1513 {
1514 struct trace_uprobe *tu;
1515 struct uprobe_dispatch_data udd;
1516 struct uprobe_cpu_buffer *ucb;
1517 int dsize, esize;
1518
1519 tu = container_of(con, struct trace_uprobe, consumer);
1520
1521 udd.tu = tu;
1522 udd.bp_addr = func;
1523
1524 current->utask->vaddr = (unsigned long) &udd;
1525
1526 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1527 return 0;
1528
1529 dsize = __get_data_size(&tu->tp, regs);
1530 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1531
1532 ucb = uprobe_buffer_get();
1533 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1534
1535 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1536 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1537
1538 #ifdef CONFIG_PERF_EVENTS
1539 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1540 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1541 #endif
1542 uprobe_buffer_put(ucb);
1543 return 0;
1544 }
1545
1546 static struct trace_event_functions uprobe_funcs = {
1547 .trace = print_uprobe_event
1548 };
1549
1550 static struct trace_event_fields uprobe_fields_array[] = {
1551 { .type = TRACE_FUNCTION_TYPE,
1552 .define_fields = uprobe_event_define_fields },
1553 {}
1554 };
1555
static inline void init_trace_event_call(struct trace_uprobe *tu)
1557 {
1558 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1559 call->event.funcs = &uprobe_funcs;
1560 call->class->fields_array = uprobe_fields_array;
1561
1562 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1563 call->class->reg = trace_uprobe_register;
1564 }
1565
static int register_uprobe_event(struct trace_uprobe *tu)
1567 {
1568 init_trace_event_call(tu);
1569
1570 return trace_probe_register_event_call(&tu->tp);
1571 }
1572
static int unregister_uprobe_event(struct trace_uprobe *tu)
1574 {
1575 return trace_probe_unregister_event_call(&tu->tp);
1576 }
1577
1578 #ifdef CONFIG_PERF_EVENTS
1579 struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
1581 unsigned long ref_ctr_offset, bool is_return)
1582 {
1583 struct trace_uprobe *tu;
1584 struct path path;
1585 int ret;
1586
1587 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1588 if (ret)
1589 return ERR_PTR(ret);
1590
1591 if (!d_is_reg(path.dentry)) {
1592 path_put(&path);
1593 return ERR_PTR(-EINVAL);
1594 }
1595
/*
 * local trace_uprobes are not added to dyn_event, so they are never
 * searched in find_probe_event(). Therefore, there is no concern of
 * a duplicated name "DUMMY_EVENT" here.
 */
1601 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1602 is_return);
1603
1604 if (IS_ERR(tu)) {
1605 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1606 (int)PTR_ERR(tu));
1607 path_put(&path);
1608 return ERR_CAST(tu);
1609 }
1610
1611 tu->offset = offs;
1612 tu->path = path;
1613 tu->ref_ctr_offset = ref_ctr_offset;
1614 tu->filename = kstrdup(name, GFP_KERNEL);
1615 init_trace_event_call(tu);
1616
1617 if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1618 ret = -ENOMEM;
1619 goto error;
1620 }
1621
1622 return trace_probe_event_call(&tu->tp);
1623 error:
1624 free_trace_uprobe(tu);
1625 return ERR_PTR(ret);
1626 }
1627
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1629 {
1630 struct trace_uprobe *tu;
1631
1632 tu = trace_uprobe_primary_from_call(event_call);
1633
1634 free_trace_uprobe(tu);
1635 }
1636 #endif /* CONFIG_PERF_EVENTS */
1637
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
1640 {
1641 int ret;
1642
1643 ret = dyn_event_register(&trace_uprobe_ops);
1644 if (ret)
1645 return ret;
1646
1647 ret = tracing_init_dentry();
1648 if (ret)
1649 return 0;
1650
1651 trace_create_file("uprobe_events", 0644, NULL,
1652 NULL, &uprobe_events_ops);
1653 /* Profile interface */
1654 trace_create_file("uprobe_profile", 0444, NULL,
1655 NULL, &uprobe_profile_ops);
1656 return 0;
1657 }
1658
1659 fs_initcall(init_uprobe_trace);
1660