Lines Matching refs: uprobe
67 struct uprobe { struct
373 static struct uprobe *get_uprobe(struct uprobe *uprobe) in get_uprobe() argument
375 atomic_inc(&uprobe->ref); in get_uprobe()
376 return uprobe; in get_uprobe()
379 static void put_uprobe(struct uprobe *uprobe) in put_uprobe() argument
381 if (atomic_dec_and_test(&uprobe->ref)) in put_uprobe()
382 kfree(uprobe); in put_uprobe()
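get_uprobe() and put_uprobe() are the usual reference-counting pair: every holder of a struct uprobe pins it with a count, and the last put frees it. A minimal user-space sketch of the same pattern, using C11 atomics instead of the kernel's atomic_t (illustrative names, not the kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
        atomic_int ref;                     /* creator starts it at 1 */
        /* payload would follow */
    };

    static struct obj *obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->ref, 1);       /* mirrors atomic_inc(&uprobe->ref) */
        return o;
    }

    static void obj_put(struct obj *o)
    {
        /* fetch_sub returns the old value: old == 1 means last reference */
        if (atomic_fetch_sub(&o->ref, 1) == 1)
            free(o);                        /* mirrors kfree(uprobe) */
    }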
385 static int match_uprobe(struct uprobe *l, struct uprobe *r) in match_uprobe()
402 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset) in __find_uprobe()
404 struct uprobe u = { .inode = inode, .offset = offset }; in __find_uprobe()
406 struct uprobe *uprobe; in __find_uprobe() local
410 uprobe = rb_entry(n, struct uprobe, rb_node); in __find_uprobe()
411 match = match_uprobe(&u, uprobe); in __find_uprobe()
413 return get_uprobe(uprobe); in __find_uprobe()
427 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset) in find_uprobe()
429 struct uprobe *uprobe; in find_uprobe() local
432 uprobe = __find_uprobe(inode, offset); in find_uprobe()
435 return uprobe; in find_uprobe()
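__find_uprobe() builds a stack-local key and descends uprobes_tree, letting match_uprobe() decide left, right, or found at each node, then returns the match with an extra reference already taken; find_uprobe() is the locked wrapper around it. A simplified sketch of the same keyed descent over a plain binary search tree (the kernel uses rb_node/rb_entry and holds uprobes_treelock around the walk):

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in node keyed like struct uprobe: (inode, offset). */
    struct node {
        uint64_t inode;
        int64_t  offset;
        struct node *left, *right;
    };

    /* Same contract as match_uprobe(): negative, zero, or positive. */
    static int match_key(uint64_t inode, int64_t offset, const struct node *n)
    {
        if (inode != n->inode)
            return inode < n->inode ? -1 : 1;
        if (offset != n->offset)
            return offset < n->offset ? -1 : 1;
        return 0;
    }

    static struct node *find_node(struct node *root, uint64_t inode, int64_t offset)
    {
        while (root) {
            int match = match_key(inode, offset, root);

            if (!match)
                return root;    /* __find_uprobe() takes a reference here */
            root = match < 0 ? root->left : root->right;
        }
        return NULL;
    }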
438 static struct uprobe *__insert_uprobe(struct uprobe *uprobe) in __insert_uprobe() argument
442 struct uprobe *u; in __insert_uprobe()
447 u = rb_entry(parent, struct uprobe, rb_node); in __insert_uprobe()
448 match = match_uprobe(uprobe, u); in __insert_uprobe()
460 rb_link_node(&uprobe->rb_node, parent, p); in __insert_uprobe()
461 rb_insert_color(&uprobe->rb_node, &uprobes_tree); in __insert_uprobe()
463 atomic_set(&uprobe->ref, 2); in __insert_uprobe()
476 static struct uprobe *insert_uprobe(struct uprobe *uprobe) in insert_uprobe() argument
478 struct uprobe *u; in insert_uprobe()
481 u = __insert_uprobe(uprobe); in insert_uprobe()
487 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) in alloc_uprobe()
489 struct uprobe *uprobe, *cur_uprobe; in alloc_uprobe() local
491 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); in alloc_uprobe()
492 if (!uprobe) in alloc_uprobe()
495 uprobe->inode = inode; in alloc_uprobe()
496 uprobe->offset = offset; in alloc_uprobe()
497 init_rwsem(&uprobe->register_rwsem); in alloc_uprobe()
498 init_rwsem(&uprobe->consumer_rwsem); in alloc_uprobe()
501 cur_uprobe = insert_uprobe(uprobe); in alloc_uprobe()
504 kfree(uprobe); in alloc_uprobe()
505 uprobe = cur_uprobe; in alloc_uprobe()
508 return uprobe; in alloc_uprobe()
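Note the atomic_set(&uprobe->ref, 2) on a successful insert: one count belongs to the rb-tree, one is handed back to the caller. alloc_uprobe() then follows the alloc-or-reuse pattern: allocate and initialize a candidate, try to insert it, and if an equal uprobe is already in the tree, free the candidate and continue with the existing one. A simplified, self-contained sketch over a sorted list instead of an rb-tree (hypothetical names):

    #include <stdint.h>
    #include <stdlib.h>

    struct probe {
        int64_t key;            /* stands in for the (inode, offset) pair */
        int refs;
        struct probe *next;
    };

    /* Insert into a sorted list; on a key collision, take a reference on the
     * existing entry and return it (as __insert_uprobe() does), otherwise link
     * the new entry with two references and return NULL. */
    static struct probe *list_insert(struct probe **head, struct probe *new_probe)
    {
        struct probe **p = head;

        while (*p && (*p)->key < new_probe->key)
            p = &(*p)->next;
        if (*p && (*p)->key == new_probe->key) {
            (*p)->refs++;       /* get_uprobe() on the clashing node */
            return *p;
        }

        new_probe->refs = 2;    /* one for the list, one for the caller */
        new_probe->next = *p;
        *p = new_probe;
        return NULL;
    }

    /* alloc_uprobe(), simplified: allocate, try to insert, reuse on collision. */
    static struct probe *alloc_probe(struct probe **head, int64_t key)
    {
        struct probe *cur, *n = calloc(1, sizeof(*n));  /* kzalloc() */

        if (!n)
            return NULL;
        n->key = key;

        cur = list_insert(head, n);
        if (cur) {              /* a probe at this key already existed */
            free(n);            /* kfree(uprobe) */
            n = cur;
        }
        return n;
    }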
511 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) in consumer_add() argument
513 down_write(&uprobe->consumer_rwsem); in consumer_add()
514 uc->next = uprobe->consumers; in consumer_add()
515 uprobe->consumers = uc; in consumer_add()
516 up_write(&uprobe->consumer_rwsem); in consumer_add()
524 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) in consumer_del() argument
529 down_write(&uprobe->consumer_rwsem); in consumer_del()
530 for (con = &uprobe->consumers; *con; con = &(*con)->next) { in consumer_del()
537 up_write(&uprobe->consumer_rwsem); in consumer_del()
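Consumers hang off uprobe->consumers as a singly linked list guarded by consumer_rwsem: consumer_add() pushes at the head, and consumer_del() walks with a pointer-to-pointer so the matching link can be rewritten without tracking a previous node. A user-space sketch of the same list discipline, with a pthread rwlock standing in for the rwsem:

    #include <pthread.h>
    #include <stdbool.h>

    struct consumer {
        struct consumer *next;
        /* handler callbacks would live here */
    };

    struct probe_head {
        struct consumer *consumers;
        pthread_rwlock_t consumer_lock; /* consumer_rwsem; pthread_rwlock_init() first */
    };

    static void consumer_push(struct probe_head *p, struct consumer *uc)
    {
        pthread_rwlock_wrlock(&p->consumer_lock);   /* down_write() */
        uc->next = p->consumers;                    /* push at the head */
        p->consumers = uc;
        pthread_rwlock_unlock(&p->consumer_lock);   /* up_write() */
    }

    static bool consumer_remove(struct probe_head *p, struct consumer *uc)
    {
        struct consumer **con;
        bool found = false;

        pthread_rwlock_wrlock(&p->consumer_lock);
        /* pointer-to-pointer walk: *con is the link to rewrite on a match */
        for (con = &p->consumers; *con; con = &(*con)->next) {
            if (*con == uc) {
                *con = uc->next;
                found = true;
                break;
            }
        }
        pthread_rwlock_unlock(&p->consumer_lock);
        return found;
    }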
564 static int copy_insn(struct uprobe *uprobe, struct file *filp) in copy_insn() argument
566 struct address_space *mapping = uprobe->inode->i_mapping; in copy_insn()
567 loff_t offs = uprobe->offset; in copy_insn()
568 void *insn = &uprobe->arch.insn; in copy_insn()
569 int size = sizeof(uprobe->arch.insn); in copy_insn()
574 if (offs >= i_size_read(uprobe->inode)) in copy_insn()
590 static int prepare_uprobe(struct uprobe *uprobe, struct file *file, in prepare_uprobe() argument
595 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) in prepare_uprobe()
599 down_write(&uprobe->consumer_rwsem); in prepare_uprobe()
600 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) in prepare_uprobe()
603 ret = copy_insn(uprobe, file); in prepare_uprobe()
608 if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) in prepare_uprobe()
611 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); in prepare_uprobe()
616 BUG_ON((uprobe->offset & ~PAGE_MASK) + in prepare_uprobe()
620 set_bit(UPROBE_COPY_INSN, &uprobe->flags); in prepare_uprobe()
623 up_write(&uprobe->consumer_rwsem); in prepare_uprobe()
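prepare_uprobe() does the expensive work (copying the probed instruction and running the arch analysis) at most once per uprobe: UPROBE_COPY_INSN is tested before taking consumer_rwsem, re-tested under it, and set only after everything succeeded. A hedged sketch of that double-checked one-time initialization; copy_and_analyze() is an illustrative stub, not a kernel function:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct probe_state {
        atomic_bool insn_copied;        /* stands in for UPROBE_COPY_INSN */
        pthread_rwlock_t lock;          /* stands in for consumer_rwsem */
    };

    /* Stub for illustration: the real code reads the mapped file page and
     * runs arch_uprobe_analyze_insn(). */
    static int copy_and_analyze(struct probe_state *p)
    {
        (void)p;
        return 0;
    }

    static int prepare_once(struct probe_state *p)
    {
        int ret = 0;

        if (atomic_load(&p->insn_copied))       /* fast path, no lock */
            return 0;

        pthread_rwlock_wrlock(&p->lock);
        if (atomic_load(&p->insn_copied))       /* re-check under the lock */
            goto out;

        ret = copy_and_analyze(p);
        if (!ret)
            atomic_store(&p->insn_copied, true); /* publish only on success */
    out:
        pthread_rwlock_unlock(&p->lock);
        return ret;
    }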
634 static bool filter_chain(struct uprobe *uprobe, in filter_chain() argument
640 down_read(&uprobe->consumer_rwsem); in filter_chain()
641 for (uc = uprobe->consumers; uc; uc = uc->next) { in filter_chain()
646 up_read(&uprobe->consumer_rwsem); in filter_chain()
652 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, in install_breakpoint() argument
658 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); in install_breakpoint()
670 ret = set_swbp(&uprobe->arch, mm, vaddr); in install_breakpoint()
680 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) in remove_breakpoint() argument
683 return set_orig_insn(&uprobe->arch, mm, vaddr); in remove_breakpoint()
686 static inline bool uprobe_is_active(struct uprobe *uprobe) in uprobe_is_active() argument
688 return !RB_EMPTY_NODE(&uprobe->rb_node); in uprobe_is_active()
695 static void delete_uprobe(struct uprobe *uprobe) in delete_uprobe() argument
697 if (WARN_ON(!uprobe_is_active(uprobe))) in delete_uprobe()
701 rb_erase(&uprobe->rb_node, &uprobes_tree); in delete_uprobe()
703 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ in delete_uprobe()
704 put_uprobe(uprobe); in delete_uprobe()
791 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) in register_for_each_vma() argument
798 info = build_map_info(uprobe->inode->i_mapping, in register_for_each_vma()
799 uprobe->offset, is_register); in register_for_each_vma()
815 file_inode(vma->vm_file) != uprobe->inode) in register_for_each_vma()
819 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) in register_for_each_vma()
826 err = install_breakpoint(uprobe, mm, vma, info->vaddr); in register_for_each_vma()
828 if (!filter_chain(uprobe, in register_for_each_vma()
830 err |= remove_breakpoint(uprobe, mm, info->vaddr); in register_for_each_vma()
845 __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) in __uprobe_unregister() argument
849 if (WARN_ON(!consumer_del(uprobe, uc))) in __uprobe_unregister()
852 err = register_for_each_vma(uprobe, NULL); in __uprobe_unregister()
854 if (!uprobe->consumers && !err) in __uprobe_unregister()
855 delete_uprobe(uprobe); in __uprobe_unregister()
866 struct uprobe *uprobe; in uprobe_unregister() local
868 uprobe = find_uprobe(inode, offset); in uprobe_unregister()
869 if (WARN_ON(!uprobe)) in uprobe_unregister()
872 down_write(&uprobe->register_rwsem); in uprobe_unregister()
873 __uprobe_unregister(uprobe, uc); in uprobe_unregister()
874 up_write(&uprobe->register_rwsem); in uprobe_unregister()
875 put_uprobe(uprobe); in uprobe_unregister()
900 struct uprobe *uprobe; in __uprobe_register() local
915 uprobe = alloc_uprobe(inode, offset); in __uprobe_register()
916 if (!uprobe) in __uprobe_register()
922 down_write(&uprobe->register_rwsem); in __uprobe_register()
924 if (likely(uprobe_is_active(uprobe))) { in __uprobe_register()
925 consumer_add(uprobe, uc); in __uprobe_register()
926 ret = register_for_each_vma(uprobe, uc); in __uprobe_register()
928 __uprobe_unregister(uprobe, uc); in __uprobe_register()
930 up_write(&uprobe->register_rwsem); in __uprobe_register()
931 put_uprobe(uprobe); in __uprobe_register()
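The register path is carefully ordered: alloc_uprobe() returns a referenced (new or existing) uprobe, then under register_rwsem the consumer is linked and breakpoints are installed in every relevant mapping, with __uprobe_unregister() rolling things back if installation fails, and the allocation reference is dropped at the end. A sketch of that ordering; every helper below is a hypothetical stand-in for the kernel function named in its comment:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct consumer { struct consumer *next; };

    struct probe {
        pthread_rwlock_t register_lock;         /* register_rwsem */
        struct consumer *consumers;
        bool active;                            /* uprobe_is_active() */
    };

    /* Stubs standing in for alloc_uprobe(), put_uprobe(), consumer_add(),
     * register_for_each_vma() and __uprobe_unregister(). */
    static struct probe *probe_alloc_or_reuse(void) { return NULL; }
    static void probe_put(struct probe *p) { (void)p; }
    static void consumer_link(struct probe *p, struct consumer *c)
    { c->next = p->consumers; p->consumers = c; }
    static int install_everywhere(struct probe *p, struct consumer *c)
    { (void)p; (void)c; return 0; }
    static void unregister_consumer(struct probe *p, struct consumer *c)
    { (void)p; (void)c; }

    static int register_probe(struct consumer *uc)
    {
        struct probe *p = probe_alloc_or_reuse();   /* alloc_uprobe() */
        int ret;

        if (!p)
            return -ENOMEM;

        pthread_rwlock_wrlock(&p->register_lock);   /* down_write(register_rwsem) */
        ret = -EAGAIN;                              /* raced with a delete; kernel retries */
        if (p->active) {                            /* uprobe_is_active() */
            consumer_link(p, uc);                   /* consumer_add() */
            ret = install_everywhere(p, uc);        /* register_for_each_vma() */
            if (ret)
                unregister_consumer(p, uc);         /* roll back on failure */
        }
        pthread_rwlock_unlock(&p->register_lock);
        probe_put(p);                               /* drop the allocation reference */
        return ret;
    }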
955 struct uprobe *uprobe; in uprobe_apply() local
959 uprobe = find_uprobe(inode, offset); in uprobe_apply()
960 if (WARN_ON(!uprobe)) in uprobe_apply()
963 down_write(&uprobe->register_rwsem); in uprobe_apply()
964 for (con = uprobe->consumers; con && con != uc ; con = con->next) in uprobe_apply()
967 ret = register_for_each_vma(uprobe, add ? uc : NULL); in uprobe_apply()
968 up_write(&uprobe->register_rwsem); in uprobe_apply()
969 put_uprobe(uprobe); in uprobe_apply()
974 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) in unapply_uprobe() argument
985 file_inode(vma->vm_file) != uprobe->inode) in unapply_uprobe()
989 if (uprobe->offset < offset || in unapply_uprobe()
990 uprobe->offset >= offset + vma->vm_end - vma->vm_start) in unapply_uprobe()
993 vaddr = offset_to_vaddr(vma, uprobe->offset); in unapply_uprobe()
994 err |= remove_breakpoint(uprobe, mm, vaddr); in unapply_uprobe()
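unapply_uprobe() removes the breakpoint from a vma only if uprobe->offset falls inside the file range that vma maps, then translates the file offset back to a user virtual address. A self-contained sketch of that range check and the offset_to_vaddr()-style arithmetic (field names abbreviated, 4 KiB pages assumed):

    #include <stdint.h>
    #include <stdbool.h>

    #define PG_SHIFT 12                 /* assumed 4 KiB pages */

    struct vma {
        uint64_t vm_start, vm_end;      /* user virtual address range */
        uint64_t vm_pgoff;              /* file offset of vm_start, in pages */
    };

    /* Does this vma map the byte at file offset 'off'? */
    static bool vma_covers_offset(const struct vma *v, uint64_t off)
    {
        uint64_t file_start = v->vm_pgoff << PG_SHIFT;

        return off >= file_start &&
               off <  file_start + (v->vm_end - v->vm_start);
    }

    /* offset_to_vaddr(): translate a file offset into a user address. */
    static uint64_t offset_to_vaddr(const struct vma *v, uint64_t off)
    {
        return v->vm_start + off - (v->vm_pgoff << PG_SHIFT);
    }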
1007 struct uprobe *u = rb_entry(n, struct uprobe, rb_node); in find_node_in_range()
1036 struct uprobe *u; in build_probe_list()
1046 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1053 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1072 struct uprobe *uprobe, *u; in uprobe_mmap() local
1089 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { in uprobe_mmap()
1091 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap()
1092 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); in uprobe_mmap()
1093 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
1095 put_uprobe(uprobe); in uprobe_mmap()
1306 static unsigned long xol_get_insn_slot(struct uprobe *uprobe) in xol_get_insn_slot() argument
1320 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); in xol_get_insn_slot()
1403 put_uprobe(ri->uprobe); in free_ret_instance()
1464 get_uprobe(n->uprobe); in dup_utask()
1556 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) in prepare_uretprobe() argument
1607 ri->uprobe = get_uprobe(uprobe); in prepare_uretprobe()
1624 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) in pre_ssout() argument
1634 xol_vaddr = xol_get_insn_slot(uprobe); in pre_ssout()
1641 err = arch_uprobe_pre_xol(&uprobe->arch, regs); in pre_ssout()
1647 utask->active_uprobe = uprobe; in pre_ssout()
1736 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) in find_active_uprobe()
1739 struct uprobe *uprobe = NULL; in find_active_uprobe() local
1749 uprobe = find_uprobe(inode, offset); in find_active_uprobe()
1752 if (!uprobe) in find_active_uprobe()
1758 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) in find_active_uprobe()
1762 return uprobe; in find_active_uprobe()
1765 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) in handler_chain() argument
1771 down_read(&uprobe->register_rwsem); in handler_chain()
1772 for (uc = uprobe->consumers; uc; uc = uc->next) { in handler_chain()
1788 prepare_uretprobe(uprobe, regs); /* put bp at return */ in handler_chain()
1790 if (remove && uprobe->consumers) { in handler_chain()
1791 WARN_ON(!uprobe_is_active(uprobe)); in handler_chain()
1792 unapply_uprobe(uprobe, current->mm); in handler_chain()
1794 up_read(&uprobe->register_rwsem); in handler_chain()
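handler_chain() runs with register_rwsem held for read, calls every consumer's handler, and unapplies the breakpoint from the current mm only if every handler voted for removal (remove &= rc); handle_uretprobe_chain() below walks the same list for return handlers. A sketch of the chain walk and the all-must-agree removal vote:

    #include <pthread.h>
    #include <stdbool.h>

    #define HANDLER_REMOVE 1            /* stands in for UPROBE_HANDLER_REMOVE */

    struct consumer {
        struct consumer *next;
        int (*handler)(struct consumer *self, void *regs);
    };

    struct probe {
        pthread_rwlock_t register_lock; /* register_rwsem */
        struct consumer *consumers;
    };

    /* Walk the consumer chain under a read lock; ask every handler, and only
     * request breakpoint removal if *all* handlers voted to remove. */
    static bool run_handler_chain(struct probe *p, void *regs)
    {
        struct consumer *uc;
        bool remove = true;

        pthread_rwlock_rdlock(&p->register_lock);   /* down_read() */
        for (uc = p->consumers; uc; uc = uc->next) {
            int rc = uc->handler ? uc->handler(uc, regs) : 0;

            remove &= (rc == HANDLER_REMOVE);
        }
        /* the kernel calls unapply_uprobe() here when remove is still true
         * and the consumer list is non-empty */
        pthread_rwlock_unlock(&p->register_lock);
        return remove;
    }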
1800 struct uprobe *uprobe = ri->uprobe; in handle_uretprobe_chain() local
1803 down_read(&uprobe->register_rwsem); in handle_uretprobe_chain()
1804 for (uc = uprobe->consumers; uc; uc = uc->next) { in handle_uretprobe_chain()
1808 up_read(&uprobe->register_rwsem); in handle_uretprobe_chain()
1882 struct uprobe *uprobe; in handle_swbp() local
1890 uprobe = find_active_uprobe(bp_vaddr, &is_swbp); in handle_swbp()
1891 if (!uprobe) { in handle_swbp()
1918 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) in handle_swbp()
1925 if (arch_uprobe_ignore(&uprobe->arch, regs)) in handle_swbp()
1928 handler_chain(uprobe, regs); in handle_swbp()
1930 if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) in handle_swbp()
1933 if (!pre_ssout(uprobe, regs, bp_vaddr)) in handle_swbp()
1938 put_uprobe(uprobe); in handle_swbp()
1947 struct uprobe *uprobe; in handle_singlestep() local
1950 uprobe = utask->active_uprobe; in handle_singlestep()
1952 err = arch_uprobe_post_xol(&uprobe->arch, regs); in handle_singlestep()
1954 arch_uprobe_abort_xol(&uprobe->arch, regs); in handle_singlestep()
1958 put_uprobe(uprobe); in handle_singlestep()