Lines matching references to: kp

50 struct kprobe *kp; in __recover_optprobed_insn() (local)
55 kp = get_kprobe((void *)addr - i); in __recover_optprobed_insn()
57 if (kp && kprobe_optimized(kp)) { in __recover_optprobed_insn()
58 op = container_of(kp, struct optimized_kprobe, kp); in __recover_optprobed_insn()
76 if (addr == (unsigned long)kp->addr) { in __recover_optprobed_insn()
77 buf[0] = kp->opcode; in __recover_optprobed_insn()
80 offs = addr - (unsigned long)kp->addr - 1; in __recover_optprobed_insn()
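
The fragments at lines 50-80 all come from __recover_optprobed_insn(), which rebuilds the original instruction bytes when addr lands inside a region that has been overwritten by an optimized kprobe's relative jump. A condensed sketch of that logic, paraphrased rather than quoted verbatim (RELATIVEJUMP_SIZE, RELATIVE_ADDR_SIZE, kp->opcode and op->optinsn.copied_insn are the usual x86 kprobes definitions; exact bodies vary by kernel version):

	/* Walk back over the 5-byte jump window looking for a jump-optimized
	 * kprobe whose patched region covers addr. */
	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			if (list_empty(&op->list))	/* not currently being (un)optimized */
				goto found;
		}
	}
	return addr;				/* nothing to recover */
found:
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (addr == (unsigned long)kp->addr) {
		/* Probe point itself: saved first opcode, then the backed-up tail. */
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		/* Address inside the overwritten range: copy the backup bytes
		 * starting at the matching offset. */
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}
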
159 if (kprobe_disabled(&op->kp)) in optimized_callback()
164 kprobes_inc_nmissed_count(&op->kp); in optimized_callback()
174 regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; in optimized_callback()
177 __this_cpu_write(current_kprobe, &op->kp); in optimized_callback()
179 opt_pre_handler(&op->kp, regs); in optimized_callback()
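
Lines 159-179 are from optimized_callback(), the C handler that the detour buffer calls instead of taking an int3 trap. A condensed sketch (paraphrased; register save/restore and irq handling elided, kcb comes from get_kprobe_ctlblk()):

	if (kprobe_disabled(&op->kp))
		return;				/* probe is being disabled/unoptimized */

	if (kprobe_running()) {
		/* Another kprobe is already active on this CPU: just count a miss. */
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Make regs look exactly like an int3 hit at the probe address. */
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
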
325 p = get_kprobe(op->kp.addr + i); in arch_check_optimized_kprobe()
337 return ((unsigned long)op->kp.addr <= addr && in arch_within_optimized_kprobe()
338 (unsigned long)op->kp.addr + op->optinsn.size > addr); in arch_within_optimized_kprobe()
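
Lines 337-338 are the whole body of arch_within_optimized_kprobe(): an address collides with an optimized probe if it falls inside the half-open range [op->kp.addr, op->kp.addr + op->optinsn.size). As a purely illustrative example, with op->kp.addr == 0x1000 and op->optinsn.size == 7, addresses 0x1000 through 0x1006 are reported as inside the optimized region and 0x1007 is not.
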
369 if (!can_optimize((unsigned long)op->kp.addr)) in arch_prepare_optimized_kprobe()
386 rel = (long)slot - (long)op->kp.addr + RELATIVEJUMP_SIZE; in arch_prepare_optimized_kprobe()
396 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr, in arch_prepare_optimized_kprobe()
412 (u8 *)op->kp.addr + op->optinsn.size); in arch_prepare_optimized_kprobe()
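
Lines 369-412 come from arch_prepare_optimized_kprobe(), which builds the out-of-line detour buffer before any kernel text is patched. Roughly, and paraphrased (the copy_optimized_instructions() call is truncated in the listing above and its argument list differs between kernel versions):

	/* Refuse code that cannot legally be covered by a 5-byte jump. */
	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	/* Reserve a detour slot and verify it is within rel32 (+/-2GB) reach. */
	op->optinsn.insn = slot = get_optinsn_slot();
	rel = (long)slot - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff)
		return -ERANGE;

	/* copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr, ...) then
	 * relocates the probed instructions into the buffer, and
	 * synthesize_reljump(..., (u8 *)op->kp.addr + op->optinsn.size) appends a
	 * jump back to the first original byte after the copied region. */
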
438 ((long)op->kp.addr + RELATIVEJUMP_SIZE)); in arch_optimize_kprobes()
440 WARN_ON(kprobe_disabled(&op->kp)); in arch_optimize_kprobes()
443 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, in arch_optimize_kprobes()
449 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, in arch_optimize_kprobes()
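
Lines 438-449 are from the loop in arch_optimize_kprobes() that installs the jump: compute the rel32 displacement from the probe site to the detour buffer, back up the four bytes the displacement will clobber, then let text_poke_bp() swap in the 5-byte jump using the int3-breakpoint patching protocol. Condensed sketch (paraphrased):

	s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

	WARN_ON(kprobe_disabled(&op->kp));

	/* Back up the bytes after the int3 that the rel32 will overwrite. */
	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
	       RELATIVE_ADDR_SIZE);

	/* Build "jmp rel32" and patch it in; the last argument is where a CPU
	 * that hits the transient int3 is redirected. */
	insn_buf[0] = RELATIVEJUMP_OPCODE;
	*(s32 *)(&insn_buf[1]) = rel;
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
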
464 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, in arch_unoptimize_kprobe()
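
Line 464 is the mirror operation in arch_unoptimize_kprobe(): the same text_poke_bp() call, but the buffer now starts with the int3 breakpoint followed by the backed-up copied_insn bytes, which puts the probe back into its ordinary breakpoint form. Sketch (paraphrased):

	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
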
489 op = container_of(p, struct optimized_kprobe, kp); in setup_detour_execution()
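
Line 489 shows the container_of() idiom most of the matches above rely on: struct optimized_kprobe embeds its struct kprobe as the member kp, so any kp pointer handed to generic kprobes code can be mapped back to the enclosing optimized_kprobe. In setup_detour_execution() this is used to divert execution into the detour buffer instead of single-stepping. Paraphrased sketch:

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* p is the embedded 'kp' member; recover the enclosing structure. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Resume in the copied/relocated instructions, past the template. */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
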