Lines Matching +full:ri +full:- +full:override
1 // SPDX-License-Identifier: GPL-2.0-or-later
9 * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
69 s64 rel = ((s64) to - (s64) from) >> 4; in set_brl_inst()
72 brl->quad0.template = 0x05; /* [MLX](stop) */ in set_brl_inst()
73 brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */ in set_brl_inst()
74 brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2; in set_brl_inst()
75 brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46); in set_brl_inst()
77 brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff); in set_brl_inst()
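The set_brl_inst() lines above scatter one bundle-granular displacement across two slots of an MLX bundle: bits 20..58 go into slot 1, bits 0..19 plus the sign bit go into slot 2 via BRL_INST(). As a rough stand-alone illustration of that split (field names imm39/imm20b/i follow the IA-64 long-branch layout; the helper and struct names below are invented for the example and are not kernel code):

#include <assert.h>
#include <stdint.h>

struct brl_fields {
        uint64_t imm39;         /* displacement bits 20..58, lands in slot 1 */
        uint64_t imm20b;        /* displacement bits 0..19, lands in slot 2  */
        uint64_t i;             /* displacement bit 59 (sign), also slot 2   */
};

/* mirror of the masking/shifting done in set_brl_inst() above */
static struct brl_fields split_brl_disp(int64_t rel)
{
        struct brl_fields f = {
                .imm39  = ((uint64_t)rel >> 20) & 0x7fffffffffULL,
                .imm20b = (uint64_t)rel & 0xfffff,
                .i      = ((uint64_t)rel >> 59) & 1,
        };
        return f;
}

/* inverse: rebuild the signed 60-bit displacement from the fields */
static int64_t join_brl_disp(struct brl_fields f)
{
        uint64_t raw = (f.i << 59) | (f.imm39 << 20) | f.imm20b;

        return (int64_t)(raw << 4) >> 4;        /* sign-extend from bit 59 */
}

int main(void)
{
        int64_t rel = -0x12345678abcdLL;        /* any displacement that fits in 60 bits */

        assert(join_brl_disp(split_brl_disp(rel)) == rel);
        return 0;
}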
90 p->ainsn.inst_flag = 0; in update_kprobe_inst_flag()
91 p->ainsn.target_br_reg = 0; in update_kprobe_inst_flag()
92 p->ainsn.slot = slot; in update_kprobe_inst_flag()
101 p->ainsn.inst_flag |= INST_FLAG_BREAK_INST; in update_kprobe_inst_flag()
108 p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; in update_kprobe_inst_flag()
109 p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); in update_kprobe_inst_flag()
113 p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; in update_kprobe_inst_flag()
116 p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; in update_kprobe_inst_flag()
117 p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; in update_kprobe_inst_flag()
118 p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); in update_kprobe_inst_flag()
124 p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; in update_kprobe_inst_flag()
125 p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); in update_kprobe_inst_flag()
155 /* Integer compare - Register Register (A6 type)*/ in is_cmp_ctype_unc_inst()
160 /* Integer compare - Immediate Register (A8 type)*/ in is_cmp_ctype_unc_inst()
172 * Returns -EINVAL if unsupported
187 return -EINVAL; in unsupported_inst()
196 * - Bit 33-35 to be equal to 0x1 in unsupported_inst()
202 return -EINVAL; in unsupported_inst()
206 * - Bit 27-35 to be equal to 0x30 in unsupported_inst()
212 return -EINVAL; in unsupported_inst()
219 * bit 33-36 to be equal to 0 in unsupported_inst()
226 return -EINVAL; in unsupported_inst()
233 /* IP-Relative Predict major code is 7 */ in unsupported_inst()
234 printk(KERN_WARNING "Kprobes on IP-Relative" in unsupported_inst()
236 return -EINVAL; in unsupported_inst()
240 * bit 27-32 to be equal to 10 or 11 in unsupported_inst()
246 return -EINVAL; in unsupported_inst()
261 return -EINVAL; in unsupported_inst()
273 return -EINVAL; in unsupported_inst()
282 * In this function we override the bundle with
292 bundle_t *bundle = &p->opcode.bundle; in prepare_break_inst()
302 bundle->quad0.slot0 = break_inst; in prepare_break_inst()
305 bundle->quad0.slot1_p0 = break_inst; in prepare_break_inst()
306 bundle->quad1.slot1_p1 = break_inst >> (64-46); in prepare_break_inst()
309 bundle->quad1.slot2 = break_inst; in prepare_break_inst()
327 template = bundle->quad0.template; in get_kprobe_inst()
331 *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT); in get_kprobe_inst()
332 *kprobe_inst = bundle->quad0.slot0; in get_kprobe_inst()
335 *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT); in get_kprobe_inst()
336 kprobe_inst_p0 = bundle->quad0.slot1_p0; in get_kprobe_inst()
337 kprobe_inst_p1 = bundle->quad1.slot1_p1; in get_kprobe_inst()
338 *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46)); in get_kprobe_inst()
341 *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT); in get_kprobe_inst()
342 *kprobe_inst = bundle->quad1.slot2; in get_kprobe_inst()
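The recurring "(64 - 46)" shifts in prepare_break_inst() and get_kprobe_inst() above come from the bundle layout: 5 template bits plus three 41-bit slots in 128 bits means slot 1 straddles the two 64-bit halves, with its low 18 bits in quad0 and its high 23 bits in quad1. A minimal sketch of that split, using GCC-style bit-fields; the struct and helper names are invented here, but the field widths match what the arithmetic above implies:

#include <assert.h>

struct demo_bundle {
        struct {
                unsigned long long tmpl     : 5;
                unsigned long long slot0    : 41;
                unsigned long long slot1_p0 : 18;       /* low 18 bits of slot 1  */
        } quad0;
        struct {
                unsigned long long slot1_p1 : 23;       /* high 23 bits of slot 1 */
                unsigned long long slot2    : 41;
        } quad1;
};

/* write a 41-bit instruction into slot 1, as prepare_break_inst() does */
static void put_slot1(struct demo_bundle *b, unsigned long long inst)
{
        b->quad0.slot1_p0 = inst;               /* bit-field keeps the low 18 bits */
        b->quad1.slot1_p1 = inst >> (64 - 46);  /* remaining 23 bits               */
}

/* read it back, as get_kprobe_inst() does */
static unsigned long long get_slot1(const struct demo_bundle *b)
{
        return b->quad0.slot1_p0 |
               ((unsigned long long)b->quad1.slot1_p1 << (64 - 46));
}

int main(void)
{
        struct demo_bundle b = { { 0 }, { 0 } };
        unsigned long long inst = 0x123456789abULL & ((1ULL << 41) - 1);

        put_slot1(&b, inst);
        assert(get_slot1(&b) == inst);
        return 0;
}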
347 /* Returns non-zero if the addr is in the Interrupt Vector Table */
360 return -EINVAL; in valid_kprobe_addr()
366 return -EINVAL; in valid_kprobe_addr()
375 i = atomic_add_return(1, &kcb->prev_kprobe_index); in save_previous_kprobe()
376 kcb->prev_kprobe[i-1].kp = kprobe_running(); in save_previous_kprobe()
377 kcb->prev_kprobe[i-1].status = kcb->kprobe_status; in save_previous_kprobe()
383 i = atomic_read(&kcb->prev_kprobe_index); in restore_previous_kprobe()
384 __this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp); in restore_previous_kprobe()
385 kcb->kprobe_status = kcb->prev_kprobe[i-1].status; in restore_previous_kprobe()
386 atomic_sub(1, &kcb->prev_kprobe_index); in restore_previous_kprobe()
401 regs->cr_iip = __kretprobe_trampoline_handler(regs, NULL); in trampoline_probe_handler()
403 * By returning a non-zero value, we are telling in trampoline_probe_handler()
405 * to run (and have re-enabled preemption) in trampoline_probe_handler()
410 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, in arch_prepare_kretprobe() argument
413 ri->ret_addr = (kprobe_opcode_t *)regs->b0; in arch_prepare_kretprobe()
414 ri->fp = NULL; in arch_prepare_kretprobe()
417 regs->b0 = (unsigned long)dereference_function_descriptor(__kretprobe_trampoline); in arch_prepare_kretprobe()
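The arch_prepare_kretprobe() lines above are the usual return-probe trick: the real return address in b0 is stashed in the kretprobe instance (ri->ret_addr) and b0 is pointed at __kretprobe_trampoline, so the probed function "returns" into kprobe code first, and trampoline_probe_handler() later drops the saved address into cr_iip. A toy model of that round trip; the struct, the addresses and the helper names are stand-ins, not the kernel API:

#include <assert.h>
#include <stdint.h>

struct toy_regs {
        uint64_t b0;            /* return branch register */
        uint64_t iip;           /* instruction pointer    */
};

static uint64_t saved_ret_addr;                 /* plays the role of ri->ret_addr */
static const uint64_t toy_trampoline = 0x1000;  /* stand-in trampoline address    */

static void toy_prepare_kretprobe(struct toy_regs *regs)
{
        saved_ret_addr = regs->b0;      /* remember where the callee would really return */
        regs->b0 = toy_trampoline;      /* ...but detour through the trampoline first    */
}

static void toy_trampoline_handler(struct toy_regs *regs)
{
        regs->iip = saved_ret_addr;     /* resume at the original return address */
}

int main(void)
{
        struct toy_regs regs = { .b0 = 0x2000, .iip = 0 };

        toy_prepare_kretprobe(&regs);
        assert(regs.b0 == toy_trampoline);      /* the "return" now hits the trampoline */

        toy_trampoline_handler(&regs);
        assert(regs.iip == 0x2000);             /* control goes back to the real caller */
        return 0;
}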
424 unsigned int template = bundle->quad0.template; in __is_ia64_break_inst()
455 unsigned int template = bundle->quad0.template; in can_boost()
474 unsigned long addr = (unsigned long)p->addr & ~0xFULL; in prepare_booster()
475 unsigned int slot = (unsigned long)p->addr & 0xf; in prepare_booster()
478 if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) { in prepare_booster()
479 set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1); in prepare_booster()
480 p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE; in prepare_booster()
484 for (; addr < (unsigned long)p->addr; addr++) { in prepare_booster()
487 other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE; in prepare_booster()
493 unsigned long addr = (unsigned long) p->addr; in arch_prepare_kprobe()
500 bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle; in arch_prepare_kprobe()
501 template = bundle->quad0.template; in arch_prepare_kprobe()
504 return -EINVAL; in arch_prepare_kprobe()
515 return -EINVAL; in arch_prepare_kprobe()
517 p->ainsn.insn = get_insn_slot(); in arch_prepare_kprobe()
518 if (!p->ainsn.insn) in arch_prepare_kprobe()
519 return -ENOMEM; in arch_prepare_kprobe()
520 memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t)); in arch_prepare_kprobe()
521 memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t)); in arch_prepare_kprobe()
535 arm_addr = ((unsigned long)p->addr) & ~0xFUL; in arch_arm_kprobe()
536 dest = &((kprobe_opcode_t *)arm_addr)->bundle; in arch_arm_kprobe()
537 src = &p->opcode.bundle; in arch_arm_kprobe()
539 flush_icache_range((unsigned long)p->ainsn.insn, in arch_arm_kprobe()
540 (unsigned long)p->ainsn.insn + in arch_arm_kprobe()
543 switch (p->ainsn.slot) { in arch_arm_kprobe()
545 dest->quad0.slot0 = src->quad0.slot0; in arch_arm_kprobe()
548 dest->quad1.slot1_p1 = src->quad1.slot1_p1; in arch_arm_kprobe()
551 dest->quad1.slot2 = src->quad1.slot2; in arch_arm_kprobe()
562 arm_addr = ((unsigned long)p->addr) & ~0xFUL; in arch_disarm_kprobe()
563 dest = &((kprobe_opcode_t *)arm_addr)->bundle; in arch_disarm_kprobe()
564 /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */ in arch_disarm_kprobe()
565 src = &p->ainsn.insn->bundle; in arch_disarm_kprobe()
566 switch (p->ainsn.slot) { in arch_disarm_kprobe()
568 dest->quad0.slot0 = src->quad0.slot0; in arch_disarm_kprobe()
571 dest->quad1.slot1_p1 = src->quad1.slot1_p1; in arch_disarm_kprobe()
574 dest->quad1.slot2 = src->quad1.slot2; in arch_disarm_kprobe()
582 if (p->ainsn.insn) { in arch_remove_kprobe()
583 free_insn_slot(p->ainsn.insn, in arch_remove_kprobe()
584 p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); in arch_remove_kprobe()
585 p->ainsn.insn = NULL; in arch_remove_kprobe()
591 * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
598 unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle); in resume_execution()
599 unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; in resume_execution()
601 int slot = ((unsigned long)p->addr & 0xf); in resume_execution()
603 template = p->ainsn.insn->bundle.quad0.template; in resume_execution()
608 if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) { in resume_execution()
610 if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { in resume_execution()
612 regs->cr_iip = (regs->cr_iip - bundle_addr) + in resume_execution()
616 if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) { in resume_execution()
622 switch (p->ainsn.target_br_reg) { in resume_execution()
624 if ((regs->b0 == bundle_addr) || in resume_execution()
625 (regs->b0 == bundle_addr + 0x10)) { in resume_execution()
626 regs->b0 = (regs->b0 - bundle_addr) + in resume_execution()
631 if ((regs->b6 == bundle_addr) || in resume_execution()
632 (regs->b6 == bundle_addr + 0x10)) { in resume_execution()
633 regs->b6 = (regs->b6 - bundle_addr) + in resume_execution()
638 if ((regs->b7 == bundle_addr) || in resume_execution()
639 (regs->b7 == bundle_addr + 0x10)) { in resume_execution()
640 regs->b7 = (regs->b7 - bundle_addr) + in resume_execution()
650 if (regs->cr_iip == bundle_addr + 0x10) { in resume_execution()
651 regs->cr_iip = resume_addr + 0x10; in resume_execution()
654 if (regs->cr_iip == bundle_addr) { in resume_execution()
655 regs->cr_iip = resume_addr; in resume_execution()
661 ia64_psr(regs)->ss = 0; in resume_execution()
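The branch-register fix-ups in resume_execution() above all follow one pattern: if a register still points at the out-of-line copy (the copied bundle or the bundle right after it), rebase it onto the original probed location. A small sketch of that arithmetic, with an invented helper name and stand-in addresses:

#include <assert.h>
#include <stdint.h>

/*
 * If val still points into the out-of-line copy, move it to the
 * corresponding original address; otherwise leave it alone.
 */
static uint64_t rebase_after_ss(uint64_t val, uint64_t bundle_addr,
                                uint64_t resume_addr)
{
        if (val == bundle_addr || val == bundle_addr + 0x10)
                return (val - bundle_addr) + resume_addr;
        return val;
}

int main(void)
{
        uint64_t bundle_addr = 0x1000;  /* out-of-line copy (stand-in address) */
        uint64_t resume_addr = 0x2040;  /* original probed bundle (stand-in)   */

        /* fell through past the copy -> continue at the next original bundle */
        assert(rebase_after_ss(bundle_addr + 0x10, bundle_addr, resume_addr) ==
               resume_addr + 0x10);
        /* a branch that went somewhere unrelated is left untouched */
        assert(rebase_after_ss(0x5000, bundle_addr, resume_addr) == 0x5000);
        return 0;
}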
666 unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle; in prepare_ss()
667 unsigned long slot = (unsigned long)p->addr & 0xf; in prepare_ss()
670 if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST) in prepare_ss()
671 regs->cr_iip = (unsigned long)p->addr & ~0xFULL; in prepare_ss()
673 regs->cr_iip = bundle_addr & ~0xFULL; in prepare_ss()
678 ia64_psr(regs)->ri = slot; in prepare_ss()
681 ia64_psr(regs)->ss = 1; in prepare_ss()
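prepare_ss() above shows how IA-64 names a single instruction: cr_iip holds the 16-byte bundle address, psr.ri selects the slot (0-2) inside it, and a kprobe address encodes that slot in its low bits (the "& ~0xFULL" / "& 0xf" pairs throughout this file). A tiny illustration of that encoding; the struct and helper are invented for the example:

#include <assert.h>
#include <stdint.h>

struct toy_ip {
        uint64_t iip;           /* 16-byte aligned bundle address */
        unsigned int ri;        /* slot within the bundle, 0..2   */
};

/* split a slot-encoded probe address into bundle address + slot */
static void toy_set_ip(struct toy_ip *ip, uint64_t addr)
{
        ip->iip = addr & ~0xfULL;       /* the bundle the address falls in */
        ip->ri  = addr & 0xf;           /* low bits carry the slot number  */
}

int main(void)
{
        struct toy_ip ip;

        toy_set_ip(&ip, 0x4042);        /* stand-in: bundle 0x4040, slot 2 */
        assert(ip.iip == 0x4040 && ip.ri == 2);
        return 0;
}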
686 unsigned int slot = ia64_psr(regs)->ri; in is_ia64_break_inst()
687 unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; in is_ia64_break_inst()
699 struct pt_regs *regs = args->regs; in pre_kprobes_handler()
714 if ((kcb->kprobe_status == KPROBE_HIT_SS) && in pre_kprobes_handler()
715 (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { in pre_kprobes_handler()
716 ia64_psr(regs)->ss = 0; in pre_kprobes_handler()
729 kcb->kprobe_status = KPROBE_REENTER; in pre_kprobes_handler()
763 kcb->kprobe_status = KPROBE_HIT_ACTIVE; in pre_kprobes_handler()
765 if (p->pre_handler && p->pre_handler(p, regs)) { in pre_kprobes_handler()
772 if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { in pre_kprobes_handler()
773 /* Boost up -- we can execute copied instructions directly */ in pre_kprobes_handler()
774 ia64_psr(regs)->ri = p->ainsn.slot; in pre_kprobes_handler()
775 regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL; in pre_kprobes_handler()
777 ia64_psr(regs)->ss = 0; in pre_kprobes_handler()
785 kcb->kprobe_status = KPROBE_HIT_SS; in pre_kprobes_handler()
801 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { in post_kprobes_handler()
802 kcb->kprobe_status = KPROBE_HIT_SSDONE; in post_kprobes_handler()
803 cur->post_handler(cur, regs, 0); in post_kprobes_handler()
809 if (kcb->kprobe_status == KPROBE_REENTER) { in post_kprobes_handler()
826 switch(kcb->kprobe_status) { in kprobe_fault_handler()
836 regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; in kprobe_fault_handler()
837 ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; in kprobe_fault_handler()
838 if (kcb->kprobe_status == KPROBE_REENTER) in kprobe_fault_handler()
847 * In case the user-specified fault handler returned in kprobe_fault_handler()
870 if (args->regs && user_mode(args->regs)) in kprobe_exceptions_notify()
876 if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12) in kprobe_exceptions_notify()
877 || args->err == 0) in kprobe_exceptions_notify()
883 if (args->err == 36) in kprobe_exceptions_notify()
884 if (post_kprobes_handler(args->regs)) in kprobe_exceptions_notify()
906 if (p->addr == in arch_trampoline_kprobe()