Lines Matching +full:post +full:- +full:processing

1 // SPDX-License-Identifier: GPL-2.0-only
27 #include <asm/debug-monitors.h>
36 #include "decode-insn.h"
46 kprobe_opcode_t *addr = p->ainsn.api.insn; in arch_prepare_ss_slot()
52 * - That the I-cache maintenance for these instructions is complete in arch_prepare_ss_slot()
54 * ensures this, but just omits causing a Context-Synchronization-Event in arch_prepare_ss_slot()
57 * - That the kprobe BRK results in an exception (and consequently a in arch_prepare_ss_slot()
58 * Context-Synchronization-Event), which ensures that the CPU will in arch_prepare_ss_slot()
59 * fetch the single-step slot instructions *after* this, ensuring that in arch_prepare_ss_slot()
62 * It supposes to place ISB after patching to guarantee I-cache maintenance in arch_prepare_ss_slot()
63 * is observed on all CPUS, however, single-step slot is installed in in arch_prepare_ss_slot()
65 * Context-Synchronization-Event via ISB again. in arch_prepare_ss_slot()
67 aarch64_insn_patch_text_nosync(addr, p->opcode); in arch_prepare_ss_slot()
73 p->ainsn.api.restore = (unsigned long) p->addr + in arch_prepare_ss_slot()
80 p->ainsn.api.restore = 0; in arch_prepare_simulate()
87 if (p->ainsn.api.handler) in arch_simulate_insn()
88 p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); in arch_simulate_insn()
90 /* single step simulated, now go for post processing */ in arch_simulate_insn()
96 unsigned long probe_addr = (unsigned long)p->addr; in arch_prepare_kprobe()
99 return -EINVAL; in arch_prepare_kprobe()
102 p->opcode = le32_to_cpu(*p->addr); in arch_prepare_kprobe()
105 return -EINVAL; in arch_prepare_kprobe()
108 switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) { in arch_prepare_kprobe()
110 return -EINVAL; in arch_prepare_kprobe()
113 p->ainsn.api.insn = NULL; in arch_prepare_kprobe()
117 p->ainsn.api.insn = get_insn_slot(); in arch_prepare_kprobe()
118 if (!p->ainsn.api.insn) in arch_prepare_kprobe()
119 return -ENOMEM; in arch_prepare_kprobe()
124 if (p->ainsn.api.insn) in arch_prepare_kprobe()
142 void *addr = p->addr; in arch_arm_kprobe()
151 void *addr = p->addr; in arch_disarm_kprobe()
153 aarch64_insn_patch_text(&addr, &p->opcode, 1); in arch_disarm_kprobe()
158 if (p->ainsn.api.insn) { in arch_remove_kprobe()
159 free_insn_slot(p->ainsn.api.insn, 0); in arch_remove_kprobe()
160 p->ainsn.api.insn = NULL; in arch_remove_kprobe()
166 kcb->prev_kprobe.kp = kprobe_running(); in save_previous_kprobe()
167 kcb->prev_kprobe.status = kcb->kprobe_status; in save_previous_kprobe()
172 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); in restore_previous_kprobe()
173 kcb->kprobe_status = kcb->prev_kprobe.status; in restore_previous_kprobe()
182 * Mask all of DAIF while executing the instruction out-of-line, to keep things
184 * the kprobe state is per-CPU and doesn't get migrated.
189 kcb->saved_irqflag = regs->pstate & DAIF_MASK; in kprobes_save_local_irqflag()
190 regs->pstate |= DAIF_MASK; in kprobes_save_local_irqflag()
196 regs->pstate &= ~DAIF_MASK; in kprobes_restore_local_irqflag()
197 regs->pstate |= kcb->saved_irqflag; in kprobes_restore_local_irqflag()
209 kcb->kprobe_status = KPROBE_REENTER; in setup_singlestep()
211 kcb->kprobe_status = KPROBE_HIT_SS; in setup_singlestep()
215 if (p->ainsn.api.insn) { in setup_singlestep()
217 slot = (unsigned long)p->ainsn.api.insn; in setup_singlestep()
231 switch (kcb->kprobe_status) { in reenter_kprobe()
254 /* return addr restore if non-branching insn */ in post_kprobe_handler()
255 if (cur->ainsn.api.restore != 0) in post_kprobe_handler()
256 instruction_pointer_set(regs, cur->ainsn.api.restore); in post_kprobe_handler()
259 if (kcb->kprobe_status == KPROBE_REENTER) { in post_kprobe_handler()
263 /* call post handler */ in post_kprobe_handler()
264 kcb->kprobe_status = KPROBE_HIT_SSDONE; in post_kprobe_handler()
265 if (cur->post_handler) in post_kprobe_handler()
266 cur->post_handler(cur, regs, 0); in post_kprobe_handler()
276 switch (kcb->kprobe_status) { in kprobe_fault_handler()
286 instruction_pointer_set(regs, (unsigned long) cur->addr); in kprobe_fault_handler()
289 if (kcb->kprobe_status == KPROBE_REENTER) { in kprobe_fault_handler()
300 * In case the user-specified fault handler returned in kprobe_fault_handler()
327 kcb->kprobe_status = KPROBE_HIT_ACTIVE; in kprobe_handler()
330 * If we have no pre-handler or it returned 0, we in kprobe_handler()
331 * continue with normal processing. If we have a in kprobe_handler()
332 * pre-handler and it returned non-zero, it will in kprobe_handler()
336 if (!p->pre_handler || !p->pre_handler(p, regs)) { in kprobe_handler()
359 if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) && in kprobe_breakpoint_ss_handler()
360 ((unsigned long)&cur->ainsn.api.insn[1] == addr)) { in kprobe_breakpoint_ss_handler()
419 return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]); in trampoline_probe_handler()
425 ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; in arch_prepare_kretprobe()
426 ri->fp = (void *)regs->regs[29]; in arch_prepare_kretprobe()
429 regs->regs[30] = (long)&__kretprobe_trampoline; in arch_prepare_kretprobe()