Lines Matching +full:0 +full:- +full:1

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * User-space Probes (UProbes) for x86
5 * Copyright (C) IBM Corporation, 2008-2011
21 /* Post-execution fixups. */
24 #define UPROBE_FIX_IP 0x01
27 #define UPROBE_FIX_CALL 0x02
30 #define UPROBE_FIX_SETF 0x04
32 #define UPROBE_FIX_RIP_SI 0x08
33 #define UPROBE_FIX_RIP_DI 0x10
34 #define UPROBE_FIX_RIP_BX 0x20
41 #define OPCODE1(insn) ((insn)->opcode.bytes[0])
42 #define OPCODE2(insn) ((insn)->opcode.bytes[1])
43 #define OPCODE3(insn) ((insn)->opcode.bytes[2])
44 #define MODRM_REG(insn) X86_MODRM_REG((insn)->modrm.value)
47 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
48 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
49 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
50 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
54 * Good-instruction tables for 32-bit apps. This is non-const and volatile
59 * 6c-6f - ins,outs. SEGVs if used in userspace
60 * e4-e7 - in,out imm. SEGVs if used in userspace
61 * ec-ef - in,out acc. SEGVs if used in userspace
62 * cc - int3. SIGTRAP if used in userspace
63 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
65 * f1 - int1. SIGTRAP if used in userspace
66 * f4 - hlt. SEGVs if used in userspace
67 * fa - cli. SEGVs if used in userspace
68 * fb - sti. SEGVs if used in userspace
71 * 07,17,1f - pop es/ss/ds
75 * of userspace single-stepping (TF flag) is fragile.
76 * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
78 * cd - int N.
80 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
81 * Not supported since kernel's handling of userspace single-stepping
83 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
87 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
88 /* ---------------------------------------------- */
89 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
90 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
91 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
92 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
93 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
94 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
95 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
96 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
97 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
98 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
99 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
100 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
101 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
102 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
103 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
104 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
105 /* ---------------------------------------------- */
106 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
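
The W() rows above pack one validity flag per opcode, sixteen opcodes per row, so the whole one-byte opcode space fits in a small bitmap and a single bit test decides whether an opcode may be probed. Below is a minimal stand-alone sketch of the same packing and lookup; the names demo_good and opcode_is_good are invented for illustration (the kernel itself does the lookup with test_bit() on its good_insns tables):

#include <stdio.h>
#include <stdint.h>

/*
 * Pack 16 per-opcode flags into bits (row % 32) .. (row % 32) + 15 of a
 * 32-bit word; two adjacent rows OR'ed together fill one array element.
 */
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7,				\
	       b8, b9, ba, bb, bc, bd, be, bf)				\
	(((b0##U <<  0)|(b1##U <<  1)|(b2##U <<  2)|(b3##U <<  3)|	\
	  (b4##U <<  4)|(b5##U <<  5)|(b6##U <<  6)|(b7##U <<  7)|	\
	  (b8##U <<  8)|(b9##U <<  9)|(ba##U << 10)|(bb##U << 11)|	\
	  (bc##U << 12)|(bd##U << 13)|(be##U << 14)|(bf##U << 15))	\
	 << ((row) % 32))

/* Demo table covering only opcodes 0x00-0x1f; 0x07 and 0x0f marked bad. */
static const uint32_t demo_good[] = {
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) |
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
};

static int opcode_is_good(uint8_t op)
{
	return (demo_good[op / 32] >> (op % 32)) & 1;
}

int main(void)
{
	printf("0x06 -> %d\n", opcode_is_good(0x06));	/* 1: probeable */
	printf("0x07 -> %d\n", opcode_is_good(0x07));	/* 0: rejected  */
	return 0;
}

Rows joined with '|' share one 32-bit word and the ',' after every second row starts the next array element, which is exactly the alternating pattern visible in the tables above.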
112 /* Good-instruction tables for 64-bit apps.
115 * 06,07 - formerly push/pop es
116 * 0e - formerly push cs
117 * 16,17 - formerly push/pop ss
118 * 1e,1f - formerly push/pop ds
119 * 27,2f,37,3f - formerly daa/das/aaa/aas
120 * 60,61 - formerly pusha/popa
121 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
122 * 82 - formerly redundant encoding of Group1
123 * 9a - formerly call seg:ofs
124 * ce - formerly into
125 * d4,d5 - formerly aam/aad
126 * d6 - formerly undocumented salc
127 * ea - formerly jmp seg:ofs
130 * 6c-6f - ins,outs. SEGVs if used in userspace
131 * e4-e7 - in,out imm. SEGVs if used in userspace
132 * ec-ef - in,out acc. SEGVs if used in userspace
133 * cc - int3. SIGTRAP if used in userspace
134 * f1 - int1. SIGTRAP if used in userspace
135 * f4 - hlt. SEGVs if used in userspace
136 * fa - cli. SEGVs if used in userspace
137 * fb - sti. SEGVs if used in userspace
140 * cd - int N.
142 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
143 * Not supported since kernel's handling of userspace single-stepping
145 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
149 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
150 /* ---------------------------------------------- */
151 W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
152 W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
153 W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
154 W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
155 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
156 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
157 W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
158 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
159 W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
160 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
161 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
162 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
163 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
164 W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
165 W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
166 W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
167 /* ---------------------------------------------- */
168 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
174 /* Using this for both 64-bit and 32-bit apps.
176 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
177 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
179 * Some are in fact non-system: xend, xtest, rdtscp, maybe more
180 * 0f 05 - syscall
181 * 0f 06 - clts (CPL0 insn)
182 * 0f 07 - sysret
183 * 0f 08 - invd (CPL0 insn)
184 * 0f 09 - wbinvd (CPL0 insn)
185 * 0f 0b - ud2
186 * 0f 30 - wrmsr (CPL0 insn) (then why is rdmsr allowed? it is also a CPL0 insn)
187 * 0f 34 - sysenter
188 * 0f 35 - sysexit
189 * 0f 37 - getsec
190 * 0f 78 - vmread (Intel VMX. CPL0 insn)
191 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
194 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
196 * Why? They are all user-executable.
199 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
200 /* ---------------------------------------------- */
201 W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
202 W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
203 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
204 W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
205 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
206 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
207 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
208 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
209 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
210 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
211 W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
212 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
213 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
214 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
215 W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
216 W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* f0 */
217 /* ---------------------------------------------- */
218 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
225 * 0f - 2-byte instructions: For many of these instructions, the validity
230 * 8f - Group 1 - only reg = 0 is OK
231 * c6-c7 - Group 11 - only reg = 0 is OK
232 * d9-df - fpu insns with some illegal encodings
233 * f2, f3 - repnz, repz prefixes. These are also the first byte for
234 * certain floating-point instructions, such as addsd.
236 * fe - Group 4 - only reg = 0 or 1 is OK
237 * ff - Group 5 - only reg = 0-6 is OK
239 * others -- Do we need to support these?
241 * 0f - (floating-point?) prefetch instructions
242 * 07, 17, 1f - pop es, pop ss, pop ds
243 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
245 * 67 - addr16 prefix
246 * ce - into
247 * f0 - lock prefix
252 * - Where necessary, examine the modrm byte and allow only valid instructions
283 ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m); in uprobe_init_insn()
284 if (ret < 0) in uprobe_init_insn()
285 return -ENOEXEC; in uprobe_init_insn()
288 return -ENOTSUPP; in uprobe_init_insn()
292 return -ENOTSUPP; in uprobe_init_insn()
300 return 0; in uprobe_init_insn()
302 if (insn->opcode.nbytes == 2) { in uprobe_init_insn()
304 return 0; in uprobe_init_insn()
307 return -ENOTSUPP; in uprobe_init_insn()
312 * If arch_uprobe->insn doesn't use rip-relative addressing, return
315 * defparam->fixups accordingly. (The contents of the scratch register
316 * will be saved before we single-step the modified instruction,
319 * We do this because a rip-relative instruction can access only a
320 * relatively small area (+/- 2 GB from the instruction), and the XOL
326 * Some useful facts about rip-relative instructions:
328 * - There's always a modrm byte with bit layout "00 reg 101".
329 * - There's never a SIB byte.
330 * - The displacement is always 4 bytes.
331 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
332 * has no effect on rip-relative mode. It doesn't make modrm byte
349 if (insn->rex_prefix.nbytes) { in riprel_analyze()
350 cursor = auprobe->insn + insn_offset_rex_prefix(insn); in riprel_analyze()
352 *cursor &= 0xfe; in riprel_analyze()
358 if (insn->vex_prefix.nbytes >= 3) { in riprel_analyze()
364 * Setting EVEX.x since (in non-SIB encoding) EVEX.x in riprel_analyze()
366 * For VEX3-encoded insns, VEX3.x value has no effect in in riprel_analyze()
367 * non-SIB encoding, the change is superfluous but harmless. in riprel_analyze()
369 cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; in riprel_analyze()
370 *cursor |= 0x60; in riprel_analyze()
374 * Convert from rip-relative addressing to register-relative addressing in riprel_analyze()
383 * Encoding: 0f c7/1 modrm in riprel_analyze()
384 * The code below thinks that reg=1 (cx), chooses si as scratch. in riprel_analyze()
386 * First appeared in Haswell (BMI2 insn). It is vex-encoded. in riprel_analyze()
388 * c4 e2 63 f6 0d disp32 mulx disp32(%rip),%ebx,%ecx in riprel_analyze()
395 * Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm. in riprel_analyze()
396 * Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi). in riprel_analyze()
397 * AMD says it has no 3-operand form (vex.vvvv must be 1111) in riprel_analyze()
407 * BP is stack-segment based (may be a problem?). in riprel_analyze()
408 * AX, DX, CX are off-limits (many implicit users). in riprel_analyze()
409 * SP is unusable (it's stack pointer - think about "pop mem"; in riprel_analyze()
410 * also, rsp+disp32 needs sib encoding -> insn length change). in riprel_analyze()
414 reg2 = 0xff; /* Fetch vex.vvvv */ in riprel_analyze()
415 if (insn->vex_prefix.nbytes) in riprel_analyze()
416 reg2 = insn->vex_prefix.bytes[2]; in riprel_analyze()
420 * vex.vvvv field is in bits 6-3, bits are inverted. in riprel_analyze()
421 * But in 32-bit mode, the high-order bit may be ignored. in riprel_analyze()
422 * Therefore, let's consider only 3 low-order bits. in riprel_analyze()
424 reg2 = ((reg2 >> 3) & 0x7) ^ 0x7; in riprel_analyze()
433 auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI; in riprel_analyze()
436 auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI; in riprel_analyze()
440 auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX; in riprel_analyze()
447 cursor = auprobe->insn + insn_offset_modrm(insn); in riprel_analyze()
453 *cursor = 0x80 | (reg << 3) | reg2; in riprel_analyze()
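
As a worked example of the rewrite above (the instruction and addresses are made up for illustration): consider `mov 0x1000(%rip),%eax`, encoded 8b 05 00 10 00 00. Its ModRM byte 0x05 is mod=00, reg=000 (ax), rm=101, the rip-relative pattern noted earlier. With no VEX prefix, reg2 also resolves to 0 (ax), so neither register is SI and SI is chosen as the scratch register (UPROBE_FIX_RIP_SI). The ModRM byte is then rewritten to 0x80 | (0 << 3) | 6 = 0x86, giving `mov 0x1000(%rsi),%eax`; the 4-byte displacement is left alone, so the instruction length does not change.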
459 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI) in scratch_reg()
460 return &regs->si; in scratch_reg()
461 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI) in scratch_reg()
462 return &regs->di; in scratch_reg()
463 return &regs->bx; in scratch_reg()
467 * If we're emulating a rip-relative instruction, save the contents
472 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) { in riprel_pre_xol()
473 struct uprobe_task *utask = current->utask; in riprel_pre_xol()
476 utask->autask.saved_scratch_register = *sr; in riprel_pre_xol()
477 *sr = utask->vaddr + auprobe->defparam.ilen; in riprel_pre_xol()
483 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) { in riprel_post_xol()
484 struct uprobe_task *utask = current->utask; in riprel_post_xol()
487 *sr = utask->autask.saved_scratch_register; in riprel_post_xol()
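
The value stored into the scratch register by riprel_pre_xol() is what makes the rewritten copy equivalent to the original: a rip-relative operand resolves to (address of the next instruction) + disp32 = vaddr + ilen + disp32, and since the scratch register is loaded with vaddr + ilen, the register-relative form computes scratch + disp32 = exactly the same address, even though the copy executes from the XOL slot. riprel_post_xol() then restores the application's own value in that register.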
490 #else /* 32-bit: */
492 * No RIP-relative addressing on 32-bit
523 return 0; in default_pre_xol_op()
528 unsigned long new_sp = regs->sp - sizeof_long(regs); in emulate_push_stack()
531 return -EFAULT; in emulate_push_stack()
533 regs->sp = new_sp; in emulate_push_stack()
534 return 0; in emulate_push_stack()
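
Only the lines of emulate_push_stack() that match the query are shown above; for readability, the whole helper is roughly the following reconstruction (the copy_to_user() call and the exact signature are inferred from context, so treat them as assumptions):

static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long(regs);

	/* Write val (e.g. a return address) to the new top of the user stack. */
	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}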
544 * If the single-stepped instruction was a call, the return address that
548 * If the original instruction was a rip-relative instruction such as
549 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
550 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
556 struct uprobe_task *utask = current->utask; in default_post_xol_op()
559 if (auprobe->defparam.fixups & UPROBE_FIX_IP) { in default_post_xol_op()
560 long correction = utask->vaddr - utask->xol_vaddr; in default_post_xol_op()
561 regs->ip += correction; in default_post_xol_op()
562 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { in default_post_xol_op()
563 regs->sp += sizeof_long(regs); /* Pop incorrect return address */ in default_post_xol_op()
564 if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen)) in default_post_xol_op()
565 return -ERESTART; in default_post_xol_op()
568 if (auprobe->defparam.fixups & UPROBE_FIX_SETF) in default_post_xol_op()
569 utask->autask.saved_tf = true; in default_post_xol_op()
571 return 0; in default_post_xol_op()
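
A concrete walk-through of the UPROBE_FIX_IP branch above, with made-up addresses: a 3-byte probed instruction at vaddr 0x400100 is single-stepped from an XOL slot at 0x7f0000001000, so afterwards regs->ip is 0x7f0000001003. correction = vaddr - xol_vaddr = 0x400100 - 0x7f0000001000, and adding it yields 0x400103, the next instruction in the probed binary. The UPROBE_FIX_CALL branch instead pops the return address pushed by the out-of-line copy (which points into the XOL area) and pushes vaddr + ilen, the return address the original call would have produced.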
587 return auprobe->branch.opc1 == 0xe8; in branch_is_call()
601 case 0x ## op_y: DO((expr) != 0) \
602 case 0x ## op_n: DO((expr) == 0)
621 unsigned long flags = regs->flags; in check_jmp_cond()
623 switch (auprobe->branch.opc1) { in check_jmp_cond()
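
The CASE_COND/DO pieces above expand into one switch case per conditional-jump opcode, each reporting whether the corresponding EFLAGS condition holds in regs->flags. A stand-alone sketch of the same idea, with invented names and only a subset of the 0x70-0x7f conditions (not the kernel's macro machinery):

#include <stdbool.h>
#include <stdint.h>

#define FL_CF	(1u << 0)
#define FL_ZF	(1u << 6)
#define FL_SF	(1u << 7)
#define FL_OF	(1u << 11)

/* Return true if the Jcc with short-form opcode 'opc' would be taken. */
static bool jcc_taken(uint8_t opc, unsigned long flags)
{
	switch (opc) {
	case 0x70: return  (flags & FL_OF);		/* jo  */
	case 0x71: return !(flags & FL_OF);		/* jno */
	case 0x72: return  (flags & FL_CF);		/* jb  */
	case 0x73: return !(flags & FL_CF);		/* jae */
	case 0x74: return  (flags & FL_ZF);		/* je  */
	case 0x75: return !(flags & FL_ZF);		/* jne */
	case 0x7c: return !!(flags & FL_SF) != !!(flags & FL_OF); /* jl  */
	case 0x7d: return !!(flags & FL_SF) == !!(flags & FL_OF); /* jge */
	default:   return true;	/* not a conditional jump */
	}
}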
640 unsigned long new_ip = regs->ip += auprobe->branch.ilen; in branch_emulate_op()
641 unsigned long offs = (long)auprobe->branch.offs; in branch_emulate_op()
646 * branch_clear_offset) insn out-of-line. In the likely case in branch_emulate_op()
651 * But there is a corner case; see the comment in ->post_xol(). in branch_emulate_op()
656 offs = 0; in branch_emulate_op()
659 regs->ip = new_ip + offs; in branch_emulate_op()
665 unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset; in push_emulate_op()
669 regs->ip += auprobe->push.ilen; in push_emulate_op()
679 * "call" insn was executed out-of-line. Just restore ->sp and restart. in branch_post_xol_op()
680 * We could also restore ->ip and try to call branch_emulate_op() again. in branch_post_xol_op()
682 regs->sp += sizeof_long(regs); in branch_post_xol_op()
683 return -ERESTART; in branch_post_xol_op()
689 * Turn this insn into "call 1f; 1:"; this is what we will execute in branch_clear_offset()
690 * out-of-line if ->emulate() fails. We only need this to generate in branch_clear_offset()
694 * But see the comment in ->post_xol(), in the unlikely case it can in branch_clear_offset()
695 * succeed. So we need to ensure that the new ->ip can not fall into in branch_clear_offset()
696 * the non-canonical area and trigger #GP. in branch_clear_offset()
699 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte in branch_clear_offset()
700 * of ->insn[] for set_orig_insn(). in branch_clear_offset()
702 memset(auprobe->insn + insn_offset_immediate(insn), in branch_clear_offset()
703 0, insn->immediate.nbytes); in branch_clear_offset()
715 /* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
723 case 0xeb: /* jmp 8 */ in branch_setup_xol_ops()
724 case 0xe9: /* jmp 32 */ in branch_setup_xol_ops()
725 case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ in branch_setup_xol_ops()
728 case 0xe8: /* call relative */ in branch_setup_xol_ops()
732 case 0x0f: in branch_setup_xol_ops()
733 if (insn->opcode.nbytes != 2) in branch_setup_xol_ops()
734 return -ENOSYS; in branch_setup_xol_ops()
736 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches in branch_setup_xol_ops()
739 opc1 = OPCODE2(insn) - 0x10; in branch_setup_xol_ops()
743 return -ENOSYS; in branch_setup_xol_ops()
747 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported. in branch_setup_xol_ops()
748 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. in branch_setup_xol_ops()
752 if (p == 0x66) in branch_setup_xol_ops()
753 return -ENOTSUPP; in branch_setup_xol_ops()
756 auprobe->branch.opc1 = opc1; in branch_setup_xol_ops()
757 auprobe->branch.ilen = insn->length; in branch_setup_xol_ops()
758 auprobe->branch.offs = insn->immediate.value; in branch_setup_xol_ops()
760 auprobe->ops = &branch_xol_ops; in branch_setup_xol_ops()
761 return 0; in branch_setup_xol_ops()
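
Worked example of the 0x10 offset used for near conditional jumps: `jz rel32` is encoded 0f 84, and 0x84 - 0x10 = 0x74, the opcode of the short-form `jz rel8`. Folding the near form down onto 0x70-0x7f this way lets check_jmp_cond() evaluate both encodings through the same switch on branch.opc1.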
764 /* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
767 u8 opc1 = OPCODE1(insn), reg_offset = 0; in push_setup_xol_ops()
769 if (opc1 < 0x50 || opc1 > 0x57) in push_setup_xol_ops()
770 return -ENOSYS; in push_setup_xol_ops()
772 if (insn->length > 2) in push_setup_xol_ops()
773 return -ENOSYS; in push_setup_xol_ops()
774 if (insn->length == 2) { in push_setup_xol_ops()
775 /* only support rex_prefix 0x41 (x64 only) */ in push_setup_xol_ops()
777 if (insn->rex_prefix.nbytes != 1 || in push_setup_xol_ops()
778 insn->rex_prefix.bytes[0] != 0x41) in push_setup_xol_ops()
779 return -ENOSYS; in push_setup_xol_ops()
782 case 0x50: in push_setup_xol_ops()
785 case 0x51: in push_setup_xol_ops()
788 case 0x52: in push_setup_xol_ops()
791 case 0x53: in push_setup_xol_ops()
794 case 0x54: in push_setup_xol_ops()
797 case 0x55: in push_setup_xol_ops()
800 case 0x56: in push_setup_xol_ops()
803 case 0x57: in push_setup_xol_ops()
808 return -ENOSYS; in push_setup_xol_ops()
812 case 0x50: in push_setup_xol_ops()
815 case 0x51: in push_setup_xol_ops()
818 case 0x52: in push_setup_xol_ops()
821 case 0x53: in push_setup_xol_ops()
824 case 0x54: in push_setup_xol_ops()
827 case 0x55: in push_setup_xol_ops()
830 case 0x56: in push_setup_xol_ops()
833 case 0x57: in push_setup_xol_ops()
839 auprobe->push.reg_offset = reg_offset; in push_setup_xol_ops()
840 auprobe->push.ilen = insn->length; in push_setup_xol_ops()
841 auprobe->ops = &push_xol_ops; in push_setup_xol_ops()
842 return 0; in push_setup_xol_ops()
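
The two switch statements above map a one-byte push opcode (0x50-0x57, optionally preceded by the REX.B prefix 0x41 for r8-r15) to the offset of the pushed register inside struct pt_regs; push_emulate_op() then reads the value through that offset. A condensed sketch of the same mapping, using a lookup table and a register-save layout invented for illustration (the kernel uses explicit case labels and the real struct pt_regs):

#include <stddef.h>

/* Illustrative layout only; the real struct pt_regs lives in the kernel. */
struct demo_pt_regs {
	unsigned long r15, r14, r13, r12, bp, bx, r11, r10, r9, r8;
	unsigned long ax, cx, dx, si, di;
	unsigned long orig_ax, ip, cs, flags, sp, ss;
};

/*
 * Opcodes 0x50..0x57 push ax, cx, dx, bx, sp, bp, si, di; with a REX.B
 * (0x41) prefix the same opcodes push r8..r15.  Return the offset of the
 * pushed register within the demo register-save area, or -1 if not a push.
 */
static int push_reg_offset(unsigned char opc, int has_rex_b)
{
	static const size_t lo[8] = {
		offsetof(struct demo_pt_regs, ax),
		offsetof(struct demo_pt_regs, cx),
		offsetof(struct demo_pt_regs, dx),
		offsetof(struct demo_pt_regs, bx),
		offsetof(struct demo_pt_regs, sp),
		offsetof(struct demo_pt_regs, bp),
		offsetof(struct demo_pt_regs, si),
		offsetof(struct demo_pt_regs, di),
	};
	static const size_t hi[8] = {
		offsetof(struct demo_pt_regs, r8),
		offsetof(struct demo_pt_regs, r9),
		offsetof(struct demo_pt_regs, r10),
		offsetof(struct demo_pt_regs, r11),
		offsetof(struct demo_pt_regs, r12),
		offsetof(struct demo_pt_regs, r13),
		offsetof(struct demo_pt_regs, r14),
		offsetof(struct demo_pt_regs, r15),
	};

	if (opc < 0x50 || opc > 0x57)
		return -1;	/* not a single-byte push reg */
	return (int)(has_rex_b ? hi : lo)[opc - 0x50];
}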
846 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
850 * Return 0 on success or a negative error code on failure.
863 if (ret != -ENOSYS) in arch_uprobe_analyze_insn()
867 if (ret != -ENOSYS) in arch_uprobe_analyze_insn()
872 * and annotate defparam->fixups accordingly. in arch_uprobe_analyze_insn()
875 case 0x9d: /* popf */ in arch_uprobe_analyze_insn()
876 auprobe->defparam.fixups |= UPROBE_FIX_SETF; in arch_uprobe_analyze_insn()
878 case 0xc3: /* ret or lret -- ip is correct */ in arch_uprobe_analyze_insn()
879 case 0xcb: in arch_uprobe_analyze_insn()
880 case 0xc2: in arch_uprobe_analyze_insn()
881 case 0xca: in arch_uprobe_analyze_insn()
882 case 0xea: /* jmp absolute -- ip is correct */ in arch_uprobe_analyze_insn()
883 fix_ip_or_call = 0; in arch_uprobe_analyze_insn()
885 case 0x9a: /* call absolute - Fix return addr, not ip */ in arch_uprobe_analyze_insn()
888 case 0xff: in arch_uprobe_analyze_insn()
894 fix_ip_or_call = 0; in arch_uprobe_analyze_insn()
902 auprobe->defparam.ilen = insn.length; in arch_uprobe_analyze_insn()
903 auprobe->defparam.fixups |= fix_ip_or_call; in arch_uprobe_analyze_insn()
905 auprobe->ops = &default_xol_ops; in arch_uprobe_analyze_insn()
906 return 0; in arch_uprobe_analyze_insn()
910 * arch_uprobe_pre_xol - prepare to execute out of line.
916 struct uprobe_task *utask = current->utask; in arch_uprobe_pre_xol()
918 if (auprobe->ops->pre_xol) { in arch_uprobe_pre_xol()
919 int err = auprobe->ops->pre_xol(auprobe, regs); in arch_uprobe_pre_xol()
924 regs->ip = utask->xol_vaddr; in arch_uprobe_pre_xol()
925 utask->autask.saved_trap_nr = current->thread.trap_nr; in arch_uprobe_pre_xol()
926 current->thread.trap_nr = UPROBE_TRAP_NR; in arch_uprobe_pre_xol()
928 utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF); in arch_uprobe_pre_xol()
929 regs->flags |= X86_EFLAGS_TF; in arch_uprobe_pre_xol()
933 return 0; in arch_uprobe_pre_xol()
940 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
943 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
944 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
948 if (t->thread.trap_nr != UPROBE_TRAP_NR) in arch_uprobe_xol_was_trapped()
955 * Called after single-stepping. To avoid the SMP problems that can
957 * single-step, we single-stepped a copy of the instruction.
959 * This function prepares to resume execution after the single-step.
963 struct uprobe_task *utask = current->utask; in arch_uprobe_post_xol()
964 bool send_sigtrap = utask->autask.saved_tf; in arch_uprobe_post_xol()
965 int err = 0; in arch_uprobe_post_xol()
967 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); in arch_uprobe_post_xol()
968 current->thread.trap_nr = utask->autask.saved_trap_nr; in arch_uprobe_post_xol()
970 if (auprobe->ops->post_xol) { in arch_uprobe_post_xol()
971 err = auprobe->ops->post_xol(auprobe, regs); in arch_uprobe_post_xol()
974 * Restore ->ip for restart or post mortem analysis. in arch_uprobe_post_xol()
975 * ->post_xol() must not return -ERESTART unless this in arch_uprobe_post_xol()
978 regs->ip = utask->vaddr; in arch_uprobe_post_xol()
979 if (err == -ERESTART) in arch_uprobe_post_xol()
980 err = 0; in arch_uprobe_post_xol()
990 send_sig(SIGTRAP, current, 0); in arch_uprobe_post_xol()
992 if (!utask->autask.saved_tf) in arch_uprobe_post_xol()
993 regs->flags &= ~X86_EFLAGS_TF; in arch_uprobe_post_xol()
1002 struct pt_regs *regs = args->regs; in arch_uprobe_exception_notify()
1036 struct uprobe_task *utask = current->utask; in arch_uprobe_abort_xol()
1038 if (auprobe->ops->abort) in arch_uprobe_abort_xol()
1039 auprobe->ops->abort(auprobe, regs); in arch_uprobe_abort_xol()
1041 current->thread.trap_nr = utask->autask.saved_trap_nr; in arch_uprobe_abort_xol()
1042 regs->ip = utask->vaddr; in arch_uprobe_abort_xol()
1044 if (!utask->autask.saved_tf) in arch_uprobe_abort_xol()
1045 regs->flags &= ~X86_EFLAGS_TF; in arch_uprobe_abort_xol()
1050 if (auprobe->ops->emulate) in __skip_sstep()
1051 return auprobe->ops->emulate(auprobe, regs); in __skip_sstep()
1058 if (ret && (regs->flags & X86_EFLAGS_TF)) in arch_uprobe_skip_sstep()
1059 send_sig(SIGTRAP, current, 0); in arch_uprobe_skip_sstep()
1067 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ in arch_uretprobe_hijack_return_addr()
1069 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize)) in arch_uretprobe_hijack_return_addr()
1070 return -1; in arch_uretprobe_hijack_return_addr()
1076 nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize); in arch_uretprobe_hijack_return_addr()
1082 current->pid, regs->sp, regs->ip); in arch_uretprobe_hijack_return_addr()
1087 return -1; in arch_uretprobe_hijack_return_addr()
1094 return regs->sp < ret->stack; in arch_uretprobe_is_alive()
1096 return regs->sp <= ret->stack; in arch_uretprobe_is_alive()