1 /******************************************************************************
2  * emulate.c
3  *
4  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5  *
6  * Copyright (c) 2005 Keir Fraser
7  *
8  * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9  * privileged instructions:
10  *
11  * Copyright (C) 2006 Qumranet
12  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13  *
14  *   Avi Kivity <avi@qumranet.com>
15  *   Yaniv Kamay <yaniv@qumranet.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21  */
22 
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <asm/kvm_emulate.h>
26 #include <linux/stringify.h>
27 #include <asm/debugreg.h>
28 #include <asm/nospec-branch.h>
29 
30 #include "x86.h"
31 #include "tss.h"
32 #include "mmu.h"
33 #include "pmu.h"
34 
35 /*
36  * Operand types
37  */
38 #define OpNone             0ull
39 #define OpImplicit         1ull  /* No generic decode */
40 #define OpReg              2ull  /* Register */
41 #define OpMem              3ull  /* Memory */
42 #define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
43 #define OpDI               5ull  /* ES:DI/EDI/RDI */
44 #define OpMem64            6ull  /* Memory, 64-bit */
45 #define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
46 #define OpDX               8ull  /* DX register */
47 #define OpCL               9ull  /* CL register (for shifts) */
48 #define OpImmByte         10ull  /* 8-bit sign extended immediate */
49 #define OpOne             11ull  /* Implied 1 */
50 #define OpImm             12ull  /* Sign extended up to 32-bit immediate */
51 #define OpMem16           13ull  /* Memory operand (16-bit). */
52 #define OpMem32           14ull  /* Memory operand (32-bit). */
53 #define OpImmU            15ull  /* Immediate operand, zero extended */
54 #define OpSI              16ull  /* SI/ESI/RSI */
55 #define OpImmFAddr        17ull  /* Immediate far address */
56 #define OpMemFAddr        18ull  /* Far address in memory */
57 #define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
58 #define OpES              20ull  /* ES */
59 #define OpCS              21ull  /* CS */
60 #define OpSS              22ull  /* SS */
61 #define OpDS              23ull  /* DS */
62 #define OpFS              24ull  /* FS */
63 #define OpGS              25ull  /* GS */
64 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
65 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
66 #define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
67 #define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
68 #define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
69 
70 #define OpBits             5  /* Width of operand field */
71 #define OpMask             ((1ull << OpBits) - 1)
72 
73 /*
74  * Opcode effective-address decode tables.
75  * Note that we only emulate instructions that have at least one memory
76  * operand (excluding implicit stack references). We assume that stack
77  * references and instruction fetches will never occur in special memory
78  * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
79  * not be handled.
80  */
81 
82 /* Operand sizes: 8-bit operands or specified/overridden size. */
83 #define ByteOp      (1<<0)	/* 8-bit operands. */
84 /* Destination operand type. */
85 #define DstShift    1
86 #define ImplicitOps (OpImplicit << DstShift)
87 #define DstReg      (OpReg << DstShift)
88 #define DstMem      (OpMem << DstShift)
89 #define DstAcc      (OpAcc << DstShift)
90 #define DstDI       (OpDI << DstShift)
91 #define DstMem64    (OpMem64 << DstShift)
92 #define DstMem16    (OpMem16 << DstShift)
93 #define DstImmUByte (OpImmUByte << DstShift)
94 #define DstDX       (OpDX << DstShift)
95 #define DstAccLo    (OpAccLo << DstShift)
96 #define DstMask     (OpMask << DstShift)
97 /* Source operand type. */
98 #define SrcShift    6
99 #define SrcNone     (OpNone << SrcShift)
100 #define SrcReg      (OpReg << SrcShift)
101 #define SrcMem      (OpMem << SrcShift)
102 #define SrcMem16    (OpMem16 << SrcShift)
103 #define SrcMem32    (OpMem32 << SrcShift)
104 #define SrcImm      (OpImm << SrcShift)
105 #define SrcImmByte  (OpImmByte << SrcShift)
106 #define SrcOne      (OpOne << SrcShift)
107 #define SrcImmUByte (OpImmUByte << SrcShift)
108 #define SrcImmU     (OpImmU << SrcShift)
109 #define SrcSI       (OpSI << SrcShift)
110 #define SrcXLat     (OpXLat << SrcShift)
111 #define SrcImmFAddr (OpImmFAddr << SrcShift)
112 #define SrcMemFAddr (OpMemFAddr << SrcShift)
113 #define SrcAcc      (OpAcc << SrcShift)
114 #define SrcImmU16   (OpImmU16 << SrcShift)
115 #define SrcImm64    (OpImm64 << SrcShift)
116 #define SrcDX       (OpDX << SrcShift)
117 #define SrcMem8     (OpMem8 << SrcShift)
118 #define SrcAccHi    (OpAccHi << SrcShift)
119 #define SrcMask     (OpMask << SrcShift)
120 #define BitOp       (1<<11)
121 #define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
122 #define String      (1<<13)     /* String instruction (rep capable) */
123 #define Stack       (1<<14)     /* Stack instruction (push/pop) */
124 #define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
125 #define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
126 #define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
127 #define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
128 #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
129 #define Escape      (5<<15)     /* Escape to coprocessor instruction */
130 #define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
131 #define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
132 #define Sse         (1<<18)     /* SSE Vector instruction */
133 /* Generic ModRM decode. */
134 #define ModRM       (1<<19)
135 /* Destination is only written; never read. */
136 #define Mov         (1<<20)
137 /* Misc flags */
138 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
139 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
140 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
141 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
142 #define Undefined   (1<<25) /* No Such Instruction */
143 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
144 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
145 #define No64	    (1<<28)
146 #define PageTable   (1 << 29)   /* instruction used to write page table */
147 #define NotImpl     (1 << 30)   /* instruction is not implemented */
148 /* Source 2 operand type */
149 #define Src2Shift   (31)
150 #define Src2None    (OpNone << Src2Shift)
151 #define Src2Mem     (OpMem << Src2Shift)
152 #define Src2CL      (OpCL << Src2Shift)
153 #define Src2ImmByte (OpImmByte << Src2Shift)
154 #define Src2One     (OpOne << Src2Shift)
155 #define Src2Imm     (OpImm << Src2Shift)
156 #define Src2ES      (OpES << Src2Shift)
157 #define Src2CS      (OpCS << Src2Shift)
158 #define Src2SS      (OpSS << Src2Shift)
159 #define Src2DS      (OpDS << Src2Shift)
160 #define Src2FS      (OpFS << Src2Shift)
161 #define Src2GS      (OpGS << Src2Shift)
162 #define Src2Mask    (OpMask << Src2Shift)
163 #define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
164 #define AlignMask   ((u64)7 << 41)
165 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
166 #define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
167 #define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
168 #define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
169 #define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
170 #define NoWrite     ((u64)1 << 45)  /* No writeback */
171 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
172 #define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
173 #define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
174 #define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
175 #define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
176 #define NearBranch  ((u64)1 << 52)  /* Near branches */
177 #define No16	    ((u64)1 << 53)  /* No 16 bit operand */
178 #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
179 #define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
180 
181 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
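/*
 * For illustration: an instruction declared DstReg | SrcMem packs
 * (OpReg << DstShift) | (OpMem << SrcShift) == 0x4 | 0xc0 == 0xc4 into
 * ctxt->d, and the decoder recovers each operand type with a shift and
 * mask (dsttype/srctype are illustrative locals):
 *
 *   unsigned dsttype = (ctxt->d >> DstShift) & OpMask;   // OpReg (2)
 *   unsigned srctype = (ctxt->d >> SrcShift) & OpMask;   // OpMem (3)
 */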
182 
183 #define X2(x...) x, x
184 #define X3(x...) X2(x), x
185 #define X4(x...) X2(x), X2(x)
186 #define X5(x...) X4(x), x
187 #define X6(x...) X4(x), X2(x)
188 #define X7(x...) X4(x), X3(x)
189 #define X8(x...) X4(x), X4(x)
190 #define X16(x...) X8(x), X8(x)
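/*
 * E.g. X4(op) expands to "op, op, op, op", so a decode-table row such as
 * X16(entry) emits sixteen identical entries from a single macro use.
 */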
191 
192 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
193 #define FASTOP_SIZE 8
194 
195 /*
196  * fastop functions have a special calling convention:
197  *
198  * dst:    rax        (in/out)
199  * src:    rdx        (in/out)
200  * src2:   rcx        (in)
201  * flags:  rflags     (in/out)
202  * ex:     rsi        (in:fastop pointer, out:zero if exception)
203  *
204  * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
205  * different operand sizes can be reached by calculation, rather than a jump
206  * table (which would be bigger than the code).
207  *
208  * fastop functions are declared as taking a never-defined fastop parameter,
209  * so they can't be called from C directly.
210  */
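/*
 * A minimal sketch of the resulting dispatch (the real fastop() caller
 * lives later in this file): because each handler body is padded to
 * FASTOP_SIZE (8) bytes, the variant for the current operand size is
 * found by offsetting the byte-sized entry point rather than indexing a
 * table, roughly:
 *
 *   void (*fop)(struct fastop *) = em_add;            // byte variant
 *   if (!(ctxt->d & ByteOp))
 *           fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *   // dst.bytes == 2/4/8  ->  em_add + 8/16/24 (word/long/quad)
 */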
211 
212 struct fastop;
213 
214 struct opcode {
215 	u64 flags : 56;
216 	u64 intercept : 8;
217 	union {
218 		int (*execute)(struct x86_emulate_ctxt *ctxt);
219 		const struct opcode *group;
220 		const struct group_dual *gdual;
221 		const struct gprefix *gprefix;
222 		const struct escape *esc;
223 		const struct instr_dual *idual;
224 		const struct mode_dual *mdual;
225 		void (*fastop)(struct fastop *fake);
226 	} u;
227 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
228 };
229 
230 struct group_dual {
231 	struct opcode mod012[8];
232 	struct opcode mod3[8];
233 };
234 
235 struct gprefix {
236 	struct opcode pfx_no;
237 	struct opcode pfx_66;
238 	struct opcode pfx_f2;
239 	struct opcode pfx_f3;
240 };
241 
242 struct escape {
243 	struct opcode op[8];
244 	struct opcode high[64];
245 };
246 
247 struct instr_dual {
248 	struct opcode mod012;
249 	struct opcode mod3;
250 };
251 
252 struct mode_dual {
253 	struct opcode mode32;
254 	struct opcode mode64;
255 };
256 
257 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
258 
259 enum x86_transfer_type {
260 	X86_TRANSFER_NONE,
261 	X86_TRANSFER_CALL_JMP,
262 	X86_TRANSFER_RET,
263 	X86_TRANSFER_TASK_SWITCH,
264 };
265 
266 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
267 {
268 	if (!(ctxt->regs_valid & (1 << nr))) {
269 		ctxt->regs_valid |= 1 << nr;
270 		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
271 	}
272 	return ctxt->_regs[nr];
273 }
274 
275 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
276 {
277 	ctxt->regs_valid |= 1 << nr;
278 	ctxt->regs_dirty |= 1 << nr;
279 	return &ctxt->_regs[nr];
280 }
281 
282 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
283 {
284 	reg_read(ctxt, nr);
285 	return reg_write(ctxt, nr);
286 }
287 
288 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
289 {
290 	unsigned reg;
291 
292 	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
293 		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
294 }
295 
296 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
297 {
298 	ctxt->regs_dirty = 0;
299 	ctxt->regs_valid = 0;
300 }
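/*
 * Typical use of the lazy GPR cache above, e.g. while emulating an
 * increment of RAX:
 *
 *   ulong v = reg_read(ctxt, VCPU_REGS_RAX);    // pulls value, marks valid
 *   *reg_write(ctxt, VCPU_REGS_RAX) = v + 1;    // marks RAX dirty
 *   writeback_registers(ctxt);                  // flushes dirty regs only
 */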
301 
302 /*
303  * These EFLAGS bits are restored from saved value during emulation, and
304  * any changes are written back to the saved value after emulation.
305  */
306 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
307 		     X86_EFLAGS_PF|X86_EFLAGS_CF)
308 
309 #ifdef CONFIG_X86_64
310 #define ON64(x) x
311 #else
312 #define ON64(x)
313 #endif
314 
315 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
316 
317 #define FOP_FUNC(name) \
318 	".align " __stringify(FASTOP_SIZE) " \n\t" \
319 	".type " name ", @function \n\t" \
320 	name ":\n\t"
321 
322 #define FOP_RET   "ret \n\t"
323 
324 #define FOP_START(op) \
325 	extern void em_##op(struct fastop *fake); \
326 	asm(".pushsection .text, \"ax\" \n\t" \
327 	    ".global em_" #op " \n\t" \
328 	    FOP_FUNC("em_" #op)
329 
330 #define FOP_END \
331 	    ".popsection")
332 
333 #define FOPNOP() \
334 	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
335 	FOP_RET
336 
337 #define FOP1E(op,  dst) \
338 	FOP_FUNC(#op "_" #dst) \
339 	"10: " #op " %" #dst " \n\t" FOP_RET
340 
341 #define FOP1EEX(op,  dst) \
342 	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
343 
344 #define FASTOP1(op) \
345 	FOP_START(op) \
346 	FOP1E(op##b, al) \
347 	FOP1E(op##w, ax) \
348 	FOP1E(op##l, eax) \
349 	ON64(FOP1E(op##q, rax))	\
350 	FOP_END
351 
352 /* 1-operand, using src2 (for MUL/DIV r/m) */
353 #define FASTOP1SRC2(op, name) \
354 	FOP_START(name) \
355 	FOP1E(op, cl) \
356 	FOP1E(op, cx) \
357 	FOP1E(op, ecx) \
358 	ON64(FOP1E(op, rcx)) \
359 	FOP_END
360 
361 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
362 #define FASTOP1SRC2EX(op, name) \
363 	FOP_START(name) \
364 	FOP1EEX(op, cl) \
365 	FOP1EEX(op, cx) \
366 	FOP1EEX(op, ecx) \
367 	ON64(FOP1EEX(op, rcx)) \
368 	FOP_END
369 
370 #define FOP2E(op,  dst, src)	   \
371 	FOP_FUNC(#op "_" #dst "_" #src) \
372 	#op " %" #src ", %" #dst " \n\t" FOP_RET
373 
374 #define FASTOP2(op) \
375 	FOP_START(op) \
376 	FOP2E(op##b, al, dl) \
377 	FOP2E(op##w, ax, dx) \
378 	FOP2E(op##l, eax, edx) \
379 	ON64(FOP2E(op##q, rax, rdx)) \
380 	FOP_END
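/*
 * For reference, FASTOP2(add) above expands to four stubs, each padded to
 * FASTOP_SIZE (8) bytes by the .align directive in FOP_FUNC:
 *
 *   em_add:       addb %dl,  %al  ; ret      // byte
 *   em_add + 8:   addw %dx,  %ax  ; ret      // word
 *   em_add + 16:  addl %edx, %eax ; ret      // long
 *   em_add + 24:  addq %rdx, %rax ; ret      // quad (64-bit kernels only)
 */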
381 
382 /* 2 operand, word only */
383 #define FASTOP2W(op) \
384 	FOP_START(op) \
385 	FOPNOP() \
386 	FOP2E(op##w, ax, dx) \
387 	FOP2E(op##l, eax, edx) \
388 	ON64(FOP2E(op##q, rax, rdx)) \
389 	FOP_END
390 
391 /* 2 operand, src is CL */
392 #define FASTOP2CL(op) \
393 	FOP_START(op) \
394 	FOP2E(op##b, al, cl) \
395 	FOP2E(op##w, ax, cl) \
396 	FOP2E(op##l, eax, cl) \
397 	ON64(FOP2E(op##q, rax, cl)) \
398 	FOP_END
399 
400 /* 2 operand, src and dest are reversed */
401 #define FASTOP2R(op, name) \
402 	FOP_START(name) \
403 	FOP2E(op##b, dl, al) \
404 	FOP2E(op##w, dx, ax) \
405 	FOP2E(op##l, edx, eax) \
406 	ON64(FOP2E(op##q, rdx, rax)) \
407 	FOP_END
408 
409 #define FOP3E(op,  dst, src, src2) \
410 	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
411 	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
412 
413 /* 3-operand, word-only, src2=cl */
414 #define FASTOP3WCL(op) \
415 	FOP_START(op) \
416 	FOPNOP() \
417 	FOP3E(op##w, ax, dx, cl) \
418 	FOP3E(op##l, eax, edx, cl) \
419 	ON64(FOP3E(op##q, rax, rdx, cl)) \
420 	FOP_END
421 
422 /* Special case for SETcc - 1 instruction per cc */
423 #define FOP_SETCC(op) \
424 	".align 4 \n\t" \
425 	".type " #op ", @function \n\t" \
426 	#op ": \n\t" \
427 	#op " %al \n\t" \
428 	FOP_RET
429 
430 asm(".pushsection .fixup, \"ax\"\n"
431     ".global kvm_fastop_exception \n"
432     "kvm_fastop_exception: xor %esi, %esi; ret\n"
433     ".popsection");
434 
435 FOP_START(setcc)
436 FOP_SETCC(seto)
437 FOP_SETCC(setno)
438 FOP_SETCC(setc)
439 FOP_SETCC(setnc)
440 FOP_SETCC(setz)
441 FOP_SETCC(setnz)
442 FOP_SETCC(setbe)
443 FOP_SETCC(setnbe)
444 FOP_SETCC(sets)
445 FOP_SETCC(setns)
446 FOP_SETCC(setp)
447 FOP_SETCC(setnp)
448 FOP_SETCC(setl)
449 FOP_SETCC(setnl)
450 FOP_SETCC(setle)
451 FOP_SETCC(setnle)
452 FOP_END;
453 
454 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
455 FOP_END;
456 
457 /*
458  * XXX: inoutclob user must know where the argument is being expanded.
459  *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
460  */
461 #define asm_safe(insn, inoutclob...) \
462 ({ \
463 	int _fault = 0; \
464  \
465 	asm volatile("1:" insn "\n" \
466 	             "2:\n" \
467 	             ".pushsection .fixup, \"ax\"\n" \
468 	             "3: movl $1, %[_fault]\n" \
469 	             "   jmp  2b\n" \
470 	             ".popsection\n" \
471 	             _ASM_EXTABLE(1b, 3b) \
472 	             : [_fault] "+qm"(_fault) inoutclob ); \
473  \
474 	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
475 })
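/*
 * Illustrative use of asm_safe() (later callers in this file wrap FPU
 * instructions this way), e.g. executing FWAIT and turning a fault into
 * an emulator error code:
 *
 *   rc = asm_safe("fwait");
 *   if (rc != X86EMUL_CONTINUE)        // X86EMUL_UNHANDLEABLE on fault
 *           return rc;
 */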
476 
477 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
478 				    enum x86_intercept intercept,
479 				    enum x86_intercept_stage stage)
480 {
481 	struct x86_instruction_info info = {
482 		.intercept  = intercept,
483 		.rep_prefix = ctxt->rep_prefix,
484 		.modrm_mod  = ctxt->modrm_mod,
485 		.modrm_reg  = ctxt->modrm_reg,
486 		.modrm_rm   = ctxt->modrm_rm,
487 		.src_val    = ctxt->src.val64,
488 		.dst_val    = ctxt->dst.val64,
489 		.src_bytes  = ctxt->src.bytes,
490 		.dst_bytes  = ctxt->dst.bytes,
491 		.ad_bytes   = ctxt->ad_bytes,
492 		.next_rip   = ctxt->eip,
493 	};
494 
495 	return ctxt->ops->intercept(ctxt, &info, stage);
496 }
497 
498 static void assign_masked(ulong *dest, ulong src, ulong mask)
499 {
500 	*dest = (*dest & ~mask) | (src & mask);
501 }
502 
503 static void assign_register(unsigned long *reg, u64 val, int bytes)
504 {
505 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
506 	switch (bytes) {
507 	case 1:
508 		*(u8 *)reg = (u8)val;
509 		break;
510 	case 2:
511 		*(u16 *)reg = (u16)val;
512 		break;
513 	case 4:
514 		*reg = (u32)val;
515 		break;	/* 64b: zero-extend */
516 	case 8:
517 		*reg = val;
518 		break;
519 	}
520 }
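/*
 * Worked example for the 4-byte case above: with *reg == 0xffffffffffffffff,
 * writing val == 0x11223344 leaves 0x0000000011223344, matching the
 * architectural zero-extension of 32-bit destinations in 64-bit mode,
 * while the 1- and 2-byte cases leave the upper bytes untouched.
 */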
521 
522 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
523 {
524 	return (1UL << (ctxt->ad_bytes << 3)) - 1;
525 }
526 
527 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
528 {
529 	u16 sel;
530 	struct desc_struct ss;
531 
532 	if (ctxt->mode == X86EMUL_MODE_PROT64)
533 		return ~0UL;
534 	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
535 	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
536 }
537 
538 static int stack_size(struct x86_emulate_ctxt *ctxt)
539 {
540 	return (__fls(stack_mask(ctxt)) + 1) >> 3;
541 }
542 
543 /* Access/update address held in a register, based on addressing mode. */
544 static inline unsigned long
545 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
546 {
547 	if (ctxt->ad_bytes == sizeof(unsigned long))
548 		return reg;
549 	else
550 		return reg & ad_mask(ctxt);
551 }
552 
553 static inline unsigned long
554 register_address(struct x86_emulate_ctxt *ctxt, int reg)
555 {
556 	return address_mask(ctxt, reg_read(ctxt, reg));
557 }
558 
559 static void masked_increment(ulong *reg, ulong mask, int inc)
560 {
561 	assign_masked(reg, *reg + inc, mask);
562 }
563 
564 static inline void
565 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
566 {
567 	ulong *preg = reg_rmw(ctxt, reg);
568 
569 	assign_register(preg, *preg + inc, ctxt->ad_bytes);
570 }
571 
572 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
573 {
574 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
575 }
576 
577 static u32 desc_limit_scaled(struct desc_struct *desc)
578 {
579 	u32 limit = get_desc_limit(desc);
580 
581 	return desc->g ? (limit << 12) | 0xfff : limit;
582 }
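/*
 * Example: a descriptor with g == 1 and limit == 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB flat segment.
 */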
583 
584 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
585 {
586 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
587 		return 0;
588 
589 	return ctxt->ops->get_cached_segment_base(ctxt, seg);
590 }
591 
592 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
593 			     u32 error, bool valid)
594 {
595 	WARN_ON(vec > 0x1f);
596 	ctxt->exception.vector = vec;
597 	ctxt->exception.error_code = error;
598 	ctxt->exception.error_code_valid = valid;
599 	return X86EMUL_PROPAGATE_FAULT;
600 }
601 
602 static int emulate_db(struct x86_emulate_ctxt *ctxt)
603 {
604 	return emulate_exception(ctxt, DB_VECTOR, 0, false);
605 }
606 
607 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
608 {
609 	return emulate_exception(ctxt, GP_VECTOR, err, true);
610 }
611 
612 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
613 {
614 	return emulate_exception(ctxt, SS_VECTOR, err, true);
615 }
616 
617 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
618 {
619 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
620 }
621 
622 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
623 {
624 	return emulate_exception(ctxt, TS_VECTOR, err, true);
625 }
626 
627 static int emulate_de(struct x86_emulate_ctxt *ctxt)
628 {
629 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
630 }
631 
632 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
633 {
634 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
635 }
636 
637 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
638 {
639 	u16 selector;
640 	struct desc_struct desc;
641 
642 	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
643 	return selector;
644 }
645 
646 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
647 				 unsigned seg)
648 {
649 	u16 dummy;
650 	u32 base3;
651 	struct desc_struct desc;
652 
653 	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
654 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
655 }
656 
657 /*
658  * x86 defines three classes of vector instructions: explicitly
659  * aligned, explicitly unaligned, and the rest, which change behaviour
660  * depending on whether they're AVX encoded or not.
661  *
662  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
663  * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
664  * 512 bytes of data must be aligned to a 16 byte boundary.
665  */
666 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
667 {
668 	u64 alignment = ctxt->d & AlignMask;
669 
670 	if (likely(size < 16))
671 		return 1;
672 
673 	switch (alignment) {
674 	case Unaligned:
675 	case Avx:
676 		return 1;
677 	case Aligned16:
678 		return 16;
679 	case Aligned:
680 	default:
681 		return size;
682 	}
683 }
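/*
 * Example: a 16-byte MOVDQA access (Aligned) must sit on a 16-byte
 * boundary (the default case returns size), MOVDQU and AVX encodings
 * never fault on alignment, and any access smaller than 16 bytes is
 * exempt up front.
 */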
684 
685 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
686 				       struct segmented_address addr,
687 				       unsigned *max_size, unsigned size,
688 				       bool write, bool fetch,
689 				       enum x86emul_mode mode, ulong *linear)
690 {
691 	struct desc_struct desc;
692 	bool usable;
693 	ulong la;
694 	u32 lim;
695 	u16 sel;
696 	u8  va_bits;
697 
698 	la = seg_base(ctxt, addr.seg) + addr.ea;
699 	*max_size = 0;
700 	switch (mode) {
701 	case X86EMUL_MODE_PROT64:
702 		*linear = la;
703 		va_bits = ctxt_virt_addr_bits(ctxt);
704 		if (get_canonical(la, va_bits) != la)
705 			goto bad;
706 
707 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
708 		if (size > *max_size)
709 			goto bad;
710 		break;
711 	default:
712 		*linear = la = (u32)la;
713 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
714 						addr.seg);
715 		if (!usable)
716 			goto bad;
717 		/* code segment in protected mode or read-only data segment */
718 		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
719 					|| !(desc.type & 2)) && write)
720 			goto bad;
721 		/* unreadable code segment */
722 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
723 			goto bad;
724 		lim = desc_limit_scaled(&desc);
725 		if (!(desc.type & 8) && (desc.type & 4)) {
726 			/* expand-down segment */
727 			if (addr.ea <= lim)
728 				goto bad;
729 			lim = desc.d ? 0xffffffff : 0xffff;
730 		}
731 		if (addr.ea > lim)
732 			goto bad;
733 		if (lim == 0xffffffff)
734 			*max_size = ~0u;
735 		else {
736 			*max_size = (u64)lim + 1 - addr.ea;
737 			if (size > *max_size)
738 				goto bad;
739 		}
740 		break;
741 	}
742 	if (la & (insn_alignment(ctxt, size) - 1))
743 		return emulate_gp(ctxt, 0);
744 	return X86EMUL_CONTINUE;
745 bad:
746 	if (addr.seg == VCPU_SREG_SS)
747 		return emulate_ss(ctxt, 0);
748 	else
749 		return emulate_gp(ctxt, 0);
750 }
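/*
 * Expand-down example: for a 32-bit expand-down data segment whose scaled
 * limit is 0xfff, the two limit checks above accept exactly the offsets
 * 0x1000 through 0xffffffff and fault everything below.
 */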
751 
752 static int linearize(struct x86_emulate_ctxt *ctxt,
753 		     struct segmented_address addr,
754 		     unsigned size, bool write,
755 		     ulong *linear)
756 {
757 	unsigned max_size;
758 	return __linearize(ctxt, addr, &max_size, size, write, false,
759 			   ctxt->mode, linear);
760 }
761 
762 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
763 			     enum x86emul_mode mode)
764 {
765 	ulong linear;
766 	int rc;
767 	unsigned max_size;
768 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
769 					   .ea = dst };
770 
771 	if (ctxt->op_bytes != sizeof(unsigned long))
772 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
773 	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
774 	if (rc == X86EMUL_CONTINUE)
775 		ctxt->_eip = addr.ea;
776 	return rc;
777 }
778 
779 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
780 {
781 	return assign_eip(ctxt, dst, ctxt->mode);
782 }
783 
784 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
785 			  const struct desc_struct *cs_desc)
786 {
787 	enum x86emul_mode mode = ctxt->mode;
788 	int rc;
789 
790 #ifdef CONFIG_X86_64
791 	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
792 		if (cs_desc->l) {
793 			u64 efer = 0;
794 
795 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
796 			if (efer & EFER_LMA)
797 				mode = X86EMUL_MODE_PROT64;
798 		} else
799 			mode = X86EMUL_MODE_PROT32; /* temporary value */
800 	}
801 #endif
802 	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
803 		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
804 	rc = assign_eip(ctxt, dst, mode);
805 	if (rc == X86EMUL_CONTINUE)
806 		ctxt->mode = mode;
807 	return rc;
808 }
809 
810 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
811 {
812 	return assign_eip_near(ctxt, ctxt->_eip + rel);
813 }
814 
815 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
816 			      void *data, unsigned size)
817 {
818 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
819 }
820 
821 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
822 			       ulong linear, void *data,
823 			       unsigned int size)
824 {
825 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
826 }
827 
828 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
829 			      struct segmented_address addr,
830 			      void *data,
831 			      unsigned size)
832 {
833 	int rc;
834 	ulong linear;
835 
836 	rc = linearize(ctxt, addr, size, false, &linear);
837 	if (rc != X86EMUL_CONTINUE)
838 		return rc;
839 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
840 }
841 
842 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
843 			       struct segmented_address addr,
844 			       void *data,
845 			       unsigned int size)
846 {
847 	int rc;
848 	ulong linear;
849 
850 	rc = linearize(ctxt, addr, size, true, &linear);
851 	if (rc != X86EMUL_CONTINUE)
852 		return rc;
853 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
854 }
855 
856 /*
857  * Prefetch the remaining bytes of the instruction without crossing page
858  * boundary if they are not in fetch_cache yet.
859  */
860 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
861 {
862 	int rc;
863 	unsigned size, max_size;
864 	unsigned long linear;
865 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
866 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
867 					   .ea = ctxt->eip + cur_size };
868 
869 	/*
870 	 * We do not know exactly how many bytes will be needed, and
871 	 * __linearize is expensive, so fetch as much as possible.  We
872 	 * just have to avoid going beyond the 15 byte limit, the end
873 	 * of the segment, or the end of the page.
874 	 *
875 	 * __linearize is called with size 0 so that it does not do any
876 	 * boundary check itself.  Instead, we use max_size to check
877 	 * against op_size.
878 	 */
879 	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
880 			 &linear);
881 	if (unlikely(rc != X86EMUL_CONTINUE))
882 		return rc;
883 
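	/* 15 is 0b1111, so for cur_size <= 15, 15 ^ cur_size == 15 - cur_size */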
884 	size = min_t(unsigned, 15UL ^ cur_size, max_size);
885 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
886 
887 	/*
888 	 * One instruction can only straddle two pages,
889 	 * and one has been loaded at the beginning of
890 	 * x86_decode_insn.  So, if we still do not have
891 	 * enough bytes, we must have hit the 15-byte limit.
892 	 */
893 	if (unlikely(size < op_size))
894 		return emulate_gp(ctxt, 0);
895 
896 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
897 			      size, &ctxt->exception);
898 	if (unlikely(rc != X86EMUL_CONTINUE))
899 		return rc;
900 	ctxt->fetch.end += size;
901 	return X86EMUL_CONTINUE;
902 }
903 
904 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
905 					       unsigned size)
906 {
907 	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
908 
909 	if (unlikely(done_size < size))
910 		return __do_insn_fetch_bytes(ctxt, size - done_size);
911 	else
912 		return X86EMUL_CONTINUE;
913 }
914 
915 /* Fetch next part of the instruction being emulated. */
916 #define insn_fetch(_type, _ctxt)					\
917 ({	_type _x;							\
918 									\
919 	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
920 	if (rc != X86EMUL_CONTINUE)					\
921 		goto done;						\
922 	ctxt->_eip += sizeof(_type);					\
923 	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
924 	ctxt->fetch.ptr += sizeof(_type);				\
925 	_x;								\
926 })
927 
928 #define insn_fetch_arr(_arr, _size, _ctxt)				\
929 ({									\
930 	rc = do_insn_fetch_bytes(_ctxt, _size);				\
931 	if (rc != X86EMUL_CONTINUE)					\
932 		goto done;						\
933 	ctxt->_eip += (_size);						\
934 	memcpy(_arr, ctxt->fetch.ptr, _size);				\
935 	ctxt->fetch.ptr += (_size);					\
936 })
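/*
 * Typical use (see decode_modrm() below); note that both macros expect an
 * `int rc` and a `done:` label in the caller, since they jump there on a
 * failed fetch:
 *
 *   sib = insn_fetch(u8, ctxt);            // next opcode byte
 *   modrm_ea += insn_fetch(s32, ctxt);     // sign-extended disp32
 */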
937 
938 /*
939  * Given the 'reg' portion of a ModRM byte, and a register block, return a
940  * pointer into the block that addresses the relevant register.
941  * @byteop specifies whether the operand is byte-sized; if so, and no REX
942  * prefix is present, register numbers 4-7 decode as AH,CH,DH,BH.
942  */
943 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
944 			     int byteop)
945 {
946 	void *p;
947 	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
948 
949 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
950 		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
951 	else
952 		p = reg_rmw(ctxt, modrm_reg);
953 	return p;
954 }
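/*
 * Example: for a byte operand with no REX prefix, modrm_reg == 4 resolves
 * to AH (byte 1 of RAX); with any REX prefix the same encoding selects
 * SPL instead, which is why highbyte_regs requires rex_prefix == 0.
 */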
955 
956 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
957 			   struct segmented_address addr,
958 			   u16 *size, unsigned long *address, int op_bytes)
959 {
960 	int rc;
961 
962 	if (op_bytes == 2)
963 		op_bytes = 3;
964 	*address = 0;
965 	rc = segmented_read_std(ctxt, addr, size, 2);
966 	if (rc != X86EMUL_CONTINUE)
967 		return rc;
968 	addr.ea += 2;
969 	rc = segmented_read_std(ctxt, addr, address, op_bytes);
970 	return rc;
971 }
972 
973 FASTOP2(add);
974 FASTOP2(or);
975 FASTOP2(adc);
976 FASTOP2(sbb);
977 FASTOP2(and);
978 FASTOP2(sub);
979 FASTOP2(xor);
980 FASTOP2(cmp);
981 FASTOP2(test);
982 
983 FASTOP1SRC2(mul, mul_ex);
984 FASTOP1SRC2(imul, imul_ex);
985 FASTOP1SRC2EX(div, div_ex);
986 FASTOP1SRC2EX(idiv, idiv_ex);
987 
988 FASTOP3WCL(shld);
989 FASTOP3WCL(shrd);
990 
991 FASTOP2W(imul);
992 
993 FASTOP1(not);
994 FASTOP1(neg);
995 FASTOP1(inc);
996 FASTOP1(dec);
997 
998 FASTOP2CL(rol);
999 FASTOP2CL(ror);
1000 FASTOP2CL(rcl);
1001 FASTOP2CL(rcr);
1002 FASTOP2CL(shl);
1003 FASTOP2CL(shr);
1004 FASTOP2CL(sar);
1005 
1006 FASTOP2W(bsf);
1007 FASTOP2W(bsr);
1008 FASTOP2W(bt);
1009 FASTOP2W(bts);
1010 FASTOP2W(btr);
1011 FASTOP2W(btc);
1012 
1013 FASTOP2(xadd);
1014 
1015 FASTOP2R(cmp, cmp_r);
1016 
1017 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1018 {
1019 	/* If src is zero, do not write back, but update flags */
1020 	if (ctxt->src.val == 0)
1021 		ctxt->dst.type = OP_NONE;
1022 	return fastop(ctxt, em_bsf);
1023 }
1024 
1025 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1026 {
1027 	/* If src is zero, do not write back, but update flags */
1028 	if (ctxt->src.val == 0)
1029 		ctxt->dst.type = OP_NONE;
1030 	return fastop(ctxt, em_bsr);
1031 }
1032 
1033 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1034 {
1035 	u8 rc;
1036 	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1037 
1038 	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1039 	asm("push %[flags]; popf; " CALL_NOSPEC
1040 	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1041 	return rc;
1042 }
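/*
 * Example: each SETcc stub above is 4 bytes, so condition code 0x4 (the
 * ZF test used by JZ/CMOVZ) dispatches to em_setcc + 16, i.e. the setz
 * stub, and %al comes back 1 iff ZF is set in the supplied flags.
 */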
1043 
1044 static void fetch_register_operand(struct operand *op)
1045 {
1046 	switch (op->bytes) {
1047 	case 1:
1048 		op->val = *(u8 *)op->addr.reg;
1049 		break;
1050 	case 2:
1051 		op->val = *(u16 *)op->addr.reg;
1052 		break;
1053 	case 4:
1054 		op->val = *(u32 *)op->addr.reg;
1055 		break;
1056 	case 8:
1057 		op->val = *(u64 *)op->addr.reg;
1058 		break;
1059 	}
1060 }
1061 
1062 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1063 {
1064 	switch (reg) {
1065 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1066 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1067 	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1068 	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1069 	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1070 	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1071 	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1072 	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1073 #ifdef CONFIG_X86_64
1074 	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1075 	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1076 	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1077 	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1078 	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1079 	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1080 	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1081 	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1082 #endif
1083 	default: BUG();
1084 	}
1085 }
1086 
1087 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1088 			  int reg)
1089 {
1090 	switch (reg) {
1091 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1092 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1093 	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1094 	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1095 	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1096 	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1097 	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1098 	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1099 #ifdef CONFIG_X86_64
1100 	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1101 	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1102 	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1103 	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1104 	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1105 	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1106 	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1107 	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1108 #endif
1109 	default: BUG();
1110 	}
1111 }
1112 
1113 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1114 {
1115 	switch (reg) {
1116 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1117 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1118 	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1119 	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1120 	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1121 	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1122 	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1123 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1124 	default: BUG();
1125 	}
1126 }
1127 
1128 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1129 {
1130 	switch (reg) {
1131 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1132 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1133 	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1134 	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1135 	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1136 	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1137 	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1138 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1139 	default: BUG();
1140 	}
1141 }
1142 
1143 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1144 {
1145 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1146 		return emulate_nm(ctxt);
1147 
1148 	asm volatile("fninit");
1149 	return X86EMUL_CONTINUE;
1150 }
1151 
1152 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1153 {
1154 	u16 fcw;
1155 
1156 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1157 		return emulate_nm(ctxt);
1158 
1159 	asm volatile("fnstcw %0": "+m"(fcw));
1160 
1161 	ctxt->dst.val = fcw;
1162 
1163 	return X86EMUL_CONTINUE;
1164 }
1165 
1166 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1167 {
1168 	u16 fsw;
1169 
1170 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1171 		return emulate_nm(ctxt);
1172 
1173 	asm volatile("fnstsw %0": "+m"(fsw));
1174 
1175 	ctxt->dst.val = fsw;
1176 
1177 	return X86EMUL_CONTINUE;
1178 }
1179 
1180 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1181 				    struct operand *op)
1182 {
1183 	unsigned reg = ctxt->modrm_reg;
1184 
1185 	if (!(ctxt->d & ModRM))
1186 		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1187 
1188 	if (ctxt->d & Sse) {
1189 		op->type = OP_XMM;
1190 		op->bytes = 16;
1191 		op->addr.xmm = reg;
1192 		read_sse_reg(ctxt, &op->vec_val, reg);
1193 		return;
1194 	}
1195 	if (ctxt->d & Mmx) {
1196 		reg &= 7;
1197 		op->type = OP_MM;
1198 		op->bytes = 8;
1199 		op->addr.mm = reg;
1200 		return;
1201 	}
1202 
1203 	op->type = OP_REG;
1204 	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1205 	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1206 
1207 	fetch_register_operand(op);
1208 	op->orig_val = op->val;
1209 }
1210 
1211 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1212 {
1213 	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1214 		ctxt->modrm_seg = VCPU_SREG_SS;
1215 }
1216 
1217 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1218 			struct operand *op)
1219 {
1220 	u8 sib;
1221 	int index_reg, base_reg, scale;
1222 	int rc = X86EMUL_CONTINUE;
1223 	ulong modrm_ea = 0;
1224 
1225 	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1226 	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1227 	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1228 
1229 	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1230 	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1231 	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1232 	ctxt->modrm_seg = VCPU_SREG_DS;
1233 
1234 	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1235 		op->type = OP_REG;
1236 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1237 		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1238 				ctxt->d & ByteOp);
1239 		if (ctxt->d & Sse) {
1240 			op->type = OP_XMM;
1241 			op->bytes = 16;
1242 			op->addr.xmm = ctxt->modrm_rm;
1243 			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1244 			return rc;
1245 		}
1246 		if (ctxt->d & Mmx) {
1247 			op->type = OP_MM;
1248 			op->bytes = 8;
1249 			op->addr.mm = ctxt->modrm_rm & 7;
1250 			return rc;
1251 		}
1252 		fetch_register_operand(op);
1253 		return rc;
1254 	}
1255 
1256 	op->type = OP_MEM;
1257 
1258 	if (ctxt->ad_bytes == 2) {
1259 		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1260 		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1261 		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1262 		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1263 
1264 		/* 16-bit ModR/M decode. */
1265 		switch (ctxt->modrm_mod) {
1266 		case 0:
1267 			if (ctxt->modrm_rm == 6)
1268 				modrm_ea += insn_fetch(u16, ctxt);
1269 			break;
1270 		case 1:
1271 			modrm_ea += insn_fetch(s8, ctxt);
1272 			break;
1273 		case 2:
1274 			modrm_ea += insn_fetch(u16, ctxt);
1275 			break;
1276 		}
1277 		switch (ctxt->modrm_rm) {
1278 		case 0:
1279 			modrm_ea += bx + si;
1280 			break;
1281 		case 1:
1282 			modrm_ea += bx + di;
1283 			break;
1284 		case 2:
1285 			modrm_ea += bp + si;
1286 			break;
1287 		case 3:
1288 			modrm_ea += bp + di;
1289 			break;
1290 		case 4:
1291 			modrm_ea += si;
1292 			break;
1293 		case 5:
1294 			modrm_ea += di;
1295 			break;
1296 		case 6:
1297 			if (ctxt->modrm_mod != 0)
1298 				modrm_ea += bp;
1299 			break;
1300 		case 7:
1301 			modrm_ea += bx;
1302 			break;
1303 		}
1304 		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1305 		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1306 			ctxt->modrm_seg = VCPU_SREG_SS;
1307 		modrm_ea = (u16)modrm_ea;
1308 	} else {
1309 		/* 32/64-bit ModR/M decode. */
1310 		if ((ctxt->modrm_rm & 7) == 4) {
1311 			sib = insn_fetch(u8, ctxt);
1312 			index_reg |= (sib >> 3) & 7;
1313 			base_reg |= sib & 7;
1314 			scale = sib >> 6;
1315 
1316 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1317 				modrm_ea += insn_fetch(s32, ctxt);
1318 			else {
1319 				modrm_ea += reg_read(ctxt, base_reg);
1320 				adjust_modrm_seg(ctxt, base_reg);
1321 				/* Increment ESP on POP [ESP] */
1322 				if ((ctxt->d & IncSP) &&
1323 				    base_reg == VCPU_REGS_RSP)
1324 					modrm_ea += ctxt->op_bytes;
1325 			}
1326 			if (index_reg != 4)
1327 				modrm_ea += reg_read(ctxt, index_reg) << scale;
1328 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1329 			modrm_ea += insn_fetch(s32, ctxt);
1330 			if (ctxt->mode == X86EMUL_MODE_PROT64)
1331 				ctxt->rip_relative = 1;
1332 		} else {
1333 			base_reg = ctxt->modrm_rm;
1334 			modrm_ea += reg_read(ctxt, base_reg);
1335 			adjust_modrm_seg(ctxt, base_reg);
1336 		}
1337 		switch (ctxt->modrm_mod) {
1338 		case 1:
1339 			modrm_ea += insn_fetch(s8, ctxt);
1340 			break;
1341 		case 2:
1342 			modrm_ea += insn_fetch(s32, ctxt);
1343 			break;
1344 		}
1345 	}
1346 	op->addr.mem.ea = modrm_ea;
1347 	if (ctxt->ad_bytes != 8)
1348 		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1349 
1350 done:
1351 	return rc;
1352 }
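/*
 * Worked 16-bit example: ModRM byte 0x46 has mod == 1, rm == 6, so the
 * decode above fetches a sign-extended disp8 and forms ea = BP + disp8,
 * with the default segment switched to SS because BP is involved.
 */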
1353 
1354 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1355 		      struct operand *op)
1356 {
1357 	int rc = X86EMUL_CONTINUE;
1358 
1359 	op->type = OP_MEM;
1360 	switch (ctxt->ad_bytes) {
1361 	case 2:
1362 		op->addr.mem.ea = insn_fetch(u16, ctxt);
1363 		break;
1364 	case 4:
1365 		op->addr.mem.ea = insn_fetch(u32, ctxt);
1366 		break;
1367 	case 8:
1368 		op->addr.mem.ea = insn_fetch(u64, ctxt);
1369 		break;
1370 	}
1371 done:
1372 	return rc;
1373 }
1374 
1375 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1376 {
1377 	long sv = 0, mask;
1378 
1379 	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1380 		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1381 
1382 		if (ctxt->src.bytes == 2)
1383 			sv = (s16)ctxt->src.val & (s16)mask;
1384 		else if (ctxt->src.bytes == 4)
1385 			sv = (s32)ctxt->src.val & (s32)mask;
1386 		else
1387 			sv = (s64)ctxt->src.val & (s64)mask;
1388 
1389 		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1390 					   ctxt->dst.addr.mem.ea + (sv >> 3));
1391 	}
1392 
1393 	/* only subword offset */
1394 	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1395 }
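/*
 * Worked example: for BT reg,mem with a 4-byte destination and src.val ==
 * 100, mask == ~31 and sv == 96, so the effective address above advances
 * by 96 / 8 == 12 bytes and the in-word bit offset becomes 100 & 31 == 4.
 */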
1396 
1397 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1398 			 unsigned long addr, void *dest, unsigned size)
1399 {
1400 	int rc;
1401 	struct read_cache *mc = &ctxt->mem_read;
1402 
1403 	if (mc->pos < mc->end)
1404 		goto read_cached;
1405 
1406 	WARN_ON((mc->end + size) >= sizeof(mc->data));
1407 
1408 	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1409 				      &ctxt->exception);
1410 	if (rc != X86EMUL_CONTINUE)
1411 		return rc;
1412 
1413 	mc->end += size;
1414 
1415 read_cached:
1416 	memcpy(dest, mc->data + mc->pos, size);
1417 	mc->pos += size;
1418 	return X86EMUL_CONTINUE;
1419 }
1420 
1421 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1422 			  struct segmented_address addr,
1423 			  void *data,
1424 			  unsigned size)
1425 {
1426 	int rc;
1427 	ulong linear;
1428 
1429 	rc = linearize(ctxt, addr, size, false, &linear);
1430 	if (rc != X86EMUL_CONTINUE)
1431 		return rc;
1432 	return read_emulated(ctxt, linear, data, size);
1433 }
1434 
1435 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1436 			   struct segmented_address addr,
1437 			   const void *data,
1438 			   unsigned size)
1439 {
1440 	int rc;
1441 	ulong linear;
1442 
1443 	rc = linearize(ctxt, addr, size, true, &linear);
1444 	if (rc != X86EMUL_CONTINUE)
1445 		return rc;
1446 	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1447 					 &ctxt->exception);
1448 }
1449 
1450 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1451 			     struct segmented_address addr,
1452 			     const void *orig_data, const void *data,
1453 			     unsigned size)
1454 {
1455 	int rc;
1456 	ulong linear;
1457 
1458 	rc = linearize(ctxt, addr, size, true, &linear);
1459 	if (rc != X86EMUL_CONTINUE)
1460 		return rc;
1461 	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1462 					   size, &ctxt->exception);
1463 }
1464 
1465 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1466 			   unsigned int size, unsigned short port,
1467 			   void *dest)
1468 {
1469 	struct read_cache *rc = &ctxt->io_read;
1470 
1471 	if (rc->pos == rc->end) { /* refill pio read ahead */
1472 		unsigned int in_page, n;
1473 		unsigned int count = ctxt->rep_prefix ?
1474 			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1475 		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1476 			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1477 			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1478 		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1479 		if (n == 0)
1480 			n = 1;
1481 		rc->pos = rc->end = 0;
1482 		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1483 			return 0;
1484 		rc->end = n * size;
1485 	}
1486 
1487 	if (ctxt->rep_prefix && (ctxt->d & String) &&
1488 	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1489 		ctxt->dst.data = rc->data + rc->pos;
1490 		ctxt->dst.type = OP_MEM_STR;
1491 		ctxt->dst.count = (rc->end - rc->pos) / size;
1492 		rc->pos = rc->end;
1493 	} else {
1494 		memcpy(dest, rc->data + rc->pos, size);
1495 		rc->pos += size;
1496 	}
1497 	return 1;
1498 }
1499 
1500 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1501 				     u16 index, struct desc_struct *desc)
1502 {
1503 	struct desc_ptr dt;
1504 	ulong addr;
1505 
1506 	ctxt->ops->get_idt(ctxt, &dt);
1507 
1508 	if (dt.size < index * 8 + 7)
1509 		return emulate_gp(ctxt, index << 3 | 0x2);
1510 
1511 	addr = dt.address + index * 8;
1512 	return linear_read_system(ctxt, addr, desc, sizeof *desc);
1513 }
1514 
1515 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1516 				     u16 selector, struct desc_ptr *dt)
1517 {
1518 	const struct x86_emulate_ops *ops = ctxt->ops;
1519 	u32 base3 = 0;
1520 
1521 	if (selector & 1 << 2) {
1522 		struct desc_struct desc;
1523 		u16 sel;
1524 
1525 		memset (dt, 0, sizeof *dt);
1526 		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1527 				      VCPU_SREG_LDTR))
1528 			return;
1529 
1530 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1531 		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1532 	} else
1533 		ops->get_gdt(ctxt, dt);
1534 }
1535 
1536 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1537 			      u16 selector, ulong *desc_addr_p)
1538 {
1539 	struct desc_ptr dt;
1540 	u16 index = selector >> 3;
1541 	ulong addr;
1542 
1543 	get_descriptor_table_ptr(ctxt, selector, &dt);
1544 
1545 	if (dt.size < index * 8 + 7)
1546 		return emulate_gp(ctxt, selector & 0xfffc);
1547 
1548 	addr = dt.address + index * 8;
1549 
1550 #ifdef CONFIG_X86_64
1551 	if (addr >> 32 != 0) {
1552 		u64 efer = 0;
1553 
1554 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1555 		if (!(efer & EFER_LMA))
1556 			addr &= (u32)-1;
1557 	}
1558 #endif
1559 
1560 	*desc_addr_p = addr;
1561 	return X86EMUL_CONTINUE;
1562 }
1563 
1564 /* allowed just for 8 bytes segments */
1565 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1566 				   u16 selector, struct desc_struct *desc,
1567 				   ulong *desc_addr_p)
1568 {
1569 	int rc;
1570 
1571 	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1572 	if (rc != X86EMUL_CONTINUE)
1573 		return rc;
1574 
1575 	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1576 }
1577 
1578 /* allowed just for 8 bytes segments */
1579 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1580 				    u16 selector, struct desc_struct *desc)
1581 {
1582 	int rc;
1583 	ulong addr;
1584 
1585 	rc = get_descriptor_ptr(ctxt, selector, &addr);
1586 	if (rc != X86EMUL_CONTINUE)
1587 		return rc;
1588 
1589 	return linear_write_system(ctxt, addr, desc, sizeof *desc);
1590 }
1591 
1592 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1593 				     u16 selector, int seg, u8 cpl,
1594 				     enum x86_transfer_type transfer,
1595 				     struct desc_struct *desc)
1596 {
1597 	struct desc_struct seg_desc, old_desc;
1598 	u8 dpl, rpl;
1599 	unsigned err_vec = GP_VECTOR;
1600 	u32 err_code = 0;
1601 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1602 	ulong desc_addr;
1603 	int ret;
1604 	u16 dummy;
1605 	u32 base3 = 0;
1606 
1607 	memset(&seg_desc, 0, sizeof seg_desc);
1608 
1609 	if (ctxt->mode == X86EMUL_MODE_REAL) {
1610 		/* set real mode segment descriptor (keep limit etc. for
1611 		 * unreal mode) */
1612 		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1613 		set_desc_base(&seg_desc, selector << 4);
1614 		goto load;
1615 	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1616 		/* VM86 needs a clean new segment descriptor */
1617 		set_desc_base(&seg_desc, selector << 4);
1618 		set_desc_limit(&seg_desc, 0xffff);
1619 		seg_desc.type = 3;
1620 		seg_desc.p = 1;
1621 		seg_desc.s = 1;
1622 		seg_desc.dpl = 3;
1623 		goto load;
1624 	}
1625 
1626 	rpl = selector & 3;
1627 
1628 	/* TR should be in GDT only */
1629 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1630 		goto exception;
1631 
1632 	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1633 	if (null_selector) {
1634 		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1635 			goto exception;
1636 
1637 		if (seg == VCPU_SREG_SS) {
1638 			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1639 				goto exception;
1640 
1641 			/*
1642 			 * ctxt->ops->set_segment expects the CPL to be in
1643 			 * SS.DPL, so fake an expand-up 32-bit data segment.
1644 			 */
1645 			seg_desc.type = 3;
1646 			seg_desc.p = 1;
1647 			seg_desc.s = 1;
1648 			seg_desc.dpl = cpl;
1649 			seg_desc.d = 1;
1650 			seg_desc.g = 1;
1651 		}
1652 
1653 		/* Skip all following checks */
1654 		goto load;
1655 	}
1656 
1657 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1658 	if (ret != X86EMUL_CONTINUE)
1659 		return ret;
1660 
1661 	err_code = selector & 0xfffc;
1662 	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1663 							   GP_VECTOR;
1664 
1665 	/* can't load a system descriptor into a segment register */
1666 	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1667 		if (transfer == X86_TRANSFER_CALL_JMP)
1668 			return X86EMUL_UNHANDLEABLE;
1669 		goto exception;
1670 	}
1671 
1672 	if (!seg_desc.p) {
1673 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1674 		goto exception;
1675 	}
1676 
1677 	dpl = seg_desc.dpl;
1678 
1679 	switch (seg) {
1680 	case VCPU_SREG_SS:
1681 		/*
1682 		 * segment is not a writable data segment, or the selector's
1683 		 * RPL != CPL, or the descriptor's DPL != CPL
1684 		 */
1685 		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1686 			goto exception;
1687 		break;
1688 	case VCPU_SREG_CS:
1689 		if (!(seg_desc.type & 8))
1690 			goto exception;
1691 
1692 		if (seg_desc.type & 4) {
1693 			/* conforming */
1694 			if (dpl > cpl)
1695 				goto exception;
1696 		} else {
1697 			/* nonconforming */
1698 			if (rpl > cpl || dpl != cpl)
1699 				goto exception;
1700 		}
1701 		/* in long-mode d/b must be clear if l is set */
1702 		if (seg_desc.d && seg_desc.l) {
1703 			u64 efer = 0;
1704 
1705 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1706 			if (efer & EFER_LMA)
1707 				goto exception;
1708 		}
1709 
1710 		/* CS(RPL) <- CPL */
1711 		selector = (selector & 0xfffc) | cpl;
1712 		break;
1713 	case VCPU_SREG_TR:
1714 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1715 			goto exception;
1716 		old_desc = seg_desc;
1717 		seg_desc.type |= 2; /* busy */
1718 		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1719 						  sizeof(seg_desc), &ctxt->exception);
1720 		if (ret != X86EMUL_CONTINUE)
1721 			return ret;
1722 		break;
1723 	case VCPU_SREG_LDTR:
1724 		if (seg_desc.s || seg_desc.type != 2)
1725 			goto exception;
1726 		break;
1727 	default: /*  DS, ES, FS, or GS */
1728 		/*
1729 		 * segment is not a data or readable code segment or
1730 		 * ((segment is a data or nonconforming code segment)
1731 		 * and (both RPL and CPL > DPL))
1732 		 */
1733 		if ((seg_desc.type & 0xa) == 0x8 ||
1734 		    (((seg_desc.type & 0xc) != 0xc) &&
1735 		     (rpl > dpl && cpl > dpl)))
1736 			goto exception;
1737 		break;
1738 	}
1739 
1740 	if (seg_desc.s) {
1741 		/* mark segment as accessed */
1742 		if (!(seg_desc.type & 1)) {
1743 			seg_desc.type |= 1;
1744 			ret = write_segment_descriptor(ctxt, selector,
1745 						       &seg_desc);
1746 			if (ret != X86EMUL_CONTINUE)
1747 				return ret;
1748 		}
1749 	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1750 		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1751 		if (ret != X86EMUL_CONTINUE)
1752 			return ret;
1753 		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1754 				((u64)base3 << 32), ctxt))
1755 			return emulate_gp(ctxt, 0);
1756 	}
1757 load:
1758 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1759 	if (desc)
1760 		*desc = seg_desc;
1761 	return X86EMUL_CONTINUE;
1762 exception:
1763 	return emulate_exception(ctxt, err_vec, err_code, true);
1764 }
1765 
1766 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1767 				   u16 selector, int seg)
1768 {
1769 	u8 cpl = ctxt->ops->cpl(ctxt);
1770 
1771 	/*
1772 	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1773 	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1774 	 * but it's wrong).
1775 	 *
1776 	 * However, the Intel manual says that putting IST=1/DPL=3 in
1777 	 * an interrupt gate will result in SS=3 (the AMD manual instead
1778 	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1779 	 * and only forbid it here.
1780 	 */
1781 	if (seg == VCPU_SREG_SS && selector == 3 &&
1782 	    ctxt->mode == X86EMUL_MODE_PROT64)
1783 		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1784 
1785 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1786 					 X86_TRANSFER_NONE, NULL);
1787 }
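
/*
 * Illustrative sketch, not part of the emulator: the checks above rely on
 * the standard x86 selector layout -- bits 15:3 index the descriptor
 * table, bit 2 selects GDT vs. LDT (which is why TR rejects
 * selector & (1 << 2)), and bits 1:0 carry the RPL.  The helper name is
 * hypothetical.
 */
static inline void example_decode_selector(unsigned short sel,
					   unsigned int *index,
					   unsigned int *ti, unsigned int *rpl)
{
	*index = sel >> 3;	/* descriptor slot within GDT/LDT */
	*ti = (sel >> 2) & 1;	/* table indicator: 0 = GDT, 1 = LDT */
	*rpl = sel & 3;		/* requested privilege level */
}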
1788 
1789 static void write_register_operand(struct operand *op)
1790 {
1791 	return assign_register(op->addr.reg, op->val, op->bytes);
1792 }
1793 
1794 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1795 {
1796 	switch (op->type) {
1797 	case OP_REG:
1798 		write_register_operand(op);
1799 		break;
1800 	case OP_MEM:
1801 		if (ctxt->lock_prefix)
1802 			return segmented_cmpxchg(ctxt,
1803 						 op->addr.mem,
1804 						 &op->orig_val,
1805 						 &op->val,
1806 						 op->bytes);
1807 		else
1808 			return segmented_write(ctxt,
1809 					       op->addr.mem,
1810 					       &op->val,
1811 					       op->bytes);
1812 		break;
1813 	case OP_MEM_STR:
1814 		return segmented_write(ctxt,
1815 				       op->addr.mem,
1816 				       op->data,
1817 				       op->bytes * op->count);
1818 		break;
1819 	case OP_XMM:
1820 		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1821 		break;
1822 	case OP_MM:
1823 		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1824 		break;
1825 	case OP_NONE:
1826 		/* no writeback */
1827 		break;
1828 	default:
1829 		break;
1830 	}
1831 	return X86EMUL_CONTINUE;
1832 }
1833 
1834 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1835 {
1836 	struct segmented_address addr;
1837 
1838 	rsp_increment(ctxt, -bytes);
1839 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1840 	addr.seg = VCPU_SREG_SS;
1841 
1842 	return segmented_write(ctxt, addr, data, bytes);
1843 }
1844 
1845 static int em_push(struct x86_emulate_ctxt *ctxt)
1846 {
1847 	/* Disable writeback. */
1848 	ctxt->dst.type = OP_NONE;
1849 	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1850 }
1851 
1852 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1853 		       void *dest, int len)
1854 {
1855 	int rc;
1856 	struct segmented_address addr;
1857 
1858 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1859 	addr.seg = VCPU_SREG_SS;
1860 	rc = segmented_read(ctxt, addr, dest, len);
1861 	if (rc != X86EMUL_CONTINUE)
1862 		return rc;
1863 
1864 	rsp_increment(ctxt, len);
1865 	return rc;
1866 }
1867 
1868 static int em_pop(struct x86_emulate_ctxt *ctxt)
1869 {
1870 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1871 }
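
/*
 * Illustrative sketch, not part of the emulator: push() and emulate_pop()
 * above follow the hardware ordering -- a push pre-decrements the stack
 * pointer and then stores, a pop loads first and then post-increments.
 * A hypothetical flat-memory model of that ordering:
 */
static void example_stack_model(unsigned char *mem, unsigned long *sp,
				const void *src, void *dst, int bytes)
{
	*sp -= bytes;			/* push: decrement first ... */
	memcpy(mem + *sp, src, bytes);	/* ... then write */

	memcpy(dst, mem + *sp, bytes);	/* pop: read first ... */
	*sp += bytes;			/* ... then increment */
}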
1872 
1873 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1874 			void *dest, int len)
1875 {
1876 	int rc;
1877 	unsigned long val, change_mask;
1878 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1879 	int cpl = ctxt->ops->cpl(ctxt);
1880 
1881 	rc = emulate_pop(ctxt, &val, len);
1882 	if (rc != X86EMUL_CONTINUE)
1883 		return rc;
1884 
1885 	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1886 		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1887 		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1888 		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1889 
1890 	switch(ctxt->mode) {
1891 	case X86EMUL_MODE_PROT64:
1892 	case X86EMUL_MODE_PROT32:
1893 	case X86EMUL_MODE_PROT16:
1894 		if (cpl == 0)
1895 			change_mask |= X86_EFLAGS_IOPL;
1896 		if (cpl <= iopl)
1897 			change_mask |= X86_EFLAGS_IF;
1898 		break;
1899 	case X86EMUL_MODE_VM86:
1900 		if (iopl < 3)
1901 			return emulate_gp(ctxt, 0);
1902 		change_mask |= X86_EFLAGS_IF;
1903 		break;
1904 	default: /* real mode */
1905 		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1906 		break;
1907 	}
1908 
1909 	*(unsigned long *)dest =
1910 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1911 
1912 	return rc;
1913 }
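
/*
 * Illustrative sketch, not part of the emulator: emulate_popf() lets the
 * popped value modify only the bits present in change_mask (which grows
 * with privilege -- IOPL at CPL 0, IF when CPL <= IOPL); all other flags
 * keep their old values:
 */
static unsigned long example_popf_merge(unsigned long old_eflags,
					unsigned long popped,
					unsigned long change_mask)
{
	return (old_eflags & ~change_mask) | (popped & change_mask);
}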
1914 
1915 static int em_popf(struct x86_emulate_ctxt *ctxt)
1916 {
1917 	ctxt->dst.type = OP_REG;
1918 	ctxt->dst.addr.reg = &ctxt->eflags;
1919 	ctxt->dst.bytes = ctxt->op_bytes;
1920 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1921 }
1922 
1923 static int em_enter(struct x86_emulate_ctxt *ctxt)
1924 {
1925 	int rc;
1926 	unsigned frame_size = ctxt->src.val;
1927 	unsigned nesting_level = ctxt->src2.val & 31;
1928 	ulong rbp;
1929 
1930 	if (nesting_level)
1931 		return X86EMUL_UNHANDLEABLE;
1932 
1933 	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1934 	rc = push(ctxt, &rbp, stack_size(ctxt));
1935 	if (rc != X86EMUL_CONTINUE)
1936 		return rc;
1937 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1938 		      stack_mask(ctxt));
1939 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1940 		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1941 		      stack_mask(ctxt));
1942 	return X86EMUL_CONTINUE;
1943 }
1944 
1945 static int em_leave(struct x86_emulate_ctxt *ctxt)
1946 {
1947 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1948 		      stack_mask(ctxt));
1949 	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1950 }
1951 
1952 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1953 {
1954 	int seg = ctxt->src2.val;
1955 
1956 	ctxt->src.val = get_segment_selector(ctxt, seg);
1957 	if (ctxt->op_bytes == 4) {
1958 		rsp_increment(ctxt, -2);
1959 		ctxt->op_bytes = 2;
1960 	}
1961 
1962 	return em_push(ctxt);
1963 }
1964 
1965 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1966 {
1967 	int seg = ctxt->src2.val;
1968 	unsigned long selector;
1969 	int rc;
1970 
1971 	rc = emulate_pop(ctxt, &selector, 2);
1972 	if (rc != X86EMUL_CONTINUE)
1973 		return rc;
1974 
1975 	if (ctxt->modrm_reg == VCPU_SREG_SS)
1976 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1977 	if (ctxt->op_bytes > 2)
1978 		rsp_increment(ctxt, ctxt->op_bytes - 2);
1979 
1980 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1981 	return rc;
1982 }
1983 
1984 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1985 {
1986 	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1987 	int rc = X86EMUL_CONTINUE;
1988 	int reg = VCPU_REGS_RAX;
1989 
1990 	while (reg <= VCPU_REGS_RDI) {
1991 		(reg == VCPU_REGS_RSP) ?
1992 		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1993 
1994 		rc = em_push(ctxt);
1995 		if (rc != X86EMUL_CONTINUE)
1996 			return rc;
1997 
1998 		++reg;
1999 	}
2000 
2001 	return rc;
2002 }
2003 
2004 static int em_pushf(struct x86_emulate_ctxt *ctxt)
2005 {
2006 	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2007 	return em_push(ctxt);
2008 }
2009 
2010 static int em_popa(struct x86_emulate_ctxt *ctxt)
2011 {
2012 	int rc = X86EMUL_CONTINUE;
2013 	int reg = VCPU_REGS_RDI;
2014 	u32 val;
2015 
2016 	while (reg >= VCPU_REGS_RAX) {
2017 		if (reg == VCPU_REGS_RSP) {
2018 			rsp_increment(ctxt, ctxt->op_bytes);
2019 			--reg;
2020 		}
2021 
2022 		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2023 		if (rc != X86EMUL_CONTINUE)
2024 			break;
2025 		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2026 		--reg;
2027 	}
2028 	return rc;
2029 }
2030 
2031 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2032 {
2033 	const struct x86_emulate_ops *ops = ctxt->ops;
2034 	int rc;
2035 	struct desc_ptr dt;
2036 	gva_t cs_addr;
2037 	gva_t eip_addr;
2038 	u16 cs, eip;
2039 
2040 	/* TODO: Add limit checks */
2041 	ctxt->src.val = ctxt->eflags;
2042 	rc = em_push(ctxt);
2043 	if (rc != X86EMUL_CONTINUE)
2044 		return rc;
2045 
2046 	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2047 
2048 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2049 	rc = em_push(ctxt);
2050 	if (rc != X86EMUL_CONTINUE)
2051 		return rc;
2052 
2053 	ctxt->src.val = ctxt->_eip;
2054 	rc = em_push(ctxt);
2055 	if (rc != X86EMUL_CONTINUE)
2056 		return rc;
2057 
2058 	ops->get_idt(ctxt, &dt);
2059 
2060 	eip_addr = dt.address + (irq << 2);
2061 	cs_addr = dt.address + (irq << 2) + 2;
2062 
2063 	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2064 	if (rc != X86EMUL_CONTINUE)
2065 		return rc;
2066 
2067 	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2068 	if (rc != X86EMUL_CONTINUE)
2069 		return rc;
2070 
2071 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2072 	if (rc != X86EMUL_CONTINUE)
2073 		return rc;
2074 
2075 	ctxt->_eip = eip;
2076 
2077 	return rc;
2078 }
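
/*
 * Illustrative sketch, not part of the emulator: in real mode the IDT
 * base points at the interrupt vector table, four bytes per vector with
 * IP first and CS second -- the eip_addr/cs_addr computation above.
 * Hypothetical helper:
 */
static void example_ivt_entry(unsigned long ivt_base, int vector,
			      unsigned long *ip_addr, unsigned long *cs_addr)
{
	*ip_addr = ivt_base + (vector << 2);	/* 16-bit IP */
	*cs_addr = *ip_addr + 2;		/* 16-bit CS follows it */
}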
2079 
2080 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2081 {
2082 	int rc;
2083 
2084 	invalidate_registers(ctxt);
2085 	rc = __emulate_int_real(ctxt, irq);
2086 	if (rc == X86EMUL_CONTINUE)
2087 		writeback_registers(ctxt);
2088 	return rc;
2089 }
2090 
2091 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2092 {
2093 	switch(ctxt->mode) {
2094 	case X86EMUL_MODE_REAL:
2095 		return __emulate_int_real(ctxt, irq);
2096 	case X86EMUL_MODE_VM86:
2097 	case X86EMUL_MODE_PROT16:
2098 	case X86EMUL_MODE_PROT32:
2099 	case X86EMUL_MODE_PROT64:
2100 	default:
2101 		/* Protected mode interrupts are not implemented yet */
2102 		return X86EMUL_UNHANDLEABLE;
2103 	}
2104 }
2105 
2106 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2107 {
2108 	int rc = X86EMUL_CONTINUE;
2109 	unsigned long temp_eip = 0;
2110 	unsigned long temp_eflags = 0;
2111 	unsigned long cs = 0;
2112 	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2113 			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2114 			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2115 			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2116 			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2117 			     X86_EFLAGS_FIXED;
2118 	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2119 				  X86_EFLAGS_VIP;
2120 
2121 	/* TODO: Add stack limit check */
2122 
2123 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2124 
2125 	if (rc != X86EMUL_CONTINUE)
2126 		return rc;
2127 
2128 	if (temp_eip & ~0xffff)
2129 		return emulate_gp(ctxt, 0);
2130 
2131 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2132 
2133 	if (rc != X86EMUL_CONTINUE)
2134 		return rc;
2135 
2136 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2137 
2138 	if (rc != X86EMUL_CONTINUE)
2139 		return rc;
2140 
2141 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2142 
2143 	if (rc != X86EMUL_CONTINUE)
2144 		return rc;
2145 
2146 	ctxt->_eip = temp_eip;
2147 
2148 	if (ctxt->op_bytes == 4)
2149 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2150 	else if (ctxt->op_bytes == 2) {
2151 		ctxt->eflags &= ~0xffff;
2152 		ctxt->eflags |= temp_eflags;
2153 	}
2154 
2155 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2156 	ctxt->eflags |= X86_EFLAGS_FIXED;
2157 	ctxt->ops->set_nmi_mask(ctxt, false);
2158 
2159 	return rc;
2160 }
2161 
2162 static int em_iret(struct x86_emulate_ctxt *ctxt)
2163 {
2164 	switch(ctxt->mode) {
2165 	case X86EMUL_MODE_REAL:
2166 		return emulate_iret_real(ctxt);
2167 	case X86EMUL_MODE_VM86:
2168 	case X86EMUL_MODE_PROT16:
2169 	case X86EMUL_MODE_PROT32:
2170 	case X86EMUL_MODE_PROT64:
2171 	default:
2172 		/* iret from protected mode is not implemented yet */
2173 		return X86EMUL_UNHANDLEABLE;
2174 	}
2175 }
2176 
2177 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2178 {
2179 	int rc;
2180 	unsigned short sel;
2181 	struct desc_struct new_desc;
2182 	u8 cpl = ctxt->ops->cpl(ctxt);
2183 
2184 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2185 
2186 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2187 				       X86_TRANSFER_CALL_JMP,
2188 				       &new_desc);
2189 	if (rc != X86EMUL_CONTINUE)
2190 		return rc;
2191 
2192 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2193 	/* Error handling is not implemented. */
2194 	if (rc != X86EMUL_CONTINUE)
2195 		return X86EMUL_UNHANDLEABLE;
2196 
2197 	return rc;
2198 }
2199 
2200 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2201 {
2202 	return assign_eip_near(ctxt, ctxt->src.val);
2203 }
2204 
2205 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2206 {
2207 	int rc;
2208 	long int old_eip;
2209 
2210 	old_eip = ctxt->_eip;
2211 	rc = assign_eip_near(ctxt, ctxt->src.val);
2212 	if (rc != X86EMUL_CONTINUE)
2213 		return rc;
2214 	ctxt->src.val = old_eip;
2215 	rc = em_push(ctxt);
2216 	return rc;
2217 }
2218 
2219 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2220 {
2221 	u64 old = ctxt->dst.orig_val64;
2222 
2223 	if (ctxt->dst.bytes == 16)
2224 		return X86EMUL_UNHANDLEABLE;
2225 
2226 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2227 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2228 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2229 		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2230 		ctxt->eflags &= ~X86_EFLAGS_ZF;
2231 	} else {
2232 		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2233 			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2234 
2235 		ctxt->eflags |= X86_EFLAGS_ZF;
2236 	}
2237 	return X86EMUL_CONTINUE;
2238 }
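
/*
 * Illustrative sketch, not part of the emulator: cmpxchg8b compares
 * EDX:EAX with the 64-bit destination; on a match it stores ECX:EBX and
 * sets ZF, otherwise it loads the old value into EDX:EAX and clears ZF.
 * A hypothetical stand-alone model over plain C types:
 */
static int example_cmpxchg8b(unsigned long long *dst, unsigned int *eax,
			     unsigned int *edx, unsigned int ebx,
			     unsigned int ecx)
{
	unsigned long long expected = ((unsigned long long)*edx << 32) | *eax;

	if (*dst == expected) {
		*dst = ((unsigned long long)ecx << 32) | ebx;
		return 1;			/* ZF set */
	}
	*eax = (unsigned int)*dst;
	*edx = (unsigned int)(*dst >> 32);
	return 0;				/* ZF clear */
}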
2239 
2240 static int em_ret(struct x86_emulate_ctxt *ctxt)
2241 {
2242 	int rc;
2243 	unsigned long eip;
2244 
2245 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2246 	if (rc != X86EMUL_CONTINUE)
2247 		return rc;
2248 
2249 	return assign_eip_near(ctxt, eip);
2250 }
2251 
2252 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2253 {
2254 	int rc;
2255 	unsigned long eip, cs;
2256 	int cpl = ctxt->ops->cpl(ctxt);
2257 	struct desc_struct new_desc;
2258 
2259 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2260 	if (rc != X86EMUL_CONTINUE)
2261 		return rc;
2262 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2263 	if (rc != X86EMUL_CONTINUE)
2264 		return rc;
2265 	/* Outer-privilege level return is not implemented */
2266 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2267 		return X86EMUL_UNHANDLEABLE;
2268 	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2269 				       X86_TRANSFER_RET,
2270 				       &new_desc);
2271 	if (rc != X86EMUL_CONTINUE)
2272 		return rc;
2273 	rc = assign_eip_far(ctxt, eip, &new_desc);
2274 	/* Error handling is not implemented. */
2275 	if (rc != X86EMUL_CONTINUE)
2276 		return X86EMUL_UNHANDLEABLE;
2277 
2278 	return rc;
2279 }
2280 
2281 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2282 {
2283 	int rc;
2284 
2285 	rc = em_ret_far(ctxt);
2286 	if (rc != X86EMUL_CONTINUE)
2287 		return rc;
2288 	rsp_increment(ctxt, ctxt->src.val);
2289 	return X86EMUL_CONTINUE;
2290 }
2291 
2292 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2293 {
2294 	/* Save real source value, then compare EAX against destination. */
2295 	ctxt->dst.orig_val = ctxt->dst.val;
2296 	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2297 	ctxt->src.orig_val = ctxt->src.val;
2298 	ctxt->src.val = ctxt->dst.orig_val;
2299 	fastop(ctxt, em_cmp);
2300 
2301 	if (ctxt->eflags & X86_EFLAGS_ZF) {
2302 		/* Success: write back to memory; no update of EAX */
2303 		ctxt->src.type = OP_NONE;
2304 		ctxt->dst.val = ctxt->src.orig_val;
2305 	} else {
2306 		/* Failure: write the value we saw to EAX. */
2307 		ctxt->src.type = OP_REG;
2308 		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2309 		ctxt->src.val = ctxt->dst.orig_val;
2310 		/* Create write-cycle to dest by writing the same value */
2311 		ctxt->dst.val = ctxt->dst.orig_val;
2312 	}
2313 	return X86EMUL_CONTINUE;
2314 }
2315 
2316 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2317 {
2318 	int seg = ctxt->src2.val;
2319 	unsigned short sel;
2320 	int rc;
2321 
2322 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2323 
2324 	rc = load_segment_descriptor(ctxt, sel, seg);
2325 	if (rc != X86EMUL_CONTINUE)
2326 		return rc;
2327 
2328 	ctxt->dst.val = ctxt->src.val;
2329 	return rc;
2330 }
2331 
2332 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2333 {
2334 	u32 eax, ebx, ecx, edx;
2335 
2336 	eax = 0x80000001;
2337 	ecx = 0;
2338 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2339 	return edx & bit(X86_FEATURE_LM);
2340 }
2341 
2342 #define GET_SMSTATE(type, smbase, offset)				  \
2343 	({								  \
2344 	 type __val;							  \
2345 	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
2346 				      sizeof(__val));			  \
2347 	 if (r != X86EMUL_CONTINUE)					  \
2348 		 return X86EMUL_UNHANDLEABLE;				  \
2349 	 __val;								  \
2350 	})
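
/*
 * Usage sketch: GET_SMSTATE reads one field of the SMM state-save area at
 * smbase + offset and, being a statement expression, returns early from
 * the *calling* function on failure, e.g.:
 *
 *	u32 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
 */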
2351 
2352 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2353 {
2354 	desc->g    = (flags >> 23) & 1;
2355 	desc->d    = (flags >> 22) & 1;
2356 	desc->l    = (flags >> 21) & 1;
2357 	desc->avl  = (flags >> 20) & 1;
2358 	desc->p    = (flags >> 15) & 1;
2359 	desc->dpl  = (flags >> 13) & 3;
2360 	desc->s    = (flags >> 12) & 1;
2361 	desc->type = (flags >>  8) & 15;
2362 }
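
/*
 * Illustrative sketch, not part of the emulator: the flags word decoded
 * above has the layout of the upper 32 bits of a legacy descriptor with
 * the limit bits ignored.  For a hypothetical value of 0x00c09b00:
 *
 *	g = 1, d = 1, l = 0, avl = 0	(bits 23..20)
 *	p = 1, dpl = 0			(bit 15, bits 14:13)
 *	s = 1, type = 0xb		(bit 12, bits 11:8)
 *
 * i.e. a present, 32-bit, page-granular, accessed code segment.
 */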
2363 
2364 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2365 {
2366 	struct desc_struct desc;
2367 	int offset;
2368 	u16 selector;
2369 
2370 	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2371 
2372 	if (n < 3)
2373 		offset = 0x7f84 + n * 12;
2374 	else
2375 		offset = 0x7f2c + (n - 3) * 12;
2376 
2377 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2378 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2379 	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2380 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2381 	return X86EMUL_CONTINUE;
2382 }
2383 
2384 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2385 {
2386 	struct desc_struct desc;
2387 	int offset;
2388 	u16 selector;
2389 	u32 base3;
2390 
2391 	offset = 0x7e00 + n * 16;
2392 
2393 	selector =                GET_SMSTATE(u16, smbase, offset);
2394 	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2395 	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2396 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2397 	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
2398 
2399 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2400 	return X86EMUL_CONTINUE;
2401 }
2402 
2403 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2404 				    u64 cr0, u64 cr3, u64 cr4)
2405 {
2406 	int bad;
2407 	u64 pcid;
2408 
2409 	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
2410 	pcid = 0;
2411 	if (cr4 & X86_CR4_PCIDE) {
2412 		pcid = cr3 & 0xfff;
2413 		cr3 &= ~0xfff;
2414 	}
2415 
2416 	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2417 	if (bad)
2418 		return X86EMUL_UNHANDLEABLE;
2419 
2420 	/*
2421 	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2422 	 * Then enable protected mode.	However, PCID cannot be enabled
2423 	 * if EFER.LMA=0, so set it separately.
2424 	 */
2425 	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2426 	if (bad)
2427 		return X86EMUL_UNHANDLEABLE;
2428 
2429 	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2430 	if (bad)
2431 		return X86EMUL_UNHANDLEABLE;
2432 
2433 	if (cr4 & X86_CR4_PCIDE) {
2434 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2435 		if (bad)
2436 			return X86EMUL_UNHANDLEABLE;
2437 		if (pcid) {
2438 			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2439 			if (bad)
2440 				return X86EMUL_UNHANDLEABLE;
2441 		}
2442 
2443 	}
2444 
2445 	return X86EMUL_CONTINUE;
2446 }
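
/*
 * Illustrative sketch, not part of the emulator: the PCID handling above
 * splits CR3 because CR4.PCIDE may only be set once long mode is active,
 * yet CR3 has to be loaded earlier with its low 12 bits clear.
 * Hypothetical helper:
 */
static void example_split_cr3(unsigned long long cr3,
			      unsigned long long *base,
			      unsigned long long *pcid)
{
	*pcid = cr3 & 0xfff;		/* PCID lives in CR3[11:0] */
	*base = cr3 & ~0xfffULL;	/* 4K-aligned page-table base */
}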
2447 
2448 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2449 {
2450 	struct desc_struct desc;
2451 	struct desc_ptr dt;
2452 	u16 selector;
2453 	u32 val, cr0, cr3, cr4;
2454 	int i;
2455 
2456 	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
2457 	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
2458 	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2459 	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
2460 
2461 	for (i = 0; i < 8; i++)
2462 		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2463 
2464 	val = GET_SMSTATE(u32, smbase, 0x7fcc);
2465 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2466 	val = GET_SMSTATE(u32, smbase, 0x7fc8);
2467 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2468 
2469 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
2470 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
2471 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
2472 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
2473 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2474 
2475 	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
2476 	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
2477 	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
2478 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
2479 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2480 
2481 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
2482 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
2483 	ctxt->ops->set_gdt(ctxt, &dt);
2484 
2485 	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
2486 	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
2487 	ctxt->ops->set_idt(ctxt, &dt);
2488 
2489 	for (i = 0; i < 6; i++) {
2490 		int r = rsm_load_seg_32(ctxt, smbase, i);
2491 		if (r != X86EMUL_CONTINUE)
2492 			return r;
2493 	}
2494 
2495 	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2496 
2497 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2498 
2499 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2500 }
2501 
2502 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2503 {
2504 	struct desc_struct desc;
2505 	struct desc_ptr dt;
2506 	u64 val, cr0, cr3, cr4;
2507 	u32 base3;
2508 	u16 selector;
2509 	int i, r;
2510 
2511 	for (i = 0; i < 16; i++)
2512 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2513 
2514 	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
2515 	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2516 
2517 	val = GET_SMSTATE(u32, smbase, 0x7f68);
2518 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2519 	val = GET_SMSTATE(u32, smbase, 0x7f60);
2520 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2521 
2522 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
2523 	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
2524 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
2525 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2526 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
2527 	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2528 
2529 	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
2530 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2531 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
2532 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
2533 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
2534 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2535 
2536 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
2537 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
2538 	ctxt->ops->set_idt(ctxt, &dt);
2539 
2540 	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
2541 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2542 	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
2543 	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
2544 	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
2545 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2546 
2547 	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
2548 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
2549 	ctxt->ops->set_gdt(ctxt, &dt);
2550 
2551 	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2552 	if (r != X86EMUL_CONTINUE)
2553 		return r;
2554 
2555 	for (i = 0; i < 6; i++) {
2556 		r = rsm_load_seg_64(ctxt, smbase, i);
2557 		if (r != X86EMUL_CONTINUE)
2558 			return r;
2559 	}
2560 
2561 	return X86EMUL_CONTINUE;
2562 }
2563 
2564 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2565 {
2566 	unsigned long cr0, cr4, efer;
2567 	u64 smbase;
2568 	int ret;
2569 
2570 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2571 		return emulate_ud(ctxt);
2572 
2573 	/*
2574 	 * Get back to real mode, to prepare a safe state in which to load
2575 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2576 	 * supports long mode.
2577 	 */
2578 	cr4 = ctxt->ops->get_cr(ctxt, 4);
2579 	if (emulator_has_longmode(ctxt)) {
2580 		struct desc_struct cs_desc;
2581 
2582 		/* Zero CR4.PCIDE before CR0.PG.  */
2583 		if (cr4 & X86_CR4_PCIDE) {
2584 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2585 			cr4 &= ~X86_CR4_PCIDE;
2586 		}
2587 
2588 		/* A 32-bit code segment is required to clear EFER.LMA.  */
2589 		memset(&cs_desc, 0, sizeof(cs_desc));
2590 		cs_desc.type = 0xb;
2591 		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2592 		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2593 	}
2594 
2595 	/* For the 64-bit case, this will clear EFER.LMA.  */
2596 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2597 	if (cr0 & X86_CR0_PE)
2598 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2599 
2600 	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
2601 	if (cr4 & X86_CR4_PAE)
2602 		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2603 
2604 	/* And finally go back to 32-bit mode.  */
2605 	efer = 0;
2606 	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2607 
2608 	smbase = ctxt->ops->get_smbase(ctxt);
2609 
2610 	/*
2611 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2612 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2613 	 * state-save area.
2614 	 */
2615 	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
2616 		return X86EMUL_UNHANDLEABLE;
2617 
2618 	if (emulator_has_longmode(ctxt))
2619 		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2620 	else
2621 		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2622 
2623 	if (ret != X86EMUL_CONTINUE) {
2624 		/* FIXME: should triple fault */
2625 		return X86EMUL_UNHANDLEABLE;
2626 	}
2627 
2628 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2629 		ctxt->ops->set_nmi_mask(ctxt, false);
2630 
2631 	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2632 		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2633 	return X86EMUL_CONTINUE;
2634 }
2635 
2636 static void
2637 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2638 			struct desc_struct *cs, struct desc_struct *ss)
2639 {
2640 	cs->l = 0;		/* will be adjusted later */
2641 	set_desc_base(cs, 0);	/* flat segment */
2642 	cs->g = 1;		/* 4kb granularity */
2643 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2644 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2645 	cs->s = 1;
2646 	cs->dpl = 0;		/* will be adjusted later */
2647 	cs->p = 1;
2648 	cs->d = 1;
2649 	cs->avl = 0;
2650 
2651 	set_desc_base(ss, 0);	/* flat segment */
2652 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2653 	ss->g = 1;		/* 4kb granularity */
2654 	ss->s = 1;
2655 	ss->type = 0x03;	/* Read/Write, Accessed */
2656 	ss->d = 1;		/* 32bit stack segment */
2657 	ss->dpl = 0;
2658 	ss->p = 1;
2659 	ss->l = 0;
2660 	ss->avl = 0;
2661 }
2662 
2663 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2664 {
2665 	u32 eax, ebx, ecx, edx;
2666 
2667 	eax = ecx = 0;
2668 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2669 	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2670 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2671 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2672 }
2673 
2674 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2675 {
2676 	const struct x86_emulate_ops *ops = ctxt->ops;
2677 	u32 eax, ebx, ecx, edx;
2678 
2679 	/*
2680 	 * syscall is always enabled in long mode, so the vendor-specific
2681 	 * (cpuid) checks below only matter when another mode is active.
2682 	 */
2683 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2684 		return true;
2685 
2686 	eax = 0x00000000;
2687 	ecx = 0x00000000;
2688 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2689 	/*
2690 	 * Intel ("GenuineIntel"): Intel CPUs only support "syscall" in
2691 	 * 64-bit long mode, so even a 32-bit compat application running
2692 	 * under a 64-bit guest will #UD.  While this behaviour could be
2693 	 * fixed by emulating the AMD response, AMD CPUs can't be made to
2694 	 * behave like Intel, so stay faithful to each vendor here.
2695 	 */
2697 	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2698 	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2699 	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2700 		return false;
2701 
2702 	/* AMD ("AuthenticAMD") */
2703 	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2704 	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2705 	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2706 		return true;
2707 
2708 	/* AMD ("AMDisbetter!") */
2709 	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2710 	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2711 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2712 		return true;
2713 
2714 	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2715 	return false;
2716 }
2717 
2718 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2719 {
2720 	const struct x86_emulate_ops *ops = ctxt->ops;
2721 	struct desc_struct cs, ss;
2722 	u64 msr_data;
2723 	u16 cs_sel, ss_sel;
2724 	u64 efer = 0;
2725 
2726 	/* syscall is not available in real mode or VM86 mode */
2727 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2728 	    ctxt->mode == X86EMUL_MODE_VM86)
2729 		return emulate_ud(ctxt);
2730 
2731 	if (!(em_syscall_is_enabled(ctxt)))
2732 		return emulate_ud(ctxt);
2733 
2734 	ops->get_msr(ctxt, MSR_EFER, &efer);
2735 	setup_syscalls_segments(ctxt, &cs, &ss);
2736 
2737 	if (!(efer & EFER_SCE))
2738 		return emulate_ud(ctxt);
2739 
2740 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2741 	msr_data >>= 32;
2742 	cs_sel = (u16)(msr_data & 0xfffc);
2743 	ss_sel = (u16)(msr_data + 8);
2744 
2745 	if (efer & EFER_LMA) {
2746 		cs.d = 0;
2747 		cs.l = 1;
2748 	}
2749 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2750 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2751 
2752 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2753 	if (efer & EFER_LMA) {
2754 #ifdef CONFIG_X86_64
2755 		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2756 
2757 		ops->get_msr(ctxt,
2758 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2759 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2760 		ctxt->_eip = msr_data;
2761 
2762 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2763 		ctxt->eflags &= ~msr_data;
2764 		ctxt->eflags |= X86_EFLAGS_FIXED;
2765 #endif
2766 	} else {
2767 		/* legacy mode */
2768 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2769 		ctxt->_eip = (u32)msr_data;
2770 
2771 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2772 	}
2773 
2774 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2775 	return X86EMUL_CONTINUE;
2776 }
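
/*
 * Illustrative sketch, not part of the emulator: SYSCALL derives both
 * target selectors from MSR_STAR[47:32] -- CS from the value itself with
 * the RPL bits cleared, SS from the next GDT slot -- matching the
 * cs_sel/ss_sel computation above.  Hypothetical helper:
 */
static void example_star_selectors(unsigned long long star,
				   unsigned short *cs_sel,
				   unsigned short *ss_sel)
{
	unsigned int syscall_cs = (unsigned int)(star >> 32);

	*cs_sel = (unsigned short)(syscall_cs & 0xfffc);	/* RPL 0 */
	*ss_sel = (unsigned short)(syscall_cs + 8);		/* next slot */
}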
2777 
2778 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2779 {
2780 	const struct x86_emulate_ops *ops = ctxt->ops;
2781 	struct desc_struct cs, ss;
2782 	u64 msr_data;
2783 	u16 cs_sel, ss_sel;
2784 	u64 efer = 0;
2785 
2786 	ops->get_msr(ctxt, MSR_EFER, &efer);
2787 	/* inject #GP if in real mode */
2788 	if (ctxt->mode == X86EMUL_MODE_REAL)
2789 		return emulate_gp(ctxt, 0);
2790 
2791 	/*
2792 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2793 	 * mode).
2794 	 */
2795 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2796 	    && !vendor_intel(ctxt))
2797 		return emulate_ud(ctxt);
2798 
2799 	/* sysenter/sysexit have not been tested in 64bit mode. */
2800 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2801 		return X86EMUL_UNHANDLEABLE;
2802 
2803 	setup_syscalls_segments(ctxt, &cs, &ss);
2804 
2805 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2806 	if ((msr_data & 0xfffc) == 0x0)
2807 		return emulate_gp(ctxt, 0);
2808 
2809 	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2810 	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2811 	ss_sel = cs_sel + 8;
2812 	if (efer & EFER_LMA) {
2813 		cs.d = 0;
2814 		cs.l = 1;
2815 	}
2816 
2817 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2818 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2819 
2820 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2821 	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2822 
2823 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2824 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2825 							      (u32)msr_data;
2826 
2827 	return X86EMUL_CONTINUE;
2828 }
2829 
2830 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2831 {
2832 	const struct x86_emulate_ops *ops = ctxt->ops;
2833 	struct desc_struct cs, ss;
2834 	u64 msr_data, rcx, rdx;
2835 	int usermode;
2836 	u16 cs_sel = 0, ss_sel = 0;
2837 
2838 	/* inject #GP if in real mode or Virtual 8086 mode */
2839 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2840 	    ctxt->mode == X86EMUL_MODE_VM86)
2841 		return emulate_gp(ctxt, 0);
2842 
2843 	setup_syscalls_segments(ctxt, &cs, &ss);
2844 
2845 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2846 		usermode = X86EMUL_MODE_PROT64;
2847 	else
2848 		usermode = X86EMUL_MODE_PROT32;
2849 
2850 	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2851 	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2852 
2853 	cs.dpl = 3;
2854 	ss.dpl = 3;
2855 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2856 	switch (usermode) {
2857 	case X86EMUL_MODE_PROT32:
2858 		cs_sel = (u16)(msr_data + 16);
2859 		if ((msr_data & 0xfffc) == 0x0)
2860 			return emulate_gp(ctxt, 0);
2861 		ss_sel = (u16)(msr_data + 24);
2862 		rcx = (u32)rcx;
2863 		rdx = (u32)rdx;
2864 		break;
2865 	case X86EMUL_MODE_PROT64:
2866 		cs_sel = (u16)(msr_data + 32);
2867 		if (msr_data == 0x0)
2868 			return emulate_gp(ctxt, 0);
2869 		ss_sel = cs_sel + 8;
2870 		cs.d = 0;
2871 		cs.l = 1;
2872 		if (emul_is_noncanonical_address(rcx, ctxt) ||
2873 		    emul_is_noncanonical_address(rdx, ctxt))
2874 			return emulate_gp(ctxt, 0);
2875 		break;
2876 	}
2877 	cs_sel |= SEGMENT_RPL_MASK;
2878 	ss_sel |= SEGMENT_RPL_MASK;
2879 
2880 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2881 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2882 
2883 	ctxt->_eip = rdx;
2884 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2885 
2886 	return X86EMUL_CONTINUE;
2887 }
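
/*
 * Illustrative sketch, not part of the emulator: SYSEXIT builds its
 * return selectors from MSR_IA32_SYSENTER_CS -- +16/+24 for a 32-bit
 * return, +32/+40 for a 64-bit one -- and forces RPL 3, matching the
 * switch above.  Hypothetical helper:
 */
static void example_sysexit_selectors(unsigned short sysenter_cs, int to64,
				      unsigned short *cs_sel,
				      unsigned short *ss_sel)
{
	*cs_sel = sysenter_cs + (to64 ? 32 : 16);
	*ss_sel = *cs_sel + 8;		/* SS is always the next slot */
	*cs_sel |= 3;			/* user RPL */
	*ss_sel |= 3;
}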
2888 
2889 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2890 {
2891 	int iopl;
2892 	if (ctxt->mode == X86EMUL_MODE_REAL)
2893 		return false;
2894 	if (ctxt->mode == X86EMUL_MODE_VM86)
2895 		return true;
2896 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2897 	return ctxt->ops->cpl(ctxt) > iopl;
2898 }
2899 
2900 #define VMWARE_PORT_VMPORT	(0x5658)
2901 #define VMWARE_PORT_VMRPC	(0x5659)
2902 
2903 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2904 					    u16 port, u16 len)
2905 {
2906 	const struct x86_emulate_ops *ops = ctxt->ops;
2907 	struct desc_struct tr_seg;
2908 	u32 base3;
2909 	int r;
2910 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2911 	unsigned mask = (1 << len) - 1;
2912 	unsigned long base;
2913 
2914 	/*
2915 	 * VMware allows access to these ports even if denied by the TSS
2916 	 * I/O permission bitmap.  Mimic the behavior.
2917 	 */
2918 	if (enable_vmware_backdoor &&
2919 	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2920 		return true;
2921 
2922 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2923 	if (!tr_seg.p)
2924 		return false;
2925 	if (desc_limit_scaled(&tr_seg) < 103)
2926 		return false;
2927 	base = get_desc_base(&tr_seg);
2928 #ifdef CONFIG_X86_64
2929 	base |= ((u64)base3) << 32;
2930 #endif
2931 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2932 	if (r != X86EMUL_CONTINUE)
2933 		return false;
2934 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2935 		return false;
2936 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2937 	if (r != X86EMUL_CONTINUE)
2938 		return false;
2939 	if ((perm >> bit_idx) & mask)
2940 		return false;
2941 	return true;
2942 }
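
/*
 * Illustrative sketch, not part of the emulator: the TSS check above
 * boils down to testing 'len' consecutive bits starting at bit 'port' of
 * the I/O permission bitmap; access is allowed only if all are clear.
 * A hypothetical stand-alone version over an in-memory bitmap (two bytes
 * are read so the run of bits may straddle a byte boundary):
 */
static bool example_io_allowed(const unsigned char *bitmap, u16 port, u16 len)
{
	u16 perm;
	unsigned int mask = (1U << len) - 1;

	memcpy(&perm, bitmap + port / 8, 2);	/* little-endian, as on x86 */
	return !((perm >> (port & 0x7)) & mask);
}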
2943 
2944 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2945 				 u16 port, u16 len)
2946 {
2947 	if (ctxt->perm_ok)
2948 		return true;
2949 
2950 	if (emulator_bad_iopl(ctxt))
2951 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2952 			return false;
2953 
2954 	ctxt->perm_ok = true;
2955 
2956 	return true;
2957 }
2958 
2959 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2960 {
2961 	/*
2962 	 * Intel CPUs mask the counter and pointers in a rather strange
2963 	 * manner when ECX is zero, due to REP-string optimizations.
2964 	 */
2965 #ifdef CONFIG_X86_64
2966 	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2967 		return;
2968 
2969 	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2970 
2971 	switch (ctxt->b) {
2972 	case 0xa4:	/* movsb */
2973 	case 0xa5:	/* movsd/w */
2974 		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2975 		/* fall through */
2976 	case 0xaa:	/* stosb */
2977 	case 0xab:	/* stosd/w */
2978 		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2979 	}
2980 #endif
2981 }
2982 
2983 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2984 				struct tss_segment_16 *tss)
2985 {
2986 	tss->ip = ctxt->_eip;
2987 	tss->flag = ctxt->eflags;
2988 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2989 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2990 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2991 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2992 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2993 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2994 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2995 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2996 
2997 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2998 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2999 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3000 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3001 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3002 }
3003 
3004 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3005 				 struct tss_segment_16 *tss)
3006 {
3007 	int ret;
3008 	u8 cpl;
3009 
3010 	ctxt->_eip = tss->ip;
3011 	ctxt->eflags = tss->flag | 2;
3012 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3013 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3014 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3015 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3016 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3017 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3018 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3019 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3020 
3021 	/*
3022 	 * SDM says that segment selectors are loaded before segment
3023 	 * descriptors
3024 	 */
3025 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3026 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3027 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3028 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3029 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3030 
3031 	cpl = tss->cs & 3;
3032 
3033 	/*
3034 	 * Now load the segment descriptors.  If a fault happens at this
3035 	 * stage, it is handled in the context of the new task.
3036 	 */
3037 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3038 					X86_TRANSFER_TASK_SWITCH, NULL);
3039 	if (ret != X86EMUL_CONTINUE)
3040 		return ret;
3041 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3042 					X86_TRANSFER_TASK_SWITCH, NULL);
3043 	if (ret != X86EMUL_CONTINUE)
3044 		return ret;
3045 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3046 					X86_TRANSFER_TASK_SWITCH, NULL);
3047 	if (ret != X86EMUL_CONTINUE)
3048 		return ret;
3049 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3050 					X86_TRANSFER_TASK_SWITCH, NULL);
3051 	if (ret != X86EMUL_CONTINUE)
3052 		return ret;
3053 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3054 					X86_TRANSFER_TASK_SWITCH, NULL);
3055 	if (ret != X86EMUL_CONTINUE)
3056 		return ret;
3057 
3058 	return X86EMUL_CONTINUE;
3059 }
3060 
3061 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3062 			  u16 tss_selector, u16 old_tss_sel,
3063 			  ulong old_tss_base, struct desc_struct *new_desc)
3064 {
3065 	struct tss_segment_16 tss_seg;
3066 	int ret;
3067 	u32 new_tss_base = get_desc_base(new_desc);
3068 
3069 	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3070 	if (ret != X86EMUL_CONTINUE)
3071 		return ret;
3072 
3073 	save_state_to_tss16(ctxt, &tss_seg);
3074 
3075 	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3076 	if (ret != X86EMUL_CONTINUE)
3077 		return ret;
3078 
3079 	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3080 	if (ret != X86EMUL_CONTINUE)
3081 		return ret;
3082 
3083 	if (old_tss_sel != 0xffff) {
3084 		tss_seg.prev_task_link = old_tss_sel;
3085 
3086 		ret = linear_write_system(ctxt, new_tss_base,
3087 					  &tss_seg.prev_task_link,
3088 					  sizeof tss_seg.prev_task_link);
3089 		if (ret != X86EMUL_CONTINUE)
3090 			return ret;
3091 	}
3092 
3093 	return load_state_from_tss16(ctxt, &tss_seg);
3094 }
3095 
3096 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3097 				struct tss_segment_32 *tss)
3098 {
3099 	/* CR3 and the LDT selector are intentionally not saved */
3100 	tss->eip = ctxt->_eip;
3101 	tss->eflags = ctxt->eflags;
3102 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3103 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3104 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3105 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3106 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3107 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3108 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3109 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3110 
3111 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3112 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3113 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3114 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3115 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3116 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3117 }
3118 
3119 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3120 				 struct tss_segment_32 *tss)
3121 {
3122 	int ret;
3123 	u8 cpl;
3124 
3125 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3126 		return emulate_gp(ctxt, 0);
3127 	ctxt->_eip = tss->eip;
3128 	ctxt->eflags = tss->eflags | 2;
3129 
3130 	/* General purpose registers */
3131 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3132 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3133 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3134 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3135 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3136 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3137 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3138 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3139 
3140 	/*
3141 	 * SDM says that segment selectors are loaded before segment
3142 	 * descriptors.  This is important because CPL checks will
3143 	 * use CS.RPL.
3144 	 */
3145 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3146 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3147 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3148 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3149 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3150 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3151 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3152 
3153 	/*
3154 	 * If we're switching between Protected Mode and VM86, we need to make
3155 	 * sure to update the mode before loading the segment descriptors so
3156 	 * that the selectors are interpreted correctly.
3157 	 */
3158 	if (ctxt->eflags & X86_EFLAGS_VM) {
3159 		ctxt->mode = X86EMUL_MODE_VM86;
3160 		cpl = 3;
3161 	} else {
3162 		ctxt->mode = X86EMUL_MODE_PROT32;
3163 		cpl = tss->cs & 3;
3164 	}
3165 
3166 	/*
3167 	 * Now load the segment descriptors.  If a fault happens at this
3168 	 * stage, it is handled in the context of the new task.
3169 	 */
3170 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3171 					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3172 	if (ret != X86EMUL_CONTINUE)
3173 		return ret;
3174 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3175 					X86_TRANSFER_TASK_SWITCH, NULL);
3176 	if (ret != X86EMUL_CONTINUE)
3177 		return ret;
3178 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3179 					X86_TRANSFER_TASK_SWITCH, NULL);
3180 	if (ret != X86EMUL_CONTINUE)
3181 		return ret;
3182 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3183 					X86_TRANSFER_TASK_SWITCH, NULL);
3184 	if (ret != X86EMUL_CONTINUE)
3185 		return ret;
3186 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3187 					X86_TRANSFER_TASK_SWITCH, NULL);
3188 	if (ret != X86EMUL_CONTINUE)
3189 		return ret;
3190 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3191 					X86_TRANSFER_TASK_SWITCH, NULL);
3192 	if (ret != X86EMUL_CONTINUE)
3193 		return ret;
3194 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3195 					X86_TRANSFER_TASK_SWITCH, NULL);
3196 
3197 	return ret;
3198 }
3199 
3200 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3201 			  u16 tss_selector, u16 old_tss_sel,
3202 			  ulong old_tss_base, struct desc_struct *new_desc)
3203 {
3204 	struct tss_segment_32 tss_seg;
3205 	int ret;
3206 	u32 new_tss_base = get_desc_base(new_desc);
3207 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
3208 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3209 
3210 	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3211 	if (ret != X86EMUL_CONTINUE)
3212 		return ret;
3213 
3214 	save_state_to_tss32(ctxt, &tss_seg);
3215 
3216 	/* Only GP registers and segment selectors are saved */
3217 	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3218 				  ldt_sel_offset - eip_offset);
3219 	if (ret != X86EMUL_CONTINUE)
3220 		return ret;
3221 
3222 	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3223 	if (ret != X86EMUL_CONTINUE)
3224 		return ret;
3225 
3226 	if (old_tss_sel != 0xffff) {
3227 		tss_seg.prev_task_link = old_tss_sel;
3228 
3229 		ret = linear_write_system(ctxt, new_tss_base,
3230 					  &tss_seg.prev_task_link,
3231 					  sizeof tss_seg.prev_task_link);
3232 		if (ret != X86EMUL_CONTINUE)
3233 			return ret;
3234 	}
3235 
3236 	return load_state_from_tss32(ctxt, &tss_seg);
3237 }
3238 
3239 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3240 				   u16 tss_selector, int idt_index, int reason,
3241 				   bool has_error_code, u32 error_code)
3242 {
3243 	const struct x86_emulate_ops *ops = ctxt->ops;
3244 	struct desc_struct curr_tss_desc, next_tss_desc;
3245 	int ret;
3246 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3247 	ulong old_tss_base =
3248 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3249 	u32 desc_limit;
3250 	ulong desc_addr, dr7;
3251 
3252 	/* FIXME: old_tss_base == ~0 ? */
3253 
3254 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3255 	if (ret != X86EMUL_CONTINUE)
3256 		return ret;
3257 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3258 	if (ret != X86EMUL_CONTINUE)
3259 		return ret;
3260 
3261 	/* FIXME: check that next_tss_desc is tss */
3262 
3263 	/*
3264 	 * Check privileges. The three cases are task switch caused by...
3265 	 *
3266 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3267 	 * 2. Exception/IRQ/iret: No check is performed
3268 	 * 3. jmp/call to TSS/task-gate: No check is performed since the
3269 	 *    hardware checks it before exiting.
3270 	 */
3271 	if (reason == TASK_SWITCH_GATE) {
3272 		if (idt_index != -1) {
3273 			/* Software interrupts */
3274 			struct desc_struct task_gate_desc;
3275 			int dpl;
3276 
3277 			ret = read_interrupt_descriptor(ctxt, idt_index,
3278 							&task_gate_desc);
3279 			if (ret != X86EMUL_CONTINUE)
3280 				return ret;
3281 
3282 			dpl = task_gate_desc.dpl;
3283 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3284 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3285 		}
3286 	}
3287 
3288 	desc_limit = desc_limit_scaled(&next_tss_desc);
3289 	if (!next_tss_desc.p ||
3290 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3291 	     desc_limit < 0x2b)) {
3292 		return emulate_ts(ctxt, tss_selector & 0xfffc);
3293 	}
3294 
3295 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3296 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3297 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3298 	}
3299 
3300 	if (reason == TASK_SWITCH_IRET)
3301 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3302 
3303 	/* set back link to prev task only if NT bit is set in eflags;
3304 	   note that old_tss_sel is not used after this point */
3305 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3306 		old_tss_sel = 0xffff;
3307 
3308 	if (next_tss_desc.type & 8)
3309 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3310 				     old_tss_base, &next_tss_desc);
3311 	else
3312 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3313 				     old_tss_base, &next_tss_desc);
3314 	if (ret != X86EMUL_CONTINUE)
3315 		return ret;
3316 
3317 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3318 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3319 
3320 	if (reason != TASK_SWITCH_IRET) {
3321 		next_tss_desc.type |= (1 << 1); /* set busy flag */
3322 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3323 	}
3324 
3325 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3326 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3327 
3328 	if (has_error_code) {
3329 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3330 		ctxt->lock_prefix = 0;
3331 		ctxt->src.val = (unsigned long) error_code;
3332 		ret = em_push(ctxt);
3333 	}
3334 
3335 	ops->get_dr(ctxt, 7, &dr7);
3336 	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3337 
3338 	return ret;
3339 }
3340 
emulator_task_switch(struct x86_emulate_ctxt * ctxt,u16 tss_selector,int idt_index,int reason,bool has_error_code,u32 error_code)3341 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3342 			 u16 tss_selector, int idt_index, int reason,
3343 			 bool has_error_code, u32 error_code)
3344 {
3345 	int rc;
3346 
3347 	invalidate_registers(ctxt);
3348 	ctxt->_eip = ctxt->eip;
3349 	ctxt->dst.type = OP_NONE;
3350 
3351 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3352 				     has_error_code, error_code);
3353 
3354 	if (rc == X86EMUL_CONTINUE) {
3355 		ctxt->eip = ctxt->_eip;
3356 		writeback_registers(ctxt);
3357 	}
3358 
3359 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3360 }
3361 
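/*
 * Step a string instruction's index register (SI or DI) by the operand
 * size, times the number of iterations handled in this pass, moving
 * backwards when EFLAGS.DF is set, and refresh the cached address.
 */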
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

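/*
 * DAS: decimal adjust AL after subtraction, converting the binary
 * result of a packed-BCD subtraction back to packed BCD.  Only AL and
 * the arithmetic flags are affected.
 */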
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

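/*
 * AAM: divide AL by the immediate (10 unless overridden), quotient to
 * AH, remainder to AL; a zero divisor raises #DE.  E.g. AL = 0x35 (53)
 * with the default base yields AH = 5, AL = 3.
 */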
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

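/*
 * AAD: fold AH into AL (AL = AL + AH * imm, AH = 0) ahead of a
 * division; e.g. AH = 5, AL = 3 with the default base 10 gives
 * AL = 53 (0x35).
 */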
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

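/*
 * Near relative CALL: take the branch first, so that a faulting target
 * leaves the stack untouched, and only then push the old EIP.
 */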
static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but at the very least we
	   should restore cs. */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

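/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit throughout
 * DX/EDX/RDX.
 */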
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc_aux = 0;

	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
		return emulate_gp(ctxt, 0);
	ctxt->dst.val = tsc_aux;
	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Casting either ->valptr or ->val to u16 would break strict
		 * aliasing rules, so we have to do the operation almost by
		 * hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
{
	if (segment > VCPU_SREG_GS &&
	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	ctxt->dst.val = get_segment_selector(ctxt, segment);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	return em_store_sreg(ctxt, ctxt->modrm_reg);
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_sldt(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_str(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_TR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
				   &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

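/*
 * LMSW replaces CR0 bits 1-3 (MP/EM/TS) and can set, but never clear,
 * CR0.PE.
 */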
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

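/*
 * LOOP/LOOPE/LOOPNE: decrement *CX and branch while it is non-zero;
 * for 0xe0/0xe1, opcode ^ 0x5 yields the condition code (NE/E) that
 * test_cc() checks against ZF.
 */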
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

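/*
 * BSWAP with a 16-bit operand is architecturally undefined; anything
 * other than a 64-bit operand is treated as a 32-bit byte swap here.
 */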
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	u32 eax = 1, ebx, ecx = 0, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	if (!(edx & FFL(FXSR)))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       save and restore
 *  3) 64-bit mode with REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode without REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}

/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	return rc;
}

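/*
 * Only CR0, CR2-CR4 and CR8 exist; a mov to or from any other control
 * register is #UD.
 */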
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;

		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA) {
			u64 maxphyaddr;
			u32 eax, ebx, ecx, edx;

			eax = 0x80000008;
			ecx = 0;
			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
						 &edx, false))
				maxphyaddr = eax & 0xff;
			else
				maxphyaddr = 36;
			rsvd = rsvd_bits(maxphyaddr, 63);
			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
				rsvd &= ~X86_CR3_PCID_NOFLUSH;
		}

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	/*
	 * VMware allows access to these pseudo-PMCs even when read via RDPMC
	 * in Ring3 when CR4.PCE=0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

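/*
 * Shorthand for building the opcode tables below: D() carries decode
 * flags only, I()/F() attach an execute or fastop callback, G()/GD()
 * indirect through a (dual) group table selected by ModRM.reg, E()
 * through an FPU escape table and GP() through a mandatory-prefix
 * table.  The 2bv variants expand into byte and word/long forms.
 */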
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | NearBranch,			em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps,		em_call_far),
	I(SrcMem | NearBranch,			em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
};

static const struct opcode group6[] = {
	II(Prot | DstMem,	   em_sldt, sldt),
	II(Prot | DstMem,	   em_str, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N,
	GP(0, &pfx_0f_c7_7),
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_10_0f_11 = {
	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are selected by the prefix; the table itself is indexed by
 * the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU

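/*
 * Immediates are at most 4 bytes even with a 64-bit operand size; only
 * OpImm64 fetches a full 8 bytes.
 */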
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

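/*
 * Fetch an immediate operand of the given size from the instruction
 * stream, sign-extending it by default and masking the extension back
 * off when zero extension was asked for.
 */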
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

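/*
 * Decode a single operand from its OpXxx descriptor; the memory-like
 * cases all funnel through mem_common so they share ctxt->memop.
 */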
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

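/*
 * Top-level instruction decode: pick default operand/address sizes from
 * the execution mode, consume legacy and REX prefixes, then look the
 * opcode up in the one-, two- or three-byte tables.
 */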
x86_decode_insn(struct x86_emulate_ctxt * ctxt,void * insn,int insn_len)5087 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5088 {
5089 	int rc = X86EMUL_CONTINUE;
5090 	int mode = ctxt->mode;
5091 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5092 	bool op_prefix = false;
5093 	bool has_seg_override = false;
5094 	struct opcode opcode;
5095 	u16 dummy;
5096 	struct desc_struct desc;
5097 
5098 	ctxt->memop.type = OP_NONE;
5099 	ctxt->memopp = NULL;
5100 	ctxt->_eip = ctxt->eip;
5101 	ctxt->fetch.ptr = ctxt->fetch.data;
5102 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
5103 	ctxt->opcode_len = 1;
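	/*
	 * Prime the fetch cache: reuse any bytes the caller already
	 * fetched, otherwise pull in at least one byte so the fetch
	 * address is validated before decode begins.
	 */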
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
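	/*
	 * The override prefixes below toggle between the two legal sizes
	 * with an XOR trick: 2 ^ 6 == 4 and 4 ^ 6 == 2 for operand size,
	 * 4 ^ 12 == 8 and 8 ^ 12 == 4 for address size in 64-bit mode.
	 * Note that 64-bit mode defaults to 4-byte operands and 8-byte
	 * addresses; REX.W (handled after this loop) widens the operand
	 * size to 8 bytes.
	 */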
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
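		/*
		 * The segment-override prefixes map onto the VCPU_SREG_*
		 * indices arithmetically: 0x26/0x2e/0x36/0x3e yield
		 * (prefix >> 3) & 3 == 0..3 (ES/CS/SS/DS), while 0x64/0x65
		 * yield (prefix & 7) == 4/5 (FS/GS).
		 */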
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/*
	 * VEX-encoded instructions are not implemented.  In legacy modes
	 * 0xc4/0xc5 with ModRM.mod != 3 encode LES/LDS, so only the
	 * register forms are VEX there; in 64-bit mode LES/LDS are
	 * invalid and 0xc4/0xc5 always start a VEX prefix.
	 */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

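	/*
	 * Walk the group/extension tables until the flags settle: each
	 * level uses ModRM bits (typically the reg field, bits 5:3) or
	 * the mandatory SIMD prefix to select the next opcode entry, then
	 * folds that entry's flags into ctxt->d.
	 */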
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

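	/*
	 * RIP-relative addressing is relative to the *next* instruction,
	 * so the fixup must wait until the entire instruction, including
	 * any immediates, has been fetched and ctxt->_eip points past it.
	 */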
	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE and
	 * REPNE.  Test whether the opcode is CMPS (0xa6/0xa7) or SCAS
	 * (0xae/0xaf) and, if so, check the condition that matches the
	 * repeat prefix:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

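/*
 * fwait forces delivery of any pending x87 exception; asm_safe() turns
 * the resulting fault into an error return, which is reflected back to
 * the guest as #MF.
 */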
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = asm_safe("fwait");

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

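/*
 * Fastop handlers are emitted as a table of stubs, one per operand size,
 * spaced FASTOP_SIZE bytes apart; __ffs() maps dst.bytes 1/2/4/8 to stub
 * index 0/1/2/3.  Per the asm constraints below, dst travels in RAX, src
 * in RDX, src2 in RCX, the call target in RSI and the flags image in RDI;
 * a stub can clear the target register to request a #DE.  X86_EFLAGS_IF
 * is forced on so that the popf cannot disable host interrupts.
 */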
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

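/*
 * Zero the per-instruction decode state in one memset; this relies on
 * the fields from rip_relative up to (but not including) modrm being
 * contiguous in struct x86_emulate_ctxt.
 */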
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception-safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B.  */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

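	/*
	 * Only opcodes without a dedicated ->execute handler reach the
	 * switches below; everything else was dispatched above.
	 */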
	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;  /* cbw: AL -> AX */
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; /* cwde: AX -> EAX */
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; /* cdqe: EAX -> RAX */
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

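	/*
	 * Advance RSI/RDI for string operands; string_addr_inc() is
	 * expected to honor EFLAGS.DF and step backwards when the
	 * direction flag is set.
	 */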
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

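/*
 * A cached guest physical address is usable only when the instruction
 * touches a single memory location: REP string instructions iterate,
 * and TwoMemOp instructions (e.g. MOVS, CMPS) access two different
 * addresses.
 */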
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}