xref: /linux/arch/x86/kvm/emulate.c (revision 9a6b55ac)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3  * emulate.c
4  *
5  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6  *
7  * Copyright (c) 2005 Keir Fraser
8  *
9  * Linux coding style, mod r/m decoder, segment base fixes, real-mode
10  * privileged instructions:
11  *
12  * Copyright (C) 2006 Qumranet
13  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14  *
15  *   Avi Kivity <avi@qumranet.com>
16  *   Yaniv Kamay <yaniv@qumranet.com>
17  *
18  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
19  */
20 
21 #include <linux/kvm_host.h>
22 #include "kvm_cache_regs.h"
23 #include <asm/kvm_emulate.h>
24 #include <linux/stringify.h>
25 #include <asm/debugreg.h>
26 #include <asm/nospec-branch.h>
27 
28 #include "x86.h"
29 #include "tss.h"
30 #include "mmu.h"
31 #include "pmu.h"
32 
33 /*
34  * Operand types
35  */
36 #define OpNone             0ull
37 #define OpImplicit         1ull  /* No generic decode */
38 #define OpReg              2ull  /* Register */
39 #define OpMem              3ull  /* Memory */
40 #define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
41 #define OpDI               5ull  /* ES:DI/EDI/RDI */
42 #define OpMem64            6ull  /* Memory, 64-bit */
43 #define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
44 #define OpDX               8ull  /* DX register */
45 #define OpCL               9ull  /* CL register (for shifts) */
46 #define OpImmByte         10ull  /* 8-bit sign extended immediate */
47 #define OpOne             11ull  /* Implied 1 */
48 #define OpImm             12ull  /* Sign extended up to 32-bit immediate */
49 #define OpMem16           13ull  /* Memory operand (16-bit). */
50 #define OpMem32           14ull  /* Memory operand (32-bit). */
51 #define OpImmU            15ull  /* Immediate operand, zero extended */
52 #define OpSI              16ull  /* SI/ESI/RSI */
53 #define OpImmFAddr        17ull  /* Immediate far address */
54 #define OpMemFAddr        18ull  /* Far address in memory */
55 #define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
56 #define OpES              20ull  /* ES */
57 #define OpCS              21ull  /* CS */
58 #define OpSS              22ull  /* SS */
59 #define OpDS              23ull  /* DS */
60 #define OpFS              24ull  /* FS */
61 #define OpGS              25ull  /* GS */
62 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
63 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
64 #define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
65 #define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
66 #define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
67 
68 #define OpBits             5  /* Width of operand field */
69 #define OpMask             ((1ull << OpBits) - 1)
70 
71 /*
72  * Opcode effective-address decode tables.
73  * Note that we only emulate instructions that have at least one memory
74  * operand (excluding implicit stack references). We assume that stack
75  * references and instruction fetches will never occur in special memory
76  * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
77  * not be handled.
78  */
79 
80 /* Operand sizes: 8-bit operands or specified/overridden size. */
81 #define ByteOp      (1<<0)	/* 8-bit operands. */
82 /* Destination operand type. */
83 #define DstShift    1
84 #define ImplicitOps (OpImplicit << DstShift)
85 #define DstReg      (OpReg << DstShift)
86 #define DstMem      (OpMem << DstShift)
87 #define DstAcc      (OpAcc << DstShift)
88 #define DstDI       (OpDI << DstShift)
89 #define DstMem64    (OpMem64 << DstShift)
90 #define DstMem16    (OpMem16 << DstShift)
91 #define DstImmUByte (OpImmUByte << DstShift)
92 #define DstDX       (OpDX << DstShift)
93 #define DstAccLo    (OpAccLo << DstShift)
94 #define DstMask     (OpMask << DstShift)
95 /* Source operand type. */
96 #define SrcShift    6
97 #define SrcNone     (OpNone << SrcShift)
98 #define SrcReg      (OpReg << SrcShift)
99 #define SrcMem      (OpMem << SrcShift)
100 #define SrcMem16    (OpMem16 << SrcShift)
101 #define SrcMem32    (OpMem32 << SrcShift)
102 #define SrcImm      (OpImm << SrcShift)
103 #define SrcImmByte  (OpImmByte << SrcShift)
104 #define SrcOne      (OpOne << SrcShift)
105 #define SrcImmUByte (OpImmUByte << SrcShift)
106 #define SrcImmU     (OpImmU << SrcShift)
107 #define SrcSI       (OpSI << SrcShift)
108 #define SrcXLat     (OpXLat << SrcShift)
109 #define SrcImmFAddr (OpImmFAddr << SrcShift)
110 #define SrcMemFAddr (OpMemFAddr << SrcShift)
111 #define SrcAcc      (OpAcc << SrcShift)
112 #define SrcImmU16   (OpImmU16 << SrcShift)
113 #define SrcImm64    (OpImm64 << SrcShift)
114 #define SrcDX       (OpDX << SrcShift)
115 #define SrcMem8     (OpMem8 << SrcShift)
116 #define SrcAccHi    (OpAccHi << SrcShift)
117 #define SrcMask     (OpMask << SrcShift)
118 #define BitOp       (1<<11)
119 #define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
120 #define String      (1<<13)     /* String instruction (rep capable) */
121 #define Stack       (1<<14)     /* Stack instruction (push/pop) */
122 #define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
123 #define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
124 #define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
125 #define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
126 #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
127 #define Escape      (5<<15)     /* Escape to coprocessor instruction */
128 #define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
129 #define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
130 #define Sse         (1<<18)     /* SSE Vector instruction */
131 /* Generic ModRM decode. */
132 #define ModRM       (1<<19)
133 /* Destination is only written; never read. */
134 #define Mov         (1<<20)
135 /* Misc flags */
136 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
137 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
138 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
139 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
140 #define Undefined   (1<<25) /* No Such Instruction */
141 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
142 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
143 #define No64	    (1<<28)
144 #define PageTable   (1 << 29)   /* instruction used to write page table */
145 #define NotImpl     (1 << 30)   /* instruction is not implemented */
146 /* Source 2 operand type */
147 #define Src2Shift   (31)
148 #define Src2None    (OpNone << Src2Shift)
149 #define Src2Mem     (OpMem << Src2Shift)
150 #define Src2CL      (OpCL << Src2Shift)
151 #define Src2ImmByte (OpImmByte << Src2Shift)
152 #define Src2One     (OpOne << Src2Shift)
153 #define Src2Imm     (OpImm << Src2Shift)
154 #define Src2ES      (OpES << Src2Shift)
155 #define Src2CS      (OpCS << Src2Shift)
156 #define Src2SS      (OpSS << Src2Shift)
157 #define Src2DS      (OpDS << Src2Shift)
158 #define Src2FS      (OpFS << Src2Shift)
159 #define Src2GS      (OpGS << Src2Shift)
160 #define Src2Mask    (OpMask << Src2Shift)
161 #define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
162 #define AlignMask   ((u64)7 << 41)
163 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
164 #define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
165 #define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
166 #define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
167 #define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
168 #define NoWrite     ((u64)1 << 45)  /* No writeback */
169 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
170 #define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
171 #define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
172 #define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
173 #define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
174 #define NearBranch  ((u64)1 << 52)  /* Near branches */
175 #define No16	    ((u64)1 << 53)  /* No 16 bit operand */
176 #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
177 #define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operand */
178 
179 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
180 
181 #define X2(x...) x, x
182 #define X3(x...) X2(x), x
183 #define X4(x...) X2(x), X2(x)
184 #define X5(x...) X4(x), x
185 #define X6(x...) X4(x), X2(x)
186 #define X7(x...) X4(x), X3(x)
187 #define X8(x...) X4(x), X4(x)
188 #define X16(x...) X8(x), X8(x)
189 
190 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
191 #define FASTOP_SIZE 8
192 
193 /*
194  * fastop functions have a special calling convention:
195  *
196  * dst:    rax        (in/out)
197  * src:    rdx        (in/out)
198  * src2:   rcx        (in)
199  * flags:  rflags     (in/out)
200  * ex:     rsi        (in:fastop pointer, out:zero if exception)
201  *
202  * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
203  * different operand sizes can be reached by calculation, rather than a jump
204  * table (which would be bigger than the code).
205  *
206  * fastop functions are declared as taking a never-defined fastop parameter,
207  * so they can't be called from C directly.
208  */
209 
210 struct fastop;
211 
212 struct opcode {
213 	u64 flags : 56;
214 	u64 intercept : 8;
215 	union {
216 		int (*execute)(struct x86_emulate_ctxt *ctxt);
217 		const struct opcode *group;
218 		const struct group_dual *gdual;
219 		const struct gprefix *gprefix;
220 		const struct escape *esc;
221 		const struct instr_dual *idual;
222 		const struct mode_dual *mdual;
223 		void (*fastop)(struct fastop *fake);
224 	} u;
225 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
226 };
227 
228 struct group_dual {
229 	struct opcode mod012[8];
230 	struct opcode mod3[8];
231 };
232 
233 struct gprefix {
234 	struct opcode pfx_no;
235 	struct opcode pfx_66;
236 	struct opcode pfx_f2;
237 	struct opcode pfx_f3;
238 };
239 
240 struct escape {
241 	struct opcode op[8];
242 	struct opcode high[64];
243 };
244 
245 struct instr_dual {
246 	struct opcode mod012;
247 	struct opcode mod3;
248 };
249 
250 struct mode_dual {
251 	struct opcode mode32;
252 	struct opcode mode64;
253 };
254 
255 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
256 
257 enum x86_transfer_type {
258 	X86_TRANSFER_NONE,
259 	X86_TRANSFER_CALL_JMP,
260 	X86_TRANSFER_RET,
261 	X86_TRANSFER_TASK_SWITCH,
262 };
263 
264 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
265 {
266 	if (!(ctxt->regs_valid & (1 << nr))) {
267 		ctxt->regs_valid |= 1 << nr;
268 		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
269 	}
270 	return ctxt->_regs[nr];
271 }
272 
273 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
274 {
275 	ctxt->regs_valid |= 1 << nr;
276 	ctxt->regs_dirty |= 1 << nr;
277 	return &ctxt->_regs[nr];
278 }
279 
280 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
281 {
282 	reg_read(ctxt, nr);
283 	return reg_write(ctxt, nr);
284 }
285 
286 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
287 {
288 	unsigned reg;
289 
290 	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
291 		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
292 }
293 
294 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
295 {
296 	ctxt->regs_dirty = 0;
297 	ctxt->regs_valid = 0;
298 }
299 
300 /*
301  * These EFLAGS bits are restored from saved value during emulation, and
302  * any changes are written back to the saved value after emulation.
303  */
304 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
305 		     X86_EFLAGS_PF|X86_EFLAGS_CF)
306 
307 #ifdef CONFIG_X86_64
308 #define ON64(x) x
309 #else
310 #define ON64(x)
311 #endif
312 
313 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
314 
315 #define __FOP_FUNC(name) \
316 	".align " __stringify(FASTOP_SIZE) " \n\t" \
317 	".type " name ", @function \n\t" \
318 	name ":\n\t"
319 
320 #define FOP_FUNC(name) \
321 	__FOP_FUNC(#name)
322 
323 #define __FOP_RET(name) \
324 	"ret \n\t" \
325 	".size " name ", .-" name "\n\t"
326 
327 #define FOP_RET(name) \
328 	__FOP_RET(#name)
329 
330 #define FOP_START(op) \
331 	extern void em_##op(struct fastop *fake); \
332 	asm(".pushsection .text, \"ax\" \n\t" \
333 	    ".global em_" #op " \n\t" \
334 	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
335 	    "em_" #op ":\n\t"
336 
337 #define FOP_END \
338 	    ".popsection")
339 
340 #define __FOPNOP(name) \
341 	__FOP_FUNC(name) \
342 	__FOP_RET(name)
343 
344 #define FOPNOP() \
345 	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
346 
347 #define FOP1E(op,  dst) \
348 	__FOP_FUNC(#op "_" #dst) \
349 	"10: " #op " %" #dst " \n\t" \
350 	__FOP_RET(#op "_" #dst)
351 
352 #define FOP1EEX(op,  dst) \
353 	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
354 
355 #define FASTOP1(op) \
356 	FOP_START(op) \
357 	FOP1E(op##b, al) \
358 	FOP1E(op##w, ax) \
359 	FOP1E(op##l, eax) \
360 	ON64(FOP1E(op##q, rax))	\
361 	FOP_END
362 
363 /* 1-operand, using src2 (for MUL/DIV r/m) */
364 #define FASTOP1SRC2(op, name) \
365 	FOP_START(name) \
366 	FOP1E(op, cl) \
367 	FOP1E(op, cx) \
368 	FOP1E(op, ecx) \
369 	ON64(FOP1E(op, rcx)) \
370 	FOP_END
371 
372 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
373 #define FASTOP1SRC2EX(op, name) \
374 	FOP_START(name) \
375 	FOP1EEX(op, cl) \
376 	FOP1EEX(op, cx) \
377 	FOP1EEX(op, ecx) \
378 	ON64(FOP1EEX(op, rcx)) \
379 	FOP_END
380 
381 #define FOP2E(op,  dst, src)	   \
382 	__FOP_FUNC(#op "_" #dst "_" #src) \
383 	#op " %" #src ", %" #dst " \n\t" \
384 	__FOP_RET(#op "_" #dst "_" #src)
385 
386 #define FASTOP2(op) \
387 	FOP_START(op) \
388 	FOP2E(op##b, al, dl) \
389 	FOP2E(op##w, ax, dx) \
390 	FOP2E(op##l, eax, edx) \
391 	ON64(FOP2E(op##q, rax, rdx)) \
392 	FOP_END
393 
394 /* 2 operand, word only */
395 #define FASTOP2W(op) \
396 	FOP_START(op) \
397 	FOPNOP() \
398 	FOP2E(op##w, ax, dx) \
399 	FOP2E(op##l, eax, edx) \
400 	ON64(FOP2E(op##q, rax, rdx)) \
401 	FOP_END
402 
403 /* 2 operand, src is CL */
404 #define FASTOP2CL(op) \
405 	FOP_START(op) \
406 	FOP2E(op##b, al, cl) \
407 	FOP2E(op##w, ax, cl) \
408 	FOP2E(op##l, eax, cl) \
409 	ON64(FOP2E(op##q, rax, cl)) \
410 	FOP_END
411 
412 /* 2 operand, src and dest are reversed */
413 #define FASTOP2R(op, name) \
414 	FOP_START(name) \
415 	FOP2E(op##b, dl, al) \
416 	FOP2E(op##w, dx, ax) \
417 	FOP2E(op##l, edx, eax) \
418 	ON64(FOP2E(op##q, rdx, rax)) \
419 	FOP_END
420 
421 #define FOP3E(op,  dst, src, src2) \
422 	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
423 	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
424 	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
425 
426 /* 3-operand, word-only, src2=cl */
427 #define FASTOP3WCL(op) \
428 	FOP_START(op) \
429 	FOPNOP() \
430 	FOP3E(op##w, ax, dx, cl) \
431 	FOP3E(op##l, eax, edx, cl) \
432 	ON64(FOP3E(op##q, rax, rdx, cl)) \
433 	FOP_END
434 
435 /* Special case for SETcc - 1 instruction per cc */
436 #define FOP_SETCC(op) \
437 	".align 4 \n\t" \
438 	".type " #op ", @function \n\t" \
439 	#op ": \n\t" \
440 	#op " %al \n\t" \
441 	__FOP_RET(#op)
442 
443 asm(".pushsection .fixup, \"ax\"\n"
444     ".global kvm_fastop_exception \n"
445     "kvm_fastop_exception: xor %esi, %esi; ret\n"
446     ".popsection");
447 
448 FOP_START(setcc)
449 FOP_SETCC(seto)
450 FOP_SETCC(setno)
451 FOP_SETCC(setc)
452 FOP_SETCC(setnc)
453 FOP_SETCC(setz)
454 FOP_SETCC(setnz)
455 FOP_SETCC(setbe)
456 FOP_SETCC(setnbe)
457 FOP_SETCC(sets)
458 FOP_SETCC(setns)
459 FOP_SETCC(setp)
460 FOP_SETCC(setnp)
461 FOP_SETCC(setl)
462 FOP_SETCC(setnl)
463 FOP_SETCC(setle)
464 FOP_SETCC(setnle)
465 FOP_END;
466 
467 FOP_START(salc)
468 FOP_FUNC(salc)
469 "pushf; sbb %al, %al; popf \n\t"
470 FOP_RET(salc)
471 FOP_END;
472 
473 /*
474  * XXX: inoutclob user must know where the argument is being expanded.
475  *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
476  */
477 #define asm_safe(insn, inoutclob...) \
478 ({ \
479 	int _fault = 0; \
480  \
481 	asm volatile("1:" insn "\n" \
482 	             "2:\n" \
483 	             ".pushsection .fixup, \"ax\"\n" \
484 	             "3: movl $1, %[_fault]\n" \
485 	             "   jmp  2b\n" \
486 	             ".popsection\n" \
487 	             _ASM_EXTABLE(1b, 3b) \
488 	             : [_fault] "+qm"(_fault) inoutclob ); \
489  \
490 	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
491 })
492 
493 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
494 				    enum x86_intercept intercept,
495 				    enum x86_intercept_stage stage)
496 {
497 	struct x86_instruction_info info = {
498 		.intercept  = intercept,
499 		.rep_prefix = ctxt->rep_prefix,
500 		.modrm_mod  = ctxt->modrm_mod,
501 		.modrm_reg  = ctxt->modrm_reg,
502 		.modrm_rm   = ctxt->modrm_rm,
503 		.src_val    = ctxt->src.val64,
504 		.dst_val    = ctxt->dst.val64,
505 		.src_bytes  = ctxt->src.bytes,
506 		.dst_bytes  = ctxt->dst.bytes,
507 		.ad_bytes   = ctxt->ad_bytes,
508 		.next_rip   = ctxt->eip,
509 	};
510 
511 	return ctxt->ops->intercept(ctxt, &info, stage);
512 }
513 
514 static void assign_masked(ulong *dest, ulong src, ulong mask)
515 {
516 	*dest = (*dest & ~mask) | (src & mask);
517 }
518 
519 static void assign_register(unsigned long *reg, u64 val, int bytes)
520 {
521 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
522 	switch (bytes) {
523 	case 1:
524 		*(u8 *)reg = (u8)val;
525 		break;
526 	case 2:
527 		*(u16 *)reg = (u16)val;
528 		break;
529 	case 4:
530 		*reg = (u32)val;
531 		break;	/* 64b: zero-extend */
532 	case 8:
533 		*reg = val;
534 		break;
535 	}
536 }
537 
538 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
539 {
540 	return (1UL << (ctxt->ad_bytes << 3)) - 1;
541 }
542 
543 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
544 {
545 	u16 sel;
546 	struct desc_struct ss;
547 
548 	if (ctxt->mode == X86EMUL_MODE_PROT64)
549 		return ~0UL;
550 	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
551 	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
552 }
553 
554 static int stack_size(struct x86_emulate_ctxt *ctxt)
555 {
556 	return (__fls(stack_mask(ctxt)) + 1) >> 3;
557 }
558 
559 /* Access/update address held in a register, based on addressing mode. */
560 static inline unsigned long
561 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
562 {
563 	if (ctxt->ad_bytes == sizeof(unsigned long))
564 		return reg;
565 	else
566 		return reg & ad_mask(ctxt);
567 }
568 
569 static inline unsigned long
570 register_address(struct x86_emulate_ctxt *ctxt, int reg)
571 {
572 	return address_mask(ctxt, reg_read(ctxt, reg));
573 }
574 
575 static void masked_increment(ulong *reg, ulong mask, int inc)
576 {
577 	assign_masked(reg, *reg + inc, mask);
578 }
579 
580 static inline void
581 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
582 {
583 	ulong *preg = reg_rmw(ctxt, reg);
584 
585 	assign_register(preg, *preg + inc, ctxt->ad_bytes);
586 }
587 
588 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
589 {
590 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
591 }
592 
593 static u32 desc_limit_scaled(struct desc_struct *desc)
594 {
595 	u32 limit = get_desc_limit(desc);
596 
597 	return desc->g ? (limit << 12) | 0xfff : limit;
598 }
599 
600 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
601 {
602 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
603 		return 0;
604 
605 	return ctxt->ops->get_cached_segment_base(ctxt, seg);
606 }
607 
608 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
609 			     u32 error, bool valid)
610 {
611 	WARN_ON(vec > 0x1f);
612 	ctxt->exception.vector = vec;
613 	ctxt->exception.error_code = error;
614 	ctxt->exception.error_code_valid = valid;
615 	return X86EMUL_PROPAGATE_FAULT;
616 }
617 
618 static int emulate_db(struct x86_emulate_ctxt *ctxt)
619 {
620 	return emulate_exception(ctxt, DB_VECTOR, 0, false);
621 }
622 
623 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
624 {
625 	return emulate_exception(ctxt, GP_VECTOR, err, true);
626 }
627 
628 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
629 {
630 	return emulate_exception(ctxt, SS_VECTOR, err, true);
631 }
632 
633 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
634 {
635 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
636 }
637 
638 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
639 {
640 	return emulate_exception(ctxt, TS_VECTOR, err, true);
641 }
642 
643 static int emulate_de(struct x86_emulate_ctxt *ctxt)
644 {
645 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
646 }
647 
648 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
649 {
650 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
651 }
652 
653 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
654 {
655 	u16 selector;
656 	struct desc_struct desc;
657 
658 	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
659 	return selector;
660 }
661 
662 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
663 				 unsigned seg)
664 {
665 	u16 dummy;
666 	u32 base3;
667 	struct desc_struct desc;
668 
669 	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
670 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
671 }
672 
673 /*
674  * x86 defines three classes of vector instructions: explicitly
675  * aligned, explicitly unaligned, and the rest, which change behaviour
676  * depending on whether they're AVX encoded or not.
677  *
678  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
679  * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
680  * 512 bytes of data must be aligned to a 16 byte boundary.
681  */
682 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
683 {
684 	u64 alignment = ctxt->d & AlignMask;
685 
686 	if (likely(size < 16))
687 		return 1;
688 
689 	switch (alignment) {
690 	case Unaligned:
691 	case Avx:
692 		return 1;
693 	case Aligned16:
694 		return 16;
695 	case Aligned:
696 	default:
697 		return size;
698 	}
699 }
700 
701 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
702 				       struct segmented_address addr,
703 				       unsigned *max_size, unsigned size,
704 				       bool write, bool fetch,
705 				       enum x86emul_mode mode, ulong *linear)
706 {
707 	struct desc_struct desc;
708 	bool usable;
709 	ulong la;
710 	u32 lim;
711 	u16 sel;
712 	u8  va_bits;
713 
714 	la = seg_base(ctxt, addr.seg) + addr.ea;
715 	*max_size = 0;
716 	switch (mode) {
717 	case X86EMUL_MODE_PROT64:
718 		*linear = la;
719 		va_bits = ctxt_virt_addr_bits(ctxt);
720 		if (get_canonical(la, va_bits) != la)
721 			goto bad;
722 
723 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
724 		if (size > *max_size)
725 			goto bad;
726 		break;
727 	default:
728 		*linear = la = (u32)la;
729 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
730 						addr.seg);
731 		if (!usable)
732 			goto bad;
733 		/* code segment in protected mode or read-only data segment */
734 		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
735 					|| !(desc.type & 2)) && write)
736 			goto bad;
737 		/* unreadable code segment */
738 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
739 			goto bad;
740 		lim = desc_limit_scaled(&desc);
741 		if (!(desc.type & 8) && (desc.type & 4)) {
742 			/* expand-down segment */
743 			if (addr.ea <= lim)
744 				goto bad;
745 			lim = desc.d ? 0xffffffff : 0xffff;
746 		}
747 		if (addr.ea > lim)
748 			goto bad;
749 		if (lim == 0xffffffff)
750 			*max_size = ~0u;
751 		else {
752 			*max_size = (u64)lim + 1 - addr.ea;
753 			if (size > *max_size)
754 				goto bad;
755 		}
756 		break;
757 	}
758 	if (la & (insn_alignment(ctxt, size) - 1))
759 		return emulate_gp(ctxt, 0);
760 	return X86EMUL_CONTINUE;
761 bad:
762 	if (addr.seg == VCPU_SREG_SS)
763 		return emulate_ss(ctxt, 0);
764 	else
765 		return emulate_gp(ctxt, 0);
766 }
767 
768 static int linearize(struct x86_emulate_ctxt *ctxt,
769 		     struct segmented_address addr,
770 		     unsigned size, bool write,
771 		     ulong *linear)
772 {
773 	unsigned max_size;
774 	return __linearize(ctxt, addr, &max_size, size, write, false,
775 			   ctxt->mode, linear);
776 }
777 
778 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
779 			     enum x86emul_mode mode)
780 {
781 	ulong linear;
782 	int rc;
783 	unsigned max_size;
784 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
785 					   .ea = dst };
786 
787 	if (ctxt->op_bytes != sizeof(unsigned long))
788 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
789 	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
790 	if (rc == X86EMUL_CONTINUE)
791 		ctxt->_eip = addr.ea;
792 	return rc;
793 }
794 
795 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
796 {
797 	return assign_eip(ctxt, dst, ctxt->mode);
798 }
799 
800 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
801 			  const struct desc_struct *cs_desc)
802 {
803 	enum x86emul_mode mode = ctxt->mode;
804 	int rc;
805 
806 #ifdef CONFIG_X86_64
807 	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
808 		if (cs_desc->l) {
809 			u64 efer = 0;
810 
811 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
812 			if (efer & EFER_LMA)
813 				mode = X86EMUL_MODE_PROT64;
814 		} else
815 			mode = X86EMUL_MODE_PROT32; /* temporary value */
816 	}
817 #endif
818 	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
819 		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
820 	rc = assign_eip(ctxt, dst, mode);
821 	if (rc == X86EMUL_CONTINUE)
822 		ctxt->mode = mode;
823 	return rc;
824 }
825 
826 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
827 {
828 	return assign_eip_near(ctxt, ctxt->_eip + rel);
829 }
830 
831 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
832 			      void *data, unsigned size)
833 {
834 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
835 }
836 
837 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
838 			       ulong linear, void *data,
839 			       unsigned int size)
840 {
841 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
842 }
843 
844 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
845 			      struct segmented_address addr,
846 			      void *data,
847 			      unsigned size)
848 {
849 	int rc;
850 	ulong linear;
851 
852 	rc = linearize(ctxt, addr, size, false, &linear);
853 	if (rc != X86EMUL_CONTINUE)
854 		return rc;
855 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
856 }
857 
858 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
859 			       struct segmented_address addr,
860 			       void *data,
861 			       unsigned int size)
862 {
863 	int rc;
864 	ulong linear;
865 
866 	rc = linearize(ctxt, addr, size, true, &linear);
867 	if (rc != X86EMUL_CONTINUE)
868 		return rc;
869 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
870 }
871 
872 /*
873  * Prefetch the remaining bytes of the instruction without crossing page
874  * boundary if they are not in fetch_cache yet.
875  */
876 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
877 {
878 	int rc;
879 	unsigned size, max_size;
880 	unsigned long linear;
881 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
882 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
883 					   .ea = ctxt->eip + cur_size };
884 
885 	/*
886 	 * We do not know exactly how many bytes will be needed, and
887 	 * __linearize is expensive, so fetch as much as possible.  We
888 	 * just have to avoid going beyond the 15 byte limit, the end
889 	 * of the segment, or the end of the page.
890 	 *
891 	 * __linearize is called with size 0 so that it does not do any
892 	 * boundary check itself.  Instead, we use max_size to check
893 	 * against op_size.
894 	 */
895 	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
896 			 &linear);
897 	if (unlikely(rc != X86EMUL_CONTINUE))
898 		return rc;
899 
900 	size = min_t(unsigned, 15UL ^ cur_size, max_size);
901 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
902 
903 	/*
904 	 * One instruction can only straddle two pages,
905 	 * and one has been loaded at the beginning of
906 	 * x86_decode_insn.  So, if not enough bytes
907 	 * still, we must have hit the 15-byte boundary.
908 	 */
909 	if (unlikely(size < op_size))
910 		return emulate_gp(ctxt, 0);
911 
912 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
913 			      size, &ctxt->exception);
914 	if (unlikely(rc != X86EMUL_CONTINUE))
915 		return rc;
916 	ctxt->fetch.end += size;
917 	return X86EMUL_CONTINUE;
918 }
919 
920 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
921 					       unsigned size)
922 {
923 	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
924 
925 	if (unlikely(done_size < size))
926 		return __do_insn_fetch_bytes(ctxt, size - done_size);
927 	else
928 		return X86EMUL_CONTINUE;
929 }
930 
931 /* Fetch next part of the instruction being emulated. */
932 #define insn_fetch(_type, _ctxt)					\
933 ({	_type _x;							\
934 									\
935 	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
936 	if (rc != X86EMUL_CONTINUE)					\
937 		goto done;						\
938 	ctxt->_eip += sizeof(_type);					\
939 	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
940 	ctxt->fetch.ptr += sizeof(_type);				\
941 	_x;								\
942 })
943 
944 #define insn_fetch_arr(_arr, _size, _ctxt)				\
945 ({									\
946 	rc = do_insn_fetch_bytes(_ctxt, _size);				\
947 	if (rc != X86EMUL_CONTINUE)					\
948 		goto done;						\
949 	ctxt->_eip += (_size);						\
950 	memcpy(_arr, ctxt->fetch.ptr, _size);				\
951 	ctxt->fetch.ptr += (_size);					\
952 })
953 
954 /*
955  * Given the 'reg' portion of a ModRM byte, and a register block, return a
956  * pointer into the block that addresses the relevant register.
957  * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
958  */
959 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
960 			     int byteop)
961 {
962 	void *p;
963 	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
964 
965 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
966 		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
967 	else
968 		p = reg_rmw(ctxt, modrm_reg);
969 	return p;
970 }
971 
972 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
973 			   struct segmented_address addr,
974 			   u16 *size, unsigned long *address, int op_bytes)
975 {
976 	int rc;
977 
978 	if (op_bytes == 2)
979 		op_bytes = 3;
980 	*address = 0;
981 	rc = segmented_read_std(ctxt, addr, size, 2);
982 	if (rc != X86EMUL_CONTINUE)
983 		return rc;
984 	addr.ea += 2;
985 	rc = segmented_read_std(ctxt, addr, address, op_bytes);
986 	return rc;
987 }
988 
989 FASTOP2(add);
990 FASTOP2(or);
991 FASTOP2(adc);
992 FASTOP2(sbb);
993 FASTOP2(and);
994 FASTOP2(sub);
995 FASTOP2(xor);
996 FASTOP2(cmp);
997 FASTOP2(test);
998 
999 FASTOP1SRC2(mul, mul_ex);
1000 FASTOP1SRC2(imul, imul_ex);
1001 FASTOP1SRC2EX(div, div_ex);
1002 FASTOP1SRC2EX(idiv, idiv_ex);
1003 
1004 FASTOP3WCL(shld);
1005 FASTOP3WCL(shrd);
1006 
1007 FASTOP2W(imul);
1008 
1009 FASTOP1(not);
1010 FASTOP1(neg);
1011 FASTOP1(inc);
1012 FASTOP1(dec);
1013 
1014 FASTOP2CL(rol);
1015 FASTOP2CL(ror);
1016 FASTOP2CL(rcl);
1017 FASTOP2CL(rcr);
1018 FASTOP2CL(shl);
1019 FASTOP2CL(shr);
1020 FASTOP2CL(sar);
1021 
1022 FASTOP2W(bsf);
1023 FASTOP2W(bsr);
1024 FASTOP2W(bt);
1025 FASTOP2W(bts);
1026 FASTOP2W(btr);
1027 FASTOP2W(btc);
1028 
1029 FASTOP2(xadd);
1030 
1031 FASTOP2R(cmp, cmp_r);
1032 
1033 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1034 {
1035 	/* If src is zero, do not writeback, but update flags */
1036 	if (ctxt->src.val == 0)
1037 		ctxt->dst.type = OP_NONE;
1038 	return fastop(ctxt, em_bsf);
1039 }
1040 
1041 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1042 {
1043 	/* If src is zero, do not writeback, but update flags */
1044 	if (ctxt->src.val == 0)
1045 		ctxt->dst.type = OP_NONE;
1046 	return fastop(ctxt, em_bsr);
1047 }
1048 
1049 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1050 {
1051 	u8 rc;
1052 	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1053 
1054 	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1055 	asm("push %[flags]; popf; " CALL_NOSPEC
1056 	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1057 	return rc;
1058 }
1059 
1060 static void fetch_register_operand(struct operand *op)
1061 {
1062 	switch (op->bytes) {
1063 	case 1:
1064 		op->val = *(u8 *)op->addr.reg;
1065 		break;
1066 	case 2:
1067 		op->val = *(u16 *)op->addr.reg;
1068 		break;
1069 	case 4:
1070 		op->val = *(u32 *)op->addr.reg;
1071 		break;
1072 	case 8:
1073 		op->val = *(u64 *)op->addr.reg;
1074 		break;
1075 	}
1076 }
1077 
1078 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1079 {
1080 	switch (reg) {
1081 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1082 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1083 	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1084 	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1085 	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1086 	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1087 	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1088 	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1089 #ifdef CONFIG_X86_64
1090 	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1091 	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1092 	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1093 	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1094 	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1095 	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1096 	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1097 	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1098 #endif
1099 	default: BUG();
1100 	}
1101 }
1102 
1103 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1104 			  int reg)
1105 {
1106 	switch (reg) {
1107 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1108 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1109 	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1110 	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1111 	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1112 	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1113 	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1114 	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1115 #ifdef CONFIG_X86_64
1116 	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1117 	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1118 	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1119 	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1120 	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1121 	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1122 	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1123 	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1124 #endif
1125 	default: BUG();
1126 	}
1127 }
1128 
1129 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1130 {
1131 	switch (reg) {
1132 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1133 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1134 	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1135 	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1136 	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1137 	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1138 	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1139 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1140 	default: BUG();
1141 	}
1142 }
1143 
1144 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1145 {
1146 	switch (reg) {
1147 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1148 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1149 	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1150 	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1151 	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1152 	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1153 	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1154 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1155 	default: BUG();
1156 	}
1157 }
1158 
1159 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1160 {
1161 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1162 		return emulate_nm(ctxt);
1163 
1164 	asm volatile("fninit");
1165 	return X86EMUL_CONTINUE;
1166 }
1167 
1168 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1169 {
1170 	u16 fcw;
1171 
1172 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1173 		return emulate_nm(ctxt);
1174 
1175 	asm volatile("fnstcw %0": "+m"(fcw));
1176 
1177 	ctxt->dst.val = fcw;
1178 
1179 	return X86EMUL_CONTINUE;
1180 }
1181 
1182 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1183 {
1184 	u16 fsw;
1185 
1186 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1187 		return emulate_nm(ctxt);
1188 
1189 	asm volatile("fnstsw %0": "+m"(fsw));
1190 
1191 	ctxt->dst.val = fsw;
1192 
1193 	return X86EMUL_CONTINUE;
1194 }
1195 
1196 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1197 				    struct operand *op)
1198 {
1199 	unsigned reg = ctxt->modrm_reg;
1200 
1201 	if (!(ctxt->d & ModRM))
1202 		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1203 
1204 	if (ctxt->d & Sse) {
1205 		op->type = OP_XMM;
1206 		op->bytes = 16;
1207 		op->addr.xmm = reg;
1208 		read_sse_reg(ctxt, &op->vec_val, reg);
1209 		return;
1210 	}
1211 	if (ctxt->d & Mmx) {
1212 		reg &= 7;
1213 		op->type = OP_MM;
1214 		op->bytes = 8;
1215 		op->addr.mm = reg;
1216 		return;
1217 	}
1218 
1219 	op->type = OP_REG;
1220 	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1221 	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1222 
1223 	fetch_register_operand(op);
1224 	op->orig_val = op->val;
1225 }
1226 
1227 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1228 {
1229 	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1230 		ctxt->modrm_seg = VCPU_SREG_SS;
1231 }
1232 
1233 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1234 			struct operand *op)
1235 {
1236 	u8 sib;
1237 	int index_reg, base_reg, scale;
1238 	int rc = X86EMUL_CONTINUE;
1239 	ulong modrm_ea = 0;
1240 
1241 	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1242 	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1243 	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1244 
1245 	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1246 	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1247 	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1248 	ctxt->modrm_seg = VCPU_SREG_DS;
1249 
1250 	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1251 		op->type = OP_REG;
1252 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1253 		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1254 				ctxt->d & ByteOp);
1255 		if (ctxt->d & Sse) {
1256 			op->type = OP_XMM;
1257 			op->bytes = 16;
1258 			op->addr.xmm = ctxt->modrm_rm;
1259 			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1260 			return rc;
1261 		}
1262 		if (ctxt->d & Mmx) {
1263 			op->type = OP_MM;
1264 			op->bytes = 8;
1265 			op->addr.mm = ctxt->modrm_rm & 7;
1266 			return rc;
1267 		}
1268 		fetch_register_operand(op);
1269 		return rc;
1270 	}
1271 
1272 	op->type = OP_MEM;
1273 
1274 	if (ctxt->ad_bytes == 2) {
1275 		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1276 		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1277 		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1278 		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1279 
1280 		/* 16-bit ModR/M decode. */
1281 		switch (ctxt->modrm_mod) {
1282 		case 0:
1283 			if (ctxt->modrm_rm == 6)
1284 				modrm_ea += insn_fetch(u16, ctxt);
1285 			break;
1286 		case 1:
1287 			modrm_ea += insn_fetch(s8, ctxt);
1288 			break;
1289 		case 2:
1290 			modrm_ea += insn_fetch(u16, ctxt);
1291 			break;
1292 		}
1293 		switch (ctxt->modrm_rm) {
1294 		case 0:
1295 			modrm_ea += bx + si;
1296 			break;
1297 		case 1:
1298 			modrm_ea += bx + di;
1299 			break;
1300 		case 2:
1301 			modrm_ea += bp + si;
1302 			break;
1303 		case 3:
1304 			modrm_ea += bp + di;
1305 			break;
1306 		case 4:
1307 			modrm_ea += si;
1308 			break;
1309 		case 5:
1310 			modrm_ea += di;
1311 			break;
1312 		case 6:
1313 			if (ctxt->modrm_mod != 0)
1314 				modrm_ea += bp;
1315 			break;
1316 		case 7:
1317 			modrm_ea += bx;
1318 			break;
1319 		}
1320 		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1321 		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1322 			ctxt->modrm_seg = VCPU_SREG_SS;
1323 		modrm_ea = (u16)modrm_ea;
1324 	} else {
1325 		/* 32/64-bit ModR/M decode. */
1326 		if ((ctxt->modrm_rm & 7) == 4) {
1327 			sib = insn_fetch(u8, ctxt);
1328 			index_reg |= (sib >> 3) & 7;
1329 			base_reg |= sib & 7;
1330 			scale = sib >> 6;
1331 
1332 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1333 				modrm_ea += insn_fetch(s32, ctxt);
1334 			else {
1335 				modrm_ea += reg_read(ctxt, base_reg);
1336 				adjust_modrm_seg(ctxt, base_reg);
1337 				/* Increment ESP on POP [ESP] */
1338 				if ((ctxt->d & IncSP) &&
1339 				    base_reg == VCPU_REGS_RSP)
1340 					modrm_ea += ctxt->op_bytes;
1341 			}
1342 			if (index_reg != 4)
1343 				modrm_ea += reg_read(ctxt, index_reg) << scale;
1344 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1345 			modrm_ea += insn_fetch(s32, ctxt);
1346 			if (ctxt->mode == X86EMUL_MODE_PROT64)
1347 				ctxt->rip_relative = 1;
1348 		} else {
1349 			base_reg = ctxt->modrm_rm;
1350 			modrm_ea += reg_read(ctxt, base_reg);
1351 			adjust_modrm_seg(ctxt, base_reg);
1352 		}
1353 		switch (ctxt->modrm_mod) {
1354 		case 1:
1355 			modrm_ea += insn_fetch(s8, ctxt);
1356 			break;
1357 		case 2:
1358 			modrm_ea += insn_fetch(s32, ctxt);
1359 			break;
1360 		}
1361 	}
1362 	op->addr.mem.ea = modrm_ea;
1363 	if (ctxt->ad_bytes != 8)
1364 		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1365 
1366 done:
1367 	return rc;
1368 }
1369 
1370 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1371 		      struct operand *op)
1372 {
1373 	int rc = X86EMUL_CONTINUE;
1374 
1375 	op->type = OP_MEM;
1376 	switch (ctxt->ad_bytes) {
1377 	case 2:
1378 		op->addr.mem.ea = insn_fetch(u16, ctxt);
1379 		break;
1380 	case 4:
1381 		op->addr.mem.ea = insn_fetch(u32, ctxt);
1382 		break;
1383 	case 8:
1384 		op->addr.mem.ea = insn_fetch(u64, ctxt);
1385 		break;
1386 	}
1387 done:
1388 	return rc;
1389 }
1390 
1391 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1392 {
1393 	long sv = 0, mask;
1394 
1395 	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1396 		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1397 
1398 		if (ctxt->src.bytes == 2)
1399 			sv = (s16)ctxt->src.val & (s16)mask;
1400 		else if (ctxt->src.bytes == 4)
1401 			sv = (s32)ctxt->src.val & (s32)mask;
1402 		else
1403 			sv = (s64)ctxt->src.val & (s64)mask;
1404 
1405 		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1406 					   ctxt->dst.addr.mem.ea + (sv >> 3));
1407 	}
1408 
1409 	/* only subword offset */
1410 	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1411 }
1412 
1413 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1414 			 unsigned long addr, void *dest, unsigned size)
1415 {
1416 	int rc;
1417 	struct read_cache *mc = &ctxt->mem_read;
1418 
1419 	if (mc->pos < mc->end)
1420 		goto read_cached;
1421 
1422 	WARN_ON((mc->end + size) >= sizeof(mc->data));
1423 
1424 	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1425 				      &ctxt->exception);
1426 	if (rc != X86EMUL_CONTINUE)
1427 		return rc;
1428 
1429 	mc->end += size;
1430 
1431 read_cached:
1432 	memcpy(dest, mc->data + mc->pos, size);
1433 	mc->pos += size;
1434 	return X86EMUL_CONTINUE;
1435 }
1436 
1437 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1438 			  struct segmented_address addr,
1439 			  void *data,
1440 			  unsigned size)
1441 {
1442 	int rc;
1443 	ulong linear;
1444 
1445 	rc = linearize(ctxt, addr, size, false, &linear);
1446 	if (rc != X86EMUL_CONTINUE)
1447 		return rc;
1448 	return read_emulated(ctxt, linear, data, size);
1449 }
1450 
1451 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1452 			   struct segmented_address addr,
1453 			   const void *data,
1454 			   unsigned size)
1455 {
1456 	int rc;
1457 	ulong linear;
1458 
1459 	rc = linearize(ctxt, addr, size, true, &linear);
1460 	if (rc != X86EMUL_CONTINUE)
1461 		return rc;
1462 	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1463 					 &ctxt->exception);
1464 }
1465 
1466 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1467 			     struct segmented_address addr,
1468 			     const void *orig_data, const void *data,
1469 			     unsigned size)
1470 {
1471 	int rc;
1472 	ulong linear;
1473 
1474 	rc = linearize(ctxt, addr, size, true, &linear);
1475 	if (rc != X86EMUL_CONTINUE)
1476 		return rc;
1477 	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1478 					   size, &ctxt->exception);
1479 }
1480 
1481 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1482 			   unsigned int size, unsigned short port,
1483 			   void *dest)
1484 {
1485 	struct read_cache *rc = &ctxt->io_read;
1486 
1487 	if (rc->pos == rc->end) { /* refill pio read ahead */
1488 		unsigned int in_page, n;
1489 		unsigned int count = ctxt->rep_prefix ?
1490 			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1491 		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1492 			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1493 			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1494 		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1495 		if (n == 0)
1496 			n = 1;
1497 		rc->pos = rc->end = 0;
1498 		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1499 			return 0;
1500 		rc->end = n * size;
1501 	}
1502 
1503 	if (ctxt->rep_prefix && (ctxt->d & String) &&
1504 	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1505 		ctxt->dst.data = rc->data + rc->pos;
1506 		ctxt->dst.type = OP_MEM_STR;
1507 		ctxt->dst.count = (rc->end - rc->pos) / size;
1508 		rc->pos = rc->end;
1509 	} else {
1510 		memcpy(dest, rc->data + rc->pos, size);
1511 		rc->pos += size;
1512 	}
1513 	return 1;
1514 }
1515 
1516 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1517 				     u16 index, struct desc_struct *desc)
1518 {
1519 	struct desc_ptr dt;
1520 	ulong addr;
1521 
1522 	ctxt->ops->get_idt(ctxt, &dt);
1523 
1524 	if (dt.size < index * 8 + 7)
1525 		return emulate_gp(ctxt, index << 3 | 0x2);
1526 
1527 	addr = dt.address + index * 8;
1528 	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1529 }
1530 
1531 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1532 				     u16 selector, struct desc_ptr *dt)
1533 {
1534 	const struct x86_emulate_ops *ops = ctxt->ops;
1535 	u32 base3 = 0;
1536 
1537 	if (selector & 1 << 2) {
1538 		struct desc_struct desc;
1539 		u16 sel;
1540 
1541 		memset(dt, 0, sizeof(*dt));
1542 		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1543 				      VCPU_SREG_LDTR))
1544 			return;
1545 
1546 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1547 		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1548 	} else
1549 		ops->get_gdt(ctxt, dt);
1550 }
1551 
1552 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1553 			      u16 selector, ulong *desc_addr_p)
1554 {
1555 	struct desc_ptr dt;
1556 	u16 index = selector >> 3;
1557 	ulong addr;
1558 
1559 	get_descriptor_table_ptr(ctxt, selector, &dt);
1560 
1561 	if (dt.size < index * 8 + 7)
1562 		return emulate_gp(ctxt, selector & 0xfffc);
1563 
1564 	addr = dt.address + index * 8;
1565 
1566 #ifdef CONFIG_X86_64
1567 	if (addr >> 32 != 0) {
1568 		u64 efer = 0;
1569 
1570 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1571 		if (!(efer & EFER_LMA))
1572 			addr &= (u32)-1;
1573 	}
1574 #endif
1575 
1576 	*desc_addr_p = addr;
1577 	return X86EMUL_CONTINUE;
1578 }
1579 
1580 /* allowed just for 8 bytes segments */
1581 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1582 				   u16 selector, struct desc_struct *desc,
1583 				   ulong *desc_addr_p)
1584 {
1585 	int rc;
1586 
1587 	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1588 	if (rc != X86EMUL_CONTINUE)
1589 		return rc;
1590 
1591 	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1592 }
1593 
1594 /* allowed just for 8 bytes segments */
1595 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1596 				    u16 selector, struct desc_struct *desc)
1597 {
1598 	int rc;
1599 	ulong addr;
1600 
1601 	rc = get_descriptor_ptr(ctxt, selector, &addr);
1602 	if (rc != X86EMUL_CONTINUE)
1603 		return rc;
1604 
1605 	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1606 }
1607 
1608 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1609 				     u16 selector, int seg, u8 cpl,
1610 				     enum x86_transfer_type transfer,
1611 				     struct desc_struct *desc)
1612 {
1613 	struct desc_struct seg_desc, old_desc;
1614 	u8 dpl, rpl;
1615 	unsigned err_vec = GP_VECTOR;
1616 	u32 err_code = 0;
1617 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1618 	ulong desc_addr;
1619 	int ret;
1620 	u16 dummy;
1621 	u32 base3 = 0;
1622 
1623 	memset(&seg_desc, 0, sizeof(seg_desc));
1624 
1625 	if (ctxt->mode == X86EMUL_MODE_REAL) {
1626 		/* set real mode segment descriptor (keep limit etc. for
1627 		 * unreal mode) */
1628 		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1629 		set_desc_base(&seg_desc, selector << 4);
1630 		goto load;
1631 	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1632 		/* VM86 needs a clean new segment descriptor */
1633 		set_desc_base(&seg_desc, selector << 4);
1634 		set_desc_limit(&seg_desc, 0xffff);
1635 		seg_desc.type = 3;
1636 		seg_desc.p = 1;
1637 		seg_desc.s = 1;
1638 		seg_desc.dpl = 3;
1639 		goto load;
1640 	}
1641 
1642 	rpl = selector & 3;
1643 
1644 	/* TR should be in GDT only */
1645 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1646 		goto exception;
1647 
1648 	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1649 	if (null_selector) {
1650 		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1651 			goto exception;
1652 
1653 		if (seg == VCPU_SREG_SS) {
1654 			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1655 				goto exception;
1656 
1657 			/*
1658 			 * ctxt->ops->set_segment expects the CPL to be in
1659 			 * SS.DPL, so fake an expand-up 32-bit data segment.
1660 			 */
1661 			seg_desc.type = 3;
1662 			seg_desc.p = 1;
1663 			seg_desc.s = 1;
1664 			seg_desc.dpl = cpl;
1665 			seg_desc.d = 1;
1666 			seg_desc.g = 1;
1667 		}
1668 
1669 		/* Skip all following checks */
1670 		goto load;
1671 	}
1672 
1673 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1674 	if (ret != X86EMUL_CONTINUE)
1675 		return ret;
1676 
1677 	err_code = selector & 0xfffc;
1678 	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1679 							   GP_VECTOR;
1680 
1681 	/* can't load system descriptor into segment selector */
1682 	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1683 		if (transfer == X86_TRANSFER_CALL_JMP)
1684 			return X86EMUL_UNHANDLEABLE;
1685 		goto exception;
1686 	}
1687 
1688 	if (!seg_desc.p) {
1689 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1690 		goto exception;
1691 	}
1692 
1693 	dpl = seg_desc.dpl;
1694 
1695 	switch (seg) {
1696 	case VCPU_SREG_SS:
1697 		/*
1698 		 * segment is not a writable data segment or segment
1699 		 * selector's RPL != CPL or segment selector's RPL != CPL
1700 		 */
1701 		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1702 			goto exception;
1703 		break;
1704 	case VCPU_SREG_CS:
1705 		if (!(seg_desc.type & 8))
1706 			goto exception;
1707 
1708 		if (seg_desc.type & 4) {
1709 			/* conforming */
1710 			if (dpl > cpl)
1711 				goto exception;
1712 		} else {
1713 			/* nonconforming */
1714 			if (rpl > cpl || dpl != cpl)
1715 				goto exception;
1716 		}
1717 		/* in long-mode d/b must be clear if l is set */
1718 		if (seg_desc.d && seg_desc.l) {
1719 			u64 efer = 0;
1720 
1721 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1722 			if (efer & EFER_LMA)
1723 				goto exception;
1724 		}
1725 
1726 		/* CS(RPL) <- CPL */
1727 		selector = (selector & 0xfffc) | cpl;
1728 		break;
1729 	case VCPU_SREG_TR:
1730 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1731 			goto exception;
1732 		old_desc = seg_desc;
1733 		seg_desc.type |= 2; /* busy */
1734 		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1735 						  sizeof(seg_desc), &ctxt->exception);
1736 		if (ret != X86EMUL_CONTINUE)
1737 			return ret;
1738 		break;
1739 	case VCPU_SREG_LDTR:
1740 		if (seg_desc.s || seg_desc.type != 2)
1741 			goto exception;
1742 		break;
1743 	default: /*  DS, ES, FS, or GS */
1744 		/*
1745 		 * segment is not a data or readable code segment or
1746 		 * ((segment is a data or nonconforming code segment)
1747 		 * and (both RPL and CPL > DPL))
1748 		 */
1749 		if ((seg_desc.type & 0xa) == 0x8 ||
1750 		    (((seg_desc.type & 0xc) != 0xc) &&
1751 		     (rpl > dpl && cpl > dpl)))
1752 			goto exception;
1753 		break;
1754 	}
1755 
1756 	if (seg_desc.s) {
1757 		/* mark segment as accessed */
1758 		if (!(seg_desc.type & 1)) {
1759 			seg_desc.type |= 1;
1760 			ret = write_segment_descriptor(ctxt, selector,
1761 						       &seg_desc);
1762 			if (ret != X86EMUL_CONTINUE)
1763 				return ret;
1764 		}
1765 	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1766 		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1767 		if (ret != X86EMUL_CONTINUE)
1768 			return ret;
1769 		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1770 				((u64)base3 << 32), ctxt))
1771 			return emulate_gp(ctxt, 0);
1772 	}
1773 load:
1774 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1775 	if (desc)
1776 		*desc = seg_desc;
1777 	return X86EMUL_CONTINUE;
1778 exception:
1779 	return emulate_exception(ctxt, err_vec, err_code, true);
1780 }
1781 
1782 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1783 				   u16 selector, int seg)
1784 {
1785 	u8 cpl = ctxt->ops->cpl(ctxt);
1786 
1787 	/*
1788 	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1789 	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1790 	 * but it's wrong).
1791 	 *
1792 	 * However, the Intel manual says that putting IST=1/DPL=3 in
1793 	 * an interrupt gate will result in SS=3 (the AMD manual instead
1794 	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1795 	 * and only forbid it here.
1796 	 */
1797 	if (seg == VCPU_SREG_SS && selector == 3 &&
1798 	    ctxt->mode == X86EMUL_MODE_PROT64)
1799 		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1800 
1801 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1802 					 X86_TRANSFER_NONE, NULL);
1803 }
1804 
1805 static void write_register_operand(struct operand *op)
1806 {
1807 	return assign_register(op->addr.reg, op->val, op->bytes);
1808 }
1809 
1810 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1811 {
1812 	switch (op->type) {
1813 	case OP_REG:
1814 		write_register_operand(op);
1815 		break;
1816 	case OP_MEM:
1817 		if (ctxt->lock_prefix)
1818 			return segmented_cmpxchg(ctxt,
1819 						 op->addr.mem,
1820 						 &op->orig_val,
1821 						 &op->val,
1822 						 op->bytes);
1823 		else
1824 			return segmented_write(ctxt,
1825 					       op->addr.mem,
1826 					       &op->val,
1827 					       op->bytes);
1828 		break;
1829 	case OP_MEM_STR:
1830 		return segmented_write(ctxt,
1831 				       op->addr.mem,
1832 				       op->data,
1833 				       op->bytes * op->count);
1834 		break;
1835 	case OP_XMM:
1836 		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1837 		break;
1838 	case OP_MM:
1839 		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1840 		break;
1841 	case OP_NONE:
1842 		/* no writeback */
1843 		break;
1844 	default:
1845 		break;
1846 	}
1847 	return X86EMUL_CONTINUE;
1848 }
1849 
1850 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1851 {
1852 	struct segmented_address addr;
1853 
1854 	rsp_increment(ctxt, -bytes);
1855 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1856 	addr.seg = VCPU_SREG_SS;
1857 
1858 	return segmented_write(ctxt, addr, data, bytes);
1859 }
1860 
1861 static int em_push(struct x86_emulate_ctxt *ctxt)
1862 {
1863 	/* Disable writeback. */
1864 	ctxt->dst.type = OP_NONE;
1865 	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1866 }
1867 
1868 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1869 		       void *dest, int len)
1870 {
1871 	int rc;
1872 	struct segmented_address addr;
1873 
1874 	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1875 	addr.seg = VCPU_SREG_SS;
1876 	rc = segmented_read(ctxt, addr, dest, len);
1877 	if (rc != X86EMUL_CONTINUE)
1878 		return rc;
1879 
1880 	rsp_increment(ctxt, len);
1881 	return rc;
1882 }
1883 
1884 static int em_pop(struct x86_emulate_ctxt *ctxt)
1885 {
1886 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1887 }
1888 
1889 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1890 			void *dest, int len)
1891 {
1892 	int rc;
1893 	unsigned long val, change_mask;
1894 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1895 	int cpl = ctxt->ops->cpl(ctxt);
1896 
1897 	rc = emulate_pop(ctxt, &val, len);
1898 	if (rc != X86EMUL_CONTINUE)
1899 		return rc;
1900 
1901 	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1902 		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1903 		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1904 		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1905 
1906 	switch(ctxt->mode) {
1907 	case X86EMUL_MODE_PROT64:
1908 	case X86EMUL_MODE_PROT32:
1909 	case X86EMUL_MODE_PROT16:
1910 		if (cpl == 0)
1911 			change_mask |= X86_EFLAGS_IOPL;
1912 		if (cpl <= iopl)
1913 			change_mask |= X86_EFLAGS_IF;
1914 		break;
1915 	case X86EMUL_MODE_VM86:
1916 		if (iopl < 3)
1917 			return emulate_gp(ctxt, 0);
1918 		change_mask |= X86_EFLAGS_IF;
1919 		break;
1920 	default: /* real mode */
1921 		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1922 		break;
1923 	}
1924 
1925 	*(unsigned long *)dest =
1926 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1927 
1928 	return rc;
1929 }
1930 
1931 static int em_popf(struct x86_emulate_ctxt *ctxt)
1932 {
1933 	ctxt->dst.type = OP_REG;
1934 	ctxt->dst.addr.reg = &ctxt->eflags;
1935 	ctxt->dst.bytes = ctxt->op_bytes;
1936 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1937 }
1938 
1939 static int em_enter(struct x86_emulate_ctxt *ctxt)
1940 {
1941 	int rc;
1942 	unsigned frame_size = ctxt->src.val;
1943 	unsigned nesting_level = ctxt->src2.val & 31;
1944 	ulong rbp;
1945 
1946 	if (nesting_level)
1947 		return X86EMUL_UNHANDLEABLE;
1948 
1949 	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1950 	rc = push(ctxt, &rbp, stack_size(ctxt));
1951 	if (rc != X86EMUL_CONTINUE)
1952 		return rc;
1953 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1954 		      stack_mask(ctxt));
1955 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1956 		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1957 		      stack_mask(ctxt));
1958 	return X86EMUL_CONTINUE;
1959 }
1960 
1961 static int em_leave(struct x86_emulate_ctxt *ctxt)
1962 {
1963 	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1964 		      stack_mask(ctxt));
1965 	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1966 }
1967 
1968 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1969 {
1970 	int seg = ctxt->src2.val;
1971 
1972 	ctxt->src.val = get_segment_selector(ctxt, seg);
1973 	if (ctxt->op_bytes == 4) {
1974 		rsp_increment(ctxt, -2);
1975 		ctxt->op_bytes = 2;
1976 	}
1977 
1978 	return em_push(ctxt);
1979 }
1980 
1981 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1982 {
1983 	int seg = ctxt->src2.val;
1984 	unsigned long selector;
1985 	int rc;
1986 
1987 	rc = emulate_pop(ctxt, &selector, 2);
1988 	if (rc != X86EMUL_CONTINUE)
1989 		return rc;
1990 
1991 	if (ctxt->modrm_reg == VCPU_SREG_SS)
1992 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1993 	if (ctxt->op_bytes > 2)
1994 		rsp_increment(ctxt, ctxt->op_bytes - 2);
1995 
1996 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1997 	return rc;
1998 }
1999 
2000 static int em_pusha(struct x86_emulate_ctxt *ctxt)
2001 {
2002 	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2003 	int rc = X86EMUL_CONTINUE;
2004 	int reg = VCPU_REGS_RAX;
2005 
2006 	while (reg <= VCPU_REGS_RDI) {
2007 		(reg == VCPU_REGS_RSP) ?
2008 		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
2009 
2010 		rc = em_push(ctxt);
2011 		if (rc != X86EMUL_CONTINUE)
2012 			return rc;
2013 
2014 		++reg;
2015 	}
2016 
2017 	return rc;
2018 }
2019 
2020 static int em_pushf(struct x86_emulate_ctxt *ctxt)
2021 {
2022 	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2023 	return em_push(ctxt);
2024 }
2025 
2026 static int em_popa(struct x86_emulate_ctxt *ctxt)
2027 {
2028 	int rc = X86EMUL_CONTINUE;
2029 	int reg = VCPU_REGS_RDI;
2030 	u32 val;
2031 
2032 	while (reg >= VCPU_REGS_RAX) {
2033 		if (reg == VCPU_REGS_RSP) {
2034 			rsp_increment(ctxt, ctxt->op_bytes);
2035 			--reg;
2036 		}
2037 
2038 		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2039 		if (rc != X86EMUL_CONTINUE)
2040 			break;
2041 		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2042 		--reg;
2043 	}
2044 	return rc;
2045 }
2046 
2047 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2048 {
2049 	const struct x86_emulate_ops *ops = ctxt->ops;
2050 	int rc;
2051 	struct desc_ptr dt;
2052 	gva_t cs_addr;
2053 	gva_t eip_addr;
2054 	u16 cs, eip;
2055 
2056 	/* TODO: Add limit checks */
2057 	ctxt->src.val = ctxt->eflags;
2058 	rc = em_push(ctxt);
2059 	if (rc != X86EMUL_CONTINUE)
2060 		return rc;
2061 
2062 	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2063 
2064 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2065 	rc = em_push(ctxt);
2066 	if (rc != X86EMUL_CONTINUE)
2067 		return rc;
2068 
2069 	ctxt->src.val = ctxt->_eip;
2070 	rc = em_push(ctxt);
2071 	if (rc != X86EMUL_CONTINUE)
2072 		return rc;
2073 
2074 	ops->get_idt(ctxt, &dt);
2075 
2076 	eip_addr = dt.address + (irq << 2);
2077 	cs_addr = dt.address + (irq << 2) + 2;
2078 
2079 	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2080 	if (rc != X86EMUL_CONTINUE)
2081 		return rc;
2082 
2083 	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2084 	if (rc != X86EMUL_CONTINUE)
2085 		return rc;
2086 
2087 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2088 	if (rc != X86EMUL_CONTINUE)
2089 		return rc;
2090 
2091 	ctxt->_eip = eip;
2092 
2093 	return rc;
2094 }
2095 
2096 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2097 {
2098 	int rc;
2099 
2100 	invalidate_registers(ctxt);
2101 	rc = __emulate_int_real(ctxt, irq);
2102 	if (rc == X86EMUL_CONTINUE)
2103 		writeback_registers(ctxt);
2104 	return rc;
2105 }
2106 
2107 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2108 {
2109 	switch(ctxt->mode) {
2110 	case X86EMUL_MODE_REAL:
2111 		return __emulate_int_real(ctxt, irq);
2112 	case X86EMUL_MODE_VM86:
2113 	case X86EMUL_MODE_PROT16:
2114 	case X86EMUL_MODE_PROT32:
2115 	case X86EMUL_MODE_PROT64:
2116 	default:
2117 		/* Protected mode interrupts unimplemented yet */
2118 		return X86EMUL_UNHANDLEABLE;
2119 	}
2120 }
2121 
2122 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2123 {
2124 	int rc = X86EMUL_CONTINUE;
2125 	unsigned long temp_eip = 0;
2126 	unsigned long temp_eflags = 0;
2127 	unsigned long cs = 0;
2128 	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2129 			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2130 			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2131 			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2132 			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2133 			     X86_EFLAGS_FIXED;
2134 	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2135 				  X86_EFLAGS_VIP;
2136 
2137 	/* TODO: Add stack limit check */
2138 
2139 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2140 
2141 	if (rc != X86EMUL_CONTINUE)
2142 		return rc;
2143 
2144 	if (temp_eip & ~0xffff)
2145 		return emulate_gp(ctxt, 0);
2146 
2147 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2148 
2149 	if (rc != X86EMUL_CONTINUE)
2150 		return rc;
2151 
2152 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2153 
2154 	if (rc != X86EMUL_CONTINUE)
2155 		return rc;
2156 
2157 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2158 
2159 	if (rc != X86EMUL_CONTINUE)
2160 		return rc;
2161 
2162 	ctxt->_eip = temp_eip;
2163 
2164 	if (ctxt->op_bytes == 4)
2165 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2166 	else if (ctxt->op_bytes == 2) {
2167 		ctxt->eflags &= ~0xffff;
2168 		ctxt->eflags |= temp_eflags;
2169 	}
2170 
2171 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2172 	ctxt->eflags |= X86_EFLAGS_FIXED;
2173 	ctxt->ops->set_nmi_mask(ctxt, false);
2174 
2175 	return rc;
2176 }
2177 
2178 static int em_iret(struct x86_emulate_ctxt *ctxt)
2179 {
2180 	switch(ctxt->mode) {
2181 	case X86EMUL_MODE_REAL:
2182 		return emulate_iret_real(ctxt);
2183 	case X86EMUL_MODE_VM86:
2184 	case X86EMUL_MODE_PROT16:
2185 	case X86EMUL_MODE_PROT32:
2186 	case X86EMUL_MODE_PROT64:
2187 	default:
2188 		/* iret from protected mode unimplemented yet */
2189 		return X86EMUL_UNHANDLEABLE;
2190 	}
2191 }
2192 
2193 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2194 {
2195 	int rc;
2196 	unsigned short sel;
2197 	struct desc_struct new_desc;
2198 	u8 cpl = ctxt->ops->cpl(ctxt);
2199 
2200 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2201 
2202 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2203 				       X86_TRANSFER_CALL_JMP,
2204 				       &new_desc);
2205 	if (rc != X86EMUL_CONTINUE)
2206 		return rc;
2207 
2208 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2209 	/* Error handling is not implemented. */
2210 	if (rc != X86EMUL_CONTINUE)
2211 		return X86EMUL_UNHANDLEABLE;
2212 
2213 	return rc;
2214 }
2215 
2216 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2217 {
2218 	return assign_eip_near(ctxt, ctxt->src.val);
2219 }
2220 
2221 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2222 {
2223 	int rc;
2224 	long int old_eip;
2225 
2226 	old_eip = ctxt->_eip;
2227 	rc = assign_eip_near(ctxt, ctxt->src.val);
2228 	if (rc != X86EMUL_CONTINUE)
2229 		return rc;
2230 	ctxt->src.val = old_eip;
2231 	rc = em_push(ctxt);
2232 	return rc;
2233 }
2234 
2235 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2236 {
2237 	u64 old = ctxt->dst.orig_val64;
2238 
2239 	if (ctxt->dst.bytes == 16)
2240 		return X86EMUL_UNHANDLEABLE;
2241 
2242 	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2243 	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2244 		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2245 		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2246 		ctxt->eflags &= ~X86_EFLAGS_ZF;
2247 	} else {
2248 		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2249 			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2250 
2251 		ctxt->eflags |= X86_EFLAGS_ZF;
2252 	}
2253 	return X86EMUL_CONTINUE;
2254 }
2255 
2256 static int em_ret(struct x86_emulate_ctxt *ctxt)
2257 {
2258 	int rc;
2259 	unsigned long eip;
2260 
2261 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2262 	if (rc != X86EMUL_CONTINUE)
2263 		return rc;
2264 
2265 	return assign_eip_near(ctxt, eip);
2266 }
2267 
2268 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2269 {
2270 	int rc;
2271 	unsigned long eip, cs;
2272 	int cpl = ctxt->ops->cpl(ctxt);
2273 	struct desc_struct new_desc;
2274 
2275 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2276 	if (rc != X86EMUL_CONTINUE)
2277 		return rc;
2278 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2279 	if (rc != X86EMUL_CONTINUE)
2280 		return rc;
2281 	/* Outer-privilege level return is not implemented */
2282 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2283 		return X86EMUL_UNHANDLEABLE;
2284 	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2285 				       X86_TRANSFER_RET,
2286 				       &new_desc);
2287 	if (rc != X86EMUL_CONTINUE)
2288 		return rc;
2289 	rc = assign_eip_far(ctxt, eip, &new_desc);
2290 	/* Error handling is not implemented. */
2291 	if (rc != X86EMUL_CONTINUE)
2292 		return X86EMUL_UNHANDLEABLE;
2293 
2294 	return rc;
2295 }
2296 
2297 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2298 {
2299         int rc;
2300 
2301         rc = em_ret_far(ctxt);
2302         if (rc != X86EMUL_CONTINUE)
2303                 return rc;
2304         rsp_increment(ctxt, ctxt->src.val);
2305         return X86EMUL_CONTINUE;
2306 }
2307 
2308 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2309 {
2310 	/* Save real source value, then compare EAX against destination. */
2311 	ctxt->dst.orig_val = ctxt->dst.val;
2312 	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2313 	ctxt->src.orig_val = ctxt->src.val;
2314 	ctxt->src.val = ctxt->dst.orig_val;
2315 	fastop(ctxt, em_cmp);
2316 
2317 	if (ctxt->eflags & X86_EFLAGS_ZF) {
2318 		/* Success: write back to memory; no update of EAX */
2319 		ctxt->src.type = OP_NONE;
2320 		ctxt->dst.val = ctxt->src.orig_val;
2321 	} else {
2322 		/* Failure: write the value we saw to EAX. */
2323 		ctxt->src.type = OP_REG;
2324 		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2325 		ctxt->src.val = ctxt->dst.orig_val;
2326 		/* Create write-cycle to dest by writing the same value */
2327 		ctxt->dst.val = ctxt->dst.orig_val;
2328 	}
2329 	return X86EMUL_CONTINUE;
2330 }
2331 
2332 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2333 {
2334 	int seg = ctxt->src2.val;
2335 	unsigned short sel;
2336 	int rc;
2337 
2338 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2339 
2340 	rc = load_segment_descriptor(ctxt, sel, seg);
2341 	if (rc != X86EMUL_CONTINUE)
2342 		return rc;
2343 
2344 	ctxt->dst.val = ctxt->src.val;
2345 	return rc;
2346 }
2347 
2348 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2349 {
2350 #ifdef CONFIG_X86_64
2351 	u32 eax, ebx, ecx, edx;
2352 
2353 	eax = 0x80000001;
2354 	ecx = 0;
2355 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2356 	return edx & bit(X86_FEATURE_LM);
2357 #else
2358 	return false;
2359 #endif
2360 }
2361 
2362 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2363 {
2364 	desc->g    = (flags >> 23) & 1;
2365 	desc->d    = (flags >> 22) & 1;
2366 	desc->l    = (flags >> 21) & 1;
2367 	desc->avl  = (flags >> 20) & 1;
2368 	desc->p    = (flags >> 15) & 1;
2369 	desc->dpl  = (flags >> 13) & 3;
2370 	desc->s    = (flags >> 12) & 1;
2371 	desc->type = (flags >>  8) & 15;
2372 }
2373 
2374 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2375 			   int n)
2376 {
2377 	struct desc_struct desc;
2378 	int offset;
2379 	u16 selector;
2380 
2381 	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2382 
2383 	if (n < 3)
2384 		offset = 0x7f84 + n * 12;
2385 	else
2386 		offset = 0x7f2c + (n - 3) * 12;
2387 
2388 	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
2389 	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
2390 	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2391 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2392 	return X86EMUL_CONTINUE;
2393 }
2394 
2395 #ifdef CONFIG_X86_64
2396 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2397 			   int n)
2398 {
2399 	struct desc_struct desc;
2400 	int offset;
2401 	u16 selector;
2402 	u32 base3;
2403 
2404 	offset = 0x7e00 + n * 16;
2405 
2406 	selector =                GET_SMSTATE(u16, smstate, offset);
2407 	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2408 	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
2409 	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
2410 	base3 =                   GET_SMSTATE(u32, smstate, offset + 12);
2411 
2412 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2413 	return X86EMUL_CONTINUE;
2414 }
2415 #endif
2416 
2417 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2418 				    u64 cr0, u64 cr3, u64 cr4)
2419 {
2420 	int bad;
2421 	u64 pcid;
2422 
2423 	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
2424 	pcid = 0;
2425 	if (cr4 & X86_CR4_PCIDE) {
2426 		pcid = cr3 & 0xfff;
2427 		cr3 &= ~0xfff;
2428 	}
2429 
2430 	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2431 	if (bad)
2432 		return X86EMUL_UNHANDLEABLE;
2433 
2434 	/*
2435 	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2436 	 * Then enable protected mode.	However, PCID cannot be enabled
2437 	 * if EFER.LMA=0, so set it separately.
2438 	 */
2439 	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2440 	if (bad)
2441 		return X86EMUL_UNHANDLEABLE;
2442 
2443 	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2444 	if (bad)
2445 		return X86EMUL_UNHANDLEABLE;
2446 
2447 	if (cr4 & X86_CR4_PCIDE) {
2448 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2449 		if (bad)
2450 			return X86EMUL_UNHANDLEABLE;
2451 		if (pcid) {
2452 			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2453 			if (bad)
2454 				return X86EMUL_UNHANDLEABLE;
2455 		}
2456 
2457 	}
2458 
2459 	return X86EMUL_CONTINUE;
2460 }
2461 
2462 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2463 			     const char *smstate)
2464 {
2465 	struct desc_struct desc;
2466 	struct desc_ptr dt;
2467 	u16 selector;
2468 	u32 val, cr0, cr3, cr4;
2469 	int i;
2470 
2471 	cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
2472 	cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
2473 	ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2474 	ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
2475 
2476 	for (i = 0; i < 8; i++)
2477 		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2478 
2479 	val = GET_SMSTATE(u32, smstate, 0x7fcc);
2480 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2481 	val = GET_SMSTATE(u32, smstate, 0x7fc8);
2482 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2483 
2484 	selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
2485 	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
2486 	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
2487 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
2488 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2489 
2490 	selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
2491 	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
2492 	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
2493 	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
2494 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2495 
2496 	dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
2497 	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
2498 	ctxt->ops->set_gdt(ctxt, &dt);
2499 
2500 	dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
2501 	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
2502 	ctxt->ops->set_idt(ctxt, &dt);
2503 
2504 	for (i = 0; i < 6; i++) {
2505 		int r = rsm_load_seg_32(ctxt, smstate, i);
2506 		if (r != X86EMUL_CONTINUE)
2507 			return r;
2508 	}
2509 
2510 	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2511 
2512 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2513 
2514 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2515 }
2516 
2517 #ifdef CONFIG_X86_64
2518 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2519 			     const char *smstate)
2520 {
2521 	struct desc_struct desc;
2522 	struct desc_ptr dt;
2523 	u64 val, cr0, cr3, cr4;
2524 	u32 base3;
2525 	u16 selector;
2526 	int i, r;
2527 
2528 	for (i = 0; i < 16; i++)
2529 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2530 
2531 	ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
2532 	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2533 
2534 	val = GET_SMSTATE(u32, smstate, 0x7f68);
2535 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2536 	val = GET_SMSTATE(u32, smstate, 0x7f60);
2537 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2538 
2539 	cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
2540 	cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
2541 	cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
2542 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2543 	val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
2544 	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2545 
2546 	selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
2547 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2548 	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
2549 	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
2550 	base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
2551 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2552 
2553 	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
2554 	dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
2555 	ctxt->ops->set_idt(ctxt, &dt);
2556 
2557 	selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
2558 	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2559 	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
2560 	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
2561 	base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
2562 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2563 
2564 	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
2565 	dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
2566 	ctxt->ops->set_gdt(ctxt, &dt);
2567 
2568 	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2569 	if (r != X86EMUL_CONTINUE)
2570 		return r;
2571 
2572 	for (i = 0; i < 6; i++) {
2573 		r = rsm_load_seg_64(ctxt, smstate, i);
2574 		if (r != X86EMUL_CONTINUE)
2575 			return r;
2576 	}
2577 
2578 	return X86EMUL_CONTINUE;
2579 }
2580 #endif
2581 
2582 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2583 {
2584 	unsigned long cr0, cr4, efer;
2585 	char buf[512];
2586 	u64 smbase;
2587 	int ret;
2588 
2589 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2590 		return emulate_ud(ctxt);
2591 
2592 	smbase = ctxt->ops->get_smbase(ctxt);
2593 
2594 	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2595 	if (ret != X86EMUL_CONTINUE)
2596 		return X86EMUL_UNHANDLEABLE;
2597 
2598 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2599 		ctxt->ops->set_nmi_mask(ctxt, false);
2600 
2601 	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2602 		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2603 
2604 	/*
2605 	 * Get back to real mode, to prepare a safe state in which to load
2606 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2607 	 * supports long mode.
2608 	 */
2609 	if (emulator_has_longmode(ctxt)) {
2610 		struct desc_struct cs_desc;
2611 
2612 		/* Zero CR4.PCIDE before CR0.PG.  */
2613 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2614 		if (cr4 & X86_CR4_PCIDE)
2615 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2616 
2617 		/* A 32-bit code segment is required to clear EFER.LMA.  */
2618 		memset(&cs_desc, 0, sizeof(cs_desc));
2619 		cs_desc.type = 0xb;
2620 		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2621 		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2622 	}
2623 
2624 	/* For the 64-bit case, this will clear EFER.LMA.  */
2625 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2626 	if (cr0 & X86_CR0_PE)
2627 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2628 
2629 	if (emulator_has_longmode(ctxt)) {
2630 		/* Clear CR4.PAE before clearing EFER.LME. */
2631 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2632 		if (cr4 & X86_CR4_PAE)
2633 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2634 
2635 		/* And finally go back to 32-bit mode.  */
2636 		efer = 0;
2637 		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2638 	}
2639 
2640 	/*
2641 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2642 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2643 	 * state-save area.
2644 	 */
2645 	if (ctxt->ops->pre_leave_smm(ctxt, buf))
2646 		return X86EMUL_UNHANDLEABLE;
2647 
2648 #ifdef CONFIG_X86_64
2649 	if (emulator_has_longmode(ctxt))
2650 		ret = rsm_load_state_64(ctxt, buf);
2651 	else
2652 #endif
2653 		ret = rsm_load_state_32(ctxt, buf);
2654 
2655 	if (ret != X86EMUL_CONTINUE) {
2656 		/* FIXME: should triple fault */
2657 		return X86EMUL_UNHANDLEABLE;
2658 	}
2659 
2660 	ctxt->ops->post_leave_smm(ctxt);
2661 
2662 	return X86EMUL_CONTINUE;
2663 }
2664 
2665 static void
2666 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2667 			struct desc_struct *cs, struct desc_struct *ss)
2668 {
2669 	cs->l = 0;		/* will be adjusted later */
2670 	set_desc_base(cs, 0);	/* flat segment */
2671 	cs->g = 1;		/* 4kb granularity */
2672 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2673 	cs->type = 0x0b;	/* Read, Execute, Accessed */
2674 	cs->s = 1;
2675 	cs->dpl = 0;		/* will be adjusted later */
2676 	cs->p = 1;
2677 	cs->d = 1;
2678 	cs->avl = 0;
2679 
2680 	set_desc_base(ss, 0);	/* flat segment */
2681 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2682 	ss->g = 1;		/* 4kb granularity */
2683 	ss->s = 1;
2684 	ss->type = 0x03;	/* Read/Write, Accessed */
2685 	ss->d = 1;		/* 32bit stack segment */
2686 	ss->dpl = 0;
2687 	ss->p = 1;
2688 	ss->l = 0;
2689 	ss->avl = 0;
2690 }
2691 
2692 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2693 {
2694 	u32 eax, ebx, ecx, edx;
2695 
2696 	eax = ecx = 0;
2697 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2698 	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2699 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2700 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2701 }
2702 
2703 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2704 {
2705 	const struct x86_emulate_ops *ops = ctxt->ops;
2706 	u32 eax, ebx, ecx, edx;
2707 
2708 	/*
2709 	 * syscall should always be enabled in longmode - so only become
2710 	 * vendor specific (cpuid) if other modes are active...
2711 	 */
2712 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2713 		return true;
2714 
2715 	eax = 0x00000000;
2716 	ecx = 0x00000000;
2717 	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2718 	/*
2719 	 * Intel ("GenuineIntel")
2720 	 * remark: Intel CPUs only support "syscall" in 64bit
2721 	 * longmode. Also an 64bit guest with a
2722 	 * 32bit compat-app running will #UD !! While this
2723 	 * behaviour can be fixed (by emulating) into AMD
2724 	 * response - CPUs of AMD can't behave like Intel.
2725 	 */
2726 	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2727 	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2728 	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2729 		return false;
2730 
2731 	/* AMD ("AuthenticAMD") */
2732 	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2733 	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2734 	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2735 		return true;
2736 
2737 	/* AMD ("AMDisbetter!") */
2738 	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2739 	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2740 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2741 		return true;
2742 
2743 	/* Hygon ("HygonGenuine") */
2744 	if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
2745 	    ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
2746 	    edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
2747 		return true;
2748 
2749 	/*
2750 	 * default: (not Intel, not AMD, not Hygon), apply Intel's
2751 	 * stricter rules...
2752 	 */
2753 	return false;
2754 }
2755 
2756 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2757 {
2758 	const struct x86_emulate_ops *ops = ctxt->ops;
2759 	struct desc_struct cs, ss;
2760 	u64 msr_data;
2761 	u16 cs_sel, ss_sel;
2762 	u64 efer = 0;
2763 
2764 	/* syscall is not available in real mode */
2765 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2766 	    ctxt->mode == X86EMUL_MODE_VM86)
2767 		return emulate_ud(ctxt);
2768 
2769 	if (!(em_syscall_is_enabled(ctxt)))
2770 		return emulate_ud(ctxt);
2771 
2772 	ops->get_msr(ctxt, MSR_EFER, &efer);
2773 	if (!(efer & EFER_SCE))
2774 		return emulate_ud(ctxt);
2775 
2776 	setup_syscalls_segments(ctxt, &cs, &ss);
2777 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2778 	msr_data >>= 32;
2779 	cs_sel = (u16)(msr_data & 0xfffc);
2780 	ss_sel = (u16)(msr_data + 8);
2781 
2782 	if (efer & EFER_LMA) {
2783 		cs.d = 0;
2784 		cs.l = 1;
2785 	}
2786 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2787 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2788 
2789 	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2790 	if (efer & EFER_LMA) {
2791 #ifdef CONFIG_X86_64
2792 		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2793 
2794 		ops->get_msr(ctxt,
2795 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2796 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2797 		ctxt->_eip = msr_data;
2798 
2799 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2800 		ctxt->eflags &= ~msr_data;
2801 		ctxt->eflags |= X86_EFLAGS_FIXED;
2802 #endif
2803 	} else {
2804 		/* legacy mode */
2805 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2806 		ctxt->_eip = (u32)msr_data;
2807 
2808 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2809 	}
2810 
2811 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2812 	return X86EMUL_CONTINUE;
2813 }
2814 
2815 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2816 {
2817 	const struct x86_emulate_ops *ops = ctxt->ops;
2818 	struct desc_struct cs, ss;
2819 	u64 msr_data;
2820 	u16 cs_sel, ss_sel;
2821 	u64 efer = 0;
2822 
2823 	ops->get_msr(ctxt, MSR_EFER, &efer);
2824 	/* inject #GP if in real mode */
2825 	if (ctxt->mode == X86EMUL_MODE_REAL)
2826 		return emulate_gp(ctxt, 0);
2827 
2828 	/*
2829 	 * Not recognized on AMD in compat mode (but is recognized in legacy
2830 	 * mode).
2831 	 */
2832 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2833 	    && !vendor_intel(ctxt))
2834 		return emulate_ud(ctxt);
2835 
2836 	/* sysenter/sysexit have not been tested in 64bit mode. */
2837 	if (ctxt->mode == X86EMUL_MODE_PROT64)
2838 		return X86EMUL_UNHANDLEABLE;
2839 
2840 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2841 	if ((msr_data & 0xfffc) == 0x0)
2842 		return emulate_gp(ctxt, 0);
2843 
2844 	setup_syscalls_segments(ctxt, &cs, &ss);
2845 	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2846 	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2847 	ss_sel = cs_sel + 8;
2848 	if (efer & EFER_LMA) {
2849 		cs.d = 0;
2850 		cs.l = 1;
2851 	}
2852 
2853 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2854 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2855 
2856 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2857 	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2858 
2859 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2860 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2861 							      (u32)msr_data;
2862 
2863 	return X86EMUL_CONTINUE;
2864 }
2865 
2866 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2867 {
2868 	const struct x86_emulate_ops *ops = ctxt->ops;
2869 	struct desc_struct cs, ss;
2870 	u64 msr_data, rcx, rdx;
2871 	int usermode;
2872 	u16 cs_sel = 0, ss_sel = 0;
2873 
2874 	/* inject #GP if in real mode or Virtual 8086 mode */
2875 	if (ctxt->mode == X86EMUL_MODE_REAL ||
2876 	    ctxt->mode == X86EMUL_MODE_VM86)
2877 		return emulate_gp(ctxt, 0);
2878 
2879 	setup_syscalls_segments(ctxt, &cs, &ss);
2880 
2881 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2882 		usermode = X86EMUL_MODE_PROT64;
2883 	else
2884 		usermode = X86EMUL_MODE_PROT32;
2885 
2886 	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2887 	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2888 
2889 	cs.dpl = 3;
2890 	ss.dpl = 3;
2891 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2892 	switch (usermode) {
2893 	case X86EMUL_MODE_PROT32:
2894 		cs_sel = (u16)(msr_data + 16);
2895 		if ((msr_data & 0xfffc) == 0x0)
2896 			return emulate_gp(ctxt, 0);
2897 		ss_sel = (u16)(msr_data + 24);
2898 		rcx = (u32)rcx;
2899 		rdx = (u32)rdx;
2900 		break;
2901 	case X86EMUL_MODE_PROT64:
2902 		cs_sel = (u16)(msr_data + 32);
2903 		if (msr_data == 0x0)
2904 			return emulate_gp(ctxt, 0);
2905 		ss_sel = cs_sel + 8;
2906 		cs.d = 0;
2907 		cs.l = 1;
2908 		if (emul_is_noncanonical_address(rcx, ctxt) ||
2909 		    emul_is_noncanonical_address(rdx, ctxt))
2910 			return emulate_gp(ctxt, 0);
2911 		break;
2912 	}
2913 	cs_sel |= SEGMENT_RPL_MASK;
2914 	ss_sel |= SEGMENT_RPL_MASK;
2915 
2916 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2917 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2918 
2919 	ctxt->_eip = rdx;
2920 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2921 
2922 	return X86EMUL_CONTINUE;
2923 }
2924 
2925 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2926 {
2927 	int iopl;
2928 	if (ctxt->mode == X86EMUL_MODE_REAL)
2929 		return false;
2930 	if (ctxt->mode == X86EMUL_MODE_VM86)
2931 		return true;
2932 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2933 	return ctxt->ops->cpl(ctxt) > iopl;
2934 }
2935 
2936 #define VMWARE_PORT_VMPORT	(0x5658)
2937 #define VMWARE_PORT_VMRPC	(0x5659)
2938 
2939 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2940 					    u16 port, u16 len)
2941 {
2942 	const struct x86_emulate_ops *ops = ctxt->ops;
2943 	struct desc_struct tr_seg;
2944 	u32 base3;
2945 	int r;
2946 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2947 	unsigned mask = (1 << len) - 1;
2948 	unsigned long base;
2949 
2950 	/*
2951 	 * VMware allows access to these ports even if denied
2952 	 * by TSS I/O permission bitmap. Mimic behavior.
2953 	 */
2954 	if (enable_vmware_backdoor &&
2955 	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2956 		return true;
2957 
2958 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2959 	if (!tr_seg.p)
2960 		return false;
2961 	if (desc_limit_scaled(&tr_seg) < 103)
2962 		return false;
2963 	base = get_desc_base(&tr_seg);
2964 #ifdef CONFIG_X86_64
2965 	base |= ((u64)base3) << 32;
2966 #endif
2967 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2968 	if (r != X86EMUL_CONTINUE)
2969 		return false;
2970 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2971 		return false;
2972 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2973 	if (r != X86EMUL_CONTINUE)
2974 		return false;
2975 	if ((perm >> bit_idx) & mask)
2976 		return false;
2977 	return true;
2978 }
2979 
2980 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2981 				 u16 port, u16 len)
2982 {
2983 	if (ctxt->perm_ok)
2984 		return true;
2985 
2986 	if (emulator_bad_iopl(ctxt))
2987 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2988 			return false;
2989 
2990 	ctxt->perm_ok = true;
2991 
2992 	return true;
2993 }
2994 
2995 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2996 {
2997 	/*
2998 	 * Intel CPUs mask the counter and pointers in quite strange
2999 	 * manner when ECX is zero due to REP-string optimizations.
3000 	 */
3001 #ifdef CONFIG_X86_64
3002 	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
3003 		return;
3004 
3005 	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
3006 
3007 	switch (ctxt->b) {
3008 	case 0xa4:	/* movsb */
3009 	case 0xa5:	/* movsd/w */
3010 		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
3011 		/* fall through */
3012 	case 0xaa:	/* stosb */
3013 	case 0xab:	/* stosd/w */
3014 		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3015 	}
3016 #endif
3017 }
3018 
3019 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3020 				struct tss_segment_16 *tss)
3021 {
3022 	tss->ip = ctxt->_eip;
3023 	tss->flag = ctxt->eflags;
3024 	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3025 	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3026 	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3027 	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3028 	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3029 	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3030 	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3031 	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3032 
3033 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3034 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3035 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3036 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3037 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3038 }
3039 
3040 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3041 				 struct tss_segment_16 *tss)
3042 {
3043 	int ret;
3044 	u8 cpl;
3045 
3046 	ctxt->_eip = tss->ip;
3047 	ctxt->eflags = tss->flag | 2;
3048 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3049 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3050 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3051 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3052 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3053 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3054 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3055 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3056 
3057 	/*
3058 	 * SDM says that segment selectors are loaded before segment
3059 	 * descriptors
3060 	 */
3061 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3062 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3063 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3064 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3065 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3066 
3067 	cpl = tss->cs & 3;
3068 
3069 	/*
3070 	 * Now load segment descriptors. If fault happens at this stage
3071 	 * it is handled in a context of new task
3072 	 */
3073 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3074 					X86_TRANSFER_TASK_SWITCH, NULL);
3075 	if (ret != X86EMUL_CONTINUE)
3076 		return ret;
3077 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3078 					X86_TRANSFER_TASK_SWITCH, NULL);
3079 	if (ret != X86EMUL_CONTINUE)
3080 		return ret;
3081 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3082 					X86_TRANSFER_TASK_SWITCH, NULL);
3083 	if (ret != X86EMUL_CONTINUE)
3084 		return ret;
3085 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3086 					X86_TRANSFER_TASK_SWITCH, NULL);
3087 	if (ret != X86EMUL_CONTINUE)
3088 		return ret;
3089 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3090 					X86_TRANSFER_TASK_SWITCH, NULL);
3091 	if (ret != X86EMUL_CONTINUE)
3092 		return ret;
3093 
3094 	return X86EMUL_CONTINUE;
3095 }
3096 
3097 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3098 			  u16 tss_selector, u16 old_tss_sel,
3099 			  ulong old_tss_base, struct desc_struct *new_desc)
3100 {
3101 	struct tss_segment_16 tss_seg;
3102 	int ret;
3103 	u32 new_tss_base = get_desc_base(new_desc);
3104 
3105 	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3106 	if (ret != X86EMUL_CONTINUE)
3107 		return ret;
3108 
3109 	save_state_to_tss16(ctxt, &tss_seg);
3110 
3111 	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3112 	if (ret != X86EMUL_CONTINUE)
3113 		return ret;
3114 
3115 	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3116 	if (ret != X86EMUL_CONTINUE)
3117 		return ret;
3118 
3119 	if (old_tss_sel != 0xffff) {
3120 		tss_seg.prev_task_link = old_tss_sel;
3121 
3122 		ret = linear_write_system(ctxt, new_tss_base,
3123 					  &tss_seg.prev_task_link,
3124 					  sizeof(tss_seg.prev_task_link));
3125 		if (ret != X86EMUL_CONTINUE)
3126 			return ret;
3127 	}
3128 
3129 	return load_state_from_tss16(ctxt, &tss_seg);
3130 }
3131 
3132 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3133 				struct tss_segment_32 *tss)
3134 {
3135 	/* CR3 and ldt selector are not saved intentionally */
3136 	tss->eip = ctxt->_eip;
3137 	tss->eflags = ctxt->eflags;
3138 	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3139 	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3140 	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3141 	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3142 	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3143 	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3144 	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3145 	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3146 
3147 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3148 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3149 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3150 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3151 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3152 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3153 }
3154 
3155 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3156 				 struct tss_segment_32 *tss)
3157 {
3158 	int ret;
3159 	u8 cpl;
3160 
3161 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3162 		return emulate_gp(ctxt, 0);
3163 	ctxt->_eip = tss->eip;
3164 	ctxt->eflags = tss->eflags | 2;
3165 
3166 	/* General purpose registers */
3167 	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3168 	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3169 	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3170 	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3171 	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3172 	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3173 	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3174 	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3175 
3176 	/*
3177 	 * SDM says that segment selectors are loaded before segment
3178 	 * descriptors.  This is important because CPL checks will
3179 	 * use CS.RPL.
3180 	 */
3181 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3182 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3183 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3184 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3185 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3186 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3187 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3188 
3189 	/*
3190 	 * If we're switching between Protected Mode and VM86, we need to make
3191 	 * sure to update the mode before loading the segment descriptors so
3192 	 * that the selectors are interpreted correctly.
3193 	 */
3194 	if (ctxt->eflags & X86_EFLAGS_VM) {
3195 		ctxt->mode = X86EMUL_MODE_VM86;
3196 		cpl = 3;
3197 	} else {
3198 		ctxt->mode = X86EMUL_MODE_PROT32;
3199 		cpl = tss->cs & 3;
3200 	}
3201 
3202 	/*
3203 	 * Now load segment descriptors. If fault happenes at this stage
3204 	 * it is handled in a context of new task
3205 	 */
3206 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3207 					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3208 	if (ret != X86EMUL_CONTINUE)
3209 		return ret;
3210 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3211 					X86_TRANSFER_TASK_SWITCH, NULL);
3212 	if (ret != X86EMUL_CONTINUE)
3213 		return ret;
3214 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3215 					X86_TRANSFER_TASK_SWITCH, NULL);
3216 	if (ret != X86EMUL_CONTINUE)
3217 		return ret;
3218 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3219 					X86_TRANSFER_TASK_SWITCH, NULL);
3220 	if (ret != X86EMUL_CONTINUE)
3221 		return ret;
3222 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3223 					X86_TRANSFER_TASK_SWITCH, NULL);
3224 	if (ret != X86EMUL_CONTINUE)
3225 		return ret;
3226 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3227 					X86_TRANSFER_TASK_SWITCH, NULL);
3228 	if (ret != X86EMUL_CONTINUE)
3229 		return ret;
3230 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3231 					X86_TRANSFER_TASK_SWITCH, NULL);
3232 
3233 	return ret;
3234 }
3235 
3236 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3237 			  u16 tss_selector, u16 old_tss_sel,
3238 			  ulong old_tss_base, struct desc_struct *new_desc)
3239 {
3240 	struct tss_segment_32 tss_seg;
3241 	int ret;
3242 	u32 new_tss_base = get_desc_base(new_desc);
3243 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
3244 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3245 
3246 	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3247 	if (ret != X86EMUL_CONTINUE)
3248 		return ret;
3249 
3250 	save_state_to_tss32(ctxt, &tss_seg);
3251 
3252 	/* Only GP registers and segment selectors are saved */
3253 	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3254 				  ldt_sel_offset - eip_offset);
3255 	if (ret != X86EMUL_CONTINUE)
3256 		return ret;
3257 
3258 	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3259 	if (ret != X86EMUL_CONTINUE)
3260 		return ret;
3261 
3262 	if (old_tss_sel != 0xffff) {
3263 		tss_seg.prev_task_link = old_tss_sel;
3264 
3265 		ret = linear_write_system(ctxt, new_tss_base,
3266 					  &tss_seg.prev_task_link,
3267 					  sizeof(tss_seg.prev_task_link));
3268 		if (ret != X86EMUL_CONTINUE)
3269 			return ret;
3270 	}
3271 
3272 	return load_state_from_tss32(ctxt, &tss_seg);
3273 }
3274 
3275 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3276 				   u16 tss_selector, int idt_index, int reason,
3277 				   bool has_error_code, u32 error_code)
3278 {
3279 	const struct x86_emulate_ops *ops = ctxt->ops;
3280 	struct desc_struct curr_tss_desc, next_tss_desc;
3281 	int ret;
3282 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3283 	ulong old_tss_base =
3284 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3285 	u32 desc_limit;
3286 	ulong desc_addr, dr7;
3287 
3288 	/* FIXME: old_tss_base == ~0 ? */
3289 
3290 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3291 	if (ret != X86EMUL_CONTINUE)
3292 		return ret;
3293 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3294 	if (ret != X86EMUL_CONTINUE)
3295 		return ret;
3296 
3297 	/* FIXME: check that next_tss_desc is tss */
3298 
3299 	/*
3300 	 * Check privileges. The three cases are task switch caused by...
3301 	 *
3302 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3303 	 * 2. Exception/IRQ/iret: No check is performed
3304 	 * 3. jmp/call to TSS/task-gate: No check is performed since the
3305 	 *    hardware checks it before exiting.
3306 	 */
3307 	if (reason == TASK_SWITCH_GATE) {
3308 		if (idt_index != -1) {
3309 			/* Software interrupts */
3310 			struct desc_struct task_gate_desc;
3311 			int dpl;
3312 
3313 			ret = read_interrupt_descriptor(ctxt, idt_index,
3314 							&task_gate_desc);
3315 			if (ret != X86EMUL_CONTINUE)
3316 				return ret;
3317 
3318 			dpl = task_gate_desc.dpl;
3319 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3320 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3321 		}
3322 	}
3323 
3324 	desc_limit = desc_limit_scaled(&next_tss_desc);
3325 	if (!next_tss_desc.p ||
3326 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3327 	     desc_limit < 0x2b)) {
3328 		return emulate_ts(ctxt, tss_selector & 0xfffc);
3329 	}
3330 
3331 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3332 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3333 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3334 	}
3335 
3336 	if (reason == TASK_SWITCH_IRET)
3337 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3338 
3339 	/* set back link to prev task only if NT bit is set in eflags
3340 	   note that old_tss_sel is not used after this point */
3341 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3342 		old_tss_sel = 0xffff;
3343 
3344 	if (next_tss_desc.type & 8)
3345 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3346 				     old_tss_base, &next_tss_desc);
3347 	else
3348 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3349 				     old_tss_base, &next_tss_desc);
3350 	if (ret != X86EMUL_CONTINUE)
3351 		return ret;
3352 
3353 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3354 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3355 
3356 	if (reason != TASK_SWITCH_IRET) {
3357 		next_tss_desc.type |= (1 << 1); /* set busy flag */
3358 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3359 	}
3360 
3361 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3362 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3363 
3364 	if (has_error_code) {
3365 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3366 		ctxt->lock_prefix = 0;
3367 		ctxt->src.val = (unsigned long) error_code;
3368 		ret = em_push(ctxt);
3369 	}
3370 
3371 	ops->get_dr(ctxt, 7, &dr7);
3372 	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3373 
3374 	return ret;
3375 }
3376 
3377 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3378 			 u16 tss_selector, int idt_index, int reason,
3379 			 bool has_error_code, u32 error_code)
3380 {
3381 	int rc;
3382 
3383 	invalidate_registers(ctxt);
3384 	ctxt->_eip = ctxt->eip;
3385 	ctxt->dst.type = OP_NONE;
3386 
3387 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3388 				     has_error_code, error_code);
3389 
3390 	if (rc == X86EMUL_CONTINUE) {
3391 		ctxt->eip = ctxt->_eip;
3392 		writeback_registers(ctxt);
3393 	}
3394 
3395 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3396 }
3397 
3398 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3399 		struct operand *op)
3400 {
3401 	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3402 
3403 	register_address_increment(ctxt, reg, df * op->bytes);
3404 	op->addr.mem.ea = register_address(ctxt, reg);
3405 }
3406 
3407 static int em_das(struct x86_emulate_ctxt *ctxt)
3408 {
3409 	u8 al, old_al;
3410 	bool af, cf, old_cf;
3411 
3412 	cf = ctxt->eflags & X86_EFLAGS_CF;
3413 	al = ctxt->dst.val;
3414 
3415 	old_al = al;
3416 	old_cf = cf;
3417 	cf = false;
3418 	af = ctxt->eflags & X86_EFLAGS_AF;
3419 	if ((al & 0x0f) > 9 || af) {
3420 		al -= 6;
3421 		cf = old_cf | (al >= 250);
3422 		af = true;
3423 	} else {
3424 		af = false;
3425 	}
3426 	if (old_al > 0x99 || old_cf) {
3427 		al -= 0x60;
3428 		cf = true;
3429 	}
3430 
3431 	ctxt->dst.val = al;
3432 	/* Set PF, ZF, SF */
3433 	ctxt->src.type = OP_IMM;
3434 	ctxt->src.val = 0;
3435 	ctxt->src.bytes = 1;
3436 	fastop(ctxt, em_or);
3437 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3438 	if (cf)
3439 		ctxt->eflags |= X86_EFLAGS_CF;
3440 	if (af)
3441 		ctxt->eflags |= X86_EFLAGS_AF;
3442 	return X86EMUL_CONTINUE;
3443 }
3444 
3445 static int em_aam(struct x86_emulate_ctxt *ctxt)
3446 {
3447 	u8 al, ah;
3448 
3449 	if (ctxt->src.val == 0)
3450 		return emulate_de(ctxt);
3451 
3452 	al = ctxt->dst.val & 0xff;
3453 	ah = al / ctxt->src.val;
3454 	al %= ctxt->src.val;
3455 
3456 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3457 
3458 	/* Set PF, ZF, SF */
3459 	ctxt->src.type = OP_IMM;
3460 	ctxt->src.val = 0;
3461 	ctxt->src.bytes = 1;
3462 	fastop(ctxt, em_or);
3463 
3464 	return X86EMUL_CONTINUE;
3465 }
3466 
3467 static int em_aad(struct x86_emulate_ctxt *ctxt)
3468 {
3469 	u8 al = ctxt->dst.val & 0xff;
3470 	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3471 
3472 	al = (al + (ah * ctxt->src.val)) & 0xff;
3473 
3474 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3475 
3476 	/* Set PF, ZF, SF */
3477 	ctxt->src.type = OP_IMM;
3478 	ctxt->src.val = 0;
3479 	ctxt->src.bytes = 1;
3480 	fastop(ctxt, em_or);
3481 
3482 	return X86EMUL_CONTINUE;
3483 }
3484 
3485 static int em_call(struct x86_emulate_ctxt *ctxt)
3486 {
3487 	int rc;
3488 	long rel = ctxt->src.val;
3489 
3490 	ctxt->src.val = (unsigned long)ctxt->_eip;
3491 	rc = jmp_rel(ctxt, rel);
3492 	if (rc != X86EMUL_CONTINUE)
3493 		return rc;
3494 	return em_push(ctxt);
3495 }
3496 
3497 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3498 {
3499 	u16 sel, old_cs;
3500 	ulong old_eip;
3501 	int rc;
3502 	struct desc_struct old_desc, new_desc;
3503 	const struct x86_emulate_ops *ops = ctxt->ops;
3504 	int cpl = ctxt->ops->cpl(ctxt);
3505 	enum x86emul_mode prev_mode = ctxt->mode;
3506 
3507 	old_eip = ctxt->_eip;
3508 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3509 
3510 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3511 	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3512 				       X86_TRANSFER_CALL_JMP, &new_desc);
3513 	if (rc != X86EMUL_CONTINUE)
3514 		return rc;
3515 
3516 	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3517 	if (rc != X86EMUL_CONTINUE)
3518 		goto fail;
3519 
3520 	ctxt->src.val = old_cs;
3521 	rc = em_push(ctxt);
3522 	if (rc != X86EMUL_CONTINUE)
3523 		goto fail;
3524 
3525 	ctxt->src.val = old_eip;
3526 	rc = em_push(ctxt);
3527 	/* If we failed, we tainted the memory, but the very least we should
3528 	   restore cs */
3529 	if (rc != X86EMUL_CONTINUE) {
3530 		pr_warn_once("faulting far call emulation tainted memory\n");
3531 		goto fail;
3532 	}
3533 	return rc;
3534 fail:
3535 	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3536 	ctxt->mode = prev_mode;
3537 	return rc;
3538 
3539 }
3540 
3541 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3542 {
3543 	int rc;
3544 	unsigned long eip;
3545 
3546 	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3547 	if (rc != X86EMUL_CONTINUE)
3548 		return rc;
3549 	rc = assign_eip_near(ctxt, eip);
3550 	if (rc != X86EMUL_CONTINUE)
3551 		return rc;
3552 	rsp_increment(ctxt, ctxt->src.val);
3553 	return X86EMUL_CONTINUE;
3554 }
3555 
3556 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3557 {
3558 	/* Write back the register source. */
3559 	ctxt->src.val = ctxt->dst.val;
3560 	write_register_operand(&ctxt->src);
3561 
3562 	/* Write back the memory destination with implicit LOCK prefix. */
3563 	ctxt->dst.val = ctxt->src.orig_val;
3564 	ctxt->lock_prefix = 1;
3565 	return X86EMUL_CONTINUE;
3566 }
3567 
3568 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3569 {
3570 	ctxt->dst.val = ctxt->src2.val;
3571 	return fastop(ctxt, em_imul);
3572 }
3573 
3574 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3575 {
3576 	ctxt->dst.type = OP_REG;
3577 	ctxt->dst.bytes = ctxt->src.bytes;
3578 	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3579 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3580 
3581 	return X86EMUL_CONTINUE;
3582 }
3583 
3584 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3585 {
3586 	u64 tsc_aux = 0;
3587 
3588 	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3589 		return emulate_gp(ctxt, 0);
3590 	ctxt->dst.val = tsc_aux;
3591 	return X86EMUL_CONTINUE;
3592 }
3593 
3594 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3595 {
3596 	u64 tsc = 0;
3597 
3598 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3599 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3600 	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3601 	return X86EMUL_CONTINUE;
3602 }
3603 
3604 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3605 {
3606 	u64 pmc;
3607 
3608 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3609 		return emulate_gp(ctxt, 0);
3610 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3611 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3612 	return X86EMUL_CONTINUE;
3613 }
3614 
3615 static int em_mov(struct x86_emulate_ctxt *ctxt)
3616 {
3617 	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3618 	return X86EMUL_CONTINUE;
3619 }
3620 
3621 #define FFL(x) bit(X86_FEATURE_##x)
3622 
3623 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3624 {
3625 	u32 ebx, ecx, edx, eax = 1;
3626 	u16 tmp;
3627 
3628 	/*
3629 	 * Check MOVBE is set in the guest-visible CPUID leaf.
3630 	 */
3631 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3632 	if (!(ecx & FFL(MOVBE)))
3633 		return emulate_ud(ctxt);
3634 
3635 	switch (ctxt->op_bytes) {
3636 	case 2:
3637 		/*
3638 		 * From MOVBE definition: "...When the operand size is 16 bits,
3639 		 * the upper word of the destination register remains unchanged
3640 		 * ..."
3641 		 *
3642 		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3643 		 * rules so we have to do the operation almost per hand.
3644 		 */
3645 		tmp = (u16)ctxt->src.val;
3646 		ctxt->dst.val &= ~0xffffUL;
3647 		ctxt->dst.val |= (unsigned long)swab16(tmp);
3648 		break;
3649 	case 4:
3650 		ctxt->dst.val = swab32((u32)ctxt->src.val);
3651 		break;
3652 	case 8:
3653 		ctxt->dst.val = swab64(ctxt->src.val);
3654 		break;
3655 	default:
3656 		BUG();
3657 	}
3658 	return X86EMUL_CONTINUE;
3659 }
3660 
3661 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3662 {
3663 	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3664 		return emulate_gp(ctxt, 0);
3665 
3666 	/* Disable writeback. */
3667 	ctxt->dst.type = OP_NONE;
3668 	return X86EMUL_CONTINUE;
3669 }
3670 
3671 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3672 {
3673 	unsigned long val;
3674 
3675 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3676 		val = ctxt->src.val & ~0ULL;
3677 	else
3678 		val = ctxt->src.val & ~0U;
3679 
3680 	/* #UD condition is already handled. */
3681 	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3682 		return emulate_gp(ctxt, 0);
3683 
3684 	/* Disable writeback. */
3685 	ctxt->dst.type = OP_NONE;
3686 	return X86EMUL_CONTINUE;
3687 }
3688 
3689 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3690 {
3691 	u64 msr_data;
3692 
3693 	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3694 		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3695 	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3696 		return emulate_gp(ctxt, 0);
3697 
3698 	return X86EMUL_CONTINUE;
3699 }
3700 
3701 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3702 {
3703 	u64 msr_data;
3704 
3705 	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3706 		return emulate_gp(ctxt, 0);
3707 
3708 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3709 	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3710 	return X86EMUL_CONTINUE;
3711 }
3712 
3713 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3714 {
3715 	if (segment > VCPU_SREG_GS &&
3716 	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3717 	    ctxt->ops->cpl(ctxt) > 0)
3718 		return emulate_gp(ctxt, 0);
3719 
3720 	ctxt->dst.val = get_segment_selector(ctxt, segment);
3721 	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3722 		ctxt->dst.bytes = 2;
3723 	return X86EMUL_CONTINUE;
3724 }
3725 
3726 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3727 {
3728 	if (ctxt->modrm_reg > VCPU_SREG_GS)
3729 		return emulate_ud(ctxt);
3730 
3731 	return em_store_sreg(ctxt, ctxt->modrm_reg);
3732 }
3733 
3734 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3735 {
3736 	u16 sel = ctxt->src.val;
3737 
3738 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3739 		return emulate_ud(ctxt);
3740 
3741 	if (ctxt->modrm_reg == VCPU_SREG_SS)
3742 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3743 
3744 	/* Disable writeback. */
3745 	ctxt->dst.type = OP_NONE;
3746 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3747 }
3748 
3749 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3750 {
3751 	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3752 }
3753 
3754 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3755 {
3756 	u16 sel = ctxt->src.val;
3757 
3758 	/* Disable writeback. */
3759 	ctxt->dst.type = OP_NONE;
3760 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3761 }
3762 
3763 static int em_str(struct x86_emulate_ctxt *ctxt)
3764 {
3765 	return em_store_sreg(ctxt, VCPU_SREG_TR);
3766 }
3767 
3768 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3769 {
3770 	u16 sel = ctxt->src.val;
3771 
3772 	/* Disable writeback. */
3773 	ctxt->dst.type = OP_NONE;
3774 	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3775 }
3776 
3777 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3778 {
3779 	int rc;
3780 	ulong linear;
3781 
3782 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3783 	if (rc == X86EMUL_CONTINUE)
3784 		ctxt->ops->invlpg(ctxt, linear);
3785 	/* Disable writeback. */
3786 	ctxt->dst.type = OP_NONE;
3787 	return X86EMUL_CONTINUE;
3788 }
3789 
3790 static int em_clts(struct x86_emulate_ctxt *ctxt)
3791 {
3792 	ulong cr0;
3793 
3794 	cr0 = ctxt->ops->get_cr(ctxt, 0);
3795 	cr0 &= ~X86_CR0_TS;
3796 	ctxt->ops->set_cr(ctxt, 0, cr0);
3797 	return X86EMUL_CONTINUE;
3798 }
3799 
3800 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3801 {
3802 	int rc = ctxt->ops->fix_hypercall(ctxt);
3803 
3804 	if (rc != X86EMUL_CONTINUE)
3805 		return rc;
3806 
3807 	/* Let the processor re-execute the fixed hypercall */
3808 	ctxt->_eip = ctxt->eip;
3809 	/* Disable writeback. */
3810 	ctxt->dst.type = OP_NONE;
3811 	return X86EMUL_CONTINUE;
3812 }
3813 
3814 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3815 				  void (*get)(struct x86_emulate_ctxt *ctxt,
3816 					      struct desc_ptr *ptr))
3817 {
3818 	struct desc_ptr desc_ptr;
3819 
3820 	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3821 	    ctxt->ops->cpl(ctxt) > 0)
3822 		return emulate_gp(ctxt, 0);
3823 
3824 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3825 		ctxt->op_bytes = 8;
3826 	get(ctxt, &desc_ptr);
3827 	if (ctxt->op_bytes == 2) {
3828 		ctxt->op_bytes = 4;
3829 		desc_ptr.address &= 0x00ffffff;
3830 	}
3831 	/* Disable writeback. */
3832 	ctxt->dst.type = OP_NONE;
3833 	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3834 				   &desc_ptr, 2 + ctxt->op_bytes);
3835 }
3836 
3837 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3838 {
3839 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3840 }
3841 
3842 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3843 {
3844 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3845 }
3846 
3847 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3848 {
3849 	struct desc_ptr desc_ptr;
3850 	int rc;
3851 
3852 	if (ctxt->mode == X86EMUL_MODE_PROT64)
3853 		ctxt->op_bytes = 8;
3854 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3855 			     &desc_ptr.size, &desc_ptr.address,
3856 			     ctxt->op_bytes);
3857 	if (rc != X86EMUL_CONTINUE)
3858 		return rc;
3859 	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3860 	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
3861 		return emulate_gp(ctxt, 0);
3862 	if (lgdt)
3863 		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3864 	else
3865 		ctxt->ops->set_idt(ctxt, &desc_ptr);
3866 	/* Disable writeback. */
3867 	ctxt->dst.type = OP_NONE;
3868 	return X86EMUL_CONTINUE;
3869 }
3870 
3871 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3872 {
3873 	return em_lgdt_lidt(ctxt, true);
3874 }
3875 
3876 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3877 {
3878 	return em_lgdt_lidt(ctxt, false);
3879 }
3880 
3881 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3882 {
3883 	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3884 	    ctxt->ops->cpl(ctxt) > 0)
3885 		return emulate_gp(ctxt, 0);
3886 
3887 	if (ctxt->dst.type == OP_MEM)
3888 		ctxt->dst.bytes = 2;
3889 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3890 	return X86EMUL_CONTINUE;
3891 }
3892 
3893 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3894 {
3895 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3896 			  | (ctxt->src.val & 0x0f));
3897 	ctxt->dst.type = OP_NONE;
3898 	return X86EMUL_CONTINUE;
3899 }
3900 
3901 static int em_loop(struct x86_emulate_ctxt *ctxt)
3902 {
3903 	int rc = X86EMUL_CONTINUE;
3904 
3905 	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3906 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3907 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3908 		rc = jmp_rel(ctxt, ctxt->src.val);
3909 
3910 	return rc;
3911 }
3912 
3913 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3914 {
3915 	int rc = X86EMUL_CONTINUE;
3916 
3917 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3918 		rc = jmp_rel(ctxt, ctxt->src.val);
3919 
3920 	return rc;
3921 }
3922 
3923 static int em_in(struct x86_emulate_ctxt *ctxt)
3924 {
3925 	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3926 			     &ctxt->dst.val))
3927 		return X86EMUL_IO_NEEDED;
3928 
3929 	return X86EMUL_CONTINUE;
3930 }
3931 
3932 static int em_out(struct x86_emulate_ctxt *ctxt)
3933 {
3934 	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3935 				    &ctxt->src.val, 1);
3936 	/* Disable writeback. */
3937 	ctxt->dst.type = OP_NONE;
3938 	return X86EMUL_CONTINUE;
3939 }
3940 
3941 static int em_cli(struct x86_emulate_ctxt *ctxt)
3942 {
3943 	if (emulator_bad_iopl(ctxt))
3944 		return emulate_gp(ctxt, 0);
3945 
3946 	ctxt->eflags &= ~X86_EFLAGS_IF;
3947 	return X86EMUL_CONTINUE;
3948 }
3949 
3950 static int em_sti(struct x86_emulate_ctxt *ctxt)
3951 {
3952 	if (emulator_bad_iopl(ctxt))
3953 		return emulate_gp(ctxt, 0);
3954 
3955 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3956 	ctxt->eflags |= X86_EFLAGS_IF;
3957 	return X86EMUL_CONTINUE;
3958 }
3959 
3960 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3961 {
3962 	u32 eax, ebx, ecx, edx;
3963 	u64 msr = 0;
3964 
3965 	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3966 	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3967 	    ctxt->ops->cpl(ctxt)) {
3968 		return emulate_gp(ctxt, 0);
3969 	}
3970 
3971 	eax = reg_read(ctxt, VCPU_REGS_RAX);
3972 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3973 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3974 	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3975 	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3976 	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3977 	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3978 	return X86EMUL_CONTINUE;
3979 }
3980 
3981 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3982 {
3983 	u32 flags;
3984 
3985 	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3986 		X86_EFLAGS_SF;
3987 	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3988 
3989 	ctxt->eflags &= ~0xffUL;
3990 	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3991 	return X86EMUL_CONTINUE;
3992 }
3993 
3994 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3995 {
3996 	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3997 	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3998 	return X86EMUL_CONTINUE;
3999 }
4000 
4001 static int em_bswap(struct x86_emulate_ctxt *ctxt)
4002 {
4003 	switch (ctxt->op_bytes) {
4004 #ifdef CONFIG_X86_64
4005 	case 8:
4006 		asm("bswap %0" : "+r"(ctxt->dst.val));
4007 		break;
4008 #endif
4009 	default:
4010 		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4011 		break;
4012 	}
4013 	return X86EMUL_CONTINUE;
4014 }
4015 
4016 static int em_clflush(struct x86_emulate_ctxt *ctxt)
4017 {
4018 	/* emulating clflush regardless of cpuid */
4019 	return X86EMUL_CONTINUE;
4020 }
4021 
4022 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4023 {
4024 	ctxt->dst.val = (s32) ctxt->src.val;
4025 	return X86EMUL_CONTINUE;
4026 }
4027 
4028 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4029 {
4030 	u32 eax = 1, ebx, ecx = 0, edx;
4031 
4032 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4033 	if (!(edx & FFL(FXSR)))
4034 		return emulate_ud(ctxt);
4035 
4036 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4037 		return emulate_nm(ctxt);
4038 
4039 	/*
4040 	 * Don't emulate a case that should never be hit, instead of working
4041 	 * around a lack of fxsave64/fxrstor64 on old compilers.
4042 	 */
4043 	if (ctxt->mode >= X86EMUL_MODE_PROT64)
4044 		return X86EMUL_UNHANDLEABLE;
4045 
4046 	return X86EMUL_CONTINUE;
4047 }
4048 
4049 /*
4050  * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4051  * and restore MXCSR.
4052  */
4053 static size_t __fxstate_size(int nregs)
4054 {
4055 	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4056 }
4057 
4058 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4059 {
4060 	bool cr4_osfxsr;
4061 	if (ctxt->mode == X86EMUL_MODE_PROT64)
4062 		return __fxstate_size(16);
4063 
4064 	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4065 	return __fxstate_size(cr4_osfxsr ? 8 : 0);
4066 }
4067 
4068 /*
4069  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4070  *  1) 16 bit mode
4071  *  2) 32 bit mode
4072  *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
4073  *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4074  *       save and restore
4075  *  3) 64-bit mode with REX.W prefix
4076  *     - like (2), but XMM 8-15 are being saved and restored
4077  *  4) 64-bit mode without REX.W prefix
4078  *     - like (3), but FIP and FDP are 64 bit
4079  *
4080  * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4081  * desired result.  (4) is not emulated.
4082  *
4083  * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4084  * and FPU DS) should match.
4085  */
4086 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4087 {
4088 	struct fxregs_state fx_state;
4089 	int rc;
4090 
4091 	rc = check_fxsr(ctxt);
4092 	if (rc != X86EMUL_CONTINUE)
4093 		return rc;
4094 
4095 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4096 
4097 	if (rc != X86EMUL_CONTINUE)
4098 		return rc;
4099 
4100 	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4101 		                   fxstate_size(ctxt));
4102 }
4103 
4104 /*
4105  * FXRSTOR might restore XMM registers not provided by the guest. Fill
4106  * in the host registers (via FXSAVE) instead, so they won't be modified.
4107  * (preemption has to stay disabled until FXRSTOR).
4108  *
4109  * Use noinline to keep the stack for other functions called by callers small.
4110  */
4111 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4112 				 const size_t used_size)
4113 {
4114 	struct fxregs_state fx_tmp;
4115 	int rc;
4116 
4117 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4118 	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4119 	       __fxstate_size(16) - used_size);
4120 
4121 	return rc;
4122 }
4123 
4124 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4125 {
4126 	struct fxregs_state fx_state;
4127 	int rc;
4128 	size_t size;
4129 
4130 	rc = check_fxsr(ctxt);
4131 	if (rc != X86EMUL_CONTINUE)
4132 		return rc;
4133 
4134 	size = fxstate_size(ctxt);
4135 	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4136 	if (rc != X86EMUL_CONTINUE)
4137 		return rc;
4138 
4139 	if (size < __fxstate_size(16)) {
4140 		rc = fxregs_fixup(&fx_state, size);
4141 		if (rc != X86EMUL_CONTINUE)
4142 			goto out;
4143 	}
4144 
4145 	if (fx_state.mxcsr >> 16) {
4146 		rc = emulate_gp(ctxt, 0);
4147 		goto out;
4148 	}
4149 
4150 	if (rc == X86EMUL_CONTINUE)
4151 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4152 
4153 out:
4154 	return rc;
4155 }
4156 
4157 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4158 {
4159 	u32 eax, ecx, edx;
4160 
4161 	eax = reg_read(ctxt, VCPU_REGS_RAX);
4162 	edx = reg_read(ctxt, VCPU_REGS_RDX);
4163 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
4164 
4165 	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4166 		return emulate_gp(ctxt, 0);
4167 
4168 	return X86EMUL_CONTINUE;
4169 }
4170 
4171 static bool valid_cr(int nr)
4172 {
4173 	switch (nr) {
4174 	case 0:
4175 	case 2 ... 4:
4176 	case 8:
4177 		return true;
4178 	default:
4179 		return false;
4180 	}
4181 }
4182 
4183 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4184 {
4185 	if (!valid_cr(ctxt->modrm_reg))
4186 		return emulate_ud(ctxt);
4187 
4188 	return X86EMUL_CONTINUE;
4189 }
4190 
4191 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4192 {
4193 	u64 new_val = ctxt->src.val64;
4194 	int cr = ctxt->modrm_reg;
4195 	u64 efer = 0;
4196 
4197 	static u64 cr_reserved_bits[] = {
4198 		0xffffffff00000000ULL,
4199 		0, 0, 0, /* CR3 checked later */
4200 		CR4_RESERVED_BITS,
4201 		0, 0, 0,
4202 		CR8_RESERVED_BITS,
4203 	};
4204 
4205 	if (!valid_cr(cr))
4206 		return emulate_ud(ctxt);
4207 
4208 	if (new_val & cr_reserved_bits[cr])
4209 		return emulate_gp(ctxt, 0);
4210 
4211 	switch (cr) {
4212 	case 0: {
4213 		u64 cr4;
4214 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4215 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4216 			return emulate_gp(ctxt, 0);
4217 
4218 		cr4 = ctxt->ops->get_cr(ctxt, 4);
4219 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4220 
4221 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4222 		    !(cr4 & X86_CR4_PAE))
4223 			return emulate_gp(ctxt, 0);
4224 
4225 		break;
4226 		}
4227 	case 3: {
4228 		u64 rsvd = 0;
4229 
4230 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4231 		if (efer & EFER_LMA) {
4232 			u64 maxphyaddr;
4233 			u32 eax, ebx, ecx, edx;
4234 
4235 			eax = 0x80000008;
4236 			ecx = 0;
4237 			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4238 						 &edx, false))
4239 				maxphyaddr = eax & 0xff;
4240 			else
4241 				maxphyaddr = 36;
4242 			rsvd = rsvd_bits(maxphyaddr, 63);
4243 			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4244 				rsvd &= ~X86_CR3_PCID_NOFLUSH;
4245 		}
4246 
4247 		if (new_val & rsvd)
4248 			return emulate_gp(ctxt, 0);
4249 
4250 		break;
4251 		}
4252 	case 4: {
4253 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4254 
4255 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4256 			return emulate_gp(ctxt, 0);
4257 
4258 		break;
4259 		}
4260 	}
4261 
4262 	return X86EMUL_CONTINUE;
4263 }
4264 
4265 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4266 {
4267 	unsigned long dr7;
4268 
4269 	ctxt->ops->get_dr(ctxt, 7, &dr7);
4270 
4271 	/* Check if DR7.Global_Enable is set */
4272 	return dr7 & (1 << 13);
4273 }
4274 
4275 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4276 {
4277 	int dr = ctxt->modrm_reg;
4278 	u64 cr4;
4279 
4280 	if (dr > 7)
4281 		return emulate_ud(ctxt);
4282 
4283 	cr4 = ctxt->ops->get_cr(ctxt, 4);
4284 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4285 		return emulate_ud(ctxt);
4286 
4287 	if (check_dr7_gd(ctxt)) {
4288 		ulong dr6;
4289 
4290 		ctxt->ops->get_dr(ctxt, 6, &dr6);
4291 		dr6 &= ~DR_TRAP_BITS;
4292 		dr6 |= DR6_BD | DR6_RTM;
4293 		ctxt->ops->set_dr(ctxt, 6, dr6);
4294 		return emulate_db(ctxt);
4295 	}
4296 
4297 	return X86EMUL_CONTINUE;
4298 }
4299 
4300 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4301 {
4302 	u64 new_val = ctxt->src.val64;
4303 	int dr = ctxt->modrm_reg;
4304 
4305 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4306 		return emulate_gp(ctxt, 0);
4307 
4308 	return check_dr_read(ctxt);
4309 }
4310 
4311 static int check_svme(struct x86_emulate_ctxt *ctxt)
4312 {
4313 	u64 efer = 0;
4314 
4315 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4316 
4317 	if (!(efer & EFER_SVME))
4318 		return emulate_ud(ctxt);
4319 
4320 	return X86EMUL_CONTINUE;
4321 }
4322 
4323 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4324 {
4325 	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4326 
4327 	/* Valid physical address? */
4328 	if (rax & 0xffff000000000000ULL)
4329 		return emulate_gp(ctxt, 0);
4330 
4331 	return check_svme(ctxt);
4332 }
4333 
4334 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4335 {
4336 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4337 
4338 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4339 		return emulate_ud(ctxt);
4340 
4341 	return X86EMUL_CONTINUE;
4342 }
4343 
4344 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4345 {
4346 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4347 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4348 
4349 	/*
4350 	 * VMware allows access to these Pseduo-PMCs even when read via RDPMC
4351 	 * in Ring3 when CR4.PCE=0.
4352 	 */
4353 	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4354 		return X86EMUL_CONTINUE;
4355 
4356 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4357 	    ctxt->ops->check_pmc(ctxt, rcx))
4358 		return emulate_gp(ctxt, 0);
4359 
4360 	return X86EMUL_CONTINUE;
4361 }
4362 
4363 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4364 {
4365 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4366 	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4367 		return emulate_gp(ctxt, 0);
4368 
4369 	return X86EMUL_CONTINUE;
4370 }
4371 
4372 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4373 {
4374 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4375 	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4376 		return emulate_gp(ctxt, 0);
4377 
4378 	return X86EMUL_CONTINUE;
4379 }
4380 
4381 #define D(_y) { .flags = (_y) }
4382 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4383 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4384 		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
4385 #define N    D(NotImpl)
4386 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4387 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4388 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4389 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4390 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4391 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4392 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4393 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4394 #define II(_f, _e, _i) \
4395 	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4396 #define IIP(_f, _e, _i, _p) \
4397 	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4398 	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4399 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4400 
4401 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
4402 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4403 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4404 #define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4405 #define I2bvIP(_f, _e, _i, _p) \
4406 	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4407 
4408 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4409 		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4410 		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4411 
4412 static const struct opcode group7_rm0[] = {
4413 	N,
4414 	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4415 	N, N, N, N, N, N,
4416 };
4417 
4418 static const struct opcode group7_rm1[] = {
4419 	DI(SrcNone | Priv, monitor),
4420 	DI(SrcNone | Priv, mwait),
4421 	N, N, N, N, N, N,
4422 };
4423 
4424 static const struct opcode group7_rm2[] = {
4425 	N,
4426 	II(ImplicitOps | Priv,			em_xsetbv,	xsetbv),
4427 	N, N, N, N, N, N,
4428 };
4429 
4430 static const struct opcode group7_rm3[] = {
4431 	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4432 	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4433 	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4434 	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4435 	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4436 	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4437 	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4438 	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4439 };
4440 
4441 static const struct opcode group7_rm7[] = {
4442 	N,
4443 	DIP(SrcNone, rdtscp, check_rdtsc),
4444 	N, N, N, N, N, N,
4445 };
4446 
4447 static const struct opcode group1[] = {
4448 	F(Lock, em_add),
4449 	F(Lock | PageTable, em_or),
4450 	F(Lock, em_adc),
4451 	F(Lock, em_sbb),
4452 	F(Lock | PageTable, em_and),
4453 	F(Lock, em_sub),
4454 	F(Lock, em_xor),
4455 	F(NoWrite, em_cmp),
4456 };
4457 
4458 static const struct opcode group1A[] = {
4459 	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4460 };
4461 
4462 static const struct opcode group2[] = {
4463 	F(DstMem | ModRM, em_rol),
4464 	F(DstMem | ModRM, em_ror),
4465 	F(DstMem | ModRM, em_rcl),
4466 	F(DstMem | ModRM, em_rcr),
4467 	F(DstMem | ModRM, em_shl),
4468 	F(DstMem | ModRM, em_shr),
4469 	F(DstMem | ModRM, em_shl),
4470 	F(DstMem | ModRM, em_sar),
4471 };
4472 
4473 static const struct opcode group3[] = {
4474 	F(DstMem | SrcImm | NoWrite, em_test),
4475 	F(DstMem | SrcImm | NoWrite, em_test),
4476 	F(DstMem | SrcNone | Lock, em_not),
4477 	F(DstMem | SrcNone | Lock, em_neg),
4478 	F(DstXacc | Src2Mem, em_mul_ex),
4479 	F(DstXacc | Src2Mem, em_imul_ex),
4480 	F(DstXacc | Src2Mem, em_div_ex),
4481 	F(DstXacc | Src2Mem, em_idiv_ex),
4482 };
4483 
4484 static const struct opcode group4[] = {
4485 	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4486 	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4487 	N, N, N, N, N, N,
4488 };
4489 
4490 static const struct opcode group5[] = {
4491 	F(DstMem | SrcNone | Lock,		em_inc),
4492 	F(DstMem | SrcNone | Lock,		em_dec),
4493 	I(SrcMem | NearBranch,			em_call_near_abs),
4494 	I(SrcMemFAddr | ImplicitOps,		em_call_far),
4495 	I(SrcMem | NearBranch,			em_jmp_abs),
4496 	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
4497 	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4498 };
4499 
4500 static const struct opcode group6[] = {
4501 	II(Prot | DstMem,	   em_sldt, sldt),
4502 	II(Prot | DstMem,	   em_str, str),
4503 	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4504 	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4505 	N, N, N, N,
4506 };
4507 
4508 static const struct group_dual group7 = { {
4509 	II(Mov | DstMem,			em_sgdt, sgdt),
4510 	II(Mov | DstMem,			em_sidt, sidt),
4511 	II(SrcMem | Priv,			em_lgdt, lgdt),
4512 	II(SrcMem | Priv,			em_lidt, lidt),
4513 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4514 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4515 	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4516 }, {
4517 	EXT(0, group7_rm0),
4518 	EXT(0, group7_rm1),
4519 	EXT(0, group7_rm2),
4520 	EXT(0, group7_rm3),
4521 	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4522 	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4523 	EXT(0, group7_rm7),
4524 } };
4525 
4526 static const struct opcode group8[] = {
4527 	N, N, N, N,
4528 	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4529 	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4530 	F(DstMem | SrcImmByte | Lock,			em_btr),
4531 	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4532 };
4533 
4534 /*
4535  * The "memory" destination is actually always a register, since we come
4536  * from the register case of group9.
4537  */
4538 static const struct gprefix pfx_0f_c7_7 = {
4539 	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4540 };
4541 
4542 
4543 static const struct group_dual group9 = { {
4544 	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4545 }, {
4546 	N, N, N, N, N, N, N,
4547 	GP(0, &pfx_0f_c7_7),
4548 } };
4549 
4550 static const struct opcode group11[] = {
4551 	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4552 	X7(D(Undefined)),
4553 };
4554 
4555 static const struct gprefix pfx_0f_ae_7 = {
4556 	I(SrcMem | ByteOp, em_clflush), N, N, N,
4557 };
4558 
4559 static const struct group_dual group15 = { {
4560 	I(ModRM | Aligned16, em_fxsave),
4561 	I(ModRM | Aligned16, em_fxrstor),
4562 	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4563 }, {
4564 	N, N, N, N, N, N, N, N,
4565 } };
4566 
4567 static const struct gprefix pfx_0f_6f_0f_7f = {
4568 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4569 };
4570 
4571 static const struct instr_dual instr_dual_0f_2b = {
4572 	I(0, em_mov), N
4573 };
4574 
4575 static const struct gprefix pfx_0f_2b = {
4576 	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4577 };
4578 
4579 static const struct gprefix pfx_0f_10_0f_11 = {
4580 	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4581 };
4582 
4583 static const struct gprefix pfx_0f_28_0f_29 = {
4584 	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4585 };
4586 
4587 static const struct gprefix pfx_0f_e7 = {
4588 	N, I(Sse, em_mov), N, N,
4589 };
4590 
4591 static const struct escape escape_d9 = { {
4592 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4593 }, {
4594 	/* 0xC0 - 0xC7 */
4595 	N, N, N, N, N, N, N, N,
4596 	/* 0xC8 - 0xCF */
4597 	N, N, N, N, N, N, N, N,
4598 	/* 0xD0 - 0xC7 */
4599 	N, N, N, N, N, N, N, N,
4600 	/* 0xD8 - 0xDF */
4601 	N, N, N, N, N, N, N, N,
4602 	/* 0xE0 - 0xE7 */
4603 	N, N, N, N, N, N, N, N,
4604 	/* 0xE8 - 0xEF */
4605 	N, N, N, N, N, N, N, N,
4606 	/* 0xF0 - 0xF7 */
4607 	N, N, N, N, N, N, N, N,
4608 	/* 0xF8 - 0xFF */
4609 	N, N, N, N, N, N, N, N,
4610 } };
4611 
4612 static const struct escape escape_db = { {
4613 	N, N, N, N, N, N, N, N,
4614 }, {
4615 	/* 0xC0 - 0xC7 */
4616 	N, N, N, N, N, N, N, N,
4617 	/* 0xC8 - 0xCF */
4618 	N, N, N, N, N, N, N, N,
4619 	/* 0xD0 - 0xC7 */
4620 	N, N, N, N, N, N, N, N,
4621 	/* 0xD8 - 0xDF */
4622 	N, N, N, N, N, N, N, N,
4623 	/* 0xE0 - 0xE7 */
4624 	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4625 	/* 0xE8 - 0xEF */
4626 	N, N, N, N, N, N, N, N,
4627 	/* 0xF0 - 0xF7 */
4628 	N, N, N, N, N, N, N, N,
4629 	/* 0xF8 - 0xFF */
4630 	N, N, N, N, N, N, N, N,
4631 } };
4632 
4633 static const struct escape escape_dd = { {
4634 	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4635 }, {
4636 	/* 0xC0 - 0xC7 */
4637 	N, N, N, N, N, N, N, N,
4638 	/* 0xC8 - 0xCF */
4639 	N, N, N, N, N, N, N, N,
4640 	/* 0xD0 - 0xC7 */
4641 	N, N, N, N, N, N, N, N,
4642 	/* 0xD8 - 0xDF */
4643 	N, N, N, N, N, N, N, N,
4644 	/* 0xE0 - 0xE7 */
4645 	N, N, N, N, N, N, N, N,
4646 	/* 0xE8 - 0xEF */
4647 	N, N, N, N, N, N, N, N,
4648 	/* 0xF0 - 0xF7 */
4649 	N, N, N, N, N, N, N, N,
4650 	/* 0xF8 - 0xFF */
4651 	N, N, N, N, N, N, N, N,
4652 } };
4653 
4654 static const struct instr_dual instr_dual_0f_c3 = {
4655 	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4656 };
4657 
4658 static const struct mode_dual mode_dual_63 = {
4659 	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4660 };
4661 
4662 static const struct opcode opcode_table[256] = {
4663 	/* 0x00 - 0x07 */
4664 	F6ALU(Lock, em_add),
4665 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4666 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4667 	/* 0x08 - 0x0F */
4668 	F6ALU(Lock | PageTable, em_or),
4669 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4670 	N,
4671 	/* 0x10 - 0x17 */
4672 	F6ALU(Lock, em_adc),
4673 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4674 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4675 	/* 0x18 - 0x1F */
4676 	F6ALU(Lock, em_sbb),
4677 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4678 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4679 	/* 0x20 - 0x27 */
4680 	F6ALU(Lock | PageTable, em_and), N, N,
4681 	/* 0x28 - 0x2F */
4682 	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4683 	/* 0x30 - 0x37 */
4684 	F6ALU(Lock, em_xor), N, N,
4685 	/* 0x38 - 0x3F */
4686 	F6ALU(NoWrite, em_cmp), N, N,
4687 	/* 0x40 - 0x4F */
4688 	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4689 	/* 0x50 - 0x57 */
4690 	X8(I(SrcReg | Stack, em_push)),
4691 	/* 0x58 - 0x5F */
4692 	X8(I(DstReg | Stack, em_pop)),
4693 	/* 0x60 - 0x67 */
4694 	I(ImplicitOps | Stack | No64, em_pusha),
4695 	I(ImplicitOps | Stack | No64, em_popa),
4696 	N, MD(ModRM, &mode_dual_63),
4697 	N, N, N, N,
4698 	/* 0x68 - 0x6F */
4699 	I(SrcImm | Mov | Stack, em_push),
4700 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4701 	I(SrcImmByte | Mov | Stack, em_push),
4702 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4703 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4704 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4705 	/* 0x70 - 0x7F */
4706 	X16(D(SrcImmByte | NearBranch)),
4707 	/* 0x80 - 0x87 */
4708 	G(ByteOp | DstMem | SrcImm, group1),
4709 	G(DstMem | SrcImm, group1),
4710 	G(ByteOp | DstMem | SrcImm | No64, group1),
4711 	G(DstMem | SrcImmByte, group1),
4712 	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4713 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4714 	/* 0x88 - 0x8F */
4715 	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4716 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4717 	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4718 	D(ModRM | SrcMem | NoAccess | DstReg),
4719 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4720 	G(0, group1A),
4721 	/* 0x90 - 0x97 */
4722 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4723 	/* 0x98 - 0x9F */
4724 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4725 	I(SrcImmFAddr | No64, em_call_far), N,
4726 	II(ImplicitOps | Stack, em_pushf, pushf),
4727 	II(ImplicitOps | Stack, em_popf, popf),
4728 	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4729 	/* 0xA0 - 0xA7 */
4730 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4731 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4732 	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4733 	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4734 	/* 0xA8 - 0xAF */
4735 	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4736 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4737 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4738 	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4739 	/* 0xB0 - 0xB7 */
4740 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4741 	/* 0xB8 - 0xBF */
4742 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4743 	/* 0xC0 - 0xC7 */
4744 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4745 	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4746 	I(ImplicitOps | NearBranch, em_ret),
4747 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4748 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4749 	G(ByteOp, group11), G(0, group11),
4750 	/* 0xC8 - 0xCF */
4751 	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4752 	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4753 	I(ImplicitOps, em_ret_far),
4754 	D(ImplicitOps), DI(SrcImmByte, intn),
4755 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4756 	/* 0xD0 - 0xD7 */
4757 	G(Src2One | ByteOp, group2), G(Src2One, group2),
4758 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4759 	I(DstAcc | SrcImmUByte | No64, em_aam),
4760 	I(DstAcc | SrcImmUByte | No64, em_aad),
4761 	F(DstAcc | ByteOp | No64, em_salc),
4762 	I(DstAcc | SrcXLat | ByteOp, em_mov),
4763 	/* 0xD8 - 0xDF */
4764 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4765 	/* 0xE0 - 0xE7 */
4766 	X3(I(SrcImmByte | NearBranch, em_loop)),
4767 	I(SrcImmByte | NearBranch, em_jcxz),
4768 	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4769 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4770 	/* 0xE8 - 0xEF */
4771 	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4772 	I(SrcImmFAddr | No64, em_jmp_far),
4773 	D(SrcImmByte | ImplicitOps | NearBranch),
4774 	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4775 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4776 	/* 0xF0 - 0xF7 */
4777 	N, DI(ImplicitOps, icebp), N, N,
4778 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4779 	G(ByteOp, group3), G(0, group3),
4780 	/* 0xF8 - 0xFF */
4781 	D(ImplicitOps), D(ImplicitOps),
4782 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4783 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4784 };
4785 
4786 static const struct opcode twobyte_table[256] = {
4787 	/* 0x00 - 0x0F */
4788 	G(0, group6), GD(0, &group7), N, N,
4789 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4790 	II(ImplicitOps | Priv, em_clts, clts), N,
4791 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4792 	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4793 	/* 0x10 - 0x1F */
4794 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4795 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4796 	N, N, N, N, N, N,
4797 	D(ImplicitOps | ModRM | SrcMem | NoAccess),
4798 	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4799 	/* 0x20 - 0x2F */
4800 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4801 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4802 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4803 						check_cr_write),
4804 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4805 						check_dr_write),
4806 	N, N, N, N,
4807 	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4808 	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4809 	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4810 	N, N, N, N,
4811 	/* 0x30 - 0x3F */
4812 	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4813 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4814 	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4815 	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4816 	I(ImplicitOps | EmulateOnUD, em_sysenter),
4817 	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4818 	N, N,
4819 	N, N, N, N, N, N, N, N,
4820 	/* 0x40 - 0x4F */
4821 	X16(D(DstReg | SrcMem | ModRM)),
4822 	/* 0x50 - 0x5F */
4823 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4824 	/* 0x60 - 0x6F */
4825 	N, N, N, N,
4826 	N, N, N, N,
4827 	N, N, N, N,
4828 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4829 	/* 0x70 - 0x7F */
4830 	N, N, N, N,
4831 	N, N, N, N,
4832 	N, N, N, N,
4833 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4834 	/* 0x80 - 0x8F */
4835 	X16(D(SrcImm | NearBranch)),
4836 	/* 0x90 - 0x9F */
4837 	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4838 	/* 0xA0 - 0xA7 */
4839 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4840 	II(ImplicitOps, em_cpuid, cpuid),
4841 	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4842 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4843 	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4844 	/* 0xA8 - 0xAF */
4845 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4846 	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4847 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4848 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4849 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4850 	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4851 	/* 0xB0 - 0xB7 */
4852 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4853 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4854 	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4855 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4856 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4857 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4858 	/* 0xB8 - 0xBF */
4859 	N, N,
4860 	G(BitOp, group8),
4861 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4862 	I(DstReg | SrcMem | ModRM, em_bsf_c),
4863 	I(DstReg | SrcMem | ModRM, em_bsr_c),
4864 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4865 	/* 0xC0 - 0xC7 */
4866 	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4867 	N, ID(0, &instr_dual_0f_c3),
4868 	N, N, N, GD(0, &group9),
4869 	/* 0xC8 - 0xCF */
4870 	X8(I(DstReg, em_bswap)),
4871 	/* 0xD0 - 0xDF */
4872 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4873 	/* 0xE0 - 0xEF */
4874 	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4875 	N, N, N, N, N, N, N, N,
4876 	/* 0xF0 - 0xFF */
4877 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4878 };
4879 
4880 static const struct instr_dual instr_dual_0f_38_f0 = {
4881 	I(DstReg | SrcMem | Mov, em_movbe), N
4882 };
4883 
4884 static const struct instr_dual instr_dual_0f_38_f1 = {
4885 	I(DstMem | SrcReg | Mov, em_movbe), N
4886 };
4887 
4888 static const struct gprefix three_byte_0f_38_f0 = {
4889 	ID(0, &instr_dual_0f_38_f0), N, N, N
4890 };
4891 
4892 static const struct gprefix three_byte_0f_38_f1 = {
4893 	ID(0, &instr_dual_0f_38_f1), N, N, N
4894 };
4895 
4896 /*
4897  * Insns below are selected by the prefix which indexed by the third opcode
4898  * byte.
4899  */
4900 static const struct opcode opcode_map_0f_38[256] = {
4901 	/* 0x00 - 0x7f */
4902 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4903 	/* 0x80 - 0xef */
4904 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4905 	/* 0xf0 - 0xf1 */
4906 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4907 	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4908 	/* 0xf2 - 0xff */
4909 	N, N, X4(N), X8(N)
4910 };
4911 
4912 #undef D
4913 #undef N
4914 #undef G
4915 #undef GD
4916 #undef I
4917 #undef GP
4918 #undef EXT
4919 #undef MD
4920 #undef ID
4921 
4922 #undef D2bv
4923 #undef D2bvIP
4924 #undef I2bv
4925 #undef I2bvIP
4926 #undef I6ALU
4927 
4928 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4929 {
4930 	unsigned size;
4931 
4932 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4933 	if (size == 8)
4934 		size = 4;
4935 	return size;
4936 }
4937 
4938 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4939 		      unsigned size, bool sign_extension)
4940 {
4941 	int rc = X86EMUL_CONTINUE;
4942 
4943 	op->type = OP_IMM;
4944 	op->bytes = size;
4945 	op->addr.mem.ea = ctxt->_eip;
4946 	/* NB. Immediates are sign-extended as necessary. */
4947 	switch (op->bytes) {
4948 	case 1:
4949 		op->val = insn_fetch(s8, ctxt);
4950 		break;
4951 	case 2:
4952 		op->val = insn_fetch(s16, ctxt);
4953 		break;
4954 	case 4:
4955 		op->val = insn_fetch(s32, ctxt);
4956 		break;
4957 	case 8:
4958 		op->val = insn_fetch(s64, ctxt);
4959 		break;
4960 	}
4961 	if (!sign_extension) {
4962 		switch (op->bytes) {
4963 		case 1:
4964 			op->val &= 0xff;
4965 			break;
4966 		case 2:
4967 			op->val &= 0xffff;
4968 			break;
4969 		case 4:
4970 			op->val &= 0xffffffff;
4971 			break;
4972 		}
4973 	}
4974 done:
4975 	return rc;
4976 }
4977 
4978 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4979 			  unsigned d)
4980 {
4981 	int rc = X86EMUL_CONTINUE;
4982 
4983 	switch (d) {
4984 	case OpReg:
4985 		decode_register_operand(ctxt, op);
4986 		break;
4987 	case OpImmUByte:
4988 		rc = decode_imm(ctxt, op, 1, false);
4989 		break;
4990 	case OpMem:
4991 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4992 	mem_common:
4993 		*op = ctxt->memop;
4994 		ctxt->memopp = op;
4995 		if (ctxt->d & BitOp)
4996 			fetch_bit_operand(ctxt);
4997 		op->orig_val = op->val;
4998 		break;
4999 	case OpMem64:
5000 		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5001 		goto mem_common;
5002 	case OpAcc:
5003 		op->type = OP_REG;
5004 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5005 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5006 		fetch_register_operand(op);
5007 		op->orig_val = op->val;
5008 		break;
5009 	case OpAccLo:
5010 		op->type = OP_REG;
5011 		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
5012 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5013 		fetch_register_operand(op);
5014 		op->orig_val = op->val;
5015 		break;
5016 	case OpAccHi:
5017 		if (ctxt->d & ByteOp) {
5018 			op->type = OP_NONE;
5019 			break;
5020 		}
5021 		op->type = OP_REG;
5022 		op->bytes = ctxt->op_bytes;
5023 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5024 		fetch_register_operand(op);
5025 		op->orig_val = op->val;
5026 		break;
5027 	case OpDI:
5028 		op->type = OP_MEM;
5029 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5030 		op->addr.mem.ea =
5031 			register_address(ctxt, VCPU_REGS_RDI);
5032 		op->addr.mem.seg = VCPU_SREG_ES;
5033 		op->val = 0;
5034 		op->count = 1;
5035 		break;
5036 	case OpDX:
5037 		op->type = OP_REG;
5038 		op->bytes = 2;
5039 		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5040 		fetch_register_operand(op);
5041 		break;
5042 	case OpCL:
5043 		op->type = OP_IMM;
5044 		op->bytes = 1;
5045 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5046 		break;
5047 	case OpImmByte:
5048 		rc = decode_imm(ctxt, op, 1, true);
5049 		break;
5050 	case OpOne:
5051 		op->type = OP_IMM;
5052 		op->bytes = 1;
5053 		op->val = 1;
5054 		break;
5055 	case OpImm:
5056 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5057 		break;
5058 	case OpImm64:
5059 		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5060 		break;
5061 	case OpMem8:
5062 		ctxt->memop.bytes = 1;
5063 		if (ctxt->memop.type == OP_REG) {
5064 			ctxt->memop.addr.reg = decode_register(ctxt,
5065 					ctxt->modrm_rm, true);
5066 			fetch_register_operand(&ctxt->memop);
5067 		}
5068 		goto mem_common;
5069 	case OpMem16:
5070 		ctxt->memop.bytes = 2;
5071 		goto mem_common;
5072 	case OpMem32:
5073 		ctxt->memop.bytes = 4;
5074 		goto mem_common;
5075 	case OpImmU16:
5076 		rc = decode_imm(ctxt, op, 2, false);
5077 		break;
5078 	case OpImmU:
5079 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5080 		break;
5081 	case OpSI:
5082 		op->type = OP_MEM;
5083 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5084 		op->addr.mem.ea =
5085 			register_address(ctxt, VCPU_REGS_RSI);
5086 		op->addr.mem.seg = ctxt->seg_override;
5087 		op->val = 0;
5088 		op->count = 1;
5089 		break;
5090 	case OpXLat:
5091 		op->type = OP_MEM;
5092 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5093 		op->addr.mem.ea =
5094 			address_mask(ctxt,
5095 				reg_read(ctxt, VCPU_REGS_RBX) +
5096 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5097 		op->addr.mem.seg = ctxt->seg_override;
5098 		op->val = 0;
5099 		break;
5100 	case OpImmFAddr:
5101 		op->type = OP_IMM;
5102 		op->addr.mem.ea = ctxt->_eip;
5103 		op->bytes = ctxt->op_bytes + 2;
5104 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
5105 		break;
5106 	case OpMemFAddr:
5107 		ctxt->memop.bytes = ctxt->op_bytes + 2;
5108 		goto mem_common;
5109 	case OpES:
5110 		op->type = OP_IMM;
5111 		op->val = VCPU_SREG_ES;
5112 		break;
5113 	case OpCS:
5114 		op->type = OP_IMM;
5115 		op->val = VCPU_SREG_CS;
5116 		break;
5117 	case OpSS:
5118 		op->type = OP_IMM;
5119 		op->val = VCPU_SREG_SS;
5120 		break;
5121 	case OpDS:
5122 		op->type = OP_IMM;
5123 		op->val = VCPU_SREG_DS;
5124 		break;
5125 	case OpFS:
5126 		op->type = OP_IMM;
5127 		op->val = VCPU_SREG_FS;
5128 		break;
5129 	case OpGS:
5130 		op->type = OP_IMM;
5131 		op->val = VCPU_SREG_GS;
5132 		break;
5133 	case OpImplicit:
5134 		/* Special instructions do their own operand decoding. */
5135 	default:
5136 		op->type = OP_NONE; /* Disable writeback. */
5137 		break;
5138 	}
5139 
5140 done:
5141 	return rc;
5142 }
5143 
5144 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5145 {
5146 	int rc = X86EMUL_CONTINUE;
5147 	int mode = ctxt->mode;
5148 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5149 	bool op_prefix = false;
5150 	bool has_seg_override = false;
5151 	struct opcode opcode;
5152 	u16 dummy;
5153 	struct desc_struct desc;
5154 
5155 	ctxt->memop.type = OP_NONE;
5156 	ctxt->memopp = NULL;
5157 	ctxt->_eip = ctxt->eip;
5158 	ctxt->fetch.ptr = ctxt->fetch.data;
5159 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
5160 	ctxt->opcode_len = 1;
5161 	if (insn_len > 0)
5162 		memcpy(ctxt->fetch.data, insn, insn_len);
5163 	else {
5164 		rc = __do_insn_fetch_bytes(ctxt, 1);
5165 		if (rc != X86EMUL_CONTINUE)
5166 			goto done;
5167 	}
5168 
5169 	switch (mode) {
5170 	case X86EMUL_MODE_REAL:
5171 	case X86EMUL_MODE_VM86:
5172 		def_op_bytes = def_ad_bytes = 2;
5173 		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5174 		if (desc.d)
5175 			def_op_bytes = def_ad_bytes = 4;
5176 		break;
5177 	case X86EMUL_MODE_PROT16:
5178 		def_op_bytes = def_ad_bytes = 2;
5179 		break;
5180 	case X86EMUL_MODE_PROT32:
5181 		def_op_bytes = def_ad_bytes = 4;
5182 		break;
5183 #ifdef CONFIG_X86_64
5184 	case X86EMUL_MODE_PROT64:
5185 		def_op_bytes = 4;
5186 		def_ad_bytes = 8;
5187 		break;
5188 #endif
5189 	default:
5190 		return EMULATION_FAILED;
5191 	}
5192 
5193 	ctxt->op_bytes = def_op_bytes;
5194 	ctxt->ad_bytes = def_ad_bytes;
5195 
5196 	/* Legacy prefixes. */
5197 	for (;;) {
5198 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
5199 		case 0x66:	/* operand-size override */
5200 			op_prefix = true;
5201 			/* switch between 2/4 bytes */
5202 			ctxt->op_bytes = def_op_bytes ^ 6;
5203 			break;
5204 		case 0x67:	/* address-size override */
5205 			if (mode == X86EMUL_MODE_PROT64)
5206 				/* switch between 4/8 bytes */
5207 				ctxt->ad_bytes = def_ad_bytes ^ 12;
5208 			else
5209 				/* switch between 2/4 bytes */
5210 				ctxt->ad_bytes = def_ad_bytes ^ 6;
5211 			break;
5212 		case 0x26:	/* ES override */
5213 		case 0x2e:	/* CS override */
5214 		case 0x36:	/* SS override */
5215 		case 0x3e:	/* DS override */
5216 			has_seg_override = true;
5217 			ctxt->seg_override = (ctxt->b >> 3) & 3;
5218 			break;
5219 		case 0x64:	/* FS override */
5220 		case 0x65:	/* GS override */
5221 			has_seg_override = true;
5222 			ctxt->seg_override = ctxt->b & 7;
5223 			break;
5224 		case 0x40 ... 0x4f: /* REX */
5225 			if (mode != X86EMUL_MODE_PROT64)
5226 				goto done_prefixes;
5227 			ctxt->rex_prefix = ctxt->b;
5228 			continue;
5229 		case 0xf0:	/* LOCK */
5230 			ctxt->lock_prefix = 1;
5231 			break;
5232 		case 0xf2:	/* REPNE/REPNZ */
5233 		case 0xf3:	/* REP/REPE/REPZ */
5234 			ctxt->rep_prefix = ctxt->b;
5235 			break;
5236 		default:
5237 			goto done_prefixes;
5238 		}
5239 
5240 		/* Any legacy prefix after a REX prefix nullifies its effect. */
5241 
5242 		ctxt->rex_prefix = 0;
5243 	}
5244 
5245 done_prefixes:
5246 
5247 	/* REX prefix. */
5248 	if (ctxt->rex_prefix & 8)
5249 		ctxt->op_bytes = 8;	/* REX.W */
5250 
5251 	/* Opcode byte(s). */
5252 	opcode = opcode_table[ctxt->b];
5253 	/* Two-byte opcode? */
5254 	if (ctxt->b == 0x0f) {
5255 		ctxt->opcode_len = 2;
5256 		ctxt->b = insn_fetch(u8, ctxt);
5257 		opcode = twobyte_table[ctxt->b];
5258 
5259 		/* 0F_38 opcode map */
5260 		if (ctxt->b == 0x38) {
5261 			ctxt->opcode_len = 3;
5262 			ctxt->b = insn_fetch(u8, ctxt);
5263 			opcode = opcode_map_0f_38[ctxt->b];
5264 		}
5265 	}
5266 	ctxt->d = opcode.flags;
5267 
5268 	if (ctxt->d & ModRM)
5269 		ctxt->modrm = insn_fetch(u8, ctxt);
5270 
5271 	/* vex-prefix instructions are not implemented */
5272 	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5273 	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5274 		ctxt->d = NotImpl;
5275 	}
5276 
5277 	while (ctxt->d & GroupMask) {
5278 		switch (ctxt->d & GroupMask) {
5279 		case Group:
5280 			goffset = (ctxt->modrm >> 3) & 7;
5281 			opcode = opcode.u.group[goffset];
5282 			break;
5283 		case GroupDual:
5284 			goffset = (ctxt->modrm >> 3) & 7;
5285 			if ((ctxt->modrm >> 6) == 3)
5286 				opcode = opcode.u.gdual->mod3[goffset];
5287 			else
5288 				opcode = opcode.u.gdual->mod012[goffset];
5289 			break;
5290 		case RMExt:
5291 			goffset = ctxt->modrm & 7;
5292 			opcode = opcode.u.group[goffset];
5293 			break;
5294 		case Prefix:
5295 			if (ctxt->rep_prefix && op_prefix)
5296 				return EMULATION_FAILED;
5297 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5298 			switch (simd_prefix) {
5299 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5300 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5301 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5302 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5303 			}
5304 			break;
5305 		case Escape:
5306 			if (ctxt->modrm > 0xbf)
5307 				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5308 			else
5309 				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5310 			break;
5311 		case InstrDual:
5312 			if ((ctxt->modrm >> 6) == 3)
5313 				opcode = opcode.u.idual->mod3;
5314 			else
5315 				opcode = opcode.u.idual->mod012;
5316 			break;
5317 		case ModeDual:
5318 			if (ctxt->mode == X86EMUL_MODE_PROT64)
5319 				opcode = opcode.u.mdual->mode64;
5320 			else
5321 				opcode = opcode.u.mdual->mode32;
5322 			break;
5323 		default:
5324 			return EMULATION_FAILED;
5325 		}
5326 
5327 		ctxt->d &= ~(u64)GroupMask;
5328 		ctxt->d |= opcode.flags;
5329 	}
5330 
5331 	/* Unrecognised? */
5332 	if (ctxt->d == 0)
5333 		return EMULATION_FAILED;
5334 
5335 	ctxt->execute = opcode.u.execute;
5336 
5337 	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5338 		return EMULATION_FAILED;
5339 
5340 	if (unlikely(ctxt->d &
5341 	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5342 	     No16))) {
5343 		/*
5344 		 * These are copied unconditionally here, and checked unconditionally
5345 		 * in x86_emulate_insn.
5346 		 */
5347 		ctxt->check_perm = opcode.check_perm;
5348 		ctxt->intercept = opcode.intercept;
5349 
5350 		if (ctxt->d & NotImpl)
5351 			return EMULATION_FAILED;
5352 
5353 		if (mode == X86EMUL_MODE_PROT64) {
5354 			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5355 				ctxt->op_bytes = 8;
5356 			else if (ctxt->d & NearBranch)
5357 				ctxt->op_bytes = 8;
5358 		}
5359 
5360 		if (ctxt->d & Op3264) {
5361 			if (mode == X86EMUL_MODE_PROT64)
5362 				ctxt->op_bytes = 8;
5363 			else
5364 				ctxt->op_bytes = 4;
5365 		}
5366 
5367 		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5368 			ctxt->op_bytes = 4;
5369 
5370 		if (ctxt->d & Sse)
5371 			ctxt->op_bytes = 16;
5372 		else if (ctxt->d & Mmx)
5373 			ctxt->op_bytes = 8;
5374 	}
5375 
5376 	/* ModRM and SIB bytes. */
5377 	if (ctxt->d & ModRM) {
5378 		rc = decode_modrm(ctxt, &ctxt->memop);
5379 		if (!has_seg_override) {
5380 			has_seg_override = true;
5381 			ctxt->seg_override = ctxt->modrm_seg;
5382 		}
5383 	} else if (ctxt->d & MemAbs)
5384 		rc = decode_abs(ctxt, &ctxt->memop);
5385 	if (rc != X86EMUL_CONTINUE)
5386 		goto done;
5387 
5388 	if (!has_seg_override)
5389 		ctxt->seg_override = VCPU_SREG_DS;
5390 
5391 	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5392 
5393 	/*
5394 	 * Decode and fetch the source operand: register, memory
5395 	 * or immediate.
5396 	 */
5397 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5398 	if (rc != X86EMUL_CONTINUE)
5399 		goto done;
5400 
5401 	/*
5402 	 * Decode and fetch the second source operand: register, memory
5403 	 * or immediate.
5404 	 */
5405 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5406 	if (rc != X86EMUL_CONTINUE)
5407 		goto done;
5408 
5409 	/* Decode and fetch the destination operand: register or memory. */
5410 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5411 
5412 	if (ctxt->rip_relative && likely(ctxt->memopp))
5413 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5414 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5415 
5416 done:
5417 	if (rc == X86EMUL_PROPAGATE_FAULT)
5418 		ctxt->have_exception = true;
5419 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5420 }
5421 
5422 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5423 {
5424 	return ctxt->d & PageTable;
5425 }
5426 
5427 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5428 {
5429 	/* The second termination condition only applies for REPE
5430 	 * and REPNE. Test if the repeat string operation prefix is
5431 	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
5432 	 * corresponding termination condition according to:
5433 	 * 	- if REPE/REPZ and ZF = 0 then done
5434 	 * 	- if REPNE/REPNZ and ZF = 1 then done
5435 	 */
5436 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5437 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5438 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5439 		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5440 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5441 		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5442 		return true;
5443 
5444 	return false;
5445 }
5446 
5447 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5448 {
5449 	int rc;
5450 
5451 	rc = asm_safe("fwait");
5452 
5453 	if (unlikely(rc != X86EMUL_CONTINUE))
5454 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5455 
5456 	return X86EMUL_CONTINUE;
5457 }
5458 
5459 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5460 				       struct operand *op)
5461 {
5462 	if (op->type == OP_MM)
5463 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5464 }
5465 
5466 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5467 {
5468 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5469 
5470 	if (!(ctxt->d & ByteOp))
5471 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5472 
5473 	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5474 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5475 	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5476 	    : "c"(ctxt->src2.val));
5477 
5478 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5479 	if (!fop) /* exception is returned in fop variable */
5480 		return emulate_de(ctxt);
5481 	return X86EMUL_CONTINUE;
5482 }
5483 
5484 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5485 {
5486 	memset(&ctxt->rip_relative, 0,
5487 	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5488 
5489 	ctxt->io_read.pos = 0;
5490 	ctxt->io_read.end = 0;
5491 	ctxt->mem_read.end = 0;
5492 }
5493 
5494 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5495 {
5496 	const struct x86_emulate_ops *ops = ctxt->ops;
5497 	int rc = X86EMUL_CONTINUE;
5498 	int saved_dst_type = ctxt->dst.type;
5499 	unsigned emul_flags;
5500 
5501 	ctxt->mem_read.pos = 0;
5502 
5503 	/* LOCK prefix is allowed only with some instructions */
5504 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5505 		rc = emulate_ud(ctxt);
5506 		goto done;
5507 	}
5508 
5509 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5510 		rc = emulate_ud(ctxt);
5511 		goto done;
5512 	}
5513 
5514 	emul_flags = ctxt->ops->get_hflags(ctxt);
5515 	if (unlikely(ctxt->d &
5516 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5517 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5518 				(ctxt->d & Undefined)) {
5519 			rc = emulate_ud(ctxt);
5520 			goto done;
5521 		}
5522 
5523 		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5524 		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5525 			rc = emulate_ud(ctxt);
5526 			goto done;
5527 		}
5528 
5529 		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5530 			rc = emulate_nm(ctxt);
5531 			goto done;
5532 		}
5533 
5534 		if (ctxt->d & Mmx) {
5535 			rc = flush_pending_x87_faults(ctxt);
5536 			if (rc != X86EMUL_CONTINUE)
5537 				goto done;
5538 			/*
5539 			 * Now that we know the fpu is exception safe, we can fetch
5540 			 * operands from it.
5541 			 */
5542 			fetch_possible_mmx_operand(ctxt, &ctxt->src);
5543 			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5544 			if (!(ctxt->d & Mov))
5545 				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5546 		}
5547 
5548 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5549 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5550 						      X86_ICPT_PRE_EXCEPT);
5551 			if (rc != X86EMUL_CONTINUE)
5552 				goto done;
5553 		}
5554 
5555 		/* Instruction can only be executed in protected mode */
5556 		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5557 			rc = emulate_ud(ctxt);
5558 			goto done;
5559 		}
5560 
5561 		/* Privileged instruction can be executed only in CPL=0 */
5562 		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5563 			if (ctxt->d & PrivUD)
5564 				rc = emulate_ud(ctxt);
5565 			else
5566 				rc = emulate_gp(ctxt, 0);
5567 			goto done;
5568 		}
5569 
5570 		/* Do instruction specific permission checks */
5571 		if (ctxt->d & CheckPerm) {
5572 			rc = ctxt->check_perm(ctxt);
5573 			if (rc != X86EMUL_CONTINUE)
5574 				goto done;
5575 		}
5576 
5577 		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5578 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5579 						      X86_ICPT_POST_EXCEPT);
5580 			if (rc != X86EMUL_CONTINUE)
5581 				goto done;
5582 		}
5583 
5584 		if (ctxt->rep_prefix && (ctxt->d & String)) {
5585 			/* All REP prefixes have the same first termination condition */
5586 			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5587 				string_registers_quirk(ctxt);
5588 				ctxt->eip = ctxt->_eip;
5589 				ctxt->eflags &= ~X86_EFLAGS_RF;
5590 				goto done;
5591 			}
5592 		}
5593 	}
5594 
5595 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5596 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5597 				    ctxt->src.valptr, ctxt->src.bytes);
5598 		if (rc != X86EMUL_CONTINUE)
5599 			goto done;
5600 		ctxt->src.orig_val64 = ctxt->src.val64;
5601 	}
5602 
5603 	if (ctxt->src2.type == OP_MEM) {
5604 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5605 				    &ctxt->src2.val, ctxt->src2.bytes);
5606 		if (rc != X86EMUL_CONTINUE)
5607 			goto done;
5608 	}
5609 
5610 	if ((ctxt->d & DstMask) == ImplicitOps)
5611 		goto special_insn;
5612 
5613 
5614 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5615 		/* optimisation - avoid slow emulated read if Mov */
5616 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5617 				   &ctxt->dst.val, ctxt->dst.bytes);
5618 		if (rc != X86EMUL_CONTINUE) {
5619 			if (!(ctxt->d & NoWrite) &&
5620 			    rc == X86EMUL_PROPAGATE_FAULT &&
5621 			    ctxt->exception.vector == PF_VECTOR)
5622 				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5623 			goto done;
5624 		}
5625 	}
5626 	/* Copy full 64-bit value for CMPXCHG8B.  */
5627 	ctxt->dst.orig_val64 = ctxt->dst.val64;
5628 
5629 special_insn:
5630 
5631 	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5632 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5633 					      X86_ICPT_POST_MEMACCESS);
5634 		if (rc != X86EMUL_CONTINUE)
5635 			goto done;
5636 	}
5637 
5638 	if (ctxt->rep_prefix && (ctxt->d & String))
5639 		ctxt->eflags |= X86_EFLAGS_RF;
5640 	else
5641 		ctxt->eflags &= ~X86_EFLAGS_RF;
5642 
5643 	if (ctxt->execute) {
5644 		if (ctxt->d & Fastop) {
5645 			void (*fop)(struct fastop *) = (void *)ctxt->execute;
5646 			rc = fastop(ctxt, fop);
5647 			if (rc != X86EMUL_CONTINUE)
5648 				goto done;
5649 			goto writeback;
5650 		}
5651 		rc = ctxt->execute(ctxt);
5652 		if (rc != X86EMUL_CONTINUE)
5653 			goto done;
5654 		goto writeback;
5655 	}
5656 
5657 	if (ctxt->opcode_len == 2)
5658 		goto twobyte_insn;
5659 	else if (ctxt->opcode_len == 3)
5660 		goto threebyte_insn;
5661 
5662 	switch (ctxt->b) {
5663 	case 0x70 ... 0x7f: /* jcc (short) */
5664 		if (test_cc(ctxt->b, ctxt->eflags))
5665 			rc = jmp_rel(ctxt, ctxt->src.val);
5666 		break;
5667 	case 0x8d: /* lea r16/r32, m */
5668 		ctxt->dst.val = ctxt->src.addr.mem.ea;
5669 		break;
5670 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5671 		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5672 			ctxt->dst.type = OP_NONE;
5673 		else
5674 			rc = em_xchg(ctxt);
5675 		break;
5676 	case 0x98: /* cbw/cwde/cdqe */
5677 		switch (ctxt->op_bytes) {
5678 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5679 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5680 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5681 		}
5682 		break;
5683 	case 0xcc:		/* int3 */
5684 		rc = emulate_int(ctxt, 3);
5685 		break;
5686 	case 0xcd:		/* int n */
5687 		rc = emulate_int(ctxt, ctxt->src.val);
5688 		break;
5689 	case 0xce:		/* into */
5690 		if (ctxt->eflags & X86_EFLAGS_OF)
5691 			rc = emulate_int(ctxt, 4);
5692 		break;
5693 	case 0xe9: /* jmp rel */
5694 	case 0xeb: /* jmp rel short */
5695 		rc = jmp_rel(ctxt, ctxt->src.val);
5696 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5697 		break;
5698 	case 0xf4:              /* hlt */
5699 		ctxt->ops->halt(ctxt);
5700 		break;
5701 	case 0xf5:	/* cmc */
5702 		/* complement carry flag from eflags reg */
5703 		ctxt->eflags ^= X86_EFLAGS_CF;
5704 		break;
5705 	case 0xf8: /* clc */
5706 		ctxt->eflags &= ~X86_EFLAGS_CF;
5707 		break;
5708 	case 0xf9: /* stc */
5709 		ctxt->eflags |= X86_EFLAGS_CF;
5710 		break;
5711 	case 0xfc: /* cld */
5712 		ctxt->eflags &= ~X86_EFLAGS_DF;
5713 		break;
5714 	case 0xfd: /* std */
5715 		ctxt->eflags |= X86_EFLAGS_DF;
5716 		break;
5717 	default:
5718 		goto cannot_emulate;
5719 	}
5720 
5721 	if (rc != X86EMUL_CONTINUE)
5722 		goto done;
5723 
5724 writeback:
5725 	if (ctxt->d & SrcWrite) {
5726 		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5727 		rc = writeback(ctxt, &ctxt->src);
5728 		if (rc != X86EMUL_CONTINUE)
5729 			goto done;
5730 	}
5731 	if (!(ctxt->d & NoWrite)) {
5732 		rc = writeback(ctxt, &ctxt->dst);
5733 		if (rc != X86EMUL_CONTINUE)
5734 			goto done;
5735 	}
5736 
5737 	/*
5738 	 * restore dst type in case the decoding will be reused
5739 	 * (happens for string instruction )
5740 	 */
5741 	ctxt->dst.type = saved_dst_type;
5742 
5743 	if ((ctxt->d & SrcMask) == SrcSI)
5744 		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5745 
5746 	if ((ctxt->d & DstMask) == DstDI)
5747 		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5748 
5749 	if (ctxt->rep_prefix && (ctxt->d & String)) {
5750 		unsigned int count;
5751 		struct read_cache *r = &ctxt->io_read;
5752 		if ((ctxt->d & SrcMask) == SrcSI)
5753 			count = ctxt->src.count;
5754 		else
5755 			count = ctxt->dst.count;
5756 		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5757 
5758 		if (!string_insn_completed(ctxt)) {
5759 			/*
5760 			 * Re-enter guest when pio read ahead buffer is empty
5761 			 * or, if it is not used, after each 1024 iteration.
5762 			 */
5763 			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5764 			    (r->end == 0 || r->end != r->pos)) {
5765 				/*
5766 				 * Reset read cache. Usually happens before
5767 				 * decode, but since instruction is restarted
5768 				 * we have to do it here.
5769 				 */
5770 				ctxt->mem_read.end = 0;
5771 				writeback_registers(ctxt);
5772 				return EMULATION_RESTART;
5773 			}
5774 			goto done; /* skip rip writeback */
5775 		}
5776 		ctxt->eflags &= ~X86_EFLAGS_RF;
5777 	}
5778 
5779 	ctxt->eip = ctxt->_eip;
5780 
5781 done:
5782 	if (rc == X86EMUL_PROPAGATE_FAULT) {
5783 		WARN_ON(ctxt->exception.vector > 0x1f);
5784 		ctxt->have_exception = true;
5785 	}
5786 	if (rc == X86EMUL_INTERCEPTED)
5787 		return EMULATION_INTERCEPTED;
5788 
5789 	if (rc == X86EMUL_CONTINUE)
5790 		writeback_registers(ctxt);
5791 
5792 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5793 
5794 twobyte_insn:
5795 	switch (ctxt->b) {
5796 	case 0x09:		/* wbinvd */
5797 		(ctxt->ops->wbinvd)(ctxt);
5798 		break;
5799 	case 0x08:		/* invd */
5800 	case 0x0d:		/* GrpP (prefetch) */
5801 	case 0x18:		/* Grp16 (prefetch/nop) */
5802 	case 0x1f:		/* nop */
5803 		break;
5804 	case 0x20: /* mov cr, reg */
5805 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5806 		break;
5807 	case 0x21: /* mov from dr to reg */
5808 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5809 		break;
5810 	case 0x40 ... 0x4f:	/* cmov */
5811 		if (test_cc(ctxt->b, ctxt->eflags))
5812 			ctxt->dst.val = ctxt->src.val;
5813 		else if (ctxt->op_bytes != 4)
5814 			ctxt->dst.type = OP_NONE; /* no writeback */
5815 		break;
5816 	case 0x80 ... 0x8f: /* jnz rel, etc*/
5817 		if (test_cc(ctxt->b, ctxt->eflags))
5818 			rc = jmp_rel(ctxt, ctxt->src.val);
5819 		break;
5820 	case 0x90 ... 0x9f:     /* setcc r/m8 */
5821 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5822 		break;
5823 	case 0xb6 ... 0xb7:	/* movzx */
5824 		ctxt->dst.bytes = ctxt->op_bytes;
5825 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5826 						       : (u16) ctxt->src.val;
5827 		break;
5828 	case 0xbe ... 0xbf:	/* movsx */
5829 		ctxt->dst.bytes = ctxt->op_bytes;
5830 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5831 							(s16) ctxt->src.val;
5832 		break;
5833 	default:
5834 		goto cannot_emulate;
5835 	}
5836 
5837 threebyte_insn:
5838 
5839 	if (rc != X86EMUL_CONTINUE)
5840 		goto done;
5841 
5842 	goto writeback;
5843 
5844 cannot_emulate:
5845 	return EMULATION_FAILED;
5846 }
5847 
5848 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5849 {
5850 	invalidate_registers(ctxt);
5851 }
5852 
5853 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5854 {
5855 	writeback_registers(ctxt);
5856 }
5857 
5858 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5859 {
5860 	if (ctxt->rep_prefix && (ctxt->d & String))
5861 		return false;
5862 
5863 	if (ctxt->d & TwoMemOp)
5864 		return false;
5865 
5866 	return true;
5867 }
5868