// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#include <asm/ibt.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
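
/*
 * Each opcode's 64-bit flags word packs up to three of the operand types
 * above using the Dst/Src/Src2 shifts defined below; for example,
 * DstReg | SrcMem places OpReg at DstShift and OpMem at SrcShift, and the
 * decoder recovers each field with (ctxt->d >> shift) & OpMask.
 */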

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
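
/*
 * These macros simply repeat their argument, e.g. X4(x) expands to
 * "x, x, x, x"; they keep repetitive runs of entries in the opcode
 * tables later in this file compact.
 */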

struct opcode {
	u64 flags;
	u8 intercept;
	u8 pad[7];
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dirty = ctxt->regs_dirty;
	unsigned reg;

	for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
 * and 1 for the straight line speculation INT3, leaves 7 bytes for the
 * body of the function.  Currently none is larger than 4.
 */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);

#define FASTOP_SIZE	16

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t" \
	ASM_ENDBR \
	IBT_NOSEAL(name)

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"11: " ASM_RET \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define __FOP_START(op, align) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(align) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op, dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END
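
/*
 * For example, FASTOP1(not) below emits em_not as four consecutive
 * FASTOP_SIZE-byte stubs (notb %al, notw %ax, notl %eax, notq %rax),
 * so fastop() can reach the variant for a given operand size by adding
 * a multiple of FASTOP_SIZE to em_not instead of using a jump table.
 */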

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	FOP_FUNC(op) \
	#op " %al \n\t" \
	FOP_RET(op)

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Using asm goto would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
		     : [_fault] "+r"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
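
/*
 * asm_safe() wraps a single instruction with an exception-table entry so
 * that a fault is reported rather than crashing the host; for example,
 * later in this file rc = asm_safe("fwait") yields X86EMUL_UNHANDLEABLE
 * if FWAIT faults and X86EMUL_CONTINUE otherwise.
 */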

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
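
/*
 * ad_mask() yields 0xffff for ad_bytes == 2 and 0xffffffff for
 * ad_bytes == 4.  address_mask() below returns the register unmasked when
 * ad_bytes already matches sizeof(unsigned long), so the shift here never
 * reaches the full word width.
 */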

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
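
/*
 * With the granularity bit set, the 20-bit raw limit is in units of 4K
 * pages, so e.g. a raw limit of 0xfffff scales to a byte limit of
 * 0xffffffff; with g clear, the raw limit is the byte limit.
 */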

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
		return X86EMUL_UNHANDLEABLE;

	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
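
/*
 * For a 16-byte access this means: MOVDQA (Aligned) must be 16-byte
 * aligned, MOVDQU (Unaligned) and AVX-encoded forms have no alignment
 * requirement, and FXSAVE's 512-byte image (Aligned16) needs only
 * 16-byte alignment.
 */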

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       enum x86emul_mode mode, ulong *linear,
				       unsigned int flags)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (!__is_canonical_address(la, va_bits))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
		    (flags & X86EMUL_F_WRITE))
			goto bad;
		/* unreadable code segment */
		if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
			   write ? X86EMUL_F_WRITE : 0);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
			 X86EMUL_F_FETCH);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;
	struct desc_struct cs;
	u16 selector;
	u32 base3;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
		/* Real mode. The CPU must not have long mode active. */
		if (efer & EFER_LMA)
			return X86EMUL_UNHANDLEABLE;
		ctxt->mode = X86EMUL_MODE_REAL;
		return X86EMUL_CONTINUE;
	}

	if (ctxt->eflags & X86_EFLAGS_VM) {
		/* Protected/VM86 mode. The CPU must not have long mode active. */
		if (efer & EFER_LMA)
			return X86EMUL_UNHANDLEABLE;
		ctxt->mode = X86EMUL_MODE_VM86;
		return X86EMUL_CONTINUE;
	}

	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
		return X86EMUL_UNHANDLEABLE;

	if (efer & EFER_LMA) {
		if (cs.l) {
			/* Proper long mode */
			ctxt->mode = X86EMUL_MODE_PROT64;
		} else if (cs.d) {
			/* 32 bit compatibility mode */
			ctxt->mode = X86EMUL_MODE_PROT32;
		} else {
			ctxt->mode = X86EMUL_MODE_PROT16;
		}
	} else {
		/* Legacy 32 bit / 16 bit mode */
		ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	}

	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	int rc = emulator_recalc_and_set_mode(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip(ctxt, dst);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing a page
 * boundary if they are not in the fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
			 X86EMUL_F_FETCH);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

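	/*
	 * Note: 15UL ^ cur_size equals 15 - cur_size here, because
	 * cur_size never exceeds 15 and 15 is 0b1111, so the XOR simply
	 * clears cur_size's bits out of 15.
	 */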
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if we still do not have
	 * enough bytes, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})
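
/*
 * insn_fetch() is a statement expression, so decode code can write e.g.
 * "sib = insn_fetch(u8, ctxt);" (as decode_modrm() does below).  Note that
 * it relies on a local "rc" variable and a "done:" label in the caller.
 */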

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop, together with the absence of a REX prefix, selects the legacy
 * high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);
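
/*
 * Each FASTOP*() use above instantiates an assembly stub table named
 * em_<op> (em_add, em_not, em_bsf, ...); the opcode tables later in this
 * file dispatch to these through fastop().
 */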

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}
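
/*
 * The FOP_SETCC() block above lays out the sixteen SETcc stubs
 * FASTOP_SIZE bytes apart in condition-code order, so test_cc() evaluates
 * any condition by indexing em_setcc with the low four bits of the opcode
 * after loading the guest's arithmetic flags via POPF.
 */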

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fninit");
	kvm_fpu_put();
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstcw %0": "+m"(fcw));
	kvm_fpu_put();

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstsw %0": "+m"(fsw));
	kvm_fpu_put();

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned int reg;

	if (ctxt->d & ModRM)
		reg = ctxt->modrm_reg;
	else
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		kvm_read_sse_reg(reg, &op->vec_val);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
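
/*
 * Worked example: for a 16-bit "bt [mem], reg" with a bit offset of 21,
 * mask is ~15, so sv = 21 & ~15 = 16; the effective address advances by
 * sv >> 3 = 2 bytes and src.val is reduced to 21 & 15 = 5, i.e. bit 5 of
 * the word two bytes further on.
 */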

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 byte segment descriptors */
write_segment_descriptor(struct x86_emulate_ctxt * ctxt,u16 selector,struct desc_struct * desc)1539 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1540 u16 selector, struct desc_struct *desc)
1541 {
1542 int rc;
1543 ulong addr;
1544
1545 rc = get_descriptor_ptr(ctxt, selector, &addr);
1546 if (rc != X86EMUL_CONTINUE)
1547 return rc;
1548
1549 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1550 }
1551
__load_segment_descriptor(struct x86_emulate_ctxt * ctxt,u16 selector,int seg,u8 cpl,enum x86_transfer_type transfer,struct desc_struct * desc)1552 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1553 u16 selector, int seg, u8 cpl,
1554 enum x86_transfer_type transfer,
1555 struct desc_struct *desc)
1556 {
1557 struct desc_struct seg_desc, old_desc;
1558 u8 dpl, rpl;
1559 unsigned err_vec = GP_VECTOR;
1560 u32 err_code = 0;
1561 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1562 ulong desc_addr;
1563 int ret;
1564 u16 dummy;
1565 u32 base3 = 0;
1566
1567 memset(&seg_desc, 0, sizeof(seg_desc));
1568
1569 if (ctxt->mode == X86EMUL_MODE_REAL) {
1570 /* set real mode segment descriptor (keep limit etc. for
1571 * unreal mode) */
1572 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1573 set_desc_base(&seg_desc, selector << 4);
1574 goto load;
1575 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1576 /* VM86 needs a clean new segment descriptor */
1577 set_desc_base(&seg_desc, selector << 4);
1578 set_desc_limit(&seg_desc, 0xffff);
1579 seg_desc.type = 3;
1580 seg_desc.p = 1;
1581 seg_desc.s = 1;
1582 seg_desc.dpl = 3;
1583 goto load;
1584 }
1585
1586 rpl = selector & 3;
1587
1588 /* TR should be in GDT only */
1589 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1590 goto exception;
1591
1592 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1593 if (null_selector) {
1594 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1595 goto exception;
1596
1597 if (seg == VCPU_SREG_SS) {
1598 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1599 goto exception;
1600
1601 /*
1602 * ctxt->ops->set_segment expects the CPL to be in
1603 * SS.DPL, so fake an expand-up 32-bit data segment.
1604 */
1605 seg_desc.type = 3;
1606 seg_desc.p = 1;
1607 seg_desc.s = 1;
1608 seg_desc.dpl = cpl;
1609 seg_desc.d = 1;
1610 seg_desc.g = 1;
1611 }
1612
1613 /* Skip all following checks */
1614 goto load;
1615 }
1616
1617 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1618 if (ret != X86EMUL_CONTINUE)
1619 return ret;
1620
1621 err_code = selector & 0xfffc;
1622 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1623 GP_VECTOR;
1624
1625 /* can't load system descriptor into segment selector */
1626 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1627 if (transfer == X86_TRANSFER_CALL_JMP)
1628 return X86EMUL_UNHANDLEABLE;
1629 goto exception;
1630 }
1631
1632 dpl = seg_desc.dpl;
1633
1634 switch (seg) {
1635 case VCPU_SREG_SS:
1636 /*
1637 * segment is not a writable data segment or segment
1638 * selector's RPL != CPL or DPL != CPL
1639 */
1640 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1641 goto exception;
1642 break;
1643 case VCPU_SREG_CS:
1644 /*
1645 * KVM uses "none" when loading CS as part of emulating Real
1646 * Mode exceptions and IRET (handled above). In all other
1647 * cases, loading CS without a control transfer is a KVM bug.
1648 */
1649 if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
1650 goto exception;
1651
1652 if (!(seg_desc.type & 8))
1653 goto exception;
1654
1655 if (transfer == X86_TRANSFER_RET) {
1656 /* RET can never return to an inner privilege level. */
1657 if (rpl < cpl)
1658 goto exception;
1659 /* Outer-privilege level return is not implemented */
1660 if (rpl > cpl)
1661 return X86EMUL_UNHANDLEABLE;
1662 }
1663 if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
1664 if (seg_desc.type & 4) {
1665 /* conforming */
1666 if (dpl > rpl)
1667 goto exception;
1668 } else {
1669 /* nonconforming */
1670 if (dpl != rpl)
1671 goto exception;
1672 }
1673 } else { /* X86_TRANSFER_CALL_JMP */
1674 if (seg_desc.type & 4) {
1675 /* conforming */
1676 if (dpl > cpl)
1677 goto exception;
1678 } else {
1679 /* nonconforming */
1680 if (rpl > cpl || dpl != cpl)
1681 goto exception;
1682 }
1683 }
1684 /* in long-mode d/b must be clear if l is set */
1685 if (seg_desc.d && seg_desc.l) {
1686 u64 efer = 0;
1687
1688 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1689 if (efer & EFER_LMA)
1690 goto exception;
1691 }
1692
1693 /* CS(RPL) <- CPL */
1694 selector = (selector & 0xfffc) | cpl;
1695 break;
1696 case VCPU_SREG_TR:
1697 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1698 goto exception;
1699 break;
1700 case VCPU_SREG_LDTR:
1701 if (seg_desc.s || seg_desc.type != 2)
1702 goto exception;
1703 break;
1704 default: /* DS, ES, FS, or GS */
1705 /*
1706 * segment is not a data or readable code segment or
1707 * ((segment is a data or nonconforming code segment)
1708 * and ((RPL > DPL) or (CPL > DPL)))
1709 */
1710 if ((seg_desc.type & 0xa) == 0x8 ||
1711 (((seg_desc.type & 0xc) != 0xc) &&
1712 (rpl > dpl || cpl > dpl)))
1713 goto exception;
1714 break;
1715 }
1716
1717 if (!seg_desc.p) {
1718 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1719 goto exception;
1720 }
1721
1722 if (seg_desc.s) {
1723 /* mark segment as accessed */
1724 if (!(seg_desc.type & 1)) {
1725 seg_desc.type |= 1;
1726 ret = write_segment_descriptor(ctxt, selector,
1727 &seg_desc);
1728 if (ret != X86EMUL_CONTINUE)
1729 return ret;
1730 }
1731 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1732 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1733 if (ret != X86EMUL_CONTINUE)
1734 return ret;
1735 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1736 ((u64)base3 << 32), ctxt))
1737 return emulate_gp(ctxt, err_code);
1738 }
1739
1740 if (seg == VCPU_SREG_TR) {
1741 old_desc = seg_desc;
1742 seg_desc.type |= 2; /* busy */
1743 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1744 sizeof(seg_desc), &ctxt->exception);
1745 if (ret != X86EMUL_CONTINUE)
1746 return ret;
1747 }
1748 load:
1749 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1750 if (desc)
1751 *desc = seg_desc;
1752 return X86EMUL_CONTINUE;
1753 exception:
1754 return emulate_exception(ctxt, err_vec, err_code, true);
1755 }
1756
load_segment_descriptor(struct x86_emulate_ctxt * ctxt,u16 selector,int seg)1757 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1758 u16 selector, int seg)
1759 {
1760 u8 cpl = ctxt->ops->cpl(ctxt);
1761
1762 /*
1763 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1764 * they can load it at CPL<3 (Intel's manual says only LSS can,
1765 * but it's wrong).
1766 *
1767 * However, the Intel manual says that putting IST=1/DPL=3 in
1768 * an interrupt gate will result in SS=3 (the AMD manual instead
1769 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1770 * and only forbid it here.
1771 */
1772 if (seg == VCPU_SREG_SS && selector == 3 &&
1773 ctxt->mode == X86EMUL_MODE_PROT64)
1774 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1775
1776 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1777 X86_TRANSFER_NONE, NULL);
1778 }
1779
write_register_operand(struct operand * op)1780 static void write_register_operand(struct operand *op)
1781 {
1782 return assign_register(op->addr.reg, op->val, op->bytes);
1783 }
1784
writeback(struct x86_emulate_ctxt * ctxt,struct operand * op)1785 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1786 {
1787 switch (op->type) {
1788 case OP_REG:
1789 write_register_operand(op);
1790 break;
1791 case OP_MEM:
1792 if (ctxt->lock_prefix)
1793 return segmented_cmpxchg(ctxt,
1794 op->addr.mem,
1795 &op->orig_val,
1796 &op->val,
1797 op->bytes);
1798 else
1799 return segmented_write(ctxt,
1800 op->addr.mem,
1801 &op->val,
1802 op->bytes);
1803 case OP_MEM_STR:
1804 return segmented_write(ctxt,
1805 op->addr.mem,
1806 op->data,
1807 op->bytes * op->count);
1808 case OP_XMM:
1809 kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1810 break;
1811 case OP_MM:
1812 kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1813 break;
1814 case OP_NONE:
1815 /* no writeback */
1816 break;
1817 default:
1818 break;
1819 }
1820 return X86EMUL_CONTINUE;
1821 }
1822
emulate_push(struct x86_emulate_ctxt * ctxt,const void * data,int len)1823 static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len)
1824 {
1825 struct segmented_address addr;
1826
1827 rsp_increment(ctxt, -len);
1828 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1829 addr.seg = VCPU_SREG_SS;
1830
1831 return segmented_write(ctxt, addr, data, len);
1832 }
1833
1834 static int em_push(struct x86_emulate_ctxt *ctxt)
1835 {
1836 /* Disable writeback. */
1837 ctxt->dst.type = OP_NONE;
1838 return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1839 }
1840
1841 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1842 void *dest, int len)
1843 {
1844 int rc;
1845 struct segmented_address addr;
1846
1847 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1848 addr.seg = VCPU_SREG_SS;
1849 rc = segmented_read(ctxt, addr, dest, len);
1850 if (rc != X86EMUL_CONTINUE)
1851 return rc;
1852
1853 rsp_increment(ctxt, len);
1854 return rc;
1855 }
1856
1857 static int em_pop(struct x86_emulate_ctxt *ctxt)
1858 {
1859 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1860 }
1861
1862 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1863 void *dest, int len)
1864 {
1865 int rc;
1866 unsigned long val = 0;
1867 unsigned long change_mask;
1868 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1869 int cpl = ctxt->ops->cpl(ctxt);
1870
1871 rc = emulate_pop(ctxt, &val, len);
1872 if (rc != X86EMUL_CONTINUE)
1873 return rc;
1874
1875 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1876 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1877 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1878 X86_EFLAGS_AC | X86_EFLAGS_ID;
1879
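	/*
	 * IOPL may only be changed at CPL 0, and IF only when running at
	 * or below the current IOPL; VM86 requires IOPL 3 to use POPF at
	 * all.
	 */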
1880 switch(ctxt->mode) {
1881 case X86EMUL_MODE_PROT64:
1882 case X86EMUL_MODE_PROT32:
1883 case X86EMUL_MODE_PROT16:
1884 if (cpl == 0)
1885 change_mask |= X86_EFLAGS_IOPL;
1886 if (cpl <= iopl)
1887 change_mask |= X86_EFLAGS_IF;
1888 break;
1889 case X86EMUL_MODE_VM86:
1890 if (iopl < 3)
1891 return emulate_gp(ctxt, 0);
1892 change_mask |= X86_EFLAGS_IF;
1893 break;
1894 default: /* real mode */
1895 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1896 break;
1897 }
1898
1899 *(unsigned long *)dest =
1900 (ctxt->eflags & ~change_mask) | (val & change_mask);
1901
1902 return rc;
1903 }
1904
1905 static int em_popf(struct x86_emulate_ctxt *ctxt)
1906 {
1907 ctxt->dst.type = OP_REG;
1908 ctxt->dst.addr.reg = &ctxt->eflags;
1909 ctxt->dst.bytes = ctxt->op_bytes;
1910 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1911 }
1912
1913 static int em_enter(struct x86_emulate_ctxt *ctxt)
1914 {
1915 int rc;
1916 unsigned frame_size = ctxt->src.val;
1917 unsigned nesting_level = ctxt->src2.val & 31;
1918 ulong rbp;
1919
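	/* Only nesting level 0 is emulated; nested ENTER frames are punted. */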
1920 if (nesting_level)
1921 return X86EMUL_UNHANDLEABLE;
1922
1923 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1924 rc = emulate_push(ctxt, &rbp, stack_size(ctxt));
1925 if (rc != X86EMUL_CONTINUE)
1926 return rc;
1927 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1928 stack_mask(ctxt));
1929 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1930 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1931 stack_mask(ctxt));
1932 return X86EMUL_CONTINUE;
1933 }
1934
1935 static int em_leave(struct x86_emulate_ctxt *ctxt)
1936 {
1937 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1938 stack_mask(ctxt));
1939 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1940 }
1941
1942 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1943 {
1944 int seg = ctxt->src2.val;
1945
1946 ctxt->src.val = get_segment_selector(ctxt, seg);
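	/*
	 * A 32-bit PUSH of a segment register writes only the low word but
	 * still decrements the stack pointer by 4; emulate that as an extra
	 * 2-byte RSP adjustment followed by a 2-byte push.
	 */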
1947 if (ctxt->op_bytes == 4) {
1948 rsp_increment(ctxt, -2);
1949 ctxt->op_bytes = 2;
1950 }
1951
1952 return em_push(ctxt);
1953 }
1954
1955 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1956 {
1957 int seg = ctxt->src2.val;
1958 unsigned long selector = 0;
1959 int rc;
1960
1961 rc = emulate_pop(ctxt, &selector, 2);
1962 if (rc != X86EMUL_CONTINUE)
1963 return rc;
1964
1965 if (seg == VCPU_SREG_SS)
1966 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1967 if (ctxt->op_bytes > 2)
1968 rsp_increment(ctxt, ctxt->op_bytes - 2);
1969
1970 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1971 return rc;
1972 }
1973
1974 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1975 {
1976 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1977 int rc = X86EMUL_CONTINUE;
1978 int reg = VCPU_REGS_RAX;
1979
1980 while (reg <= VCPU_REGS_RDI) {
1981 ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1982 old_esp : reg_read(ctxt, reg);
1983
1984 rc = em_push(ctxt);
1985 if (rc != X86EMUL_CONTINUE)
1986 return rc;
1987
1988 ++reg;
1989 }
1990
1991 return rc;
1992 }
1993
1994 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1995 {
1996 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1997 return em_push(ctxt);
1998 }
1999
2000 static int em_popa(struct x86_emulate_ctxt *ctxt)
2001 {
2002 int rc = X86EMUL_CONTINUE;
2003 int reg = VCPU_REGS_RDI;
2004 u32 val = 0;
2005
2006 while (reg >= VCPU_REGS_RAX) {
2007 if (reg == VCPU_REGS_RSP) {
2008 rsp_increment(ctxt, ctxt->op_bytes);
2009 --reg;
2010 }
2011
2012 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2013 if (rc != X86EMUL_CONTINUE)
2014 break;
2015 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2016 --reg;
2017 }
2018 return rc;
2019 }
2020
2021 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2022 {
2023 const struct x86_emulate_ops *ops = ctxt->ops;
2024 int rc;
2025 struct desc_ptr dt;
2026 gva_t cs_addr;
2027 gva_t eip_addr;
2028 u16 cs, eip;
2029
2030 /* TODO: Add limit checks */
2031 ctxt->src.val = ctxt->eflags;
2032 rc = em_push(ctxt);
2033 if (rc != X86EMUL_CONTINUE)
2034 return rc;
2035
2036 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2037
2038 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2039 rc = em_push(ctxt);
2040 if (rc != X86EMUL_CONTINUE)
2041 return rc;
2042
2043 ctxt->src.val = ctxt->_eip;
2044 rc = em_push(ctxt);
2045 if (rc != X86EMUL_CONTINUE)
2046 return rc;
2047
2048 ops->get_idt(ctxt, &dt);
2049
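	/*
	 * Real-mode IVT entries are 4 bytes each: IP at offset 0 and CS at
	 * offset 2, hence the "irq << 2" scaling.
	 */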
2050 eip_addr = dt.address + (irq << 2);
2051 cs_addr = dt.address + (irq << 2) + 2;
2052
2053 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2054 if (rc != X86EMUL_CONTINUE)
2055 return rc;
2056
2057 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2058 if (rc != X86EMUL_CONTINUE)
2059 return rc;
2060
2061 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2062 if (rc != X86EMUL_CONTINUE)
2063 return rc;
2064
2065 ctxt->_eip = eip;
2066
2067 return rc;
2068 }
2069
2070 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2071 {
2072 int rc;
2073
2074 invalidate_registers(ctxt);
2075 rc = __emulate_int_real(ctxt, irq);
2076 if (rc == X86EMUL_CONTINUE)
2077 writeback_registers(ctxt);
2078 return rc;
2079 }
2080
2081 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2082 {
2083 switch(ctxt->mode) {
2084 case X86EMUL_MODE_REAL:
2085 return __emulate_int_real(ctxt, irq);
2086 case X86EMUL_MODE_VM86:
2087 case X86EMUL_MODE_PROT16:
2088 case X86EMUL_MODE_PROT32:
2089 case X86EMUL_MODE_PROT64:
2090 default:
2091 /* Protected mode interrupts are not implemented yet */
2092 return X86EMUL_UNHANDLEABLE;
2093 }
2094 }
2095
2096 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2097 {
2098 int rc = X86EMUL_CONTINUE;
2099 unsigned long temp_eip = 0;
2100 unsigned long temp_eflags = 0;
2101 unsigned long cs = 0;
2102 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2103 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2104 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2105 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2106 X86_EFLAGS_AC | X86_EFLAGS_ID |
2107 X86_EFLAGS_FIXED;
2108 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2109 X86_EFLAGS_VIP;
2110
2111 /* TODO: Add stack limit check */
2112
2113 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2114
2115 if (rc != X86EMUL_CONTINUE)
2116 return rc;
2117
2118 if (temp_eip & ~0xffff)
2119 return emulate_gp(ctxt, 0);
2120
2121 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2122
2123 if (rc != X86EMUL_CONTINUE)
2124 return rc;
2125
2126 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2127
2128 if (rc != X86EMUL_CONTINUE)
2129 return rc;
2130
2131 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2132
2133 if (rc != X86EMUL_CONTINUE)
2134 return rc;
2135
2136 ctxt->_eip = temp_eip;
2137
2138 if (ctxt->op_bytes == 4)
2139 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2140 else if (ctxt->op_bytes == 2) {
2141 ctxt->eflags &= ~0xffff;
2142 ctxt->eflags |= temp_eflags;
2143 }
2144
2145 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2146 ctxt->eflags |= X86_EFLAGS_FIXED;
2147 ctxt->ops->set_nmi_mask(ctxt, false);
2148
2149 return rc;
2150 }
2151
2152 static int em_iret(struct x86_emulate_ctxt *ctxt)
2153 {
2154 switch(ctxt->mode) {
2155 case X86EMUL_MODE_REAL:
2156 return emulate_iret_real(ctxt);
2157 case X86EMUL_MODE_VM86:
2158 case X86EMUL_MODE_PROT16:
2159 case X86EMUL_MODE_PROT32:
2160 case X86EMUL_MODE_PROT64:
2161 default:
2162 /* IRET from protected mode is not implemented yet */
2163 return X86EMUL_UNHANDLEABLE;
2164 }
2165 }
2166
2167 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2168 {
2169 int rc;
2170 unsigned short sel;
2171 struct desc_struct new_desc;
2172 u8 cpl = ctxt->ops->cpl(ctxt);
2173
2174 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2175
2176 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2177 X86_TRANSFER_CALL_JMP,
2178 &new_desc);
2179 if (rc != X86EMUL_CONTINUE)
2180 return rc;
2181
2182 rc = assign_eip_far(ctxt, ctxt->src.val);
2183 /* Error handling is not implemented. */
2184 if (rc != X86EMUL_CONTINUE)
2185 return X86EMUL_UNHANDLEABLE;
2186
2187 return rc;
2188 }
2189
2190 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2191 {
2192 return assign_eip_near(ctxt, ctxt->src.val);
2193 }
2194
2195 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2196 {
2197 int rc;
2198 long int old_eip;
2199
2200 old_eip = ctxt->_eip;
2201 rc = assign_eip_near(ctxt, ctxt->src.val);
2202 if (rc != X86EMUL_CONTINUE)
2203 return rc;
2204 ctxt->src.val = old_eip;
2205 rc = em_push(ctxt);
2206 return rc;
2207 }
2208
2209 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2210 {
2211 u64 old = ctxt->dst.orig_val64;
2212
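	/* CMPXCHG16B (a 16-byte destination) is not emulated. */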
2213 if (ctxt->dst.bytes == 16)
2214 return X86EMUL_UNHANDLEABLE;
2215
2216 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2217 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2218 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2219 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2220 ctxt->eflags &= ~X86_EFLAGS_ZF;
2221 } else {
2222 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2223 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2224
2225 ctxt->eflags |= X86_EFLAGS_ZF;
2226 }
2227 return X86EMUL_CONTINUE;
2228 }
2229
2230 static int em_ret(struct x86_emulate_ctxt *ctxt)
2231 {
2232 int rc;
2233 unsigned long eip = 0;
2234
2235 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2236 if (rc != X86EMUL_CONTINUE)
2237 return rc;
2238
2239 return assign_eip_near(ctxt, eip);
2240 }
2241
2242 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2243 {
2244 int rc;
2245 unsigned long eip = 0;
2246 unsigned long cs = 0;
2247 int cpl = ctxt->ops->cpl(ctxt);
2248 struct desc_struct new_desc;
2249
2250 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2251 if (rc != X86EMUL_CONTINUE)
2252 return rc;
2253 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2254 if (rc != X86EMUL_CONTINUE)
2255 return rc;
2256 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2257 X86_TRANSFER_RET,
2258 &new_desc);
2259 if (rc != X86EMUL_CONTINUE)
2260 return rc;
2261 rc = assign_eip_far(ctxt, eip);
2262 /* Error handling is not implemented. */
2263 if (rc != X86EMUL_CONTINUE)
2264 return X86EMUL_UNHANDLEABLE;
2265
2266 return rc;
2267 }
2268
2269 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2270 {
2271 int rc;
2272
2273 rc = em_ret_far(ctxt);
2274 if (rc != X86EMUL_CONTINUE)
2275 return rc;
2276 rsp_increment(ctxt, ctxt->src.val);
2277 return X86EMUL_CONTINUE;
2278 }
2279
2280 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2281 {
2282 /* Save real source value, then compare EAX against destination. */
2283 ctxt->dst.orig_val = ctxt->dst.val;
2284 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2285 ctxt->src.orig_val = ctxt->src.val;
2286 ctxt->src.val = ctxt->dst.orig_val;
2287 fastop(ctxt, em_cmp);
2288
2289 if (ctxt->eflags & X86_EFLAGS_ZF) {
2290 /* Success: write back to memory; no update of EAX */
2291 ctxt->src.type = OP_NONE;
2292 ctxt->dst.val = ctxt->src.orig_val;
2293 } else {
2294 /* Failure: write the value we saw to EAX. */
2295 ctxt->src.type = OP_REG;
2296 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2297 ctxt->src.val = ctxt->dst.orig_val;
2298 /* Create write-cycle to dest by writing the same value */
2299 ctxt->dst.val = ctxt->dst.orig_val;
2300 }
2301 return X86EMUL_CONTINUE;
2302 }
2303
2304 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2305 {
2306 int seg = ctxt->src2.val;
2307 unsigned short sel;
2308 int rc;
2309
2310 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2311
2312 rc = load_segment_descriptor(ctxt, sel, seg);
2313 if (rc != X86EMUL_CONTINUE)
2314 return rc;
2315
2316 ctxt->dst.val = ctxt->src.val;
2317 return rc;
2318 }
2319
2320 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2321 {
2322 if (!ctxt->ops->is_smm(ctxt))
2323 return emulate_ud(ctxt);
2324
2325 if (ctxt->ops->leave_smm(ctxt))
2326 ctxt->ops->triple_fault(ctxt);
2327
2328 return emulator_recalc_and_set_mode(ctxt);
2329 }
2330
2331 static void
2332 setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
2333 {
2334 cs->l = 0; /* will be adjusted later */
2335 set_desc_base(cs, 0); /* flat segment */
2336 cs->g = 1; /* 4kb granularity */
2337 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2338 cs->type = 0x0b; /* Read, Execute, Accessed */
2339 cs->s = 1;
2340 cs->dpl = 0; /* will be adjusted later */
2341 cs->p = 1;
2342 cs->d = 1;
2343 cs->avl = 0;
2344
2345 set_desc_base(ss, 0); /* flat segment */
2346 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2347 ss->g = 1; /* 4kb granularity */
2348 ss->s = 1;
2349 ss->type = 0x03; /* Read/Write, Accessed */
2350 ss->d = 1; /* 32bit stack segment */
2351 ss->dpl = 0;
2352 ss->p = 1;
2353 ss->l = 0;
2354 ss->avl = 0;
2355 }
2356
2357 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2358 {
2359 u32 eax, ebx, ecx, edx;
2360
2361 eax = ecx = 0;
2362 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2363 return is_guest_vendor_intel(ebx, ecx, edx);
2364 }
2365
2366 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2367 {
2368 const struct x86_emulate_ops *ops = ctxt->ops;
2369 u32 eax, ebx, ecx, edx;
2370
2371 /*
2372 * SYSCALL is always enabled in long mode, so the check only needs to
2373 * become vendor specific (via CPUID) when other modes are active.
2374 */
2375 if (ctxt->mode == X86EMUL_MODE_PROT64)
2376 return true;
2377
2378 eax = 0x00000000;
2379 ecx = 0x00000000;
2380 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2381 /*
2382 * Remark: Intel CPUs only support SYSCALL in 64-bit long mode, and a
2383 * 32-bit compat application running under a 64-bit guest will #UD.
2384 * While that behaviour could be fixed by emulating the AMD response,
2385 * AMD CPUs cannot be made to behave like Intel ones.
2386 */
2387 if (is_guest_vendor_intel(ebx, ecx, edx))
2388 return false;
2389
2390 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2391 is_guest_vendor_hygon(ebx, ecx, edx))
2392 return true;
2393
2394 /*
2395 * default: (not Intel, not AMD, not Hygon), apply Intel's
2396 * stricter rules...
2397 */
2398 return false;
2399 }
2400
2401 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2402 {
2403 const struct x86_emulate_ops *ops = ctxt->ops;
2404 struct desc_struct cs, ss;
2405 u64 msr_data;
2406 u16 cs_sel, ss_sel;
2407 u64 efer = 0;
2408
2409 /* syscall is not available in real mode */
2410 if (ctxt->mode == X86EMUL_MODE_REAL ||
2411 ctxt->mode == X86EMUL_MODE_VM86)
2412 return emulate_ud(ctxt);
2413
2414 if (!(em_syscall_is_enabled(ctxt)))
2415 return emulate_ud(ctxt);
2416
2417 ops->get_msr(ctxt, MSR_EFER, &efer);
2418 if (!(efer & EFER_SCE))
2419 return emulate_ud(ctxt);
2420
2421 setup_syscalls_segments(&cs, &ss);
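	/*
	 * Bits 47:32 of MSR_STAR hold the SYSCALL CS selector; SS is
	 * architecturally loaded with that selector + 8.
	 */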
2422 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2423 msr_data >>= 32;
2424 cs_sel = (u16)(msr_data & 0xfffc);
2425 ss_sel = (u16)(msr_data + 8);
2426
2427 if (efer & EFER_LMA) {
2428 cs.d = 0;
2429 cs.l = 1;
2430 }
2431 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2432 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2433
2434 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2435 if (efer & EFER_LMA) {
2436 #ifdef CONFIG_X86_64
2437 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2438
2439 ops->get_msr(ctxt,
2440 ctxt->mode == X86EMUL_MODE_PROT64 ?
2441 MSR_LSTAR : MSR_CSTAR, &msr_data);
2442 ctxt->_eip = msr_data;
2443
2444 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2445 ctxt->eflags &= ~msr_data;
2446 ctxt->eflags |= X86_EFLAGS_FIXED;
2447 #endif
2448 } else {
2449 /* legacy mode */
2450 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2451 ctxt->_eip = (u32)msr_data;
2452
2453 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2454 }
2455
2456 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2457 return X86EMUL_CONTINUE;
2458 }
2459
2460 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2461 {
2462 const struct x86_emulate_ops *ops = ctxt->ops;
2463 struct desc_struct cs, ss;
2464 u64 msr_data;
2465 u16 cs_sel, ss_sel;
2466 u64 efer = 0;
2467
2468 ops->get_msr(ctxt, MSR_EFER, &efer);
2469 /* inject #GP if in real mode */
2470 if (ctxt->mode == X86EMUL_MODE_REAL)
2471 return emulate_gp(ctxt, 0);
2472
2473 /*
2474 * Not recognized on AMD in compat mode (but is recognized in legacy
2475 * mode).
2476 */
2477 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2478 && !vendor_intel(ctxt))
2479 return emulate_ud(ctxt);
2480
2481 /* sysenter/sysexit have not been tested in 64bit mode. */
2482 if (ctxt->mode == X86EMUL_MODE_PROT64)
2483 return X86EMUL_UNHANDLEABLE;
2484
2485 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2486 if ((msr_data & 0xfffc) == 0x0)
2487 return emulate_gp(ctxt, 0);
2488
2489 setup_syscalls_segments(&cs, &ss);
2490 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
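	/* SYSENTER: CS comes from IA32_SYSENTER_CS; SS is architecturally CS + 8. */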
2491 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2492 ss_sel = cs_sel + 8;
2493 if (efer & EFER_LMA) {
2494 cs.d = 0;
2495 cs.l = 1;
2496 }
2497
2498 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2499 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2500
2501 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2502 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2503
2504 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2505 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2506 (u32)msr_data;
2507 if (efer & EFER_LMA)
2508 ctxt->mode = X86EMUL_MODE_PROT64;
2509
2510 return X86EMUL_CONTINUE;
2511 }
2512
2513 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2514 {
2515 const struct x86_emulate_ops *ops = ctxt->ops;
2516 struct desc_struct cs, ss;
2517 u64 msr_data, rcx, rdx;
2518 int usermode;
2519 u16 cs_sel = 0, ss_sel = 0;
2520
2521 /* inject #GP if in real mode or Virtual 8086 mode */
2522 if (ctxt->mode == X86EMUL_MODE_REAL ||
2523 ctxt->mode == X86EMUL_MODE_VM86)
2524 return emulate_gp(ctxt, 0);
2525
2526 setup_syscalls_segments(&cs, &ss);
2527
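	/* REX.W selects a 64-bit SYSEXIT, i.e. a return to 64-bit user mode. */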
2528 if ((ctxt->rex_prefix & 0x8) != 0x0)
2529 usermode = X86EMUL_MODE_PROT64;
2530 else
2531 usermode = X86EMUL_MODE_PROT32;
2532
2533 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2534 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2535
2536 cs.dpl = 3;
2537 ss.dpl = 3;
2538 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
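	/*
	 * SYSEXIT derives the return selectors from IA32_SYSENTER_CS:
	 * CS = base + 16 (32-bit) or base + 32 (64-bit) and SS = CS + 8,
	 * with the RPL forced to 3 below.
	 */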
2539 switch (usermode) {
2540 case X86EMUL_MODE_PROT32:
2541 cs_sel = (u16)(msr_data + 16);
2542 if ((msr_data & 0xfffc) == 0x0)
2543 return emulate_gp(ctxt, 0);
2544 ss_sel = (u16)(msr_data + 24);
2545 rcx = (u32)rcx;
2546 rdx = (u32)rdx;
2547 break;
2548 case X86EMUL_MODE_PROT64:
2549 cs_sel = (u16)(msr_data + 32);
2550 if (msr_data == 0x0)
2551 return emulate_gp(ctxt, 0);
2552 ss_sel = cs_sel + 8;
2553 cs.d = 0;
2554 cs.l = 1;
2555 if (emul_is_noncanonical_address(rcx, ctxt) ||
2556 emul_is_noncanonical_address(rdx, ctxt))
2557 return emulate_gp(ctxt, 0);
2558 break;
2559 }
2560 cs_sel |= SEGMENT_RPL_MASK;
2561 ss_sel |= SEGMENT_RPL_MASK;
2562
2563 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2564 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2565
2566 ctxt->_eip = rdx;
2567 ctxt->mode = usermode;
2568 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2569
2570 return X86EMUL_CONTINUE;
2571 }
2572
2573 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2574 {
2575 int iopl;
2576 if (ctxt->mode == X86EMUL_MODE_REAL)
2577 return false;
2578 if (ctxt->mode == X86EMUL_MODE_VM86)
2579 return true;
2580 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2581 return ctxt->ops->cpl(ctxt) > iopl;
2582 }
2583
2584 #define VMWARE_PORT_VMPORT (0x5658)
2585 #define VMWARE_PORT_VMRPC (0x5659)
2586
2587 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2588 u16 port, u16 len)
2589 {
2590 const struct x86_emulate_ops *ops = ctxt->ops;
2591 struct desc_struct tr_seg;
2592 u32 base3;
2593 int r;
2594 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2595 unsigned mask = (1 << len) - 1;
2596 unsigned long base;
2597
2598 /*
2599 * VMware allows access to these ports even if denied
2600 * by TSS I/O permission bitmap. Mimic behavior.
2601 */
2602 if (enable_vmware_backdoor &&
2603 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2604 return true;
2605
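	/*
	 * Consult the I/O permission bitmap in the TSS: the 16-bit bitmap
	 * offset lives at byte 102 of the TSS, and the access is allowed
	 * only if every bit covering the port range is clear.
	 */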
2606 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2607 if (!tr_seg.p)
2608 return false;
2609 if (desc_limit_scaled(&tr_seg) < 103)
2610 return false;
2611 base = get_desc_base(&tr_seg);
2612 #ifdef CONFIG_X86_64
2613 base |= ((u64)base3) << 32;
2614 #endif
2615 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2616 if (r != X86EMUL_CONTINUE)
2617 return false;
2618 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2619 return false;
2620 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2621 if (r != X86EMUL_CONTINUE)
2622 return false;
2623 if ((perm >> bit_idx) & mask)
2624 return false;
2625 return true;
2626 }
2627
2628 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2629 u16 port, u16 len)
2630 {
2631 if (ctxt->perm_ok)
2632 return true;
2633
2634 if (emulator_bad_iopl(ctxt))
2635 if (!emulator_io_port_access_allowed(ctxt, port, len))
2636 return false;
2637
2638 ctxt->perm_ok = true;
2639
2640 return true;
2641 }
2642
2643 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2644 {
2645 /*
2646 * Intel CPUs mask the counter and pointer registers in a rather
2647 * strange manner when ECX is zero, due to REP-string optimizations.
2648 */
2649 #ifdef CONFIG_X86_64
2650 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2651 return;
2652
2653 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2654
2655 switch (ctxt->b) {
2656 case 0xa4: /* movsb */
2657 case 0xa5: /* movsd/w */
2658 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2659 fallthrough;
2660 case 0xaa: /* stosb */
2661 case 0xab: /* stosd/w */
2662 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2663 }
2664 #endif
2665 }
2666
2667 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2668 struct tss_segment_16 *tss)
2669 {
2670 tss->ip = ctxt->_eip;
2671 tss->flag = ctxt->eflags;
2672 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2673 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2674 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2675 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2676 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2677 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2678 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2679 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2680
2681 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2682 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2683 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2684 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2685 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2686 }
2687
2688 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2689 struct tss_segment_16 *tss)
2690 {
2691 int ret;
2692 u8 cpl;
2693
2694 ctxt->_eip = tss->ip;
2695 ctxt->eflags = tss->flag | 2;
2696 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2697 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2698 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2699 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2700 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2701 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2702 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2703 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2704
2705 /*
2706 * SDM says that segment selectors are loaded before segment
2707 * descriptors
2708 */
2709 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2710 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2711 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2712 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2713 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2714
2715 cpl = tss->cs & 3;
2716
2717 /*
2718 * Now load the segment descriptors. If a fault happens at this stage,
2719 * it is handled in the context of the new task.
2720 */
2721 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2722 X86_TRANSFER_TASK_SWITCH, NULL);
2723 if (ret != X86EMUL_CONTINUE)
2724 return ret;
2725 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2726 X86_TRANSFER_TASK_SWITCH, NULL);
2727 if (ret != X86EMUL_CONTINUE)
2728 return ret;
2729 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2730 X86_TRANSFER_TASK_SWITCH, NULL);
2731 if (ret != X86EMUL_CONTINUE)
2732 return ret;
2733 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2734 X86_TRANSFER_TASK_SWITCH, NULL);
2735 if (ret != X86EMUL_CONTINUE)
2736 return ret;
2737 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2738 X86_TRANSFER_TASK_SWITCH, NULL);
2739 if (ret != X86EMUL_CONTINUE)
2740 return ret;
2741
2742 return X86EMUL_CONTINUE;
2743 }
2744
2745 static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2746 ulong old_tss_base, struct desc_struct *new_desc)
2747 {
2748 struct tss_segment_16 tss_seg;
2749 int ret;
2750 u32 new_tss_base = get_desc_base(new_desc);
2751
2752 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2753 if (ret != X86EMUL_CONTINUE)
2754 return ret;
2755
2756 save_state_to_tss16(ctxt, &tss_seg);
2757
2758 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2759 if (ret != X86EMUL_CONTINUE)
2760 return ret;
2761
2762 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2763 if (ret != X86EMUL_CONTINUE)
2764 return ret;
2765
2766 if (old_tss_sel != 0xffff) {
2767 tss_seg.prev_task_link = old_tss_sel;
2768
2769 ret = linear_write_system(ctxt, new_tss_base,
2770 &tss_seg.prev_task_link,
2771 sizeof(tss_seg.prev_task_link));
2772 if (ret != X86EMUL_CONTINUE)
2773 return ret;
2774 }
2775
2776 return load_state_from_tss16(ctxt, &tss_seg);
2777 }
2778
2779 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2780 struct tss_segment_32 *tss)
2781 {
2782 /* CR3 and the LDT selector are intentionally not saved */
2783 tss->eip = ctxt->_eip;
2784 tss->eflags = ctxt->eflags;
2785 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2786 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2787 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2788 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2789 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2790 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2791 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2792 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2793
2794 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2795 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2796 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2797 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2798 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2799 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2800 }
2801
2802 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2803 struct tss_segment_32 *tss)
2804 {
2805 int ret;
2806 u8 cpl;
2807
2808 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2809 return emulate_gp(ctxt, 0);
2810 ctxt->_eip = tss->eip;
2811 ctxt->eflags = tss->eflags | 2;
2812
2813 /* General purpose registers */
2814 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2815 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2816 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2817 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2818 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2819 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2820 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2821 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2822
2823 /*
2824 * SDM says that segment selectors are loaded before segment
2825 * descriptors. This is important because CPL checks will
2826 * use CS.RPL.
2827 */
2828 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2829 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2830 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2831 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2832 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2833 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2834 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2835
2836 /*
2837 * If we're switching between Protected Mode and VM86, we need to make
2838 * sure to update the mode before loading the segment descriptors so
2839 * that the selectors are interpreted correctly.
2840 */
2841 if (ctxt->eflags & X86_EFLAGS_VM) {
2842 ctxt->mode = X86EMUL_MODE_VM86;
2843 cpl = 3;
2844 } else {
2845 ctxt->mode = X86EMUL_MODE_PROT32;
2846 cpl = tss->cs & 3;
2847 }
2848
2849 /*
2850 * Now load the segment descriptors. If a fault happens at this stage,
2851 * it is handled in the context of the new task.
2852 */
2853 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2854 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2855 if (ret != X86EMUL_CONTINUE)
2856 return ret;
2857 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2858 X86_TRANSFER_TASK_SWITCH, NULL);
2859 if (ret != X86EMUL_CONTINUE)
2860 return ret;
2861 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2862 X86_TRANSFER_TASK_SWITCH, NULL);
2863 if (ret != X86EMUL_CONTINUE)
2864 return ret;
2865 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2866 X86_TRANSFER_TASK_SWITCH, NULL);
2867 if (ret != X86EMUL_CONTINUE)
2868 return ret;
2869 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2870 X86_TRANSFER_TASK_SWITCH, NULL);
2871 if (ret != X86EMUL_CONTINUE)
2872 return ret;
2873 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2874 X86_TRANSFER_TASK_SWITCH, NULL);
2875 if (ret != X86EMUL_CONTINUE)
2876 return ret;
2877 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2878 X86_TRANSFER_TASK_SWITCH, NULL);
2879
2880 return ret;
2881 }
2882
2883 static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2884 ulong old_tss_base, struct desc_struct *new_desc)
2885 {
2886 struct tss_segment_32 tss_seg;
2887 int ret;
2888 u32 new_tss_base = get_desc_base(new_desc);
2889 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2890 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2891
2892 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2893 if (ret != X86EMUL_CONTINUE)
2894 return ret;
2895
2896 save_state_to_tss32(ctxt, &tss_seg);
2897
2898 /* Only GP registers and segment selectors are saved */
2899 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2900 ldt_sel_offset - eip_offset);
2901 if (ret != X86EMUL_CONTINUE)
2902 return ret;
2903
2904 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2905 if (ret != X86EMUL_CONTINUE)
2906 return ret;
2907
2908 if (old_tss_sel != 0xffff) {
2909 tss_seg.prev_task_link = old_tss_sel;
2910
2911 ret = linear_write_system(ctxt, new_tss_base,
2912 &tss_seg.prev_task_link,
2913 sizeof(tss_seg.prev_task_link));
2914 if (ret != X86EMUL_CONTINUE)
2915 return ret;
2916 }
2917
2918 return load_state_from_tss32(ctxt, &tss_seg);
2919 }
2920
2921 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2922 u16 tss_selector, int idt_index, int reason,
2923 bool has_error_code, u32 error_code)
2924 {
2925 const struct x86_emulate_ops *ops = ctxt->ops;
2926 struct desc_struct curr_tss_desc, next_tss_desc;
2927 int ret;
2928 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2929 ulong old_tss_base =
2930 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2931 u32 desc_limit;
2932 ulong desc_addr, dr7;
2933
2934 /* FIXME: old_tss_base == ~0 ? */
2935
2936 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2937 if (ret != X86EMUL_CONTINUE)
2938 return ret;
2939 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2940 if (ret != X86EMUL_CONTINUE)
2941 return ret;
2942
2943 /* FIXME: check that next_tss_desc is tss */
2944
2945 /*
2946 * Check privileges. The three cases are task switch caused by...
2947 *
2948 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2949 * 2. Exception/IRQ/iret: No check is performed
2950 * 3. jmp/call to TSS/task-gate: No check is performed since the
2951 * hardware checks it before exiting.
2952 */
2953 if (reason == TASK_SWITCH_GATE) {
2954 if (idt_index != -1) {
2955 /* Software interrupts */
2956 struct desc_struct task_gate_desc;
2957 int dpl;
2958
2959 ret = read_interrupt_descriptor(ctxt, idt_index,
2960 &task_gate_desc);
2961 if (ret != X86EMUL_CONTINUE)
2962 return ret;
2963
2964 dpl = task_gate_desc.dpl;
2965 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2966 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2967 }
2968 }
2969
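	/*
	 * A 32-bit TSS must be at least 0x68 bytes (limit 0x67) and a
	 * 16-bit TSS at least 0x2c bytes (limit 0x2b); anything smaller
	 * gets #TS.
	 */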
2970 desc_limit = desc_limit_scaled(&next_tss_desc);
2971 if (!next_tss_desc.p ||
2972 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2973 desc_limit < 0x2b)) {
2974 return emulate_ts(ctxt, tss_selector & 0xfffc);
2975 }
2976
2977 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2978 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2979 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2980 }
2981
2982 if (reason == TASK_SWITCH_IRET)
2983 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2984
2985 /* Set the back link to the previous task only if the NT bit is set in
2986    EFLAGS; note that old_tss_sel is not used after this point. */
2987 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2988 old_tss_sel = 0xffff;
2989
2990 if (next_tss_desc.type & 8)
2991 ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
2992 else
2993 ret = task_switch_16(ctxt, old_tss_sel,
2994 old_tss_base, &next_tss_desc);
2995 if (ret != X86EMUL_CONTINUE)
2996 return ret;
2997
2998 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2999 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3000
3001 if (reason != TASK_SWITCH_IRET) {
3002 next_tss_desc.type |= (1 << 1); /* set busy flag */
3003 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3004 }
3005
3006 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3007 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3008
3009 if (has_error_code) {
3010 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3011 ctxt->lock_prefix = 0;
3012 ctxt->src.val = (unsigned long) error_code;
3013 ret = em_push(ctxt);
3014 }
3015
3016 dr7 = ops->get_dr(ctxt, 7);
3017 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3018
3019 return ret;
3020 }
3021
3022 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3023 u16 tss_selector, int idt_index, int reason,
3024 bool has_error_code, u32 error_code)
3025 {
3026 int rc;
3027
3028 invalidate_registers(ctxt);
3029 ctxt->_eip = ctxt->eip;
3030 ctxt->dst.type = OP_NONE;
3031
3032 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3033 has_error_code, error_code);
3034
3035 if (rc == X86EMUL_CONTINUE) {
3036 ctxt->eip = ctxt->_eip;
3037 writeback_registers(ctxt);
3038 }
3039
3040 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3041 }
3042
3043 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3044 struct operand *op)
3045 {
3046 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3047
3048 register_address_increment(ctxt, reg, df * op->bytes);
3049 op->addr.mem.ea = register_address(ctxt, reg);
3050 }
3051
3052 static int em_das(struct x86_emulate_ctxt *ctxt)
3053 {
3054 u8 al, old_al;
3055 bool af, cf, old_cf;
3056
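	/*
	 * DAS: subtract 6 from AL if its low nibble is > 9 or AF is set,
	 * then subtract 0x60 if the original AL was > 0x99 or CF was set,
	 * updating AF and CF to match.
	 */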
3057 cf = ctxt->eflags & X86_EFLAGS_CF;
3058 al = ctxt->dst.val;
3059
3060 old_al = al;
3061 old_cf = cf;
3062 cf = false;
3063 af = ctxt->eflags & X86_EFLAGS_AF;
3064 if ((al & 0x0f) > 9 || af) {
3065 al -= 6;
3066 cf = old_cf | (al >= 250);
3067 af = true;
3068 } else {
3069 af = false;
3070 }
3071 if (old_al > 0x99 || old_cf) {
3072 al -= 0x60;
3073 cf = true;
3074 }
3075
3076 ctxt->dst.val = al;
3077 /* Set PF, ZF, SF */
3078 ctxt->src.type = OP_IMM;
3079 ctxt->src.val = 0;
3080 ctxt->src.bytes = 1;
3081 fastop(ctxt, em_or);
3082 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3083 if (cf)
3084 ctxt->eflags |= X86_EFLAGS_CF;
3085 if (af)
3086 ctxt->eflags |= X86_EFLAGS_AF;
3087 return X86EMUL_CONTINUE;
3088 }
3089
3090 static int em_aam(struct x86_emulate_ctxt *ctxt)
3091 {
3092 u8 al, ah;
3093
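	/* AAM: AH = AL / imm8 and AL = AL % imm8; imm8 == 0 raises #DE. */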
3094 if (ctxt->src.val == 0)
3095 return emulate_de(ctxt);
3096
3097 al = ctxt->dst.val & 0xff;
3098 ah = al / ctxt->src.val;
3099 al %= ctxt->src.val;
3100
3101 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3102
3103 /* Set PF, ZF, SF */
3104 ctxt->src.type = OP_IMM;
3105 ctxt->src.val = 0;
3106 ctxt->src.bytes = 1;
3107 fastop(ctxt, em_or);
3108
3109 return X86EMUL_CONTINUE;
3110 }
3111
3112 static int em_aad(struct x86_emulate_ctxt *ctxt)
3113 {
3114 u8 al = ctxt->dst.val & 0xff;
3115 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3116
3117 al = (al + (ah * ctxt->src.val)) & 0xff;
3118
3119 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3120
3121 /* Set PF, ZF, SF */
3122 ctxt->src.type = OP_IMM;
3123 ctxt->src.val = 0;
3124 ctxt->src.bytes = 1;
3125 fastop(ctxt, em_or);
3126
3127 return X86EMUL_CONTINUE;
3128 }
3129
3130 static int em_call(struct x86_emulate_ctxt *ctxt)
3131 {
3132 int rc;
3133 long rel = ctxt->src.val;
3134
3135 ctxt->src.val = (unsigned long)ctxt->_eip;
3136 rc = jmp_rel(ctxt, rel);
3137 if (rc != X86EMUL_CONTINUE)
3138 return rc;
3139 return em_push(ctxt);
3140 }
3141
3142 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3143 {
3144 u16 sel, old_cs;
3145 ulong old_eip;
3146 int rc;
3147 struct desc_struct old_desc, new_desc;
3148 const struct x86_emulate_ops *ops = ctxt->ops;
3149 int cpl = ctxt->ops->cpl(ctxt);
3150 enum x86emul_mode prev_mode = ctxt->mode;
3151
3152 old_eip = ctxt->_eip;
3153 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3154
3155 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3156 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3157 X86_TRANSFER_CALL_JMP, &new_desc);
3158 if (rc != X86EMUL_CONTINUE)
3159 return rc;
3160
3161 rc = assign_eip_far(ctxt, ctxt->src.val);
3162 if (rc != X86EMUL_CONTINUE)
3163 goto fail;
3164
3165 ctxt->src.val = old_cs;
3166 rc = em_push(ctxt);
3167 if (rc != X86EMUL_CONTINUE)
3168 goto fail;
3169
3170 ctxt->src.val = old_eip;
3171 rc = em_push(ctxt);
3172 /* If we failed, we tainted the memory, but at the very least we should
3173    restore CS. */
3174 if (rc != X86EMUL_CONTINUE) {
3175 pr_warn_once("faulting far call emulation tainted memory\n");
3176 goto fail;
3177 }
3178 return rc;
3179 fail:
3180 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3181 ctxt->mode = prev_mode;
3182 return rc;
3183
3184 }
3185
3186 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3187 {
3188 int rc;
3189 unsigned long eip = 0;
3190
3191 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3192 if (rc != X86EMUL_CONTINUE)
3193 return rc;
3194 rc = assign_eip_near(ctxt, eip);
3195 if (rc != X86EMUL_CONTINUE)
3196 return rc;
3197 rsp_increment(ctxt, ctxt->src.val);
3198 return X86EMUL_CONTINUE;
3199 }
3200
3201 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3202 {
3203 /* Write back the register source. */
3204 ctxt->src.val = ctxt->dst.val;
3205 write_register_operand(&ctxt->src);
3206
3207 /* Write back the memory destination with implicit LOCK prefix. */
3208 ctxt->dst.val = ctxt->src.orig_val;
3209 ctxt->lock_prefix = 1;
3210 return X86EMUL_CONTINUE;
3211 }
3212
3213 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3214 {
3215 ctxt->dst.val = ctxt->src2.val;
3216 return fastop(ctxt, em_imul);
3217 }
3218
3219 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3220 {
3221 ctxt->dst.type = OP_REG;
3222 ctxt->dst.bytes = ctxt->src.bytes;
3223 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
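	/* Replicate the sign bit of the source into every bit of rDX. */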
3224 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3225
3226 return X86EMUL_CONTINUE;
3227 }
3228
3229 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3230 {
3231 u64 tsc_aux = 0;
3232
3233 if (!ctxt->ops->guest_has_rdpid(ctxt))
3234 return emulate_ud(ctxt);
3235
3236 ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
3237 ctxt->dst.val = tsc_aux;
3238 return X86EMUL_CONTINUE;
3239 }
3240
3241 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3242 {
3243 u64 tsc = 0;
3244
3245 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3246 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3247 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3248 return X86EMUL_CONTINUE;
3249 }
3250
3251 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3252 {
3253 u64 pmc;
3254
3255 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3256 return emulate_gp(ctxt, 0);
3257 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3258 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3259 return X86EMUL_CONTINUE;
3260 }
3261
3262 static int em_mov(struct x86_emulate_ctxt *ctxt)
3263 {
3264 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3265 return X86EMUL_CONTINUE;
3266 }
3267
3268 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3269 {
3270 u16 tmp;
3271
3272 if (!ctxt->ops->guest_has_movbe(ctxt))
3273 return emulate_ud(ctxt);
3274
3275 switch (ctxt->op_bytes) {
3276 case 2:
3277 /*
3278 * From MOVBE definition: "...When the operand size is 16 bits,
3279 * the upper word of the destination register remains unchanged
3280 * ..."
3281 *
3282 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3283 * rules, so we have to do the operation almost by hand.
3284 */
3285 tmp = (u16)ctxt->src.val;
3286 ctxt->dst.val &= ~0xffffUL;
3287 ctxt->dst.val |= (unsigned long)swab16(tmp);
3288 break;
3289 case 4:
3290 ctxt->dst.val = swab32((u32)ctxt->src.val);
3291 break;
3292 case 8:
3293 ctxt->dst.val = swab64(ctxt->src.val);
3294 break;
3295 default:
3296 BUG();
3297 }
3298 return X86EMUL_CONTINUE;
3299 }
3300
3301 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3302 {
3303 int cr_num = ctxt->modrm_reg;
3304 int r;
3305
3306 if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
3307 return emulate_gp(ctxt, 0);
3308
3309 /* Disable writeback. */
3310 ctxt->dst.type = OP_NONE;
3311
3312 if (cr_num == 0) {
3313 /*
3314 * CR0 write might have updated CR0.PE and/or CR0.PG
3315 * which can affect the cpu's execution mode.
3316 */
3317 r = emulator_recalc_and_set_mode(ctxt);
3318 if (r != X86EMUL_CONTINUE)
3319 return r;
3320 }
3321
3322 return X86EMUL_CONTINUE;
3323 }
3324
3325 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3326 {
3327 unsigned long val;
3328
3329 if (ctxt->mode == X86EMUL_MODE_PROT64)
3330 val = ctxt->src.val & ~0ULL;
3331 else
3332 val = ctxt->src.val & ~0U;
3333
3334 /* #UD condition is already handled. */
3335 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3336 return emulate_gp(ctxt, 0);
3337
3338 /* Disable writeback. */
3339 ctxt->dst.type = OP_NONE;
3340 return X86EMUL_CONTINUE;
3341 }
3342
3343 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3344 {
3345 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3346 u64 msr_data;
3347 int r;
3348
3349 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3350 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3351 r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
3352
3353 if (r == X86EMUL_PROPAGATE_FAULT)
3354 return emulate_gp(ctxt, 0);
3355
3356 return r;
3357 }
3358
3359 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3360 {
3361 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3362 u64 msr_data;
3363 int r;
3364
3365 r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
3366
3367 if (r == X86EMUL_PROPAGATE_FAULT)
3368 return emulate_gp(ctxt, 0);
3369
3370 if (r == X86EMUL_CONTINUE) {
3371 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3372 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3373 }
3374 return r;
3375 }
3376
3377 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3378 {
3379 if (segment > VCPU_SREG_GS &&
3380 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3381 ctxt->ops->cpl(ctxt) > 0)
3382 return emulate_gp(ctxt, 0);
3383
3384 ctxt->dst.val = get_segment_selector(ctxt, segment);
3385 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3386 ctxt->dst.bytes = 2;
3387 return X86EMUL_CONTINUE;
3388 }
3389
3390 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3391 {
3392 if (ctxt->modrm_reg > VCPU_SREG_GS)
3393 return emulate_ud(ctxt);
3394
3395 return em_store_sreg(ctxt, ctxt->modrm_reg);
3396 }
3397
3398 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3399 {
3400 u16 sel = ctxt->src.val;
3401
3402 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3403 return emulate_ud(ctxt);
3404
3405 if (ctxt->modrm_reg == VCPU_SREG_SS)
3406 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3407
3408 /* Disable writeback. */
3409 ctxt->dst.type = OP_NONE;
3410 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3411 }
3412
3413 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3414 {
3415 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3416 }
3417
3418 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3419 {
3420 u16 sel = ctxt->src.val;
3421
3422 /* Disable writeback. */
3423 ctxt->dst.type = OP_NONE;
3424 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3425 }
3426
3427 static int em_str(struct x86_emulate_ctxt *ctxt)
3428 {
3429 return em_store_sreg(ctxt, VCPU_SREG_TR);
3430 }
3431
3432 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3433 {
3434 u16 sel = ctxt->src.val;
3435
3436 /* Disable writeback. */
3437 ctxt->dst.type = OP_NONE;
3438 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3439 }
3440
3441 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3442 {
3443 int rc;
3444 ulong linear;
3445 unsigned int max_size;
3446
3447 rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
3448 &linear, X86EMUL_F_INVLPG);
3449 if (rc == X86EMUL_CONTINUE)
3450 ctxt->ops->invlpg(ctxt, linear);
3451 /* Disable writeback. */
3452 ctxt->dst.type = OP_NONE;
3453 return X86EMUL_CONTINUE;
3454 }
3455
3456 static int em_clts(struct x86_emulate_ctxt *ctxt)
3457 {
3458 ulong cr0;
3459
3460 cr0 = ctxt->ops->get_cr(ctxt, 0);
3461 cr0 &= ~X86_CR0_TS;
3462 ctxt->ops->set_cr(ctxt, 0, cr0);
3463 return X86EMUL_CONTINUE;
3464 }
3465
3466 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3467 {
3468 int rc = ctxt->ops->fix_hypercall(ctxt);
3469
3470 if (rc != X86EMUL_CONTINUE)
3471 return rc;
3472
3473 /* Let the processor re-execute the fixed hypercall */
3474 ctxt->_eip = ctxt->eip;
3475 /* Disable writeback. */
3476 ctxt->dst.type = OP_NONE;
3477 return X86EMUL_CONTINUE;
3478 }
3479
3480 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3481 void (*get)(struct x86_emulate_ctxt *ctxt,
3482 struct desc_ptr *ptr))
3483 {
3484 struct desc_ptr desc_ptr;
3485
3486 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3487 ctxt->ops->cpl(ctxt) > 0)
3488 return emulate_gp(ctxt, 0);
3489
3490 if (ctxt->mode == X86EMUL_MODE_PROT64)
3491 ctxt->op_bytes = 8;
3492 get(ctxt, &desc_ptr);
3493 if (ctxt->op_bytes == 2) {
3494 ctxt->op_bytes = 4;
3495 desc_ptr.address &= 0x00ffffff;
3496 }
3497 /* Disable writeback. */
3498 ctxt->dst.type = OP_NONE;
3499 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3500 &desc_ptr, 2 + ctxt->op_bytes);
3501 }
3502
3503 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3504 {
3505 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3506 }
3507
3508 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3509 {
3510 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3511 }
3512
3513 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3514 {
3515 struct desc_ptr desc_ptr;
3516 int rc;
3517
3518 if (ctxt->mode == X86EMUL_MODE_PROT64)
3519 ctxt->op_bytes = 8;
3520 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3521 &desc_ptr.size, &desc_ptr.address,
3522 ctxt->op_bytes);
3523 if (rc != X86EMUL_CONTINUE)
3524 return rc;
3525 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3526 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3527 return emulate_gp(ctxt, 0);
3528 if (lgdt)
3529 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3530 else
3531 ctxt->ops->set_idt(ctxt, &desc_ptr);
3532 /* Disable writeback. */
3533 ctxt->dst.type = OP_NONE;
3534 return X86EMUL_CONTINUE;
3535 }
3536
3537 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3538 {
3539 return em_lgdt_lidt(ctxt, true);
3540 }
3541
3542 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3543 {
3544 return em_lgdt_lidt(ctxt, false);
3545 }
3546
3547 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3548 {
3549 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3550 ctxt->ops->cpl(ctxt) > 0)
3551 return emulate_gp(ctxt, 0);
3552
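	/* SMSW to a memory operand always stores 16 bits, regardless of operand size. */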
3553 if (ctxt->dst.type == OP_MEM)
3554 ctxt->dst.bytes = 2;
3555 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3556 return X86EMUL_CONTINUE;
3557 }
3558
3559 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3560 {
3561 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3562 | (ctxt->src.val & 0x0f));
3563 ctxt->dst.type = OP_NONE;
3564 return X86EMUL_CONTINUE;
3565 }
3566
3567 static int em_loop(struct x86_emulate_ctxt *ctxt)
3568 {
3569 int rc = X86EMUL_CONTINUE;
3570
3571 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
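	/*
	 * Opcodes 0xe0/0xe1/0xe2 are LOOPNE/LOOPE/LOOP; XORing the opcode
	 * with 5 maps LOOPNE/LOOPE onto condition codes 5 (NE) and 4 (E)
	 * for test_cc(), while plain LOOP ignores the flags.
	 */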
3572 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3573 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3574 rc = jmp_rel(ctxt, ctxt->src.val);
3575
3576 return rc;
3577 }
3578
3579 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3580 {
3581 int rc = X86EMUL_CONTINUE;
3582
3583 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3584 rc = jmp_rel(ctxt, ctxt->src.val);
3585
3586 return rc;
3587 }
3588
3589 static int em_in(struct x86_emulate_ctxt *ctxt)
3590 {
3591 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3592 &ctxt->dst.val))
3593 return X86EMUL_IO_NEEDED;
3594
3595 return X86EMUL_CONTINUE;
3596 }
3597
3598 static int em_out(struct x86_emulate_ctxt *ctxt)
3599 {
3600 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3601 &ctxt->src.val, 1);
3602 /* Disable writeback. */
3603 ctxt->dst.type = OP_NONE;
3604 return X86EMUL_CONTINUE;
3605 }
3606
3607 static int em_cli(struct x86_emulate_ctxt *ctxt)
3608 {
3609 if (emulator_bad_iopl(ctxt))
3610 return emulate_gp(ctxt, 0);
3611
3612 ctxt->eflags &= ~X86_EFLAGS_IF;
3613 return X86EMUL_CONTINUE;
3614 }
3615
3616 static int em_sti(struct x86_emulate_ctxt *ctxt)
3617 {
3618 if (emulator_bad_iopl(ctxt))
3619 return emulate_gp(ctxt, 0);
3620
3621 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3622 ctxt->eflags |= X86_EFLAGS_IF;
3623 return X86EMUL_CONTINUE;
3624 }
3625
3626 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3627 {
3628 u32 eax, ebx, ecx, edx;
3629 u64 msr = 0;
3630
3631 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3632 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3633 ctxt->ops->cpl(ctxt)) {
3634 return emulate_gp(ctxt, 0);
3635 }
3636
3637 eax = reg_read(ctxt, VCPU_REGS_RAX);
3638 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3639 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3640 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3641 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3642 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3643 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3644 return X86EMUL_CONTINUE;
3645 }
3646
em_sahf(struct x86_emulate_ctxt * ctxt)3647 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3648 {
3649 u32 flags;
3650
3651 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3652 X86_EFLAGS_SF;
3653 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3654
3655 ctxt->eflags &= ~0xffUL;
3656 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3657 return X86EMUL_CONTINUE;
3658 }
3659
em_lahf(struct x86_emulate_ctxt * ctxt)3660 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3661 {
3662 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3663 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3664 return X86EMUL_CONTINUE;
3665 }
3666
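/*
 * BSWAP with a 16-bit operand is undefined on real hardware, so every
 * operand size other than 8 bytes is handled as a 32-bit swap.
 */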
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflushopt regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->ops->guest_has_fxsr(ctxt))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       save and restore
 *  3) 64-bit mode without REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode with REX.W prefix (FXSAVE64/FXRSTOR64)
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	kvm_fpu_get();

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	kvm_fpu_put();

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}

/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	kvm_fpu_get();

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	kvm_fpu_put();

	return rc;
}

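/*
 * XSETBV #UDs unless CR4.OSXSAVE is set; ECX selects the XCR and
 * EDX:EAX supplies the new value.  The remaining permission and
 * validity checks are assumed to live behind the ->set_xcr() hook,
 * whose failure is reported as #GP.
 */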
static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ecx, edx;

	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
		return emulate_ud(ctxt);

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	edx = reg_read(ctxt, VCPU_REGS_RDX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);

	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_access(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

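/*
 * With DR7.GD set, any debug-register access raises #DB with DR6.BD so
 * a debugger can regain control.  DR4/DR5 alias DR6/DR7 only while
 * CR4.DE is clear; otherwise they #UD.
 */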
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) {
		ulong dr6;

		dr6 = ctxt->ops->get_dr(ctxt, 6);
		dr6 &= ~DR_TRAP_BITS;
		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
	/*
	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
	 * in Ring3 when CR4.PCE=0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

	/*
	 * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0.  The CR0.PE
	 * check however is unnecessary because CPL is always 0 outside
	 * protected mode.
	 */
	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_rdpmc_early(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
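/*
 * For example, F6ALU(Lock, em_add) should expand to the six entries for
 * the classic ALU encodings 0x00-0x05: ADD r/m8,r8 and ADD r/m,r
 * (lockable), ADD r8,r/m8 and ADD r,r/m, then ADD AL,imm8 and
 * ADD eAX,imm; Lock is stripped from the forms that cannot take a LOCK
 * prefix.
 */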

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm2[] = {
	N,
	II(ImplicitOps | Priv, em_xsetbv, xsetbv),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
};

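/* In group2, /6 is the undocumented SAL alias of SHL, hence em_shl twice. */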
static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
	I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
	I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
};

static const struct opcode group6[] = {
	II(Prot | DstMem, em_sldt, sldt),
	II(Prot | DstMem, em_str, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	EXT(0, group7_rm2),
	EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N,
	GP(0, &pfx_0f_c7_7),
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
};

static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_10_0f_11 = {
	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

static const struct instr_dual instr_dual_8d = {
	D(DstReg | SrcMem | ModRM | NoAccess), N
};

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch | IsBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	ID(0, &instr_dual_8d),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
	I(ImplicitOps | NearBranch | IsBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
	I(Stack | IsBranch, em_leave),
	I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
	I(ImplicitOps | IsBranch, em_ret_far),
	D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
	D(ImplicitOps | No64 | IsBranch),
	II(ImplicitOps | IsBranch, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
	I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch | IsBranch, em_call),
	D(SrcImm | ImplicitOps | NearBranch | IsBranch),
	I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
	    check_cr_access),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
	    check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch | IsBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N
};

/*
 * The instructions below are indexed by the third opcode byte; the
 * mandatory prefix then selects among the gprefix variants.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F2bv
#undef F6ALU

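/*
 * Immediates are at most 32 bits wide: a 64-bit operand size still
 * fetches a 4-byte immediate, which is sign-extended where needed.
 */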
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

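/*
 * Decode a single operand, selected by one of the 5-bit Op* fields
 * packed into ctxt->d.  Memory operands funnel through mem_common so
 * that ctxt->memopp is recorded for the RIP-relative fixup at the end
 * of x86_decode_insn().
 */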
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

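/*
 * Decode one instruction into ctxt: consume legacy and REX prefixes,
 * look the opcode up in the one-, two- or three-byte tables, resolve
 * group/dual/prefix/escape indirections, then decode up to two source
 * operands and one destination.
 */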
4755
x86_decode_insn(struct x86_emulate_ctxt * ctxt,void * insn,int insn_len,int emulation_type)4756 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
4757 {
4758 int rc = X86EMUL_CONTINUE;
4759 int mode = ctxt->mode;
4760 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4761 bool op_prefix = false;
4762 bool has_seg_override = false;
4763 struct opcode opcode;
4764 u16 dummy;
4765 struct desc_struct desc;
4766
4767 ctxt->memop.type = OP_NONE;
4768 ctxt->memopp = NULL;
4769 ctxt->_eip = ctxt->eip;
4770 ctxt->fetch.ptr = ctxt->fetch.data;
4771 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4772 ctxt->opcode_len = 1;
4773 ctxt->intercept = x86_intercept_none;
4774 if (insn_len > 0)
4775 memcpy(ctxt->fetch.data, insn, insn_len);
4776 else {
4777 rc = __do_insn_fetch_bytes(ctxt, 1);
4778 if (rc != X86EMUL_CONTINUE)
4779 goto done;
4780 }
4781
4782 switch (mode) {
4783 case X86EMUL_MODE_REAL:
4784 case X86EMUL_MODE_VM86:
4785 def_op_bytes = def_ad_bytes = 2;
4786 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4787 if (desc.d)
4788 def_op_bytes = def_ad_bytes = 4;
4789 break;
4790 case X86EMUL_MODE_PROT16:
4791 def_op_bytes = def_ad_bytes = 2;
4792 break;
4793 case X86EMUL_MODE_PROT32:
4794 def_op_bytes = def_ad_bytes = 4;
4795 break;
4796 #ifdef CONFIG_X86_64
4797 case X86EMUL_MODE_PROT64:
4798 def_op_bytes = 4;
4799 def_ad_bytes = 8;
4800 break;
4801 #endif
4802 default:
4803 return EMULATION_FAILED;
4804 }
4805
4806 ctxt->op_bytes = def_op_bytes;
4807 ctxt->ad_bytes = def_ad_bytes;
4808
4809 /* Legacy prefixes. */
4810 for (;;) {
4811 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4812 case 0x66: /* operand-size override */
4813 op_prefix = true;
4814 /* switch between 2/4 bytes */
4815 ctxt->op_bytes = def_op_bytes ^ 6;
4816 break;
4817 case 0x67: /* address-size override */
4818 if (mode == X86EMUL_MODE_PROT64)
4819 /* switch between 4/8 bytes */
4820 ctxt->ad_bytes = def_ad_bytes ^ 12;
4821 else
4822 /* switch between 2/4 bytes */
4823 ctxt->ad_bytes = def_ad_bytes ^ 6;
4824 break;
4825 case 0x26: /* ES override */
4826 has_seg_override = true;
4827 ctxt->seg_override = VCPU_SREG_ES;
4828 break;
4829 case 0x2e: /* CS override */
4830 has_seg_override = true;
4831 ctxt->seg_override = VCPU_SREG_CS;
4832 break;
4833 case 0x36: /* SS override */
4834 has_seg_override = true;
4835 ctxt->seg_override = VCPU_SREG_SS;
4836 break;
4837 case 0x3e: /* DS override */
4838 has_seg_override = true;
4839 ctxt->seg_override = VCPU_SREG_DS;
4840 break;
4841 case 0x64: /* FS override */
4842 has_seg_override = true;
4843 ctxt->seg_override = VCPU_SREG_FS;
4844 break;
4845 case 0x65: /* GS override */
4846 has_seg_override = true;
4847 ctxt->seg_override = VCPU_SREG_GS;
4848 break;
4849 case 0x40 ... 0x4f: /* REX */
4850 if (mode != X86EMUL_MODE_PROT64)
4851 goto done_prefixes;
4852 ctxt->rex_prefix = ctxt->b;
4853 continue;
4854 case 0xf0: /* LOCK */
4855 ctxt->lock_prefix = 1;
4856 break;
4857 case 0xf2: /* REPNE/REPNZ */
4858 case 0xf3: /* REP/REPE/REPZ */
4859 ctxt->rep_prefix = ctxt->b;
4860 break;
4861 default:
4862 goto done_prefixes;
4863 }
4864
4865 /* Any legacy prefix after a REX prefix nullifies its effect. */
4866
4867 ctxt->rex_prefix = 0;
4868 }
4869
4870 done_prefixes:
4871
4872 /* REX prefix. */
4873 if (ctxt->rex_prefix & 8)
4874 ctxt->op_bytes = 8; /* REX.W */
4875
4876 /* Opcode byte(s). */
4877 opcode = opcode_table[ctxt->b];
4878 /* Two-byte opcode? */
4879 if (ctxt->b == 0x0f) {
4880 ctxt->opcode_len = 2;
4881 ctxt->b = insn_fetch(u8, ctxt);
4882 opcode = twobyte_table[ctxt->b];
4883
4884 /* 0F_38 opcode map */
4885 if (ctxt->b == 0x38) {
4886 ctxt->opcode_len = 3;
4887 ctxt->b = insn_fetch(u8, ctxt);
4888 opcode = opcode_map_0f_38[ctxt->b];
4889 }
4890 }
4891 ctxt->d = opcode.flags;
4892
4893 if (ctxt->d & ModRM)
4894 ctxt->modrm = insn_fetch(u8, ctxt);
4895
4896 /* vex-prefix instructions are not implemented */
4897 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4898 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4899 ctxt->d = NotImpl;
4900 }
4901
4902 while (ctxt->d & GroupMask) {
4903 switch (ctxt->d & GroupMask) {
4904 case Group:
4905 goffset = (ctxt->modrm >> 3) & 7;
4906 opcode = opcode.u.group[goffset];
4907 break;
4908 case GroupDual:
4909 goffset = (ctxt->modrm >> 3) & 7;
4910 if ((ctxt->modrm >> 6) == 3)
4911 opcode = opcode.u.gdual->mod3[goffset];
4912 else
4913 opcode = opcode.u.gdual->mod012[goffset];
4914 break;
4915 case RMExt:
4916 goffset = ctxt->modrm & 7;
4917 opcode = opcode.u.group[goffset];
4918 break;
4919 case Prefix:
4920 if (ctxt->rep_prefix && op_prefix)
4921 return EMULATION_FAILED;
4922 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4923 switch (simd_prefix) {
4924 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4925 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4926 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4927 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4928 }
4929 break;
4930 case Escape:
4931 if (ctxt->modrm > 0xbf) {
4932 size_t size = ARRAY_SIZE(opcode.u.esc->high);
4933 u32 index = array_index_nospec(
4934 ctxt->modrm - 0xc0, size);
4935
4936 opcode = opcode.u.esc->high[index];
4937 } else {
4938 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4939 }
4940 break;
4941 case InstrDual:
4942 if ((ctxt->modrm >> 6) == 3)
4943 opcode = opcode.u.idual->mod3;
4944 else
4945 opcode = opcode.u.idual->mod012;
4946 break;
4947 case ModeDual:
4948 if (ctxt->mode == X86EMUL_MODE_PROT64)
4949 opcode = opcode.u.mdual->mode64;
4950 else
4951 opcode = opcode.u.mdual->mode32;
4952 break;
4953 default:
4954 return EMULATION_FAILED;
4955 }
4956
4957 ctxt->d &= ~(u64)GroupMask;
4958 ctxt->d |= opcode.flags;
4959 }
4960
4961 ctxt->is_branch = opcode.flags & IsBranch;
4962
4963 /* Unrecognised? */
4964 if (ctxt->d == 0)
4965 return EMULATION_FAILED;
4966
4967 ctxt->execute = opcode.u.execute;
4968
4969 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
4970 likely(!(ctxt->d & EmulateOnUD)))
4971 return EMULATION_FAILED;
4972
4973 if (unlikely(ctxt->d &
4974 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4975 No16))) {
4976 /*
4977 * These are copied unconditionally here, and checked unconditionally
4978 * in x86_emulate_insn.
4979 */
4980 ctxt->check_perm = opcode.check_perm;
4981 ctxt->intercept = opcode.intercept;
4982
4983 if (ctxt->d & NotImpl)
4984 return EMULATION_FAILED;
4985
4986 if (mode == X86EMUL_MODE_PROT64) {
4987 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4988 ctxt->op_bytes = 8;
4989 else if (ctxt->d & NearBranch)
4990 ctxt->op_bytes = 8;
4991 }
4992
4993 if (ctxt->d & Op3264) {
4994 if (mode == X86EMUL_MODE_PROT64)
4995 ctxt->op_bytes = 8;
4996 else
4997 ctxt->op_bytes = 4;
4998 }
4999
5000 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5001 ctxt->op_bytes = 4;
5002
5003 if (ctxt->d & Sse)
5004 ctxt->op_bytes = 16;
5005 else if (ctxt->d & Mmx)
5006 ctxt->op_bytes = 8;
5007 }
5008
5009 /* ModRM and SIB bytes. */
5010 if (ctxt->d & ModRM) {
5011 rc = decode_modrm(ctxt, &ctxt->memop);
5012 if (!has_seg_override) {
5013 has_seg_override = true;
5014 ctxt->seg_override = ctxt->modrm_seg;
5015 }
5016 } else if (ctxt->d & MemAbs)
5017 rc = decode_abs(ctxt, &ctxt->memop);
5018 if (rc != X86EMUL_CONTINUE)
5019 goto done;
5020
5021 if (!has_seg_override)
5022 ctxt->seg_override = VCPU_SREG_DS;
5023
5024 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5025
5026 /*
5027 * Decode and fetch the source operand: register, memory
5028 * or immediate.
5029 */
5030 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5031 if (rc != X86EMUL_CONTINUE)
5032 goto done;
5033
5034 /*
5035 * Decode and fetch the second source operand: register, memory
5036 * or immediate.
5037 */
5038 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5039 if (rc != X86EMUL_CONTINUE)
5040 goto done;
5041
5042 /* Decode and fetch the destination operand: register or memory. */
5043 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5044
5045 if (ctxt->rip_relative && likely(ctxt->memopp))
5046 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5047 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5048
5049 done:
5050 if (rc == X86EMUL_PROPAGATE_FAULT)
5051 ctxt->have_exception = true;
5052 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5053 }
5054
x86_page_table_writing_insn(struct x86_emulate_ctxt * ctxt)5055 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5056 {
5057 return ctxt->d & PageTable;
5058 }
5059
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE and
	 * REPNE: if the repeat prefix is REPE/REPZ or REPNE/REPNZ, test
	 * the corresponding condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	kvm_fpu_get();
	rc = asm_safe("fwait");
	kvm_fpu_put();

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct operand *op)
{
	if (op->type == OP_MM)
		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
}

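/*
 * Dispatch to a fastop trampoline: load the guest's arithmetic flags,
 * index fop by operand size (the size variants sit FASTOP_SIZE bytes
 * apart), make a retpoline-safe indirect call and capture the resulting
 * flags.  A NULL fop on return signals a #DE raised in the trampoline.
 */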
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	/* Clear fields that are set conditionally but read without a guard. */
	ctxt->rip_relative = false;
	ctxt->rex_prefix = 0;
	ctxt->lock_prefix = 0;
	ctxt->rep_prefix = 0;
	ctxt->regs_valid = 0;
	ctxt->regs_dirty = 0;

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt);

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(&ctxt->src);
			fetch_possible_mmx_operand(&ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(&ctxt->dst);
		}

		if (unlikely(is_guest_mode) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop)
			rc = fastop(ctxt, ctxt->fop);
		else
			rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		ctxt->eip = (u32)ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
			return EMULATION_FAILED;
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}
