/*	$NetBSD: libnvmm_x86.c,v 1.43 2020/12/27 20:56:14 reinoud Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <inttypes.h> /* needed for the PRIx64 printf() formats below */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <machine/vmparam.h>
#include <machine/pte.h>
#include <machine/psl.h>

#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
#define __cacheline_aligned __attribute__((__aligned__(64)))

#include "nvmm.h" /* NVMM API types (struct nvmm_machine, struct nvmm_vcpu, ...) */
#include <x86/specialreg.h>

/* -------------------------------------------------------------------------- */

/*
 * Undocumented debugging function. Helpful.
 */
int
nvmm_vcpu_dump(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
        struct nvmm_x64_state *state = vcpu->state;
        uint16_t *attr;
        size_t i;
        int ret;

        const char *segnames[] = {
                "ES", "CS", "SS", "DS", "FS", "GS", "GDT", "IDT", "LDT", "TR"
        };

        ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_ALL);
        if (ret == -1)
                return -1;

        printf("+ VCPU id=%d\n", (int)vcpu->cpuid);
        printf("| -> RAX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RAX]);
        printf("| -> RCX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RCX]);
        printf("| -> RDX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RDX]);
        printf("| -> RBX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RBX]);
        printf("| -> RSP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RSP]);
        printf("| -> RBP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RBP]);
        printf("| -> RSI=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RSI]);
        printf("| -> RDI=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RDI]);
        printf("| -> RIP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RIP]);
        printf("| -> RFLAGS=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RFLAGS]);
        for (i = 0; i < NVMM_X64_NSEG; i++) {
                attr = (uint16_t *)&state->segs[i].attrib;
                printf("| -> %s: sel=0x%x base=%"PRIx64", limit=%x, "
                    "attrib=%x [type=%d,l=%d,def=%d]\n",
                    segnames[i],
                    state->segs[i].selector,
                    state->segs[i].base,
                    state->segs[i].limit,
                    *attr,
                    state->segs[i].attrib.type,
                    state->segs[i].attrib.l,
                    state->segs[i].attrib.def);
        }
        printf("| -> MSR_EFER=%"PRIx64"\n", state->msrs[NVMM_X64_MSR_EFER]);
        printf("| -> CR0=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR0]);
        printf("| -> CR3=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR3]);
        printf("| -> CR4=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR4]);
        printf("| -> CR8=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR8]);

        return 0;
}

/* -------------------------------------------------------------------------- */

#define PTE32_L1_SHIFT 12
#define PTE32_L2_SHIFT 22

#define PTE32_L2_MASK 0xffc00000
#define PTE32_L1_MASK 0x003ff000

#define PTE32_L2_FRAME (PTE32_L2_MASK)
#define PTE32_L1_FRAME (PTE32_L2_FRAME|PTE32_L1_MASK)

#define pte32_l1idx(va) (((va) & PTE32_L1_MASK) >> PTE32_L1_SHIFT)
#define pte32_l2idx(va) (((va) & PTE32_L2_MASK) >> PTE32_L2_SHIFT)

#define CR3_FRAME_32BIT __BITS(31, 12)

typedef uint32_t pte_32bit_t;
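
/*
 * For illustration: with 4KB pages, a GVA such as 0xDEADBEEF splits into
 * pte32_l2idx(0xDEADBEEF) == 0x37A (bits 31:22, PDE index),
 * pte32_l1idx(0xDEADBEEF) == 0x2DB (bits 21:12, PTE index), and a page
 * offset of 0xEEF (bits 11:0), which the caller re-adds after the walk.
 */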

static int
x86_gva_to_gpa_32bit(struct nvmm_machine *mach, uint64_t cr3,
    gvaddr_t gva, gpaddr_t *gpa, bool has_pse, nvmm_prot_t *prot)
{
        gpaddr_t L2gpa, L1gpa;
        uintptr_t L2hva, L1hva;
        pte_32bit_t *pdir, pte;
        nvmm_prot_t pageprot;

        /* We begin with an RWXU access. */
        *prot = NVMM_PROT_ALL;

        /* Parse L2. */
        L2gpa = (cr3 & CR3_FRAME_32BIT);
        if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1)
                return -1;
        pdir = (pte_32bit_t *)L2hva;
        pte = pdir[pte32_l2idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if ((pte & PTE_PS) && !has_pse)
                return -1;
        if (pte & PTE_PS) {
                *gpa = (pte & PTE32_L2_FRAME);
                *gpa = *gpa + (gva & PTE32_L1_MASK);
                return 0;
        }

        /* Parse L1. */
        L1gpa = (pte & PTE_FRAME);
        if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1)
                return -1;
        pdir = (pte_32bit_t *)L1hva;
        pte = pdir[pte32_l1idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if (pte & PTE_PS)
                return -1;

        *gpa = (pte & PTE_FRAME);
        return 0;
}

/* -------------------------------------------------------------------------- */

#define PTE32_PAE_L1_SHIFT 12
#define PTE32_PAE_L2_SHIFT 21
#define PTE32_PAE_L3_SHIFT 30

#define PTE32_PAE_L3_MASK 0xc0000000
#define PTE32_PAE_L2_MASK 0x3fe00000
#define PTE32_PAE_L1_MASK 0x001ff000

#define PTE32_PAE_L3_FRAME (PTE32_PAE_L3_MASK)
#define PTE32_PAE_L2_FRAME (PTE32_PAE_L3_FRAME|PTE32_PAE_L2_MASK)
#define PTE32_PAE_L1_FRAME (PTE32_PAE_L2_FRAME|PTE32_PAE_L1_MASK)

#define pte32_pae_l1idx(va) (((va) & PTE32_PAE_L1_MASK) >> PTE32_PAE_L1_SHIFT)
#define pte32_pae_l2idx(va) (((va) & PTE32_PAE_L2_MASK) >> PTE32_PAE_L2_SHIFT)
#define pte32_pae_l3idx(va) (((va) & PTE32_PAE_L3_MASK) >> PTE32_PAE_L3_SHIFT)

#define CR3_FRAME_32BIT_PAE __BITS(31, 5)

typedef uint64_t pte_32bit_pae_t;

static int
x86_gva_to_gpa_32bit_pae(struct nvmm_machine *mach, uint64_t cr3,
    gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot)
{
        gpaddr_t L3gpa, L2gpa, L1gpa;
        uintptr_t L3hva, L2hva, L1hva;
        pte_32bit_pae_t *pdir, pte;
        nvmm_prot_t pageprot;

        /* We begin with an RWXU access. */
        *prot = NVMM_PROT_ALL;

        /* Parse L3. */
        L3gpa = (cr3 & CR3_FRAME_32BIT_PAE);
        if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva, &pageprot) == -1)
                return -1;
        pdir = (pte_32bit_pae_t *)L3hva;
        pte = pdir[pte32_pae_l3idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if (pte & PTE_NX)
                *prot &= ~NVMM_PROT_EXEC;
        if (pte & PTE_PS)
                return -1;

        /* Parse L2. */
        L2gpa = (pte & PTE_FRAME);
        if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1)
                return -1;
        pdir = (pte_32bit_pae_t *)L2hva;
        pte = pdir[pte32_pae_l2idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if (pte & PTE_NX)
                *prot &= ~NVMM_PROT_EXEC;
        if (pte & PTE_PS) {
                *gpa = (pte & PTE32_PAE_L2_FRAME);
                *gpa = *gpa + (gva & PTE32_PAE_L1_MASK);
                return 0;
        }

        /* Parse L1. */
        L1gpa = (pte & PTE_FRAME);
        if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1)
                return -1;
        pdir = (pte_32bit_pae_t *)L1hva;
        pte = pdir[pte32_pae_l1idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if (pte & PTE_NX)
                *prot &= ~NVMM_PROT_EXEC;
        if (pte & PTE_PS)
                return -1;

        *gpa = (pte & PTE_FRAME);
        return 0;
}

/* -------------------------------------------------------------------------- */

#define PTE64_L1_SHIFT 12
#define PTE64_L2_SHIFT 21
#define PTE64_L3_SHIFT 30
#define PTE64_L4_SHIFT 39

#define PTE64_L4_MASK 0x0000ff8000000000
#define PTE64_L3_MASK 0x0000007fc0000000
#define PTE64_L2_MASK 0x000000003fe00000
#define PTE64_L1_MASK 0x00000000001ff000

#define PTE64_L4_FRAME PTE64_L4_MASK
#define PTE64_L3_FRAME (PTE64_L4_FRAME|PTE64_L3_MASK)
#define PTE64_L2_FRAME (PTE64_L3_FRAME|PTE64_L2_MASK)
#define PTE64_L1_FRAME (PTE64_L2_FRAME|PTE64_L1_MASK)

#define pte64_l1idx(va) (((va) & PTE64_L1_MASK) >> PTE64_L1_SHIFT)
#define pte64_l2idx(va) (((va) & PTE64_L2_MASK) >> PTE64_L2_SHIFT)
#define pte64_l3idx(va) (((va) & PTE64_L3_MASK) >> PTE64_L3_SHIFT)
#define pte64_l4idx(va) (((va) & PTE64_L4_MASK) >> PTE64_L4_SHIFT)

#define CR3_FRAME_64BIT __BITS(51, 12)

typedef uint64_t pte_64bit_t;

static inline bool
x86_gva_64bit_canonical(gvaddr_t gva)
{
        /* Bits 63:47 must have the same value. */
#define SIGN_EXTEND 0xffff800000000000ULL
        return (gva & SIGN_EXTEND) == 0 || (gva & SIGN_EXTEND) == SIGN_EXTEND;
}
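
/*
 * For example, 0x00007FFFFFFFFFFF and 0xFFFF800000000000 are canonical,
 * while 0x0000800000000000 is not: its bit 47 is set but bits 63:48 are
 * clear.
 */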

static int
x86_gva_to_gpa_64bit(struct nvmm_machine *mach, uint64_t cr3,
    gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot)
{
        gpaddr_t L4gpa, L3gpa, L2gpa, L1gpa;
        uintptr_t L4hva, L3hva, L2hva, L1hva;
        pte_64bit_t *pdir, pte;
        nvmm_prot_t pageprot;

        /* We begin with an RWXU access. */
        *prot = NVMM_PROT_ALL;

        if (!x86_gva_64bit_canonical(gva))
                return -1;

        /* Parse L4. */
        L4gpa = (cr3 & CR3_FRAME_64BIT);
        if (nvmm_gpa_to_hva(mach, L4gpa, &L4hva, &pageprot) == -1)
                return -1;
        pdir = (pte_64bit_t *)L4hva;
        pte = pdir[pte64_l4idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if (pte & PTE_NX)
                *prot &= ~NVMM_PROT_EXEC;
        if (pte & PTE_PS)
                return -1;

        /* Parse L3. */
        L3gpa = (pte & PTE_FRAME);
        if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva, &pageprot) == -1)
                return -1;
        pdir = (pte_64bit_t *)L3hva;
        pte = pdir[pte64_l3idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if (pte & PTE_NX)
                *prot &= ~NVMM_PROT_EXEC;
        if (pte & PTE_PS) {
                *gpa = (pte & PTE64_L3_FRAME);
                *gpa = *gpa + (gva & (PTE64_L2_MASK|PTE64_L1_MASK));
                return 0;
        }

        /* Parse L2. */
        L2gpa = (pte & PTE_FRAME);
        if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1)
                return -1;
        pdir = (pte_64bit_t *)L2hva;
        pte = pdir[pte64_l2idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if (pte & PTE_NX)
                *prot &= ~NVMM_PROT_EXEC;
        if (pte & PTE_PS) {
                *gpa = (pte & PTE64_L2_FRAME);
                *gpa = *gpa + (gva & PTE64_L1_MASK);
                return 0;
        }

        /* Parse L1. */
        L1gpa = (pte & PTE_FRAME);
        if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1)
                return -1;
        pdir = (pte_64bit_t *)L1hva;
        pte = pdir[pte64_l1idx(gva)];
        if ((pte & PTE_P) == 0)
                return -1;
        if ((pte & PTE_U) == 0)
                *prot &= ~NVMM_PROT_USER;
        if ((pte & PTE_W) == 0)
                *prot &= ~NVMM_PROT_WRITE;
        if (pte & PTE_NX)
                *prot &= ~NVMM_PROT_EXEC;
        if (pte & PTE_PS)
                return -1;

        *gpa = (pte & PTE_FRAME);
        return 0;
}

static inline int
x86_gva_to_gpa(struct nvmm_machine *mach, struct nvmm_x64_state *state,
    gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot)
{
        bool is_pae, is_lng, has_pse;
        uint64_t cr3;
        size_t off;
        int ret;

        if ((state->crs[NVMM_X64_CR_CR0] & CR0_PG) == 0) {
                /* No paging. */
                *prot = NVMM_PROT_ALL;
                *gpa = gva;
                return 0;
        }

        off = (gva & PAGE_MASK);
        gva &= ~PAGE_MASK;

        is_pae = (state->crs[NVMM_X64_CR_CR4] & CR4_PAE) != 0;
        is_lng = (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) != 0;
        has_pse = (state->crs[NVMM_X64_CR_CR4] & CR4_PSE) != 0;
        cr3 = state->crs[NVMM_X64_CR_CR3];

        if (is_pae && is_lng) {
                /* 64bit */
                ret = x86_gva_to_gpa_64bit(mach, cr3, gva, gpa, prot);
        } else if (is_pae && !is_lng) {
                /* 32bit PAE */
                ret = x86_gva_to_gpa_32bit_pae(mach, cr3, gva, gpa, prot);
        } else if (!is_pae && !is_lng) {
                /* 32bit */
                ret = x86_gva_to_gpa_32bit(mach, cr3, gva, gpa, has_pse, prot);
        } else {
                ret = -1;
        }

        if (ret == -1) {
                errno = EFAULT;
        }

        *gpa = *gpa + off;

        return ret;
}

int
nvmm_gva_to_gpa(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot)
{
        struct nvmm_x64_state *state = vcpu->state;
        int ret;

        ret = nvmm_vcpu_getstate(mach, vcpu,
            NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
        if (ret == -1)
                return -1;

        return x86_gva_to_gpa(mach, state, gva, gpa, prot);
}
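
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	gpaddr_t gpa;
 *	nvmm_prot_t prot;
 *
 *	if (nvmm_gva_to_gpa(mach, vcpu, 0x401000, &gpa, &prot) == 0 &&
 *	    (prot & NVMM_PROT_EXEC))
 *		printf("executable guest page at gpa=%"PRIx64"\n", gpa);
 *
 * On failure, -1 is returned with errno set (EFAULT on a translation
 * fault).
 */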

/* -------------------------------------------------------------------------- */

#define DISASSEMBLER_BUG() \
        do { \
                errno = EINVAL; \
                return -1; \
        } while (0)

static inline bool
is_long_mode(struct nvmm_x64_state *state)
{
        return (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) != 0;
}

static inline bool
is_64bit(struct nvmm_x64_state *state)
{
        return (state->segs[NVMM_X64_SEG_CS].attrib.l != 0);
}

static inline bool
is_32bit(struct nvmm_x64_state *state)
{
        return (state->segs[NVMM_X64_SEG_CS].attrib.l == 0) &&
            (state->segs[NVMM_X64_SEG_CS].attrib.def == 1);
}

static inline bool
is_16bit(struct nvmm_x64_state *state)
{
        return (state->segs[NVMM_X64_SEG_CS].attrib.l == 0) &&
            (state->segs[NVMM_X64_SEG_CS].attrib.def == 0);
}

static int
segment_check(struct nvmm_x64_state_seg *seg, gvaddr_t gva, size_t size)
{
        uint64_t limit;

        /*
         * This is incomplete. We should check topdown, etc, really that's
         * tiring.
         */
        if (__predict_false(!seg->attrib.p)) {
                goto error;
        }

        limit = (uint64_t)seg->limit + 1;
        if (__predict_true(seg->attrib.g)) {
                limit *= PAGE_SIZE;
        }

        if (__predict_false(gva + size > limit)) {
                goto error;
        }

        return 0;

error:
        errno = EFAULT;
        return -1;
}

static inline void
segment_apply(struct nvmm_x64_state_seg *seg, gvaddr_t *gva)
{
        *gva += seg->base;
}

static inline uint64_t
size_to_mask(size_t size)
{
        switch (size) {
        case 1:
                return 0x00000000000000FF;
        case 2:
                return 0x000000000000FFFF;
        case 4:
                return 0x00000000FFFFFFFF;
        case 8:
        default:
                return 0xFFFFFFFFFFFFFFFF;
        }
}

static uint64_t
rep_get_cnt(struct nvmm_x64_state *state, size_t adsize)
{
        uint64_t mask, cnt;

        mask = size_to_mask(adsize);
        cnt = state->gprs[NVMM_X64_GPR_RCX] & mask;

        return cnt;
}

static void
rep_set_cnt(struct nvmm_x64_state *state, size_t adsize, uint64_t cnt)
{
        uint64_t mask;

        /* XXX: should we zero-extend? */
        mask = size_to_mask(adsize);
        state->gprs[NVMM_X64_GPR_RCX] &= ~mask;
        state->gprs[NVMM_X64_GPR_RCX] |= cnt;
}

static int
read_guest_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    gvaddr_t gva, uint8_t *data, size_t size)
{
        struct nvmm_x64_state *state = vcpu->state;
        struct nvmm_mem mem;
        nvmm_prot_t prot;
        gpaddr_t gpa;
        uintptr_t hva;
        bool is_mmio;
        int ret, remain;

        ret = x86_gva_to_gpa(mach, state, gva, &gpa, &prot);
        if (__predict_false(ret == -1)) {
                return -1;
        }
        if (__predict_false(!(prot & NVMM_PROT_READ))) {
                errno = EFAULT;
                return -1;
        }

        if ((gva & PAGE_MASK) + size > PAGE_SIZE) {
                remain = ((gva & PAGE_MASK) + size - PAGE_SIZE);
        } else {
                remain = 0;
        }
        size -= remain;

        ret = nvmm_gpa_to_hva(mach, gpa, &hva, &prot);
        is_mmio = (ret == -1);

        if (is_mmio) {
                mem.mach = mach;
                mem.vcpu = vcpu;
                mem.data = data;
                mem.gpa = gpa;
                mem.write = false;
                mem.size = size;
                (*vcpu->cbs.mem)(&mem);
        } else {
                if (__predict_false(!(prot & NVMM_PROT_READ))) {
                        errno = EFAULT;
                        return -1;
                }
                memcpy(data, (uint8_t *)hva, size);
        }

        if (remain > 0) {
                ret = read_guest_memory(mach, vcpu, gva + size,
                    data + size, remain);
        } else {
                ret = 0;
        }

        return ret;
}
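
/*
 * The remain/size split above handles page-crossing accesses: e.g. an
 * 8-byte read at gva=0x1FFC touches 4 bytes of the page at 0x1000 and
 * 4 bytes of the page at 0x2000, so the first pass copies 4 bytes and
 * the recursive call translates and copies the remaining 4.
 */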

static int
write_guest_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    gvaddr_t gva, uint8_t *data, size_t size)
{
        struct nvmm_x64_state *state = vcpu->state;
        struct nvmm_mem mem;
        nvmm_prot_t prot;
        gpaddr_t gpa;
        uintptr_t hva;
        bool is_mmio;
        int ret, remain;

        ret = x86_gva_to_gpa(mach, state, gva, &gpa, &prot);
        if (__predict_false(ret == -1)) {
                return -1;
        }
        if (__predict_false(!(prot & NVMM_PROT_WRITE))) {
                errno = EFAULT;
                return -1;
        }

        if ((gva & PAGE_MASK) + size > PAGE_SIZE) {
                remain = ((gva & PAGE_MASK) + size - PAGE_SIZE);
        } else {
                remain = 0;
        }
        size -= remain;

        ret = nvmm_gpa_to_hva(mach, gpa, &hva, &prot);
        is_mmio = (ret == -1);

        if (is_mmio) {
                mem.mach = mach;
                mem.vcpu = vcpu;
                mem.data = data;
                mem.gpa = gpa;
                mem.write = true;
                mem.size = size;
                (*vcpu->cbs.mem)(&mem);
        } else {
                if (__predict_false(!(prot & NVMM_PROT_WRITE))) {
                        errno = EFAULT;
                        return -1;
                }
                memcpy((uint8_t *)hva, data, size);
        }

        if (remain > 0) {
                ret = write_guest_memory(mach, vcpu, gva + size,
                    data + size, remain);
        } else {
                ret = 0;
        }

        return ret;
}

/* -------------------------------------------------------------------------- */

static int fetch_segment(struct nvmm_machine *, struct nvmm_vcpu *);

#define NVMM_IO_BATCH_SIZE 32

static int
assist_io_batch(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    struct nvmm_io *io, gvaddr_t gva, uint64_t cnt)
{
        uint8_t iobuf[NVMM_IO_BATCH_SIZE];
        size_t i, iosize, iocnt;
        int ret;

        cnt = MIN(cnt, NVMM_IO_BATCH_SIZE);
        iosize = MIN(io->size * cnt, NVMM_IO_BATCH_SIZE);
        iocnt = iosize / io->size;

        io->data = iobuf;

        if (!io->in) {
                ret = read_guest_memory(mach, vcpu, gva, iobuf, iosize);
                if (ret == -1)
                        return -1;
        }

        for (i = 0; i < iocnt; i++) {
                (*vcpu->cbs.io)(io);
                io->data += io->size;
        }

        if (io->in) {
                ret = write_guest_memory(mach, vcpu, gva, iobuf, iosize);
                if (ret == -1)
                        return -1;
        }

        return iocnt;
}
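
/*
 * For instance, a "REP OUTSB" with RCX=100 is serviced in batches of up
 * to NVMM_IO_BATCH_SIZE (32) one-byte transfers per call, using a single
 * guest-memory copy per batch. The return value is the number of
 * transfers actually emulated; the caller adjusts RCX and the string
 * pointer, and only advances RIP once the count reaches zero, so the
 * guest re-executes the instruction for the next batch.
 */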

int
nvmm_assist_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
        struct nvmm_x64_state *state = vcpu->state;
        struct nvmm_vcpu_exit *exit = vcpu->exit;
        struct nvmm_io io;
        uint64_t cnt = 0; /* GCC */
        uint8_t iobuf[8];
        int iocnt = 1;
        gvaddr_t gva = 0; /* GCC */
        int reg = 0; /* GCC */
        int ret, seg;
        bool psld = false;

        if (__predict_false(exit->reason != NVMM_VCPU_EXIT_IO)) {
                errno = EINVAL;
                return -1;
        }

        io.mach = mach;
        io.vcpu = vcpu;
        io.port = exit->u.io.port;
        io.in = exit->u.io.in;
        io.size = exit->u.io.operand_size;
        io.data = iobuf;

        ret = nvmm_vcpu_getstate(mach, vcpu,
            NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
            NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
        if (ret == -1)
                return -1;

        if (exit->u.io.rep) {
                cnt = rep_get_cnt(state, exit->u.io.address_size);
                if (__predict_false(cnt == 0)) {
                        state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
                        goto out;
                }
        }

        if (__predict_false(state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_D)) {
                psld = true;
        }

        /*
         * Determine GVA.
         */
        if (exit->u.io.str) {
                if (io.in) {
                        reg = NVMM_X64_GPR_RDI;
                } else {
                        reg = NVMM_X64_GPR_RSI;
                }

                gva = state->gprs[reg];
                gva &= size_to_mask(exit->u.io.address_size);

                if (exit->u.io.seg != -1) {
                        seg = exit->u.io.seg;
                } else {
                        if (io.in) {
                                seg = NVMM_X64_SEG_ES;
                        } else {
                                seg = fetch_segment(mach, vcpu);
                                if (seg == -1)
                                        return -1;
                        }
                }

                if (__predict_true(is_long_mode(state))) {
                        if (seg == NVMM_X64_SEG_GS || seg == NVMM_X64_SEG_FS) {
                                segment_apply(&state->segs[seg], &gva);
                        }
                } else {
                        ret = segment_check(&state->segs[seg], gva, io.size);
                        if (ret == -1)
                                return -1;
                        segment_apply(&state->segs[seg], &gva);
                }

                if (exit->u.io.rep && !psld) {
                        iocnt = assist_io_batch(mach, vcpu, &io, gva, cnt);
                        if (iocnt == -1)
                                return -1;
                        goto done;
                }
        }

        if (!io.in) {
                if (!exit->u.io.str) {
                        memcpy(io.data, &state->gprs[NVMM_X64_GPR_RAX], io.size);
                } else {
                        ret = read_guest_memory(mach, vcpu, gva, io.data,
                            io.size);
                        if (ret == -1)
                                return -1;
                }
        }

        (*vcpu->cbs.io)(&io);

        if (io.in) {
                if (!exit->u.io.str) {
                        memcpy(&state->gprs[NVMM_X64_GPR_RAX], io.data, io.size);
                        if (io.size == 4) {
                                /* Zero-extend to 64 bits. */
                                state->gprs[NVMM_X64_GPR_RAX] &= size_to_mask(4);
                        }
                } else {
                        ret = write_guest_memory(mach, vcpu, gva, io.data,
                            io.size);
                        if (ret == -1)
                                return -1;
                }
        }

done:
        if (exit->u.io.str) {
                if (__predict_false(psld)) {
                        state->gprs[reg] -= iocnt * io.size;
                } else {
                        state->gprs[reg] += iocnt * io.size;
                }
        }

        if (exit->u.io.rep) {
                cnt -= iocnt;
                rep_set_cnt(state, exit->u.io.address_size, cnt);
                if (cnt == 0) {
                        state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
                }
        } else {
                state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
        }

out:
        ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
        if (ret == -1)
                return -1;

        return 0;
}
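
/*
 * A typical VMM run loop delegates I/O exits here -- a minimal sketch,
 * assuming the usual nvmm_vcpu_run() setup (not part of this file):
 *
 *	while (nvmm_vcpu_run(mach, vcpu) == 0) {
 *		switch (vcpu->exit->reason) {
 *		case NVMM_VCPU_EXIT_IO:
 *			if (nvmm_assist_io(mach, vcpu) == -1)
 *				err(EXIT_FAILURE, "nvmm_assist_io");
 *			break;
 *		...
 *		}
 *	}
 *
 * The vcpu->cbs.io callback, registered beforehand, performs the actual
 * port access on behalf of the emulated device model.
 */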

/* -------------------------------------------------------------------------- */

struct x86_emul {
        bool readreg;	/* read the register operand before calling func */
        bool backprop;	/* propagate the result back into the register (XCHG) */
        bool notouch;	/* set flags only, never write the destination (CMP, TEST) */
        void (*func)(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
};

static void x86_func_or(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_and(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_xchg(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_sub(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_xor(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_cmp(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_test(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_mov(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_stos(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static void x86_func_lods(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);

static const struct x86_emul x86_emul_or = {
        .readreg = true,
        .func = x86_func_or
};

static const struct x86_emul x86_emul_and = {
        .readreg = true,
        .func = x86_func_and
};

static const struct x86_emul x86_emul_xchg = {
        .readreg = true,
        .backprop = true,
        .func = x86_func_xchg
};

static const struct x86_emul x86_emul_sub = {
        .readreg = true,
        .func = x86_func_sub
};

static const struct x86_emul x86_emul_xor = {
        .readreg = true,
        .func = x86_func_xor
};

static const struct x86_emul x86_emul_cmp = {
        .notouch = true,
        .func = x86_func_cmp
};

static const struct x86_emul x86_emul_test = {
        .notouch = true,
        .func = x86_func_test
};

static const struct x86_emul x86_emul_mov = {
        .func = x86_func_mov
};

static const struct x86_emul x86_emul_stos = {
        .func = x86_func_stos
};

static const struct x86_emul x86_emul_lods = {
        .func = x86_func_lods
};

/* Legacy prefixes. */
#define LEG_LOCK	0xF0
#define LEG_REPN	0xF2
#define LEG_REP		0xF3
#define LEG_OVR_CS	0x2E
#define LEG_OVR_SS	0x36
#define LEG_OVR_DS	0x3E
#define LEG_OVR_ES	0x26
#define LEG_OVR_FS	0x64
#define LEG_OVR_GS	0x65
#define LEG_OPR_OVR	0x66
#define LEG_ADR_OVR	0x67

struct x86_legpref {
        bool opr_ovr:1;
        bool adr_ovr:1;
        bool rep:1;
        bool repn:1;
        bool repe:1;
        int8_t seg;
};

struct x86_rexpref {
        bool b:1;
        bool x:1;
        bool r:1;
        bool w:1;
        bool present:1;
};

struct x86_reg {
        int num;	/* NVMM GPR state index */
        uint64_t mask;
};

struct x86_dualreg {
        int reg1;
        int reg2;
};

enum x86_disp_type {
        DISP_NONE,
        DISP_0,
        DISP_1,
        DISP_2,
        DISP_4
};

struct x86_disp {
        enum x86_disp_type type;
        uint64_t data;	/* 4 bytes, but can be sign-extended */
};

struct x86_regmodrm {
        uint8_t mod:2;
        uint8_t reg:3;
        uint8_t rm:3;
};

struct x86_immediate {
        uint64_t data;
};

struct x86_sib {
        uint8_t scale;
        const struct x86_reg *idx;
        const struct x86_reg *bas;
};

enum x86_store_type {
        STORE_NONE,
        STORE_REG,
        STORE_DUALREG,
        STORE_IMM,
        STORE_SIB,
        STORE_DMO
};

struct x86_store {
        enum x86_store_type type;
        union {
                const struct x86_reg *reg;
                struct x86_dualreg dualreg;
                struct x86_immediate imm;
                struct x86_sib sib;
                uint64_t dmo;
        } u;
        struct x86_disp disp;
        int hardseg;
};

struct x86_instr {
        uint8_t len;
        struct x86_legpref legpref;
        struct x86_rexpref rexpref;
        struct x86_regmodrm regmodrm;
        uint8_t operand_size;
        uint8_t address_size;
        uint64_t zeroextend_mask;

        const struct x86_opcode *opcode;
        const struct x86_emul *emul;

        struct x86_store src;
        struct x86_store dst;
        struct x86_store *strm;
};

struct x86_decode_fsm {
        /* vcpu */
        bool is64bit;
        bool is32bit;
        bool is16bit;

        /* fsm */
        int (*fn)(struct x86_decode_fsm *, struct x86_instr *);
        uint8_t *buf;
        uint8_t *end;
};

struct x86_opcode {
        bool valid:1;
        bool regmodrm:1;
        bool regtorm:1;
        bool dmo:1;
        bool todmo:1;
        bool movs:1;
        bool cmps:1;
        bool stos:1;
        bool lods:1;
        bool szoverride:1;
        bool group1:1;
        bool group3:1;
        bool group11:1;
        bool immediate:1;
        uint8_t defsize;
        uint8_t flags;
        const struct x86_emul *emul;
};

struct x86_group_entry {
        const struct x86_emul *emul;
};

#define OPSIZE_BYTE 0x01
#define OPSIZE_WORD 0x02 /* 2 bytes */
#define OPSIZE_DOUB 0x04 /* 4 bytes */
#define OPSIZE_QUAD 0x08 /* 8 bytes */

#define FLAG_imm8	0x01
#define FLAG_immz	0x02
#define FLAG_ze		0x04

static const struct x86_group_entry group1[8] __cacheline_aligned = {
        [1] = { .emul = &x86_emul_or },
        [4] = { .emul = &x86_emul_and },
        [6] = { .emul = &x86_emul_xor },
        [7] = { .emul = &x86_emul_cmp }
};

static const struct x86_group_entry group3[8] __cacheline_aligned = {
        [0] = { .emul = &x86_emul_test },
        [1] = { .emul = &x86_emul_test }
};

static const struct x86_group_entry group11[8] __cacheline_aligned = {
        [0] = { .emul = &x86_emul_mov }
};
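
/*
 * For group opcodes, the operation is selected by the reg field of the
 * ModRM byte: e.g. "0x81 /1" (reg=1) is OR Ev,Iz and dispatches to
 * group1[1] above, while "0x81 /7" is CMP Ev,Iz and dispatches to
 * group1[7]. The unpopulated slots are operations this disassembler does
 * not handle.
 */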

static const struct x86_opcode primary_opcode_table[256] __cacheline_aligned = {
        /*
         * Group1
         */
        [0x80] = {
                /* Eb, Ib */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .group1 = true,
                .immediate = true,
                .emul = NULL /* group1 */
        },
        [0x81] = {
                /* Ev, Iz */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .group1 = true,
                .immediate = true,
                .flags = FLAG_immz,
                .emul = NULL /* group1 */
        },
        [0x83] = {
                /* Ev, Ib */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .group1 = true,
                .immediate = true,
                .flags = FLAG_imm8,
                .emul = NULL /* group1 */
        },

        /*
         * Group3
         */
        [0xF6] = {
                /* Eb, Ib */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .group3 = true,
                .immediate = true,
                .emul = NULL /* group3 */
        },
        [0xF7] = {
                /* Ev, Iz */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .group3 = true,
                .immediate = true,
                .flags = FLAG_immz,
                .emul = NULL /* group3 */
        },

        /*
         * Group11
         */
        [0xC6] = {
                /* Eb, Ib */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .group11 = true,
                .immediate = true,
                .emul = NULL /* group11 */
        },
        [0xC7] = {
                /* Ev, Iz */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .group11 = true,
                .immediate = true,
                .flags = FLAG_immz,
                .emul = NULL /* group11 */
        },

        /*
         * OR
         */
        [0x08] = {
                /* Eb, Gb */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_or
        },
        [0x09] = {
                /* Ev, Gv */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_or
        },
        [0x0A] = {
                /* Gb, Eb */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_or
        },
        [0x0B] = {
                /* Gv, Ev */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_or
        },

        /*
         * AND
         */
        [0x20] = {
                /* Eb, Gb */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_and
        },
        [0x21] = {
                /* Ev, Gv */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_and
        },
        [0x22] = {
                /* Gb, Eb */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_and
        },
        [0x23] = {
                /* Gv, Ev */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_and
        },

        /*
         * SUB
         */
        [0x28] = {
                /* Eb, Gb */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_sub
        },
        [0x29] = {
                /* Ev, Gv */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_sub
        },
        [0x2A] = {
                /* Gb, Eb */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_sub
        },
        [0x2B] = {
                /* Gv, Ev */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_sub
        },

        /*
         * XOR
         */
        [0x30] = {
                /* Eb, Gb */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_xor
        },
        [0x31] = {
                /* Ev, Gv */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_xor
        },
        [0x32] = {
                /* Gb, Eb */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_xor
        },
        [0x33] = {
                /* Gv, Ev */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_xor
        },

        /*
         * XCHG
         */
        [0x86] = {
                /* Eb, Gb */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_xchg
        },
        [0x87] = {
                /* Ev, Gv */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_xchg
        },

        /*
         * MOV
         */
        [0x88] = {
                /* Eb, Gb */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_mov
        },
        [0x89] = {
                /* Ev, Gv */
                .valid = true,
                .regmodrm = true,
                .regtorm = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_mov
        },
        [0x8A] = {
                /* Gb, Eb */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_mov
        },
        [0x8B] = {
                /* Gv, Ev */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_mov
        },
        [0xA0] = {
                /* AL, Ob */
                .valid = true,
                .dmo = true,
                .todmo = false,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_mov
        },
        [0xA1] = {
                /* rAX, Ov */
                .valid = true,
                .dmo = true,
                .todmo = false,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_mov
        },
        [0xA2] = {
                /* Ob, AL */
                .valid = true,
                .dmo = true,
                .todmo = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_mov
        },
        [0xA3] = {
                /* Ov, rAX */
                .valid = true,
                .dmo = true,
                .todmo = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_mov
        },

        /*
         * MOVS
         */
        [0xA4] = {
                /* Yb, Xb */
                .valid = true,
                .movs = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = NULL
        },
        [0xA5] = {
                /* Yv, Xv */
                .valid = true,
                .movs = true,
                .szoverride = true,
                .defsize = -1,
                .emul = NULL
        },

        /*
         * CMPS
         */
        [0xA6] = {
                /* Yb, Xb */
                .valid = true,
                .cmps = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = NULL
        },
        [0xA7] = {
                /* Yv, Xv */
                .valid = true,
                .cmps = true,
                .szoverride = true,
                .defsize = -1,
                .emul = NULL
        },

        /*
         * STOS
         */
        [0xAA] = {
                /* Yb, AL */
                .valid = true,
                .stos = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_stos
        },
        [0xAB] = {
                /* Yv, rAX */
                .valid = true,
                .stos = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_stos
        },

        /*
         * LODS
         */
        [0xAC] = {
                /* AL, Xb */
                .valid = true,
                .lods = true,
                .szoverride = false,
                .defsize = OPSIZE_BYTE,
                .emul = &x86_emul_lods
        },
        [0xAD] = {
                /* rAX, Xv */
                .valid = true,
                .lods = true,
                .szoverride = true,
                .defsize = -1,
                .emul = &x86_emul_lods
        },
};

static const struct x86_opcode secondary_opcode_table[256] __cacheline_aligned = {
        /*
         * MOVZX
         */
        [0xB6] = {
                /* Gv, Eb */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = true,
                .defsize = OPSIZE_BYTE,
                .flags = FLAG_ze,
                .emul = &x86_emul_mov
        },
        [0xB7] = {
                /* Gv, Ew */
                .valid = true,
                .regmodrm = true,
                .regtorm = false,
                .szoverride = true,
                .defsize = OPSIZE_WORD,
                .flags = FLAG_ze,
                .emul = &x86_emul_mov
        },
};

static const struct x86_reg gpr_map__rip = { NVMM_X64_GPR_RIP, 0xFFFFFFFFFFFFFFFF };

/* [REX-present][enc][opsize] */
static const struct x86_reg gpr_map__special[2][4][8] __cacheline_aligned = {
        [false] = {
                /* No REX prefix. */
                [0b00] = {
                        [0] = { NVMM_X64_GPR_RAX, 0x000000000000FF00 }, /* AH */
                        [1] = { NVMM_X64_GPR_RSP, 0x000000000000FFFF }, /* SP */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RSP, 0x00000000FFFFFFFF }, /* ESP */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 },
                },
                [0b01] = {
                        [0] = { NVMM_X64_GPR_RCX, 0x000000000000FF00 }, /* CH */
                        [1] = { NVMM_X64_GPR_RBP, 0x000000000000FFFF }, /* BP */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RBP, 0x00000000FFFFFFFF }, /* EBP */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 },
                },
                [0b10] = {
                        [0] = { NVMM_X64_GPR_RDX, 0x000000000000FF00 }, /* DH */
                        [1] = { NVMM_X64_GPR_RSI, 0x000000000000FFFF }, /* SI */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RSI, 0x00000000FFFFFFFF }, /* ESI */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 },
                },
                [0b11] = {
                        [0] = { NVMM_X64_GPR_RBX, 0x000000000000FF00 }, /* BH */
                        [1] = { NVMM_X64_GPR_RDI, 0x000000000000FFFF }, /* DI */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RDI, 0x00000000FFFFFFFF }, /* EDI */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 },
                }
        },
        [true] = {
                /* Has REX prefix. */
                [0b00] = {
                        [0] = { NVMM_X64_GPR_RSP, 0x00000000000000FF }, /* SPL */
                        [1] = { NVMM_X64_GPR_RSP, 0x000000000000FFFF }, /* SP */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RSP, 0x00000000FFFFFFFF }, /* ESP */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RSP, 0xFFFFFFFFFFFFFFFF }, /* RSP */
                },
                [0b01] = {
                        [0] = { NVMM_X64_GPR_RBP, 0x00000000000000FF }, /* BPL */
                        [1] = { NVMM_X64_GPR_RBP, 0x000000000000FFFF }, /* BP */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RBP, 0x00000000FFFFFFFF }, /* EBP */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RBP, 0xFFFFFFFFFFFFFFFF }, /* RBP */
                },
                [0b10] = {
                        [0] = { NVMM_X64_GPR_RSI, 0x00000000000000FF }, /* SIL */
                        [1] = { NVMM_X64_GPR_RSI, 0x000000000000FFFF }, /* SI */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RSI, 0x00000000FFFFFFFF }, /* ESI */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RSI, 0xFFFFFFFFFFFFFFFF }, /* RSI */
                },
                [0b11] = {
                        [0] = { NVMM_X64_GPR_RDI, 0x00000000000000FF }, /* DIL */
                        [1] = { NVMM_X64_GPR_RDI, 0x000000000000FFFF }, /* DI */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RDI, 0x00000000FFFFFFFF }, /* EDI */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RDI, 0xFFFFFFFFFFFFFFFF }, /* RDI */
                }
        }
};

/* [depends][enc][size] */
static const struct x86_reg gpr_map[2][8][8] __cacheline_aligned = {
        [false] = {
                /* Not extended. */
                [0b000] = {
                        [0] = { NVMM_X64_GPR_RAX, 0x00000000000000FF }, /* AL */
                        [1] = { NVMM_X64_GPR_RAX, 0x000000000000FFFF }, /* AX */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RAX, 0x00000000FFFFFFFF }, /* EAX */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RAX, 0xFFFFFFFFFFFFFFFF }, /* RAX */
                },
                [0b001] = {
                        [0] = { NVMM_X64_GPR_RCX, 0x00000000000000FF }, /* CL */
                        [1] = { NVMM_X64_GPR_RCX, 0x000000000000FFFF }, /* CX */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RCX, 0x00000000FFFFFFFF }, /* ECX */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RCX, 0xFFFFFFFFFFFFFFFF }, /* RCX */
                },
                [0b010] = {
                        [0] = { NVMM_X64_GPR_RDX, 0x00000000000000FF }, /* DL */
                        [1] = { NVMM_X64_GPR_RDX, 0x000000000000FFFF }, /* DX */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RDX, 0x00000000FFFFFFFF }, /* EDX */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RDX, 0xFFFFFFFFFFFFFFFF }, /* RDX */
                },
                [0b011] = {
                        [0] = { NVMM_X64_GPR_RBX, 0x00000000000000FF }, /* BL */
                        [1] = { NVMM_X64_GPR_RBX, 0x000000000000FFFF }, /* BX */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_RBX, 0x00000000FFFFFFFF }, /* EBX */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_RBX, 0xFFFFFFFFFFFFFFFF }, /* RBX */
                },
                [0b100] = {
                        [0] = { -1, 0 }, /* SPECIAL */
                        [1] = { -1, 0 }, /* SPECIAL */
                        [2] = { -1, 0 },
                        [3] = { -1, 0 }, /* SPECIAL */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 }, /* SPECIAL */
                },
                [0b101] = {
                        [0] = { -1, 0 }, /* SPECIAL */
                        [1] = { -1, 0 }, /* SPECIAL */
                        [2] = { -1, 0 },
                        [3] = { -1, 0 }, /* SPECIAL */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 }, /* SPECIAL */
                },
                [0b110] = {
                        [0] = { -1, 0 }, /* SPECIAL */
                        [1] = { -1, 0 }, /* SPECIAL */
                        [2] = { -1, 0 },
                        [3] = { -1, 0 }, /* SPECIAL */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 }, /* SPECIAL */
                },
                [0b111] = {
                        [0] = { -1, 0 }, /* SPECIAL */
                        [1] = { -1, 0 }, /* SPECIAL */
                        [2] = { -1, 0 },
                        [3] = { -1, 0 }, /* SPECIAL */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { -1, 0 }, /* SPECIAL */
                },
        },
        [true] = {
                /* Extended. */
                [0b000] = {
                        [0] = { NVMM_X64_GPR_R8, 0x00000000000000FF }, /* R8B */
                        [1] = { NVMM_X64_GPR_R8, 0x000000000000FFFF }, /* R8W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R8, 0x00000000FFFFFFFF }, /* R8D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R8, 0xFFFFFFFFFFFFFFFF }, /* R8 */
                },
                [0b001] = {
                        [0] = { NVMM_X64_GPR_R9, 0x00000000000000FF }, /* R9B */
                        [1] = { NVMM_X64_GPR_R9, 0x000000000000FFFF }, /* R9W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R9, 0x00000000FFFFFFFF }, /* R9D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R9, 0xFFFFFFFFFFFFFFFF }, /* R9 */
                },
                [0b010] = {
                        [0] = { NVMM_X64_GPR_R10, 0x00000000000000FF }, /* R10B */
                        [1] = { NVMM_X64_GPR_R10, 0x000000000000FFFF }, /* R10W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R10, 0x00000000FFFFFFFF }, /* R10D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R10, 0xFFFFFFFFFFFFFFFF }, /* R10 */
                },
                [0b011] = {
                        [0] = { NVMM_X64_GPR_R11, 0x00000000000000FF }, /* R11B */
                        [1] = { NVMM_X64_GPR_R11, 0x000000000000FFFF }, /* R11W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R11, 0x00000000FFFFFFFF }, /* R11D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R11, 0xFFFFFFFFFFFFFFFF }, /* R11 */
                },
                [0b100] = {
                        [0] = { NVMM_X64_GPR_R12, 0x00000000000000FF }, /* R12B */
                        [1] = { NVMM_X64_GPR_R12, 0x000000000000FFFF }, /* R12W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R12, 0x00000000FFFFFFFF }, /* R12D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R12, 0xFFFFFFFFFFFFFFFF }, /* R12 */
                },
                [0b101] = {
                        [0] = { NVMM_X64_GPR_R13, 0x00000000000000FF }, /* R13B */
                        [1] = { NVMM_X64_GPR_R13, 0x000000000000FFFF }, /* R13W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R13, 0x00000000FFFFFFFF }, /* R13D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R13, 0xFFFFFFFFFFFFFFFF }, /* R13 */
                },
                [0b110] = {
                        [0] = { NVMM_X64_GPR_R14, 0x00000000000000FF }, /* R14B */
                        [1] = { NVMM_X64_GPR_R14, 0x000000000000FFFF }, /* R14W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R14, 0x00000000FFFFFFFF }, /* R14D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R14, 0xFFFFFFFFFFFFFFFF }, /* R14 */
                },
                [0b111] = {
                        [0] = { NVMM_X64_GPR_R15, 0x00000000000000FF }, /* R15B */
                        [1] = { NVMM_X64_GPR_R15, 0x000000000000FFFF }, /* R15W */
                        [2] = { -1, 0 },
                        [3] = { NVMM_X64_GPR_R15, 0x00000000FFFFFFFF }, /* R15D */
                        [4] = { -1, 0 },
                        [5] = { -1, 0 },
                        [6] = { -1, 0 },
                        [7] = { NVMM_X64_GPR_R15, 0xFFFFFFFFFFFFFFFF }, /* R15 */
                },
        }
};

/* [enc] */
static const int gpr_dual_reg1_rm[8] __cacheline_aligned = {
        [0b000] = NVMM_X64_GPR_RBX, /* BX (+SI) */
        [0b001] = NVMM_X64_GPR_RBX, /* BX (+DI) */
        [0b010] = NVMM_X64_GPR_RBP, /* BP (+SI) */
        [0b011] = NVMM_X64_GPR_RBP, /* BP (+DI) */
        [0b100] = NVMM_X64_GPR_RSI, /* SI */
        [0b101] = NVMM_X64_GPR_RDI, /* DI */
        [0b110] = NVMM_X64_GPR_RBP, /* BP */
        [0b111] = NVMM_X64_GPR_RBX, /* BX */
};
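
/*
 * 16bit addressing encodes base(+index) pairs in the rm field rather
 * than a single register: e.g. rm=0b000 means [BX+SI], rm=0b010 means
 * [BP+SI] and rm=0b100 means plain [SI]. node_dual() below picks the
 * first register from this table and pairs it with RSI or RDI.
 */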

static int
node_overflow(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
        fsm->fn = NULL;
        return -1;
}

static int
fsm_read(struct x86_decode_fsm *fsm, uint8_t *bytes, size_t n)
{
        if (fsm->buf + n > fsm->end) {
                return -1;
        }
        memcpy(bytes, fsm->buf, n);
        return 0;
}

static inline void
fsm_advance(struct x86_decode_fsm *fsm, size_t n,
    int (*fn)(struct x86_decode_fsm *, struct x86_instr *))
{
        fsm->buf += n;
        if (fsm->buf > fsm->end) {
                fsm->fn = node_overflow;
        } else {
                fsm->fn = fn;
        }
}
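
/*
 * The decoder runs as a chain of such nodes: each consumes bytes and
 * selects the next state via fsm_advance(), roughly
 * opcode -> ModRM -> SIB -> displacement -> immediate, with NULL
 * terminating the chain. Reading past fsm->end parks the FSM in
 * node_overflow, which fails the decode.
 */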

static const struct x86_reg *
resolve_special_register(struct x86_instr *instr, uint8_t enc, size_t regsize)
{
        enc &= 0b11;
        if (regsize == 8) {
                /* May be 64bit without REX */
                return &gpr_map__special[1][enc][regsize-1];
        }
        return &gpr_map__special[instr->rexpref.present][enc][regsize-1];
}
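
/*
 * This handles the slots that gpr_map marks as SPECIAL: for byte-sized
 * operands, encodings 4-7 are the legacy high registers AH, CH, DH, BH
 * when no REX prefix is present, but SPL, BPL, SIL, DIL when one is.
 * A 64-bit operand size always selects the REX variant of the table.
 */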

/*
 * Special node, for MOVS. Fake two displacements of zero on the source and
 * destination registers.
 */
static int
node_movs(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
        size_t adrsize;

        adrsize = instr->address_size;

        /* DS:RSI */
        instr->src.type = STORE_REG;
        instr->src.u.reg = &gpr_map__special[1][2][adrsize-1];
        instr->src.disp.type = DISP_0;

        /* ES:RDI, force ES */
        instr->dst.type = STORE_REG;
        instr->dst.u.reg = &gpr_map__special[1][3][adrsize-1];
        instr->dst.disp.type = DISP_0;
        instr->dst.hardseg = NVMM_X64_SEG_ES;

        fsm_advance(fsm, 0, NULL);

        return 0;
}

/*
 * Special node, for CMPS. Fake two displacements of zero on the source and
 * destination registers.
 * XXX coded as a clone of movs since it's similar in register usage
 * XXX might be merged with node_movs()
 */
static int
node_cmps(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
        size_t adrsize;

        adrsize = instr->address_size;

        /* DS:RSI */
        instr->src.type = STORE_REG;
        instr->src.u.reg = &gpr_map__special[1][2][adrsize-1];
        instr->src.disp.type = DISP_0;

        /* ES:RDI, force ES */
        instr->dst.type = STORE_REG;
        instr->dst.u.reg = &gpr_map__special[1][3][adrsize-1];
        instr->dst.disp.type = DISP_0;
        instr->dst.hardseg = NVMM_X64_SEG_ES;

        fsm_advance(fsm, 0, NULL);

        return 0;
}

/*
 * Special node, for STOS and LODS. Fake a displacement of zero on the
 * destination register.
 */
static int
node_stlo(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
        const struct x86_opcode *opcode = instr->opcode;
        struct x86_store *stlo, *streg;
        size_t adrsize, regsize;

        adrsize = instr->address_size;
        regsize = instr->operand_size;

        if (opcode->stos) {
                streg = &instr->src;
                stlo = &instr->dst;
        } else {
                streg = &instr->dst;
                stlo = &instr->src;
        }

        streg->type = STORE_REG;
        streg->u.reg = &gpr_map[0][0][regsize-1]; /* ?AX */

        stlo->type = STORE_REG;
        if (opcode->stos) {
                /* ES:RDI, force ES */
                stlo->u.reg = &gpr_map__special[1][3][adrsize-1];
                stlo->hardseg = NVMM_X64_SEG_ES;
        } else {
                /* DS:RSI */
                stlo->u.reg = &gpr_map__special[1][2][adrsize-1];
        }
        stlo->disp.type = DISP_0;

        fsm_advance(fsm, 0, NULL);

        return 0;
}

static int
node_dmo(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
        const struct x86_opcode *opcode = instr->opcode;
        struct x86_store *stdmo, *streg;
        size_t adrsize, regsize;

        adrsize = instr->address_size;
        regsize = instr->operand_size;

        if (opcode->todmo) {
                streg = &instr->src;
                stdmo = &instr->dst;
        } else {
                streg = &instr->dst;
                stdmo = &instr->src;
        }

        streg->type = STORE_REG;
        streg->u.reg = &gpr_map[0][0][regsize-1]; /* ?AX */

        stdmo->type = STORE_DMO;
        if (fsm_read(fsm, (uint8_t *)&stdmo->u.dmo, adrsize) == -1) {
                return -1;
        }
        fsm_advance(fsm, adrsize, NULL);

        return 0;
}

static inline uint64_t
sign_extend(uint64_t val, int size)
{
        if (size == 1) {
                if (val & __BIT(7))
                        val |= 0xFFFFFFFFFFFFFF00;
        } else if (size == 2) {
                if (val & __BIT(15))
                        val |= 0xFFFFFFFFFFFF0000;
        } else if (size == 4) {
                if (val & __BIT(31))
                        val |= 0xFFFFFFFF00000000;
        }
        return val;
}
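
/*
 * For example, sign_extend(0x80, 1) == 0xFFFFFFFFFFFFFF80, while
 * sign_extend(0x7F, 1) == 0x7F: only values whose top bit (for the given
 * size) is set are widened with ones.
 */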

static int
node_immediate(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
        const struct x86_opcode *opcode = instr->opcode;
        struct x86_store *store;
        uint8_t immsize;
        size_t sesize = 0;

        /* The immediate is the source */
        store = &instr->src;
        immsize = instr->operand_size;

        if (opcode->flags & FLAG_imm8) {
                sesize = immsize;
                immsize = 1;
        } else if ((opcode->flags & FLAG_immz) && (immsize == 8)) {
                sesize = immsize;
                immsize = 4;
        }

        store->type = STORE_IMM;
        if (fsm_read(fsm, (uint8_t *)&store->u.imm.data, immsize) == -1) {
                return -1;
        }
        fsm_advance(fsm, immsize, NULL);

        if (sesize != 0) {
                /* Sign-extend from the number of bytes actually read. */
                store->u.imm.data = sign_extend(store->u.imm.data, immsize);
        }

        return 0;
}
2038
2039 static int
node_disp(struct x86_decode_fsm * fsm,struct x86_instr * instr)2040 node_disp(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2041 {
2042 const struct x86_opcode *opcode = instr->opcode;
2043 uint64_t data = 0;
2044 size_t n;
2045
2046 if (instr->strm->disp.type == DISP_1) {
2047 n = 1;
2048 } else if (instr->strm->disp.type == DISP_2) {
2049 n = 2;
2050 } else if (instr->strm->disp.type == DISP_4) {
2051 n = 4;
2052 } else {
2053 DISASSEMBLER_BUG();
2054 }
2055
2056 if (fsm_read(fsm, (uint8_t *)&data, n) == -1) {
2057 return -1;
2058 }
2059
2060 if (__predict_true(fsm->is64bit)) {
2061 data = sign_extend(data, n);
2062 }
2063
2064 instr->strm->disp.data = data;
2065
2066 if (opcode->immediate) {
2067 fsm_advance(fsm, n, node_immediate);
2068 } else {
2069 fsm_advance(fsm, n, NULL);
2070 }
2071
2072 return 0;
2073 }
2074
2075 /*
2076 * Special node to handle 16bit addressing encoding, which can reference two
2077 * registers at once.
2078 */
2079 static int
node_dual(struct x86_decode_fsm * fsm,struct x86_instr * instr)2080 node_dual(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2081 {
2082 int reg1, reg2;
2083
2084 reg1 = gpr_dual_reg1_rm[instr->regmodrm.rm];
2085
2086 if (instr->regmodrm.rm == 0b000 ||
2087 instr->regmodrm.rm == 0b010) {
2088 reg2 = NVMM_X64_GPR_RSI;
2089 } else if (instr->regmodrm.rm == 0b001 ||
2090 instr->regmodrm.rm == 0b011) {
2091 reg2 = NVMM_X64_GPR_RDI;
2092 } else {
2093 DISASSEMBLER_BUG();
2094 }
2095
2096 instr->strm->type = STORE_DUALREG;
2097 instr->strm->u.dualreg.reg1 = reg1;
2098 instr->strm->u.dualreg.reg2 = reg2;
2099
2100 if (instr->strm->disp.type == DISP_NONE) {
2101 DISASSEMBLER_BUG();
2102 } else if (instr->strm->disp.type == DISP_0) {
2103 /* Indirect register addressing mode */
2104 if (instr->opcode->immediate) {
2105 fsm_advance(fsm, 1, node_immediate);
2106 } else {
2107 fsm_advance(fsm, 1, NULL);
2108 }
2109 } else {
2110 fsm_advance(fsm, 1, node_disp);
2111 }
2112
2113 return 0;
2114 }
2115
2116 static const struct x86_reg *
get_register_idx(struct x86_instr * instr,uint8_t index)2117 get_register_idx(struct x86_instr *instr, uint8_t index)
2118 {
2119 uint8_t enc = index;
2120 const struct x86_reg *reg;
2121 size_t regsize;
2122
2123 regsize = instr->address_size;
2124 reg = &gpr_map[instr->rexpref.x][enc][regsize-1];
2125
2126 if (reg->num == -1) {
2127 reg = resolve_special_register(instr, enc, regsize);
2128 }
2129
2130 return reg;
2131 }
2132
2133 static const struct x86_reg *
get_register_bas(struct x86_instr * instr,uint8_t base)2134 get_register_bas(struct x86_instr *instr, uint8_t base)
2135 {
2136 uint8_t enc = base;
2137 const struct x86_reg *reg;
2138 size_t regsize;
2139
2140 regsize = instr->address_size;
2141 reg = &gpr_map[instr->rexpref.b][enc][regsize-1];
2142 if (reg->num == -1) {
2143 reg = resolve_special_register(instr, enc, regsize);
2144 }
2145
2146 return reg;
2147 }
2148
2149 static int
node_sib(struct x86_decode_fsm * fsm,struct x86_instr * instr)2150 node_sib(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2151 {
2152 const struct x86_opcode *opcode;
2153 uint8_t scale, index, base;
2154 bool noindex, nobase;
2155 uint8_t byte;
2156
2157 if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
2158 return -1;
2159 }
2160
2161 scale = ((byte & 0b11000000) >> 6);
2162 index = ((byte & 0b00111000) >> 3);
2163 base = ((byte & 0b00000111) >> 0);
2164
2165 opcode = instr->opcode;
2166
2167 noindex = false;
2168 nobase = false;
2169
2170 if (index == 0b100 && !instr->rexpref.x) {
2171 /* Special case: the index is null */
2172 noindex = true;
2173 }
2174
2175 if (instr->regmodrm.mod == 0b00 && base == 0b101) {
2176 /* Special case: the base is null + disp32 */
2177 instr->strm->disp.type = DISP_4;
2178 nobase = true;
2179 }
2180
2181 instr->strm->type = STORE_SIB;
2182 instr->strm->u.sib.scale = (1 << scale);
2183 if (!noindex)
2184 instr->strm->u.sib.idx = get_register_idx(instr, index);
2185 if (!nobase)
2186 instr->strm->u.sib.bas = get_register_bas(instr, base);
2187
2188 /* May have a displacement, or an immediate */
2189 if (instr->strm->disp.type == DISP_1 ||
2190 instr->strm->disp.type == DISP_2 ||
2191 instr->strm->disp.type == DISP_4) {
2192 fsm_advance(fsm, 1, node_disp);
2193 } else if (opcode->immediate) {
2194 fsm_advance(fsm, 1, node_immediate);
2195 } else {
2196 fsm_advance(fsm, 1, NULL);
2197 }
2198
2199 return 0;
2200 }
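
/*
 * Worked example: "mov 0x8(%rax,%rcx,4), %edx" is 8B 54 88 08. ModRM
 * 0x54 selects mod=01 (DISP_1) and rm=100 (SIB present). The SIB byte
 * 0x88 decodes as scale=0b10 (u.sib.scale = 4), index=001 (RCX) and
 * base=000 (RAX); the FSM then advances to node_disp to fetch the
 * 8bit displacement 0x08.
 */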
2201
2202 static const struct x86_reg *
2203 get_register_reg(struct x86_instr *instr, const struct x86_opcode *opcode)
2204 {
2205 uint8_t enc = instr->regmodrm.reg;
2206 const struct x86_reg *reg;
2207 size_t regsize;
2208
2209 regsize = instr->operand_size;
2210
2211 reg = &gpr_map[instr->rexpref.r][enc][regsize-1];
2212 if (reg->num == -1) {
2213 reg = resolve_special_register(instr, enc, regsize);
2214 }
2215
2216 return reg;
2217 }
2218
2219 static const struct x86_reg *
2220 get_register_rm(struct x86_instr *instr, const struct x86_opcode *opcode)
2221 {
2222 uint8_t enc = instr->regmodrm.rm;
2223 const struct x86_reg *reg;
2224 size_t regsize;
2225
2226 if (instr->strm->disp.type == DISP_NONE) {
2227 regsize = instr->operand_size;
2228 } else {
2229 /* Indirect access, the size is that of the address. */
2230 regsize = instr->address_size;
2231 }
2232
2233 reg = &gpr_map[instr->rexpref.b][enc][regsize-1];
2234 if (reg->num == -1) {
2235 reg = resolve_special_register(instr, enc, regsize);
2236 }
2237
2238 return reg;
2239 }
2240
2241 static inline bool
2242 has_sib(struct x86_instr *instr)
2243 {
2244 return (instr->address_size != 2 && /* no SIB in 16bit addressing */
2245 instr->regmodrm.mod != 0b11 &&
2246 instr->regmodrm.rm == 0b100);
2247 }
2248
2249 static inline bool
2250 is_rip_relative(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2251 {
2252 return (fsm->is64bit && /* RIP-relative only in 64bit mode */
2253 instr->regmodrm.mod == 0b00 &&
2254 instr->regmodrm.rm == 0b101);
2255 }
2256
2257 static inline bool
2258 is_disp32_only(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2259 {
2260 return (!fsm->is64bit && /* no disp32-only in 64bit mode */
2261 instr->address_size != 2 && /* no disp32-only in 16bit addressing */
2262 instr->regmodrm.mod == 0b00 &&
2263 instr->regmodrm.rm == 0b101);
2264 }
2265
2266 static inline bool
2267 is_disp16_only(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2268 {
2269 return (instr->address_size == 2 && /* disp16-only only in 16bit addr */
2270 instr->regmodrm.mod == 0b00 &&
2271 instr->regmodrm.rm == 0b110);
2272 }
2273
2274 static inline bool
2275 is_dual(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2276 {
2277 return (instr->address_size == 2 &&
2278 instr->regmodrm.mod != 0b11 &&
2279 instr->regmodrm.rm <= 0b011);
2280 }
2281
2282 static enum x86_disp_type
2283 get_disp_type(struct x86_instr *instr)
2284 {
2285 switch (instr->regmodrm.mod) {
2286 case 0b00: /* indirect */
2287 return DISP_0;
2288 case 0b01: /* indirect+1 */
2289 return DISP_1;
2290 case 0b10: /* indirect+{2,4} */
2291 if (__predict_false(instr->address_size == 2)) {
2292 return DISP_2;
2293 }
2294 return DISP_4;
2295 case 0b11: /* direct */
2296 default: /* unreachable, appeases llvm */
2297 return DISP_NONE;
2298 }
2299 __unreachable();
2300 }
2301
2302 static int
2303 node_regmodrm(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2304 {
2305 struct x86_store *strg, *strm;
2306 const struct x86_opcode *opcode;
2307 const struct x86_reg *reg;
2308 uint8_t byte;
2309
2310 if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
2311 return -1;
2312 }
2313
2314 opcode = instr->opcode;
2315
2316 instr->regmodrm.rm = ((byte & 0b00000111) >> 0);
2317 instr->regmodrm.reg = ((byte & 0b00111000) >> 3);
2318 instr->regmodrm.mod = ((byte & 0b11000000) >> 6);
2319
2320 if (opcode->regtorm) {
2321 strg = &instr->src;
2322 strm = &instr->dst;
2323 } else { /* RM to REG */
2324 strm = &instr->src;
2325 strg = &instr->dst;
2326 }
2327
2328 /* Save for later use. */
2329 instr->strm = strm;
2330
2331 /*
2332 * Special cases: Groups. The REG field of REGMODRM is the index in
2333 * the group. op1 gets overwritten in the Immediate node, if any.
2334 */
2335 if (opcode->group1) {
2336 if (group1[instr->regmodrm.reg].emul == NULL) {
2337 return -1;
2338 }
2339 instr->emul = group1[instr->regmodrm.reg].emul;
2340 } else if (opcode->group3) {
2341 if (group3[instr->regmodrm.reg].emul == NULL) {
2342 return -1;
2343 }
2344 instr->emul = group3[instr->regmodrm.reg].emul;
2345 } else if (opcode->group11) {
2346 if (group11[instr->regmodrm.reg].emul == NULL) {
2347 return -1;
2348 }
2349 instr->emul = group11[instr->regmodrm.reg].emul;
2350 }
2351
2352 if (!opcode->immediate) {
2353 reg = get_register_reg(instr, opcode);
2354 if (reg == NULL) {
2355 return -1;
2356 }
2357 strg->type = STORE_REG;
2358 strg->u.reg = reg;
2359 }
2360
2361 /* The displacement applies to RM. */
2362 strm->disp.type = get_disp_type(instr);
2363
2364 if (has_sib(instr)) {
2365 /* Overwrites RM */
2366 fsm_advance(fsm, 1, node_sib);
2367 return 0;
2368 }
2369
2370 if (is_rip_relative(fsm, instr)) {
2371 /* Overwrites RM */
2372 strm->type = STORE_REG;
2373 strm->u.reg = &gpr_map__rip;
2374 strm->disp.type = DISP_4;
2375 fsm_advance(fsm, 1, node_disp);
2376 return 0;
2377 }
2378
2379 if (is_disp32_only(fsm, instr)) {
2380 /* Overwrites RM */
2381 strm->type = STORE_REG;
2382 strm->u.reg = NULL;
2383 strm->disp.type = DISP_4;
2384 fsm_advance(fsm, 1, node_disp);
2385 return 0;
2386 }
2387
2388 if (__predict_false(is_disp16_only(fsm, instr))) {
2389 /* Overwrites RM */
2390 strm->type = STORE_REG;
2391 strm->u.reg = NULL;
2392 strm->disp.type = DISP_2;
2393 fsm_advance(fsm, 1, node_disp);
2394 return 0;
2395 }
2396
2397 if (__predict_false(is_dual(fsm, instr))) {
2398 /* Overwrites RM */
2399 fsm_advance(fsm, 0, node_dual);
2400 return 0;
2401 }
2402
2403 reg = get_register_rm(instr, opcode);
2404 if (reg == NULL) {
2405 return -1;
2406 }
2407 strm->type = STORE_REG;
2408 strm->u.reg = reg;
2409
2410 if (strm->disp.type == DISP_NONE) {
2411 /* Direct register addressing mode */
2412 if (opcode->immediate) {
2413 fsm_advance(fsm, 1, node_immediate);
2414 } else {
2415 fsm_advance(fsm, 1, NULL);
2416 }
2417 } else if (strm->disp.type == DISP_0) {
2418 /* Indirect register addressing mode */
2419 if (opcode->immediate) {
2420 fsm_advance(fsm, 1, node_immediate);
2421 } else {
2422 fsm_advance(fsm, 1, NULL);
2423 }
2424 } else {
2425 fsm_advance(fsm, 1, node_disp);
2426 }
2427
2428 return 0;
2429 }
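
/*
 * Putting it together for "mov %ecx,(%rax)" (89 08): opcode 0x89 is
 * REG-to-RM, so strg is the source (reg=001 -> ECX) and strm the
 * destination. ModRM 0x08 gives mod=00 rm=000: no SIB, not
 * RIP-relative, so RM resolves through get_register_rm() to RAX with
 * DISP_0 -- an indirect access through %rax with no displacement and
 * no immediate.
 */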
2430
2431 static size_t
2432 get_operand_size(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2433 {
2434 const struct x86_opcode *opcode = instr->opcode;
2435 int opsize;
2436
2437 /* Get the opsize */
2438 if (!opcode->szoverride) {
2439 opsize = opcode->defsize;
2440 } else if (instr->rexpref.present && instr->rexpref.w) {
2441 opsize = 8;
2442 } else {
2443 if (!fsm->is16bit) {
2444 if (instr->legpref.opr_ovr) {
2445 opsize = 2;
2446 } else {
2447 opsize = 4;
2448 }
2449 } else { /* 16bit */
2450 if (instr->legpref.opr_ovr) {
2451 opsize = 4;
2452 } else {
2453 opsize = 2;
2454 }
2455 }
2456 }
2457
2458 return opsize;
2459 }
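
/*
 * Summary of the rules above (0x66 being the operand-size override):
 *
 *	szoverride=0	-> opcode->defsize
 *	REX.W=1		-> 8 bytes
 *	32/64bit mode	-> 4 bytes, or 2 with 0x66
 *	16bit mode	-> 2 bytes, or 4 with 0x66
 */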
2460
2461 static size_t
2462 get_address_size(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2463 {
2464 if (fsm->is64bit) {
2465 if (__predict_false(instr->legpref.adr_ovr)) {
2466 return 4;
2467 }
2468 return 8;
2469 }
2470
2471 if (fsm->is32bit) {
2472 if (__predict_false(instr->legpref.adr_ovr)) {
2473 return 2;
2474 }
2475 return 4;
2476 }
2477
2478 /* 16bit. */
2479 if (__predict_false(instr->legpref.adr_ovr)) {
2480 return 4;
2481 }
2482 return 2;
2483 }
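
/*
 * Likewise for the address size (0x67 being the address-size
 * override): 64bit mode -> 8 bytes (4 with 0x67), 32bit mode -> 4
 * bytes (2 with 0x67), 16bit mode -> 2 bytes (4 with 0x67).
 */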
2484
2485 static int
2486 node_primary_opcode(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2487 {
2488 const struct x86_opcode *opcode;
2489 uint8_t byte;
2490
2491 if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
2492 return -1;
2493 }
2494
2495 opcode = &primary_opcode_table[byte];
2496 if (__predict_false(!opcode->valid)) {
2497 return -1;
2498 }
2499
2500 instr->opcode = opcode;
2501 instr->emul = opcode->emul;
2502 instr->operand_size = get_operand_size(fsm, instr);
2503 instr->address_size = get_address_size(fsm, instr);
2504
2505 if (fsm->is64bit && (instr->operand_size == 4)) {
2506 /* Zero-extend to 64 bits. */
2507 instr->zeroextend_mask = ~size_to_mask(4);
2508 }
2509
2510 if (opcode->regmodrm) {
2511 fsm_advance(fsm, 1, node_regmodrm);
2512 } else if (opcode->dmo) {
2513 /* Direct-Memory Offsets */
2514 fsm_advance(fsm, 1, node_dmo);
2515 } else if (opcode->stos || opcode->lods) {
2516 fsm_advance(fsm, 1, node_stlo);
2517 } else if (opcode->movs) {
2518 fsm_advance(fsm, 1, node_movs);
2519 } else if (opcode->cmps) {
2520 fsm_advance(fsm, 1, node_cmps);
2521 } else {
2522 return -1;
2523 }
2524
2525 return 0;
2526 }
2527
2528 static int
2529 node_secondary_opcode(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2530 {
2531 const struct x86_opcode *opcode;
2532 uint8_t byte;
2533
2534 if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
2535 return -1;
2536 }
2537
2538 opcode = &secondary_opcode_table[byte];
2539 if (__predict_false(!opcode->valid)) {
2540 return -1;
2541 }
2542
2543 instr->opcode = opcode;
2544 instr->emul = opcode->emul;
2545 instr->operand_size = get_operand_size(fsm, instr);
2546 instr->address_size = get_address_size(fsm, instr);
2547
2548 if (fsm->is64bit && (instr->operand_size == 4)) {
2549 /* Zero-extend to 64 bits. */
2550 instr->zeroextend_mask = ~size_to_mask(4);
2551 }
2552
2553 if (opcode->flags & FLAG_ze) {
2554 /*
2555 * Compute the mask for zero-extend. Update the operand size,
2556 * we move fewer bytes.
2557 */
2558 instr->zeroextend_mask |= size_to_mask(instr->operand_size);
2559 instr->zeroextend_mask &= ~size_to_mask(opcode->defsize);
2560 instr->operand_size = opcode->defsize;
2561 }
2562
2563 if (opcode->regmodrm) {
2564 fsm_advance(fsm, 1, node_regmodrm);
2565 } else {
2566 return -1;
2567 }
2568
2569 return 0;
2570 }
2571
2572 static int
2573 node_main(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2574 {
2575 uint8_t byte;
2576
2577 #define ESCAPE 0x0F
2578 #define VEX_1 0xC5
2579 #define VEX_2 0xC4
2580 #define XOP 0x8F
2581
2582 if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
2583 return -1;
2584 }
2585
2586 /*
2587 * We don't take XOP. It is AMD-specific, and it was removed shortly
2588 * after being introduced.
2589 */
2590 if (byte == ESCAPE) {
2591 fsm_advance(fsm, 1, node_secondary_opcode);
2592 } else if (!instr->rexpref.present) {
2593 if (byte == VEX_1) {
2594 return -1;
2595 } else if (byte == VEX_2) {
2596 return -1;
2597 } else {
2598 fsm->fn = node_primary_opcode;
2599 }
2600 } else {
2601 fsm->fn = node_primary_opcode;
2602 }
2603
2604 return 0;
2605 }
2606
2607 static int
2608 node_rex_prefix(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2609 {
2610 struct x86_rexpref *rexpref = &instr->rexpref;
2611 uint8_t byte;
2612 size_t n = 0;
2613
2614 if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
2615 return -1;
2616 }
2617
2618 if (byte >= 0x40 && byte <= 0x4F) {
2619 if (__predict_false(!fsm->is64bit)) {
2620 return -1;
2621 }
2622 rexpref->b = ((byte & 0x1) != 0);
2623 rexpref->x = ((byte & 0x2) != 0);
2624 rexpref->r = ((byte & 0x4) != 0);
2625 rexpref->w = ((byte & 0x8) != 0);
2626 rexpref->present = true;
2627 n = 1;
2628 }
2629
2630 fsm_advance(fsm, n, node_main);
2631 return 0;
2632 }
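
/*
 * Example: "mov %rax,(%rbx)" is 48 89 03. The REX byte 0x48 falls in
 * the 0x40-0x4F range and sets only W (64bit operand size); B, X and
 * R stay clear, so the ModRM/SIB register fields are not extended to
 * R8-R15.
 */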
2633
2634 static int
2635 node_legacy_prefix(struct x86_decode_fsm *fsm, struct x86_instr *instr)
2636 {
2637 uint8_t byte;
2638
2639 if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
2640 return -1;
2641 }
2642
2643 if (byte == LEG_OPR_OVR) {
2644 instr->legpref.opr_ovr = 1;
2645 } else if (byte == LEG_OVR_DS) {
2646 instr->legpref.seg = NVMM_X64_SEG_DS;
2647 } else if (byte == LEG_OVR_ES) {
2648 instr->legpref.seg = NVMM_X64_SEG_ES;
2649 } else if (byte == LEG_REP) {
2650 instr->legpref.rep = 1;
2651 } else if (byte == LEG_OVR_GS) {
2652 instr->legpref.seg = NVMM_X64_SEG_GS;
2653 } else if (byte == LEG_OVR_FS) {
2654 instr->legpref.seg = NVMM_X64_SEG_FS;
2655 } else if (byte == LEG_ADR_OVR) {
2656 instr->legpref.adr_ovr = 1;
2657 } else if (byte == LEG_OVR_CS) {
2658 instr->legpref.seg = NVMM_X64_SEG_CS;
2659 } else if (byte == LEG_OVR_SS) {
2660 instr->legpref.seg = NVMM_X64_SEG_SS;
2661 } else if (byte == LEG_REPN) {
2662 instr->legpref.repn = 1;
2663 } else if (byte == LEG_LOCK) {
2664 /* ignore */
2665 } else {
2666 /* not a legacy prefix */
2667 fsm_advance(fsm, 0, node_rex_prefix);
2668 return 0;
2669 }
2670
2671 fsm_advance(fsm, 1, node_legacy_prefix);
2672 return 0;
2673 }
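
/*
 * The node re-enters itself, so several legacy prefixes can be
 * consumed in a row. For "66 89 08": 0x66 sets opr_ovr and the FSM
 * loops; 0x89 is not a prefix, so the FSM advances zero bytes into
 * node_rex_prefix, which also consumes nothing (0x89 is outside
 * 0x40-0x4F) and hands over to node_main.
 */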
2674
2675 static int
2676 x86_decode(uint8_t *inst_bytes, size_t inst_len, struct x86_instr *instr,
2677 struct nvmm_x64_state *state)
2678 {
2679 struct x86_decode_fsm fsm;
2680 int ret;
2681
2682 memset(instr, 0, sizeof(*instr));
2683 instr->legpref.seg = -1;
2684 instr->src.hardseg = -1;
2685 instr->dst.hardseg = -1;
2686
2687 fsm.is64bit = is_64bit(state);
2688 fsm.is32bit = is_32bit(state);
2689 fsm.is16bit = is_16bit(state);
2690
2691 fsm.fn = node_legacy_prefix;
2692 fsm.buf = inst_bytes;
2693 fsm.end = inst_bytes + inst_len;
2694
2695 while (fsm.fn != NULL) {
2696 ret = (*fsm.fn)(&fsm, instr);
2697 if (ret == -1) {
2698 #ifdef NVMM_DEBUG
2699 printf("\n%s debug: unrecognized instruction found "
2700     "with max length %zu : [ ", __func__, inst_len);
2701 for (size_t i = 0; i < inst_len; i++)
2702 printf("%02x ", inst_bytes[i]);
2703 printf("]\n");
2704 fflush(stdout);
2705 #endif
2706 return -1;
2707 }
2708 }
2709
2710 instr->len = fsm.buf - inst_bytes;
2711
2712 return 0;
2713 }
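
/*
 * End-to-end decode trace, assuming 0x83 is a group1 entry of
 * primary_opcode_table: for "add $1, %rax" (48 83 C0 01) the FSM
 * visits node_legacy_prefix (no match) -> node_rex_prefix (0x48,
 * REX.W) -> node_main -> node_primary_opcode (0x83) -> node_regmodrm
 * (0xC0: mod=11, direct; reg=000 selects the group entry) ->
 * node_immediate (FLAG_imm8: 0x01). fsm.fn is then NULL and
 * instr->len = 4.
 */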
2714
2715 /* -------------------------------------------------------------------------- */
2716
2717 #define EXEC_INSTR(sz, instr) \
2718 static uint##sz##_t \
2719 exec_##instr##sz(uint##sz##_t op1, uint##sz##_t op2, uint64_t *rflags) \
2720 { \
2721 uint##sz##_t res; \
2722 __asm __volatile ( \
2723 #instr" %2, %3;" \
2724 "mov %3, %1;" \
2725 "pushfq;" \
2726 "popq %0" \
2727 : "=r" (*rflags), "=r" (res) \
2728 : "r" (op1), "r" (op2)); \
2729 return res; \
2730 }
2731
2732 #define EXEC_DISPATCHER(instr) \
2733 static uint64_t \
2734 exec_##instr(uint64_t op1, uint64_t op2, uint64_t *rflags, size_t opsize) \
2735 { \
2736 switch (opsize) { \
2737 case 1: \
2738 return exec_##instr##8(op1, op2, rflags); \
2739 case 2: \
2740 return exec_##instr##16(op1, op2, rflags); \
2741 case 4: \
2742 return exec_##instr##32(op1, op2, rflags); \
2743 default: \
2744 return exec_##instr##64(op1, op2, rflags); \
2745 } \
2746 }
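
/*
 * For reference, EXEC_INSTR(32, sub) expands to roughly:
 *
 *	static uint32_t
 *	exec_sub32(uint32_t op1, uint32_t op2, uint64_t *rflags)
 *	{
 *		uint32_t res;
 *		__asm __volatile (
 *			"sub %2, %3;"
 *			"mov %3, %1;"
 *			"pushfq;"
 *			"popq %0"
 *			: "=r" (*rflags), "=r" (res)
 *			: "r" (op1), "r" (op2));
 *		return res;
 *	}
 *
 * i.e. the operation is executed natively on the host, and the host
 * RFLAGS are snapshotted so the caller can merge the relevant
 * arithmetic bits back into the guest's RFLAGS.
 */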
2747
2748 /* SUB: ret = op1 - op2 */
2749 #define PSL_SUB_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF|PSL_AF)
2750 EXEC_INSTR(8, sub)
2751 EXEC_INSTR(16, sub)
2752 EXEC_INSTR(32, sub)
2753 EXEC_INSTR(64, sub)
2754 EXEC_DISPATCHER(sub)
2755
2756 /* OR: ret = op1 | op2 */
2757 #define PSL_OR_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF)
2758 EXEC_INSTR(8, or)
2759 EXEC_INSTR(16, or)
2760 EXEC_INSTR(32, or)
2761 EXEC_INSTR(64, or)
2762 EXEC_DISPATCHER(or)
2763
2764 /* AND: ret = op1 & op2 */
2765 #define PSL_AND_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF)
2766 EXEC_INSTR(8, and)
2767 EXEC_INSTR(16, and)
2768 EXEC_INSTR(32, and)
2769 EXEC_INSTR(64, and)
2770 EXEC_DISPATCHER(and)
2771
2772 /* XOR: ret = op1 ^ op2 */
2773 #define PSL_XOR_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF)
2774 EXEC_INSTR(8, xor)
2775 EXEC_INSTR(16, xor)
2776 EXEC_INSTR(32, xor)
2777 EXEC_INSTR(64, xor)
2778 EXEC_DISPATCHER(xor)
2779
2780 /* -------------------------------------------------------------------------- */
2781
2782 /*
2783 * Emulation functions. We don't care about the order of the operands, except
2784 * for SUB, CMP and TEST. For these ones we look at mem->write to determine who
2785 * is op1 and who is op2.
2786 */
2787
2788 static void
2789 x86_func_or(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2790 {
2791 uint64_t *retval = (uint64_t *)mem->data;
2792 const bool write = mem->write;
2793 uint64_t *op1, op2, fl, ret;
2794
2795 op1 = (uint64_t *)mem->data;
2796 op2 = 0;
2797
2798 /* Fetch the value to be OR'ed (op2). */
2799 mem->data = (uint8_t *)&op2;
2800 mem->write = false;
2801 (*vcpu->cbs.mem)(mem);
2802
2803 /* Perform the OR. */
2804 ret = exec_or(*op1, op2, &fl, mem->size);
2805
2806 if (write) {
2807 /* Write back the result. */
2808 mem->data = (uint8_t *)&ret;
2809 mem->write = true;
2810 (*vcpu->cbs.mem)(mem);
2811 } else {
2812 /* Return data to the caller. */
2813 *retval = ret;
2814 }
2815
2816 gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_OR_MASK;
2817 gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_OR_MASK);
2818 }
2819
2820 static void
2821 x86_func_and(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2822 {
2823 uint64_t *retval = (uint64_t *)mem->data;
2824 const bool write = mem->write;
2825 uint64_t *op1, op2, fl, ret;
2826
2827 op1 = (uint64_t *)mem->data;
2828 op2 = 0;
2829
2830 /* Fetch the value to be AND'ed (op2). */
2831 mem->data = (uint8_t *)&op2;
2832 mem->write = false;
2833 (*vcpu->cbs.mem)(mem);
2834
2835 /* Perform the AND. */
2836 ret = exec_and(*op1, op2, &fl, mem->size);
2837
2838 if (write) {
2839 /* Write back the result. */
2840 mem->data = (uint8_t *)&ret;
2841 mem->write = true;
2842 (*vcpu->cbs.mem)(mem);
2843 } else {
2844 /* Return data to the caller. */
2845 *retval = ret;
2846 }
2847
2848 gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_AND_MASK;
2849 gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_AND_MASK);
2850 }
2851
2852 static void
2853 x86_func_xchg(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2854 {
2855 uint64_t *op1, op2;
2856
2857 op1 = (uint64_t *)mem->data;
2858 op2 = 0;
2859
2860 /* Fetch op2. */
2861 mem->data = (uint8_t *)&op2;
2862 mem->write = false;
2863 (*vcpu->cbs.mem)(mem);
2864
2865 /* Write op1 in op2. */
2866 mem->data = (uint8_t *)op1;
2867 mem->write = true;
2868 (*vcpu->cbs.mem)(mem);
2869
2870 /* Write op2 in op1. */
2871 *op1 = op2;
2872 }
2873
2874 static void
2875 x86_func_sub(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2876 {
2877 uint64_t *retval = (uint64_t *)mem->data;
2878 const bool write = mem->write;
2879 uint64_t *op1, *op2, fl, ret;
2880 uint64_t tmp;
2881 bool memop1;
2882
2883 memop1 = !mem->write;
2884 op1 = memop1 ? &tmp : (uint64_t *)mem->data;
2885 op2 = memop1 ? (uint64_t *)mem->data : &tmp;
2886
2887 /* Fetch the value to be SUB'ed (op1 or op2). */
2888 mem->data = (uint8_t *)&tmp;
2889 mem->write = false;
2890 (*vcpu->cbs.mem)(mem);
2891
2892 /* Perform the SUB. */
2893 ret = exec_sub(*op1, *op2, &fl, mem->size);
2894
2895 if (write) {
2896 /* Write back the result. */
2897 mem->data = (uint8_t *)&ret;
2898 mem->write = true;
2899 (*vcpu->cbs.mem)(mem);
2900 } else {
2901 /* Return data to the caller. */
2902 *retval = ret;
2903 }
2904
2905 gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_SUB_MASK;
2906 gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_SUB_MASK);
2907 }
2908
2909 static void
2910 x86_func_xor(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2911 {
2912 uint64_t *retval = (uint64_t *)mem->data;
2913 const bool write = mem->write;
2914 uint64_t *op1, op2, fl, ret;
2915
2916 op1 = (uint64_t *)mem->data;
2917 op2 = 0;
2918
2919 /* Fetch the value to be XOR'ed (op2). */
2920 mem->data = (uint8_t *)&op2;
2921 mem->write = false;
2922 (*vcpu->cbs.mem)(mem);
2923
2924 /* Perform the XOR. */
2925 ret = exec_xor(*op1, op2, &fl, mem->size);
2926
2927 if (write) {
2928 /* Write back the result. */
2929 mem->data = (uint8_t *)&ret;
2930 mem->write = true;
2931 (*vcpu->cbs.mem)(mem);
2932 } else {
2933 /* Return data to the caller. */
2934 *retval = ret;
2935 }
2936
2937 gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_XOR_MASK;
2938 gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_XOR_MASK);
2939 }
2940
2941 static void
2942 x86_func_cmp(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2943 {
2944 uint64_t *op1, *op2, fl;
2945 uint64_t tmp;
2946 bool memop1;
2947
2948 memop1 = !mem->write;
2949 op1 = memop1 ? &tmp : (uint64_t *)mem->data;
2950 op2 = memop1 ? (uint64_t *)mem->data : &tmp;
2951
2952 /* Fetch the value to be CMP'ed (op1 or op2). */
2953 mem->data = (uint8_t *)&tmp;
2954 mem->write = false;
2955 (*vcpu->cbs.mem)(mem);
2956
2957 /* Perform the CMP. */
2958 exec_sub(*op1, *op2, &fl, mem->size);
2959
2960 gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_SUB_MASK;
2961 gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_SUB_MASK);
2962 }
2963
2964 static void
2965 x86_func_test(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2966 {
2967 uint64_t *op1, *op2, fl;
2968 uint64_t tmp;
2969 bool memop1;
2970
2971 memop1 = !mem->write;
2972 op1 = memop1 ? &tmp : (uint64_t *)mem->data;
2973 op2 = memop1 ? (uint64_t *)mem->data : &tmp;
2974
2975 /* Fetch the value to be TEST'ed (op1 or op2). */
2976 mem->data = (uint8_t *)&tmp;
2977 mem->write = false;
2978 (*vcpu->cbs.mem)(mem);
2979
2980 /* Perform the TEST. */
2981 exec_and(*op1, *op2, &fl, mem->size);
2982
2983 gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_AND_MASK;
2984 gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_AND_MASK);
2985 }
2986
2987 static void
2988 x86_func_mov(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2989 {
2990 /*
2991 * Nothing special, just move without emulation.
2992 */
2993 (*vcpu->cbs.mem)(mem);
2994 }
2995
2996 static void
2997 x86_func_stos(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
2998 {
2999 /*
3000 * Just move, and update RDI.
3001 */
3002 (*vcpu->cbs.mem)(mem);
3003
3004 if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) {
3005 gprs[NVMM_X64_GPR_RDI] -= mem->size;
3006 } else {
3007 gprs[NVMM_X64_GPR_RDI] += mem->size;
3008 }
3009 }
3010
3011 static void
3012 x86_func_lods(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
3013 {
3014 /*
3015 * Just move, and update RSI.
3016 */
3017 (*vcpu->cbs.mem)(mem);
3018
3019 if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) {
3020 gprs[NVMM_X64_GPR_RSI] -= mem->size;
3021 } else {
3022 gprs[NVMM_X64_GPR_RSI] += mem->size;
3023 }
3024 }
3025
3026 /* -------------------------------------------------------------------------- */
3027
3028 static inline uint64_t
3029 gpr_read_address(struct x86_instr *instr, struct nvmm_x64_state *state, int gpr)
3030 {
3031 uint64_t val;
3032
3033 val = state->gprs[gpr];
3034 val &= size_to_mask(instr->address_size);
3035
3036 return val;
3037 }
3038
3039 static int
3040 store_to_gva(struct nvmm_x64_state *state, struct x86_instr *instr,
3041 struct x86_store *store, gvaddr_t *gvap, size_t size)
3042 {
3043 struct x86_sib *sib;
3044 gvaddr_t gva = 0;
3045 uint64_t reg;
3046 int ret, seg;
3047
3048 if (store->type == STORE_SIB) {
3049 sib = &store->u.sib;
3050 if (sib->bas != NULL)
3051 gva += gpr_read_address(instr, state, sib->bas->num);
3052 if (sib->idx != NULL) {
3053 reg = gpr_read_address(instr, state, sib->idx->num);
3054 gva += sib->scale * reg;
3055 }
3056 } else if (store->type == STORE_REG) {
3057 if (store->u.reg == NULL) {
3058 /* The base is null. Happens with disp32-only and
3059 * disp16-only. */
3060 } else {
3061 gva = gpr_read_address(instr, state, store->u.reg->num);
3062 }
3063 } else if (store->type == STORE_DUALREG) {
3064 gva = gpr_read_address(instr, state, store->u.dualreg.reg1) +
3065 gpr_read_address(instr, state, store->u.dualreg.reg2);
3066 } else {
3067 gva = store->u.dmo;
3068 }
3069
3070 if (store->disp.type != DISP_NONE) {
3071 gva += store->disp.data;
3072 }
3073
3074 if (store->hardseg != -1) {
3075 seg = store->hardseg;
3076 } else {
3077 if (__predict_false(instr->legpref.seg != -1)) {
3078 seg = instr->legpref.seg;
3079 } else {
3080 seg = NVMM_X64_SEG_DS;
3081 }
3082 }
3083
3084 if (__predict_true(is_long_mode(state))) {
3085 if (seg == NVMM_X64_SEG_GS || seg == NVMM_X64_SEG_FS) {
3086 segment_apply(&state->segs[seg], &gva);
3087 }
3088 } else {
3089 ret = segment_check(&state->segs[seg], gva, size);
3090 if (ret == -1)
3091 return -1;
3092 segment_apply(&state->segs[seg], &gva);
3093 }
3094
3095 *gvap = gva;
3096 return 0;
3097 }
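
/*
 * Schematically, the GVA computed above is:
 *
 *	gva = [base] + [scale * index] + [displacement] + segment base
 *
 * with every GPR read truncated to the instruction's address size,
 * and segment limits checked whenever the guest is not in long mode.
 */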
3098
3099 static int
3100 fetch_segment(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
3101 {
3102 struct nvmm_x64_state *state = vcpu->state;
3103 uint8_t inst_bytes[5], byte;
3104 size_t i, fetchsize;
3105 gvaddr_t gva;
3106 int ret, seg;
3107
3108 fetchsize = sizeof(inst_bytes);
3109
3110 gva = state->gprs[NVMM_X64_GPR_RIP];
3111 if (__predict_false(!is_long_mode(state))) {
3112 ret = segment_check(&state->segs[NVMM_X64_SEG_CS], gva,
3113 fetchsize);
3114 if (ret == -1)
3115 return -1;
3116 segment_apply(&state->segs[NVMM_X64_SEG_CS], &gva);
3117 }
3118
3119 ret = read_guest_memory(mach, vcpu, gva, inst_bytes, fetchsize);
3120 if (ret == -1)
3121 return -1;
3122
3123 seg = NVMM_X64_SEG_DS;
3124 for (i = 0; i < fetchsize; i++) {
3125 byte = inst_bytes[i];
3126
3127 if (byte == LEG_OVR_DS) {
3128 seg = NVMM_X64_SEG_DS;
3129 } else if (byte == LEG_OVR_ES) {
3130 seg = NVMM_X64_SEG_ES;
3131 } else if (byte == LEG_OVR_GS) {
3132 seg = NVMM_X64_SEG_GS;
3133 } else if (byte == LEG_OVR_FS) {
3134 seg = NVMM_X64_SEG_FS;
3135 } else if (byte == LEG_OVR_CS) {
3136 seg = NVMM_X64_SEG_CS;
3137 } else if (byte == LEG_OVR_SS) {
3138 seg = NVMM_X64_SEG_SS;
3139 } else if (byte == LEG_OPR_OVR) {
3140 /* nothing */
3141 } else if (byte == LEG_ADR_OVR) {
3142 /* nothing */
3143 } else if (byte == LEG_REP) {
3144 /* nothing */
3145 } else if (byte == LEG_REPN) {
3146 /* nothing */
3147 } else if (byte == LEG_LOCK) {
3148 /* nothing */
3149 } else {
3150 return seg;
3151 }
3152 }
3153
3154 return seg;
3155 }
3156
3157 static int
3158 fetch_instruction(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
3159 struct nvmm_vcpu_exit *exit)
3160 {
3161 struct nvmm_x64_state *state = vcpu->state;
3162 size_t fetchsize;
3163 gvaddr_t gva;
3164 int ret;
3165
3166 fetchsize = sizeof(exit->u.mem.inst_bytes);
3167
3168 gva = state->gprs[NVMM_X64_GPR_RIP];
3169 if (__predict_false(!is_long_mode(state))) {
3170 ret = segment_check(&state->segs[NVMM_X64_SEG_CS], gva,
3171 fetchsize);
3172 if (ret == -1)
3173 return -1;
3174 segment_apply(&state->segs[NVMM_X64_SEG_CS], &gva);
3175 }
3176
3177 ret = read_guest_memory(mach, vcpu, gva, exit->u.mem.inst_bytes,
3178 fetchsize);
3179 if (ret == -1)
3180 return -1;
3181
3182 exit->u.mem.inst_len = fetchsize;
3183
3184 return 0;
3185 }
3186
3187 static int
3188 assist_mem_movs(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
3189 struct x86_instr *instr)
3190 {
3191 struct nvmm_x64_state *state = vcpu->state;
3192 uint64_t *gprs;
3193 uint8_t data[8];
3194 gvaddr_t gva;
3195 size_t size;
3196 int ret;
3197
3198 size = instr->operand_size;
3199 gprs = state->gprs;
3200
3201 /* Source. */
3202 ret = store_to_gva(state, instr, &instr->src, &gva, size);
3203 if (ret == -1)
3204 return -1;
3205 ret = read_guest_memory(mach, vcpu, gva, data, size);
3206 if (ret == -1)
3207 return -1;
3208
3209 /* Destination. */
3210 ret = store_to_gva(state, instr, &instr->dst, &gva, size);
3211 if (ret == -1)
3212 return -1;
3213 ret = write_guest_memory(mach, vcpu, gva, data, size);
3214 if (ret == -1)
3215 return -1;
3216
3217 /*
3218 * Inlined x86_func_movs() call
3219 * (*instr->emul->func)(vcpu, &mem, state->gprs);
3220 */
3221
3222 if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) {
3223 gprs[NVMM_X64_GPR_RSI] -= size;
3224 gprs[NVMM_X64_GPR_RDI] -= size;
3225 } else {
3226 gprs[NVMM_X64_GPR_RSI] += size;
3227 gprs[NVMM_X64_GPR_RDI] += size;
3228 }
3229
3230 return 0;
3231 }
3232
3233 static int
3234 assist_mem_cmps(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
3235 struct x86_instr *instr)
3236 {
3237 struct nvmm_x64_state *state = vcpu->state;
3238 uint64_t *gprs, op1, op2, fl;
3239 uint8_t data1[8], data2[8];
3240 gvaddr_t gva;
3241 size_t size;
3242 int ret;
3243
3244 size = instr->operand_size;
3245 gprs = state->gprs;
3246
3247 /* Source 1. */
3248 ret = store_to_gva(state, instr, &instr->src, &gva, size);
3249 if (ret == -1)
3250 return -1;
3251 ret = read_guest_memory(mach, vcpu, gva, data1, size);
3252 if (ret == -1)
3253 return -1;
3254
3255 /* Source 2. */
3256 ret = store_to_gva(state, instr, &instr->dst, &gva, size);
3257 if (ret == -1)
3258 return -1;
3259 ret = read_guest_memory(mach, vcpu, gva, data2, size);
3260 if (ret == -1)
3261 return -1;
3262
3263 /*
3264 * Inlined x86_func_cmps() call
3265 * (*instr->emul->func)(vcpu, &mem, state->gprs);
3266 */
3267
3268 /* Perform the CMP. */
3269 op1 = *((uint64_t *) data1);
3270 op2 = *((uint64_t *) data2);
3271 exec_sub(op1, op2, &fl, size);
3272
3273 gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_SUB_MASK;
3274 gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_SUB_MASK);
3275
3276 if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) {
3277 gprs[NVMM_X64_GPR_RSI] -= size;
3278 gprs[NVMM_X64_GPR_RDI] -= size;
3279 } else {
3280 gprs[NVMM_X64_GPR_RSI] += size;
3281 gprs[NVMM_X64_GPR_RDI] += size;
3282 }
3283
3284 return 0;
3285 }
3286
3287 static int
3288 assist_mem_single(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
3289 struct x86_instr *instr)
3290 {
3291 struct nvmm_x64_state *state = vcpu->state;
3292 struct nvmm_vcpu_exit *exit = vcpu->exit;
3293 struct nvmm_mem mem;
3294 uint8_t membuf[8];
3295 uint64_t val;
3296
3297 memset(membuf, 0, sizeof(membuf));
3298
3299 mem.mach = mach;
3300 mem.vcpu = vcpu;
3301 mem.gpa = exit->u.mem.gpa;
3302 mem.size = instr->operand_size;
3303 mem.data = membuf;
3304
3305 /* Determine the direction. */
3306 switch (instr->src.type) {
3307 case STORE_REG:
3308 if (instr->src.disp.type != DISP_NONE) {
3309 /* Indirect access. */
3310 mem.write = false;
3311 } else {
3312 /* Direct access. */
3313 mem.write = true;
3314 }
3315 break;
3316 case STORE_DUALREG:
3317 if (instr->src.disp.type == DISP_NONE) {
3318 DISASSEMBLER_BUG();
3319 }
3320 mem.write = false;
3321 break;
3322 case STORE_IMM:
3323 mem.write = true;
3324 break;
3325 case STORE_SIB:
3326 mem.write = false;
3327 break;
3328 case STORE_DMO:
3329 mem.write = false;
3330 break;
3331 default:
3332 DISASSEMBLER_BUG();
3333 }
3334
3335 if (mem.write) {
3336 switch (instr->src.type) {
3337 case STORE_REG:
3338 /* The instruction was "reg -> mem". Fetch the register
3339 * in membuf. */
3340 if (__predict_false(instr->src.disp.type != DISP_NONE)) {
3341 DISASSEMBLER_BUG();
3342 }
3343 val = state->gprs[instr->src.u.reg->num];
3344 val = __SHIFTOUT(val, instr->src.u.reg->mask);
3345 memcpy(mem.data, &val, mem.size);
3346 break;
3347 case STORE_IMM:
3348 /* The instruction was "imm -> mem". Fetch the immediate
3349 * in membuf. */
3350 memcpy(mem.data, &instr->src.u.imm.data, mem.size);
3351 break;
3352 default:
3353 DISASSEMBLER_BUG();
3354 }
3355 } else if (instr->emul->readreg) {
3356 /* The instruction was "mem -> reg", but the value of the
3357 * register matters for the emul func. Fetch it in membuf. */
3358 if (__predict_false(instr->dst.type != STORE_REG)) {
3359 DISASSEMBLER_BUG();
3360 }
3361 if (__predict_false(instr->dst.disp.type != DISP_NONE)) {
3362 DISASSEMBLER_BUG();
3363 }
3364 val = state->gprs[instr->dst.u.reg->num];
3365 val = __SHIFTOUT(val, instr->dst.u.reg->mask);
3366 memcpy(mem.data, &val, mem.size);
3367 }
3368
3369 (*instr->emul->func)(vcpu, &mem, state->gprs);
3370
3371 if (instr->emul->notouch) {
3372 /* We're done. */
3373 return 0;
3374 }
3375
3376 if (!mem.write) {
3377 /* The instruction was "mem -> reg". The emul func has filled
3378 * membuf with the memory content. Install membuf in the
3379 * register. */
3380 if (__predict_false(instr->dst.type != STORE_REG)) {
3381 DISASSEMBLER_BUG();
3382 }
3383 if (__predict_false(instr->dst.disp.type != DISP_NONE)) {
3384 DISASSEMBLER_BUG();
3385 }
3386 memcpy(&val, membuf, sizeof(uint64_t));
3387 val = __SHIFTIN(val, instr->dst.u.reg->mask);
3388 state->gprs[instr->dst.u.reg->num] &= ~instr->dst.u.reg->mask;
3389 state->gprs[instr->dst.u.reg->num] |= val;
3390 state->gprs[instr->dst.u.reg->num] &= ~instr->zeroextend_mask;
3391 } else if (instr->emul->backprop) {
3392 /* The instruction was "reg -> mem", but the memory must be
3393 * back-propagated to the register. Install membuf in the
3394 * register. */
3395 if (__predict_false(instr->src.type != STORE_REG)) {
3396 DISASSEMBLER_BUG();
3397 }
3398 if (__predict_false(instr->src.disp.type != DISP_NONE)) {
3399 DISASSEMBLER_BUG();
3400 }
3401 memcpy(&val, membuf, sizeof(uint64_t));
3402 val = __SHIFTIN(val, instr->src.u.reg->mask);
3403 state->gprs[instr->src.u.reg->num] &= ~instr->src.u.reg->mask;
3404 state->gprs[instr->src.u.reg->num] |= val;
3405 state->gprs[instr->src.u.reg->num] &= ~instr->zeroextend_mask;
3406 }
3407
3408 return 0;
3409 }
3410
3411 int
3412 nvmm_assist_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
3413 {
3414 struct nvmm_x64_state *state = vcpu->state;
3415 struct nvmm_vcpu_exit *exit = vcpu->exit;
3416 struct x86_instr instr;
3417 uint64_t cnt = 0; /* appease GCC -Wmaybe-uninitialized */
3418 int ret;
3419
3420 if (__predict_false(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
3421 errno = EINVAL;
3422 return -1;
3423 }
3424
3425 ret = nvmm_vcpu_getstate(mach, vcpu,
3426 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
3427 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
3428 if (ret == -1)
3429 return -1;
3430
3431 if (exit->u.mem.inst_len == 0) {
3432 /*
3433 * The instruction was not fetched from the kernel. Fetch
3434 * it ourselves.
3435 */
3436 ret = fetch_instruction(mach, vcpu, exit);
3437 if (ret == -1)
3438 return -1;
3439 }
3440
3441 ret = x86_decode(exit->u.mem.inst_bytes, exit->u.mem.inst_len,
3442 &instr, state);
3443 if (ret == -1) {
3444 errno = ENODEV;
3445 return -1;
3446 }
3447
3448 if (instr.legpref.rep || instr.legpref.repn) {
3449 cnt = rep_get_cnt(state, instr.address_size);
3450 if (__predict_false(cnt == 0)) {
3451 state->gprs[NVMM_X64_GPR_RIP] += instr.len;
3452 goto out;
3453 }
3454 }
3455
3456 if (instr.opcode->movs) {
3457 ret = assist_mem_movs(mach, vcpu, &instr);
3458 } else if (instr.opcode->cmps) {
3459 instr.legpref.repe = !instr.legpref.repn;
3460 ret = assist_mem_cmps(mach, vcpu, &instr);
3461 } else {
3462 ret = assist_mem_single(mach, vcpu, &instr);
3463 }
3464 if (ret == -1) {
3465 errno = ENODEV;
3466 return -1;
3467 }
3468
3469 if (instr.legpref.rep || instr.legpref.repn) {
3470 cnt -= 1;
3471 rep_set_cnt(state, instr.address_size, cnt);
3472 if (cnt == 0) {
3473 state->gprs[NVMM_X64_GPR_RIP] += instr.len;
3474 } else if (__predict_false(instr.legpref.repn)) {
3475 /* repn */
3476 if (state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_Z) {
3477 state->gprs[NVMM_X64_GPR_RIP] += instr.len;
3478 }
3479 } else if (__predict_false(instr.legpref.repe)) {
3480 /* repe */
3481 if ((state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_Z) == 0) {
3482 state->gprs[NVMM_X64_GPR_RIP] += instr.len;
3483 }
3484 }
3485 } else {
3486 state->gprs[NVMM_X64_GPR_RIP] += instr.len;
3487 }
3488
3489 out:
3490 ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
3491 if (ret == -1)
3492 return -1;
3493
3494 return 0;
3495 }
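
/*
 * Typical usage, as a minimal sketch of a VMM run loop (hypothetical
 * code; only the nvmm_* names are real API, and the vcpu->cbs.mem
 * callback must have been registered beforehand so the emul functions
 * above can reach the device model):
 *
 *	for (;;) {
 *		if (nvmm_vcpu_run(mach, vcpu) == -1)
 *			err(EXIT_FAILURE, "nvmm_vcpu_run");
 *		switch (vcpu->exit->reason) {
 *		case NVMM_VCPU_EXIT_NONE:
 *			break;
 *		case NVMM_VCPU_EXIT_MEMORY:
 *			if (nvmm_assist_mem(mach, vcpu) == -1)
 *				err(EXIT_FAILURE, "nvmm_assist_mem");
 *			break;
 *		default:
 *			... other exit reasons elided ...
 *			break;
 *		}
 *	}
 */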
3496