/* xref: /qemu/target/i386/hvf/x86.h (revision ab9056ff) */
/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Veertu Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HVF_X86_H
#define HVF_X86_H

typedef struct x86_register {
    union {
        struct {
            uint64_t rrx;               /* full 64 bit */
        };
        struct {
            uint32_t erx;               /* low 32 bit part */
            uint32_t hi32_unused1;
        };
        struct {
            uint16_t rx;                /* low 16 bit part */
            uint16_t hi16_unused1;
            uint32_t hi32_unused2;
        };
        struct {
            uint8_t lx;                 /* low 8 bit part */
            uint8_t hx;                 /* high 8 bit */
            uint16_t hi16_unused2;
            uint32_t hi32_unused3;
        };
    };
} __attribute__ ((__packed__)) x86_register;
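
/*
 * Usage sketch (illustrative only, with <assert.h>, little-endian x86 host
 * assumed as the sub-register comments above imply): writing the full
 * 64-bit register makes the aliased 32/16/8-bit views visible without
 * shifts or masks.
 *
 *     x86_register r = { .rrx = 0x1122334455667788ULL };
 *     assert(r.erx == 0x55667788U);
 *     assert(r.rx == 0x7788);
 *     assert(r.lx == 0x88);
 *     assert(r.hx == 0x77);
 */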

typedef enum x86_rflags {
    RFLAGS_CF       = (1L << 0),
    RFLAGS_PF       = (1L << 2),
    RFLAGS_AF       = (1L << 4),
    RFLAGS_ZF       = (1L << 6),
    RFLAGS_SF       = (1L << 7),
    RFLAGS_TF       = (1L << 8),
    RFLAGS_IF       = (1L << 9),
    RFLAGS_DF       = (1L << 10),
    RFLAGS_OF       = (1L << 11),
    RFLAGS_IOPL     = (3L << 12),
    RFLAGS_NT       = (1L << 14),
    RFLAGS_RF       = (1L << 16),
    RFLAGS_VM       = (1L << 17),
    RFLAGS_AC       = (1L << 18),
    RFLAGS_VIF      = (1L << 19),
    RFLAGS_VIP      = (1L << 20),
    RFLAGS_ID       = (1L << 21),
} x86_rflags;
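
/*
 * These are plain masks for direct bit tests on a raw RFLAGS value; a small
 * sketch (variable names are illustrative):
 *
 *     uint64_t flags = RFLAGS_IF | RFLAGS_ZF | (3L << 12);
 *     int if_set = (flags & RFLAGS_IF) != 0;
 *     int zf_set = (flags & RFLAGS_ZF) != 0;
 *     int iopl = (flags & RFLAGS_IOPL) >> 12;
 *
 * Here iopl evaluates to 3 and both predicate variables to 1.
 */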

/* rflags register */
typedef struct x86_reg_flags {
    union {
        struct {
            uint64_t rflags;
        };
        struct {
            uint32_t eflags;
            uint32_t hi32_unused1;
        };
        struct {
            uint32_t cf:1;
            uint32_t unused1:1;
            uint32_t pf:1;
            uint32_t unused2:1;
            uint32_t af:1;
            uint32_t unused3:1;
            uint32_t zf:1;
            uint32_t sf:1;
            uint32_t tf:1;
            uint32_t ief:1;
            uint32_t df:1;
            uint32_t of:1;
            uint32_t iopl:2;
            uint32_t nt:1;
            uint32_t unused4:1;
            uint32_t rf:1;
            uint32_t vm:1;
            uint32_t ac:1;
            uint32_t vif:1;
            uint32_t vip:1;
            uint32_t id:1;
            uint32_t unused5:10;
            uint32_t hi32_unused2;
        };
    };
} __attribute__ ((__packed__)) x86_reg_flags;
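
/*
 * Sketch: the bit-field view and the x86_rflags masks name the same bits,
 * so either form can be used to inspect a flags image (with <assert.h>,
 * little-endian x86 host assumed; note the interrupt flag is spelled "ief"
 * here):
 *
 *     x86_reg_flags f = { .rflags = RFLAGS_IF | RFLAGS_ZF };
 *     assert(f.ief == 1);
 *     assert(f.zf == 1);
 *     assert(f.cf == 0);
 *     assert(f.eflags == (uint32_t)f.rflags);
 */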

typedef enum x86_reg_cr0 {
    CR0_PE =            (1L << 0),
    CR0_MP =            (1L << 1),
    CR0_EM =            (1L << 2),
    CR0_TS =            (1L << 3),
    CR0_ET =            (1L << 4),
    CR0_NE =            (1L << 5),
    CR0_WP =            (1L << 16),
    CR0_AM =            (1L << 18),
    CR0_NW =            (1L << 29),
    CR0_CD =            (1L << 30),
    CR0_PG =            (1L << 31),
} x86_reg_cr0;

typedef enum x86_reg_cr4 {
    CR4_VME =            (1L << 0),
    CR4_PVI =            (1L << 1),
    CR4_TSD =            (1L << 2),
    CR4_DE  =            (1L << 3),
    CR4_PSE =            (1L << 4),
    CR4_PAE =            (1L << 5),
    CR4_MSE =            (1L << 6),
    CR4_PGE =            (1L << 7),
    CR4_PCE =            (1L << 8),
    CR4_OSFXSR =         (1L << 9),
    CR4_OSXMMEXCPT =     (1L << 10),
    CR4_VMXE =           (1L << 13),
    CR4_SMXE =           (1L << 14),
    CR4_FSGSBASE =       (1L << 16),
    CR4_PCIDE =          (1L << 17),
    CR4_OSXSAVE =        (1L << 18),
    CR4_SMEP =           (1L << 20),
} x86_reg_cr4;
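
/*
 * Minimal sketch of what these control-register bits encode, using
 * hypothetical helper names (the x86_is_* predicates declared further down
 * operate on a CPUState rather than on raw register values):
 *
 *     static inline bool cr0_is_protected(uint64_t cr0) { return cr0 & CR0_PE; }
 *     static inline bool cr0_is_paging(uint64_t cr0) { return cr0 & CR0_PG; }
 *     static inline bool cr4_is_pae(uint64_t cr4) { return cr4 & CR4_PAE; }
 */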

/* 16 bit Task State Segment */
typedef struct x86_tss_segment16 {
    uint16_t link;
    uint16_t sp0;
    uint16_t ss0;
    uint16_t sp1;
    uint16_t ss1;
    uint16_t sp2;
    uint16_t ss2;
    uint16_t ip;
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldtr;
} __attribute__((packed)) x86_tss_segment16;

/* 32 bit Task State Segment */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;
    uint32_t esp0;
    uint32_t ss0;
    uint32_t esp1;
    uint32_t ss1;
    uint32_t esp2;
    uint32_t ss2;
    uint32_t cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;
    uint16_t trap;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment32;

/* 64 bit Task State Segment */
typedef struct x86_tss_segment64 {
    uint32_t unused;
    uint64_t rsp0;
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t unused1;
    uint64_t ist1;
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;
    uint16_t unused3;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment64;
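
/*
 * Layout sanity check (sketch): with the packed attribute, both the 32-bit
 * and the 64-bit TSS images come out at the architectural 104 bytes:
 *
 *     _Static_assert(sizeof(x86_tss_segment32) == 104, "32-bit TSS size");
 *     _Static_assert(sizeof(x86_tss_segment64) == 104, "64-bit TSS size");
 */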

/* segment descriptors */
typedef struct x86_segment_descriptor {
    uint64_t    limit0:16;
    uint64_t    base0:16;
    uint64_t    base1:8;
    uint64_t    type:4;
    uint64_t    s:1;
    uint64_t    dpl:2;
    uint64_t    p:1;
    uint64_t    limit1:4;
    uint64_t    avl:1;
    uint64_t    l:1;
    uint64_t    db:1;
    uint64_t    g:1;
    uint64_t    base2:8;
} __attribute__ ((__packed__)) x86_segment_descriptor;

static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
{
    return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
}

static inline void x86_set_segment_base(x86_segment_descriptor *desc,
                                        uint32_t base)
{
    desc->base2 = base >> 24;
    desc->base1 = (base >> 16) & 0xff;
    desc->base0 = base & 0xffff;
}

static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
{
    uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
    if (desc->g) {
        return (limit << 12) | 0xfff;
    }
    return limit;
}

static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
                                         uint32_t limit)
{
    desc->limit0 = limit & 0xffff;
    desc->limit1 = limit >> 16;
}
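
/*
 * Sketch: composing and reading back a conventional flat 4 GiB code
 * descriptor with the helpers above (field values are illustrative):
 *
 *     x86_segment_descriptor d = {
 *         .type = 0xb, .s = 1, .dpl = 0, .p = 1, .db = 1, .g = 1
 *     };
 *     x86_set_segment_base(&d, 0);
 *     x86_set_segment_limit(&d, 0xfffff);
 *     assert(x86_segment_base(&d) == 0);
 *     assert(x86_segment_limit(&d) == 0xffffffff);
 */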

typedef struct x86_call_gate {
    uint64_t offset0:16;
    uint64_t selector:16;
    uint64_t param_count:5;
    uint64_t reserved:3;
    uint64_t type:4;
    uint64_t s:1;
    uint64_t dpl:2;
    uint64_t p:1;
    uint64_t offset1:16;
} __attribute__ ((__packed__)) x86_call_gate;

static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
{
    return (uint32_t)((gate->offset1 << 16) | gate->offset0);
}
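
/*
 * Sketch: the 32-bit entry point encoded in a gate is split across
 * offset0/offset1 and reassembled by x86_call_gate_offset() (values are
 * illustrative):
 *
 *     x86_call_gate gate = { .selector = 0x08, .offset0 = 0x1000,
 *                            .offset1 = 0x0040 };
 *     assert(x86_call_gate_offset(&gate) == 0x00401000);
 */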

#define LDT_SEL     0
#define GDT_SEL     1

typedef struct x68_segment_selector {
    union {
        uint16_t sel;
        struct {
            uint16_t rpl:2;
            uint16_t ti:1;
            uint16_t index:13;
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;
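
/*
 * Sketch: cracking a raw selector (here the common value 0x2b) into its
 * descriptor-table index, table indicator and requested privilege level:
 *
 *     x68_segment_selector sel = { .sel = 0x2b };
 *     assert(sel.index == 5);
 *     assert(sel.ti == 0);
 *     assert(sel.rpl == 3);
 */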

typedef struct lazy_flags {
    target_ulong result;
    target_ulong auxbits;
} lazy_flags;

/* Definition of the HVF emulator state, accessed as cpu->hvf_emul below */
struct HVFX86EmulatorState {
    int interruptable;
    uint64_t fetch_rip;
    uint64_t rip;
    struct x86_register regs[16];
    struct x86_reg_flags   rflags;
    struct lazy_flags   lflags;
    uint8_t mmio_buf[4096];
};

/* useful register access macros */
#define RIP(cpu)    (cpu->hvf_emul->rip)
#define EIP(cpu)    ((uint32_t)cpu->hvf_emul->rip)
#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)
#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)

#define RRX(cpu, reg) (cpu->hvf_emul->regs[reg].rrx)
#define RAX(cpu)        RRX(cpu, R_EAX)
#define RCX(cpu)        RRX(cpu, R_ECX)
#define RDX(cpu)        RRX(cpu, R_EDX)
#define RBX(cpu)        RRX(cpu, R_EBX)
#define RSP(cpu)        RRX(cpu, R_ESP)
#define RBP(cpu)        RRX(cpu, R_EBP)
#define RSI(cpu)        RRX(cpu, R_ESI)
#define RDI(cpu)        RRX(cpu, R_EDI)
#define R8(cpu)         RRX(cpu, R_R8)
#define R9(cpu)         RRX(cpu, R_R9)
#define R10(cpu)        RRX(cpu, R_R10)
#define R11(cpu)        RRX(cpu, R_R11)
#define R12(cpu)        RRX(cpu, R_R12)
#define R13(cpu)        RRX(cpu, R_R13)
#define R14(cpu)        RRX(cpu, R_R14)
#define R15(cpu)        RRX(cpu, R_R15)

#define ERX(cpu, reg)   (cpu->hvf_emul->regs[reg].erx)
#define EAX(cpu)        ERX(cpu, R_EAX)
#define ECX(cpu)        ERX(cpu, R_ECX)
#define EDX(cpu)        ERX(cpu, R_EDX)
#define EBX(cpu)        ERX(cpu, R_EBX)
#define ESP(cpu)        ERX(cpu, R_ESP)
#define EBP(cpu)        ERX(cpu, R_EBP)
#define ESI(cpu)        ERX(cpu, R_ESI)
#define EDI(cpu)        ERX(cpu, R_EDI)

#define RX(cpu, reg)   (cpu->hvf_emul->regs[reg].rx)
#define AX(cpu)        RX(cpu, R_EAX)
#define CX(cpu)        RX(cpu, R_ECX)
#define DX(cpu)        RX(cpu, R_EDX)
#define BP(cpu)        RX(cpu, R_EBP)
#define SP(cpu)        RX(cpu, R_ESP)
#define BX(cpu)        RX(cpu, R_EBX)
#define SI(cpu)        RX(cpu, R_ESI)
#define DI(cpu)        RX(cpu, R_EDI)

#define RL(cpu, reg)   (cpu->hvf_emul->regs[reg].lx)
#define AL(cpu)        RL(cpu, R_EAX)
#define CL(cpu)        RL(cpu, R_ECX)
#define DL(cpu)        RL(cpu, R_EDX)
#define BL(cpu)        RL(cpu, R_EBX)

#define RH(cpu, reg)   (cpu->hvf_emul->regs[reg].hx)
#define AH(cpu)        RH(cpu, R_EAX)
#define CH(cpu)        RH(cpu, R_ECX)
#define DH(cpu)        RH(cpu, R_EDX)
#define BH(cpu)        RH(cpu, R_EBX)
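
/*
 * Sketch of how the accessors compose, for a hypothetical CPUState *cpu
 * whose hvf_emul pointer has been set up:
 *
 *     RAX(cpu) = 0x1122334455667788ULL;
 *     uint32_t lo32 = EAX(cpu);
 *     uint8_t low8 = AL(cpu);
 *     AH(cpu) = 0xab;
 *
 * Afterwards lo32 is 0x55667788, low8 is 0x88, and the AH(cpu) store only
 * changed bits 8..15 of RAX(cpu).
 */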

/* deal with GDT/LDT descriptors in memory */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);

bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);

/* helpers */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);

enum X86Seg;
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              enum X86Seg seg);
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
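
/*
 * The implementations live in x86.c; as a rough mental model (not the
 * actual code), linear_addr() adds the guest segment base to the effective
 * address, and linear_addr_size() first truncates that address to the
 * given address size, e.g.:
 *
 *     target_ulong la_sketch(target_ulong seg_base, target_ulong addr, int size)
 *     {
 *         if (size == 2) {
 *             addr = (uint16_t)addr;
 *         } else if (size == 4) {
 *             addr = (uint32_t)addr;
 *         }
 *         return seg_base + addr;
 *     }
 *
 * la_sketch() and its seg_base argument are purely illustrative.
 */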

static inline uint64_t rdtscp(void)
{
    uint64_t tsc;
    __asm__ __volatile__("rdtscp; "         /* serializing read of tsc */
                         "shl $32,%%rdx; "  /* shift higher 32 bits stored in rdx up */
                         "or %%rdx,%%rax"   /* and or onto rax */
                         : "=a"(tsc)        /* output to tsc variable */
                         :
                         : "%rcx", "%rdx"); /* rcx and rdx are clobbered */

    return tsc;
}
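
/*
 * Sketch: measuring an interval in TSC ticks with the helper above
 * (do_work() stands in for whatever is being timed):
 *
 *     uint64_t t0 = rdtscp();
 *     do_work();
 *     uint64_t cycles = rdtscp() - t0;
 */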

#endif /* HVF_X86_H */