/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Veertu Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HVF_X86_H
#define HVF_X86_H

typedef struct x86_register {
    union {
        struct {
            uint64_t rrx;               /* full 64-bit register */
        };
        struct {
            uint32_t erx;               /* low 32 bits */
            uint32_t hi32_unused1;
        };
        struct {
            uint16_t rx;                /* low 16 bits */
            uint16_t hi16_unused1;
            uint32_t hi32_unused2;
        };
        struct {
            uint8_t lx;                 /* low 8 bits */
            uint8_t hx;                 /* high 8 bits of the low word */
            uint16_t hi16_unused2;
            uint32_t hi32_unused3;
        };
    };
} __attribute__ ((__packed__)) x86_register;
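
/*
 * The overlapping structs give byte/word/dword/qword views of one guest GPR
 * without shifts or masks. A minimal usage sketch (illustrative only; 'reg'
 * is a local, not part of this API), assuming a little-endian host as on x86:
 *
 *     x86_register reg = { .rrx = 0x1122334455667788ULL };
 *     // reg.erx == 0x55667788, reg.rx == 0x7788,
 *     // reg.lx == 0x88, reg.hx == 0x77
 *
 * Sanity check (assuming a C11 compiler; in-tree QEMU code would use
 * QEMU_BUILD_BUG_ON for the same purpose):
 */
_Static_assert(sizeof(x86_register) == sizeof(uint64_t),
               "register view union must overlay exactly one 64-bit GPR");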

typedef enum x86_reg_cr0 {
    CR0_PE =            (1L << 0),
    CR0_MP =            (1L << 1),
    CR0_EM =            (1L << 2),
    CR0_TS =            (1L << 3),
    CR0_ET =            (1L << 4),
    CR0_NE =            (1L << 5),
    CR0_WP =            (1L << 16),
    CR0_AM =            (1L << 18),
    CR0_NW =            (1L << 29),
    CR0_CD =            (1L << 30),
    CR0_PG =            (1L << 31),
} x86_reg_cr0;
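
/*
 * A minimal sketch (illustrative; these helpers are hypothetical and not
 * used elsewhere) of the bit tests that mode predicates such as
 * x86_is_protected()/x86_is_paging_mode() declared below presumably
 * boil down to:
 */
static inline bool cr0_protected_mode(uint64_t cr0)
{
    return (cr0 & CR0_PE) != 0;    /* CR0.PE: protection enable */
}

static inline bool cr0_paging_enabled(uint64_t cr0)
{
    return (cr0 & CR0_PG) != 0;    /* CR0.PG: paging enable */
}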

typedef enum x86_reg_cr4 {
    CR4_VME =            (1L << 0),
    CR4_PVI =            (1L << 1),
    CR4_TSD =            (1L << 2),
    CR4_DE  =            (1L << 3),
    CR4_PSE =            (1L << 4),
    CR4_PAE =            (1L << 5),
    CR4_MCE =            (1L << 6),
    CR4_PGE =            (1L << 7),
    CR4_PCE =            (1L << 8),
    CR4_OSFXSR =         (1L << 9),
    CR4_OSXMMEXCPT =     (1L << 10),
    CR4_VMXE =           (1L << 13),
    CR4_SMXE =           (1L << 14),
    CR4_FSGSBASE =       (1L << 16),
    CR4_PCIDE =          (1L << 17),
    CR4_OSXSAVE =        (1L << 18),
    CR4_SMEP =           (1L << 20),
} x86_reg_cr4;
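
/*
 * Likewise for CR4, feature bits combine with plain bit operations; a
 * hypothetical, illustrative helper:
 */
static inline uint64_t cr4_enable_pae(uint64_t cr4)
{
    /* PAE paging is a prerequisite for long mode */
    return cr4 | CR4_PAE;
}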

/* 16-bit Task State Segment */
typedef struct x86_tss_segment16 {
    uint16_t link;
    uint16_t sp0;
    uint16_t ss0;
    uint16_t sp1;
    uint16_t ss1;
    uint16_t sp2;
    uint16_t ss2;
    uint16_t ip;
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldtr;
} __attribute__((packed)) x86_tss_segment16;
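
/* Illustrative size check (assuming C11): the hardware 16-bit TSS is
 * exactly 44 bytes, which the packed attribute must preserve. */
_Static_assert(sizeof(x86_tss_segment16) == 44,
               "16-bit TSS must match the 44-byte hardware layout");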

/* 32-bit Task State Segment */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;
    uint32_t esp0;
    uint32_t ss0;
    uint32_t esp1;
    uint32_t ss1;
    uint32_t esp2;
    uint32_t ss2;
    uint32_t cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;
    uint16_t trap;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment32;
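
/* Illustrative size check: the hardware 32-bit TSS is 104 bytes, ending
 * with the 16-bit I/O map base. */
_Static_assert(sizeof(x86_tss_segment32) == 104,
               "32-bit TSS must match the 104-byte hardware layout");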

/* 64-bit Task State Segment */
typedef struct x86_tss_segment64 {
    uint32_t unused;
    uint64_t rsp0;
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t unused1;
    uint64_t ist1;
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;
    uint16_t unused3;
    uint16_t iomap_base;
} __attribute__ ((__packed__)) x86_tss_segment64;
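
/* Illustrative size check: the 64-bit TSS is also 104 bytes; packing
 * matters here, since the leading uint32_t would otherwise be padded
 * before rsp0. */
_Static_assert(sizeof(x86_tss_segment64) == 104,
               "64-bit TSS must match the 104-byte hardware layout");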

/* segment descriptors */
typedef struct x86_segment_descriptor {
    uint64_t    limit0:16;
    uint64_t    base0:16;
    uint64_t    base1:8;
    uint64_t    type:4;
    uint64_t    s:1;
    uint64_t    dpl:2;
    uint64_t    p:1;
    uint64_t    limit1:4;
    uint64_t    avl:1;
    uint64_t    l:1;
    uint64_t    db:1;
    uint64_t    g:1;
    uint64_t    base2:8;
} __attribute__ ((__packed__)) x86_segment_descriptor;
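
/* Illustrative check: a GDT/LDT descriptor occupies exactly one 8-byte
 * slot, so the thirteen bitfields above must add up to 64 bits. */
_Static_assert(sizeof(x86_segment_descriptor) == 8,
               "segment descriptor must be exactly 8 bytes");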

static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
{
    return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
}

static inline void x86_set_segment_base(x86_segment_descriptor *desc,
                                        uint32_t base)
{
    desc->base2 = base >> 24;
    desc->base1 = (base >> 16) & 0xff;
    desc->base0 = base & 0xffff;
}

static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
{
    uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
    if (desc->g) {
        return (limit << 12) | 0xfff;
    }
    return limit;
}

static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
                                         uint32_t limit)
{
    desc->limit0 = limit & 0xffff;
    desc->limit1 = limit >> 16;
}
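
/*
 * A sketch of how the accessors above compose (illustrative only;
 * make_flat_code_desc() is hypothetical, not part of this header): build a
 * flat 4 GiB, 32-bit code segment descriptor.
 */
static inline x86_segment_descriptor make_flat_code_desc(void)
{
    x86_segment_descriptor desc = {
        .type = 0xb,    /* code: execute/read, accessed */
        .s = 1,         /* code/data, not system */
        .p = 1,         /* present */
        .db = 1,        /* 32-bit default operand size */
        .g = 1,         /* 4 KiB granularity */
    };
    x86_set_segment_base(&desc, 0);
    x86_set_segment_limit(&desc, 0xfffff); /* g=1 scales this to 4 GiB - 1 */
    return desc;
}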

typedef struct x86_call_gate {
    uint64_t offset0:16;
    uint64_t selector:16;
    uint64_t param_count:5;
    uint64_t reserved:3;
    uint64_t type:4;
    uint64_t s:1;
    uint64_t dpl:2;
    uint64_t p:1;
    uint64_t offset1:16;
} __attribute__ ((__packed__)) x86_call_gate;

static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
{
    return (uint32_t)((gate->offset1 << 16) | gate->offset0);
}
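
/* Illustrative checks: a call gate is one 8-byte descriptor, and
 * x86_call_gate_offset() reassembles the split entry point; e.g. a gate
 * with offset1 == 0x0040 and offset0 == 0x1000 yields 0x00401000. */
_Static_assert(sizeof(x86_call_gate) == 8,
               "call gate must be exactly 8 bytes");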

#define LDT_SEL     0
#define GDT_SEL     1

typedef struct x86_segment_selector {
    union {
        uint16_t sel;
        struct {
            uint16_t rpl:2;
            uint16_t ti:1;
            uint16_t index:13;
        };
    };
} __attribute__ ((__packed__)) x86_segment_selector;
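
/*
 * Decomposition sketch (illustrative): selector 0x33 (the usual 64-bit
 * user code selector on Linux) splits into rpl == 3 (user), ti == 0 (GDT)
 * and index == 6:
 *
 *     x86_segment_selector sel = { .sel = 0x33 };
 *     // sel.rpl == 3, sel.ti == 0, sel.index == 6
 */
_Static_assert(sizeof(x86_segment_selector) == sizeof(uint16_t),
               "selector union must stay exactly 16 bits wide");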

/* useful register access macros */
#define x86_reg(cpu, reg) ((x86_register *) &(cpu)->regs[reg])

#define RRX(cpu, reg)   (x86_reg(cpu, reg)->rrx)
#define RAX(cpu)        RRX(cpu, R_EAX)
#define RCX(cpu)        RRX(cpu, R_ECX)
#define RDX(cpu)        RRX(cpu, R_EDX)
#define RBX(cpu)        RRX(cpu, R_EBX)
#define RSP(cpu)        RRX(cpu, R_ESP)
#define RBP(cpu)        RRX(cpu, R_EBP)
#define RSI(cpu)        RRX(cpu, R_ESI)
#define RDI(cpu)        RRX(cpu, R_EDI)
#define R8(cpu)         RRX(cpu, R_R8)
#define R9(cpu)         RRX(cpu, R_R9)
#define R10(cpu)        RRX(cpu, R_R10)
#define R11(cpu)        RRX(cpu, R_R11)
#define R12(cpu)        RRX(cpu, R_R12)
#define R13(cpu)        RRX(cpu, R_R13)
#define R14(cpu)        RRX(cpu, R_R14)
#define R15(cpu)        RRX(cpu, R_R15)

#define ERX(cpu, reg)   (x86_reg(cpu, reg)->erx)
#define EAX(cpu)        ERX(cpu, R_EAX)
#define ECX(cpu)        ERX(cpu, R_ECX)
#define EDX(cpu)        ERX(cpu, R_EDX)
#define EBX(cpu)        ERX(cpu, R_EBX)
#define ESP(cpu)        ERX(cpu, R_ESP)
#define EBP(cpu)        ERX(cpu, R_EBP)
#define ESI(cpu)        ERX(cpu, R_ESI)
#define EDI(cpu)        ERX(cpu, R_EDI)

#define RX(cpu, reg)   (x86_reg(cpu, reg)->rx)
#define AX(cpu)        RX(cpu, R_EAX)
#define CX(cpu)        RX(cpu, R_ECX)
#define DX(cpu)        RX(cpu, R_EDX)
#define BP(cpu)        RX(cpu, R_EBP)
#define SP(cpu)        RX(cpu, R_ESP)
#define BX(cpu)        RX(cpu, R_EBX)
#define SI(cpu)        RX(cpu, R_ESI)
#define DI(cpu)        RX(cpu, R_EDI)

#define RL(cpu, reg)   (x86_reg(cpu, reg)->lx)
#define AL(cpu)        RL(cpu, R_EAX)
#define CL(cpu)        RL(cpu, R_ECX)
#define DL(cpu)        RL(cpu, R_EDX)
#define BL(cpu)        RL(cpu, R_EBX)

#define RH(cpu, reg)   (x86_reg(cpu, reg)->hx)
#define AH(cpu)        RH(cpu, R_EAX)
#define CH(cpu)        RH(cpu, R_ECX)
#define DH(cpu)        RH(cpu, R_EDX)
#define BH(cpu)        RH(cpu, R_EBX)
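
/*
 * Usage sketch for the aliases above (illustrative; 'cpu' stands for the
 * CPUState pointer that callers of these macros already have):
 *
 *     RAX(cpu) = 0x11223344aabbccddULL;  // full 64-bit write
 *     uint32_t lo = EAX(cpu);            // 0xaabbccdd, low 32 bits
 *     uint8_t  ah = AH(cpu);             // 0xcc, bits 8..15
 *     SI(cpu) += 2;                      // 16-bit update, e.g. string ops
 */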

/* deal with GDT/LDT descriptors in memory */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x86_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x86_segment_selector sel);

bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);

/* helpers */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);

enum X86Seg;
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              enum X86Seg seg);
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
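
/*
 * Usage sketch (illustrative): translating a segment-relative guest address
 * to a linear address, e.g. for a word-sized string-instruction operand.
 * R_DS comes from the X86Seg enum in target/i386/cpu.h; 'guest_rip' is a
 * hypothetical local:
 *
 *     target_ulong src = linear_addr_size(cpu, SI(cpu), 2, R_DS);
 *     target_ulong la  = linear_rip(cpu, guest_rip);
 */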

static inline uint64_t rdtscp(void)
{
    uint64_t tsc;
    __asm__ __volatile__("rdtscp; "         /* read tsc; waits for prior insns to finish */
                         "shl $32,%%rdx; "  /* shift the high 32 bits (in rdx) up */
                         "or %%rdx,%%rax"   /* and OR them into rax */
                         : "=a"(tsc)        /* output to tsc variable */
                         :
                         : "%rcx", "%rdx"); /* rcx and rdx are clobbered */

    return tsc;
}
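
/*
 * Measurement sketch (illustrative): bracketing a region with two rdtscp()
 * reads estimates its cost in cycles; rdtscp waits until all preceding
 * instructions have executed, so each read is ordered after the work
 * before it.
 *
 *     uint64_t t0 = rdtscp();
 *     do_work();                          // hypothetical workload
 *     uint64_t cycles = rdtscp() - t0;
 */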

#endif