/* xref: /qemu/target/i386/hvf/x86.h (revision b2a3cbb8) */
1 /*
2  * Copyright (C) 2016 Veertu Inc,
3  * Copyright (C) 2017 Veertu Inc,
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Lesser General Public
7  * License as published by the Free Software Foundation; either
8  * version 2.1 of the License, or (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Lesser General Public License for more details.
14  *
15  * You should have received a copy of the GNU Lesser General Public
16  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #ifndef HVF_X86_H
20 #define HVF_X86_H
21 
/*
 * One general purpose register, accessible at every architectural
 * width through an anonymous union: rrx (64 bit), erx (low 32 bit),
 * rx (low 16 bit) and lx/hx (low/high byte of the low word, AL/AH
 * style).  The overlay assumes a little-endian host, which holds
 * wherever HVF runs (x86 Macs only).  Note that, unlike a real CPU in
 * 64-bit mode, storing through erx does not zero the high 32 bits.
 */
typedef struct x86_register {
    union {
        struct {
            uint64_t rrx;               /* full 64 bit */
        };
        struct {
            uint32_t erx;               /* low 32 bit part */
            uint32_t hi32_unused1;
        };
        struct {
            uint16_t rx;                /* low 16 bit part */
            uint16_t hi16_unused1;
            uint32_t hi32_unused2;
        };
        struct {
            uint8_t lx;                 /* low 8 bit part */
            uint8_t hx;                 /* high 8 bit */
            uint16_t hi16_unused2;
            uint32_t hi32_unused3;
        };
    };
} __attribute__ ((__packed__)) x86_register;
44 
/*
 * 16 bit Task State Segment (Intel SDM Vol. 3, "16-Bit Task-State
 * Segment").  Every field of the legacy 286 TSS is 16 bits wide and
 * the whole structure is 44 bytes.  sp1 and sp2 were previously
 * declared uint32_t, which shifted every subsequent field and made the
 * struct misparse a 16-bit TSS loaded from guest memory.
 */
typedef struct x86_tss_segment16 {
    uint16_t link;      /* selector of the previous task's TSS */
    uint16_t sp0;       /* stack pointer / stack segment for ring 0 */
    uint16_t ss0;
    uint16_t sp1;       /* ring 1 stack */
    uint16_t ss1;
    uint16_t sp2;       /* ring 2 stack */
    uint16_t ss2;
    uint16_t ip;
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldtr;      /* LDT segment selector */
} __attribute__((packed)) x86_tss_segment16;
70 
/* 32 bit Task State Segment */
/*
 * Matches the hardware layout of the 32-bit TSS (Intel SDM Vol. 3,
 * "32-Bit Task-State Segment"): 104 bytes, hence the packed attribute.
 */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;      /* selector of the previous task's TSS */
    uint32_t esp0;          /* ring 0-2 stack pointers / segments */
    uint32_t ss0;
    uint32_t esp1;
    uint32_t ss1;
    uint32_t esp2;
    uint32_t ss2;
    uint32_t cr3;           /* page directory base for this task */
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;            /* segment selectors (low 16 bits used) */
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;           /* LDT segment selector */
    uint16_t trap;          /* bit 0: raise #DB on task switch */
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment32;
101 
/* 64 bit Task State Segment */
/*
 * Matches the hardware layout of the 64-bit TSS (Intel SDM Vol. 3,
 * "64-Bit Task State Segment"); the unused* fields are reserved slots
 * in the architectural format.
 */
typedef struct x86_tss_segment64 {
    uint32_t unused;        /* reserved */
    uint64_t rsp0;          /* stack pointers for rings 0-2 */
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t unused1;       /* reserved */
    uint64_t ist1;          /* interrupt stack table pointers 1-7 */
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;       /* reserved */
    uint16_t unused3;       /* reserved */
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment64;
120 
/* segment descriptors */
/*
 * 8-byte GDT/LDT segment descriptor (Intel SDM Vol. 3, "Segment
 * Descriptors").  The bit-field order matches the little-endian
 * in-memory layout; use the x86_segment_base/limit helpers below to
 * reassemble the split base and limit fields.
 */
typedef struct x86_segment_descriptor {
    uint64_t    limit0:16;      /* limit 15:0 */
    uint64_t    base0:16;       /* base 15:0 */
    uint64_t    base1:8;        /* base 23:16 */
    uint64_t    type:4;         /* segment type */
    uint64_t    s:1;            /* 0 = system, 1 = code/data */
    uint64_t    dpl:2;          /* descriptor privilege level */
    uint64_t    p:1;            /* present */
    uint64_t    limit1:4;       /* limit 19:16 */
    uint64_t    avl:1;          /* available for software use */
    uint64_t    l:1;            /* 1 = 64-bit code segment */
    uint64_t    db:1;           /* default operation size */
    uint64_t    g:1;            /* granularity: limit in 4K units */
    uint64_t    base2:8;        /* base 31:24 */
} __attribute__ ((__packed__)) x86_segment_descriptor;
137 
138 static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
139 {
140     return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
141 }
142 
143 static inline void x86_set_segment_base(x86_segment_descriptor *desc,
144                                         uint32_t base)
145 {
146     desc->base2 = base >> 24;
147     desc->base1 = (base >> 16) & 0xff;
148     desc->base0 = base & 0xffff;
149 }
150 
151 static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
152 {
153     uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
154     if (desc->g) {
155         return (limit << 12) | 0xfff;
156     }
157     return limit;
158 }
159 
160 static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
161                                          uint32_t limit)
162 {
163     desc->limit0 = limit & 0xffff;
164     desc->limit1 = limit >> 16;
165 }
166 
/*
 * Call gate descriptor (legacy 8-byte format, Intel SDM Vol. 3,
 * "Call Gates").  The previous definition used param_count:4 and
 * dpl:1 and had no S bit, which summed to only 61 bits and placed
 * type, dpl, p and offset1 at the wrong bit positions when overlaid
 * on a gate read from guest memory.  The widths below match the
 * hardware layout (16+16+5+3+4+1+2+1+16 = 64 bits).
 */
typedef struct x86_call_gate {
    uint64_t offset0:16;        /* target offset 15:0 */
    uint64_t selector:16;       /* target code segment selector */
    uint64_t param_count:5;     /* dwords copied on a stack switch */
    uint64_t reserved:3;
    uint64_t type:4;            /* gate type, e.g. 0xC = 32-bit call gate */
    uint64_t s:1;               /* descriptor type; 0 for system descriptors */
    uint64_t dpl:2;             /* descriptor privilege level */
    uint64_t p:1;               /* present */
    uint64_t offset1:16;        /* target offset 31:16 */
} __attribute__ ((__packed__)) x86_call_gate;
177 
178 static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
179 {
180     return (uint32_t)((gate->offset1 << 16) | gate->offset0);
181 }
182 
/* table indicator values for x68_segment_selector.ti */
#define GDT_SEL     0
#define LDT_SEL     1

/*
 * Segment selector as loaded into a segment register.  ("x68" rather
 * than "x86" is a historical typo, kept because existing callers use
 * this name.)
 */
typedef struct x68_segment_selector {
    union {
        uint16_t sel;           /* raw 16-bit selector value */
        struct {
            uint16_t rpl:2;     /* requested privilege level */
            uint16_t ti:1;      /* table indicator: GDT_SEL or LDT_SEL */
            uint16_t index:13;  /* descriptor index within the table */
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;
196 
/* useful register access  macros */
/*
 * Each slot of cpu->regs[] is overlaid with an x86_register so the
 * same storage can be read and written at 64/32/16/8 bit width.
 * NOTE(review): the R_* indices presumably come from the target's
 * cpu.h — confirm against the including translation units.
 */
#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])

/* full 64-bit accessors (RAX..R15) */
#define RRX(cpu, reg)   (x86_reg(cpu, reg)->rrx)
#define RAX(cpu)        RRX(cpu, R_EAX)
#define RCX(cpu)        RRX(cpu, R_ECX)
#define RDX(cpu)        RRX(cpu, R_EDX)
#define RBX(cpu)        RRX(cpu, R_EBX)
#define RSP(cpu)        RRX(cpu, R_ESP)
#define RBP(cpu)        RRX(cpu, R_EBP)
#define RSI(cpu)        RRX(cpu, R_ESI)
#define RDI(cpu)        RRX(cpu, R_EDI)
#define R8(cpu)         RRX(cpu, R_R8)
#define R9(cpu)         RRX(cpu, R_R9)
#define R10(cpu)        RRX(cpu, R_R10)
#define R11(cpu)        RRX(cpu, R_R11)
#define R12(cpu)        RRX(cpu, R_R12)
#define R13(cpu)        RRX(cpu, R_R13)
#define R14(cpu)        RRX(cpu, R_R14)
#define R15(cpu)        RRX(cpu, R_R15)

/*
 * low 32-bit accessors (EAX..EDI); note that assigning through these
 * leaves the high 32 bits untouched, unlike a real 32-bit register
 * write in 64-bit mode (see x86_register above).
 */
#define ERX(cpu, reg)   (x86_reg(cpu, reg)->erx)
#define EAX(cpu)        ERX(cpu, R_EAX)
#define ECX(cpu)        ERX(cpu, R_ECX)
#define EDX(cpu)        ERX(cpu, R_EDX)
#define EBX(cpu)        ERX(cpu, R_EBX)
#define ESP(cpu)        ERX(cpu, R_ESP)
#define EBP(cpu)        ERX(cpu, R_EBP)
#define ESI(cpu)        ERX(cpu, R_ESI)
#define EDI(cpu)        ERX(cpu, R_EDI)

/* low 16-bit accessors (AX..DI) */
#define RX(cpu, reg)   (x86_reg(cpu, reg)->rx)
#define AX(cpu)        RX(cpu, R_EAX)
#define CX(cpu)        RX(cpu, R_ECX)
#define DX(cpu)        RX(cpu, R_EDX)
#define BP(cpu)        RX(cpu, R_EBP)
#define SP(cpu)        RX(cpu, R_ESP)
#define BX(cpu)        RX(cpu, R_EBX)
#define SI(cpu)        RX(cpu, R_ESI)
#define DI(cpu)        RX(cpu, R_EDI)

/* low byte accessors (AL..BL) */
#define RL(cpu, reg)   (x86_reg(cpu, reg)->lx)
#define AL(cpu)        RL(cpu, R_EAX)
#define CL(cpu)        RL(cpu, R_ECX)
#define DL(cpu)        RL(cpu, R_EDX)
#define BL(cpu)        RL(cpu, R_EBX)

/* high byte accessors (AH..BH) */
#define RH(cpu, reg)   (x86_reg(cpu, reg)->hx)
#define AH(cpu)        RH(cpu, R_EAX)
#define CH(cpu)        RH(cpu, R_ECX)
#define DH(cpu)        RH(cpu, R_EDX)
#define BH(cpu)        RH(cpu, R_EBX)
249 
/* deal with GDT/LDT descriptors in memory */
/*
 * Read/write the descriptor selected by 'sel' from/to the table that
 * the selector's ti bit picks.  NOTE(review): the bool result
 * presumably reports success — confirm against the definitions.
 */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);

/* Fetch gate descriptor number 'gate' from the guest IDT. */
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);

/* helpers */
/* CPU mode / paging state predicates for the given vCPU */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);

/* Translate a segment-relative address to a linear address. */
enum X86Seg;
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              enum X86Seg seg);
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
274 target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
275 
/*
 * Read the time stamp counter with RDTSCP, which waits for all prior
 * instructions to retire before sampling the counter.  The CPU returns
 * the low 32 bits in EAX, the high 32 bits in EDX and IA32_TSC_AUX in
 * ECX; the aux value is discarded, so ECX is listed as a clobber.
 */
static inline uint64_t rdtscp(void)
{
    uint32_t lo, hi;

    __asm__ __volatile__("rdtscp"
                         : "=a"(lo), "=d"(hi)
                         :
                         : "%rcx");

    return ((uint64_t)hi << 32) | lo;
}
288 
289 #endif
290