xref: /qemu/target/i386/tcg/sysemu/misc_helper.c (revision 213ff024)
/*
 *  x86 misc helpers - sysemu code
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "tcg/helper-tcg.h"

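/*
 * Port I/O helpers: OUT/IN accesses of one, two and four bytes go straight
 * to the system I/O address space, tagged with the vCPU's current memory
 * attributes.
 */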
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}

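/*
 * Read a control register.  CR8 normally reflects the APIC task priority;
 * with SVM virtual interrupt masking active it returns the guest's virtual
 * TPR instead.
 */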
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

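/*
 * Write a control register, applying the side effects the translator cannot
 * handle inline: the SVM selective CR0 write intercept, CR3/CR4 MMU updates,
 * and CR8 forwarding to the APIC task priority register.
 */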
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the selective
         * intercept for bits other than TS and MP
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (t0 & cr4_reserved_bits(env)) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

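/*
 * WRMSR: write EDX:EAX to the MSR selected by ECX.  The SVM MSR intercept is
 * checked first; writes to unknown MSRs are silently ignored rather than
 * raising #GP (see the XXX below).
 */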
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}

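/*
 * RDMSR: read the MSR selected by ECX into EDX:EAX.  Unknown MSRs read back
 * as zero rather than raising #GP (see the XXX below).
 */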
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}

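/* Flush any TLB entry covering a single guest page (used by INVLPG). */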
void helper_flush_page(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env_cpu(env), addr);
}

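/* Put the vCPU into the halted state and leave the TCG execution loop. */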
static void QEMU_NORETURN do_hlt(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

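/* HLT: check the SVM intercept, step EIP past the instruction, then halt. */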
void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
    env->eip += next_eip_addend;

    do_hlt(env);
}

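/*
 * MONITOR: only ECX == 0 (no extensions) is accepted; the monitored address
 * itself is not recorded (see the XXX below).
 */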
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}

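/*
 * MWAIT: approximated as PAUSE whenever another vCPU exists, so the rest of
 * the machine keeps running; the sole vCPU of a uniprocessor guest halts.
 */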
void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        do_pause(env);
    } else {
        do_hlt(env);
    }
}