/*
 *  x86 misc helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"

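/* Port I/O helpers.  In system emulation the access is forwarded to the
   I/O address space using the CPU's current memory attributes; user-mode
   emulation has no I/O port space, so the access is merely logged. */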
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
    fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data);
#else
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
#endif
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
    fprintf(stderr, "inb: port=0x%04x\n", port);
    return 0;
#else
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
#endif
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
    fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data);
#else
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
#endif
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
    fprintf(stderr, "inw: port=0x%04x\n", port);
    return 0;
#else
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
#endif
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
    fprintf(stderr, "outl: port=0x%04x, data=%08x\n", port, data);
#else
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
#endif
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
    fprintf(stderr, "inl: port=0x%04x\n", port);
    return 0;
#else
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
#endif
}

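/* INTO: raise the #OF exception (vector 4) if the overflow flag is set
   in the lazily-computed EFLAGS. */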
void helper_into(CPUX86State *env, int next_eip_addend)
{
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

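/* CPUID: after the SVM intercept check, query the CPUID model with the
   leaf/subleaf taken from EAX/ECX and write the result back into
   EAX, EBX, ECX and EDX. */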
void helper_cpuid(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;

    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0, GETPC());

    cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
                  &eax, &ebx, &ecx, &edx);
    env->regs[R_EAX] = eax;
    env->regs[R_EBX] = ebx;
    env->regs[R_ECX] = ecx;
    env->regs[R_EDX] = edx;
}

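/* Control register access (MOV to/from CRn).  CR8 is backed by the APIC
   TPR, or by the virtual TPR when V_INTR_MASKING is active for an SVM
   guest.  The user-mode stubs do nothing. */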
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    return 0;
}

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC());
    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(x86_env_get_cpu(env)->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC());
    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(x86_env_get_cpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
#endif

void helper_lmsw(CPUX86State *env, target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(env, 0, t0);
}

void helper_invlpg(CPUX86State *env, target_ulong addr)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC());
    tlb_flush_page(CPU(cpu), addr);
}

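/* RDTSC/RDTSCP/RDPMC.  RDTSC raises #GP when CR4.TSD is set outside ring 0;
   the returned value includes the SVM TSC offset.  RDPMC is not implemented
   and raises #UD after logging. */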
void helper_rdtsc(CPUX86State *env)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0, GETPC());

    val = cpu_get_tsc(env) + env->tsc_offset;
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}

void helper_rdtscp(CPUX86State *env)
{
    helper_rdtsc(env);
    env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(CPUX86State *env)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0, GETPC());

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}

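/* WRMSR: write the MSR selected by ECX with the value in EDX:EAX.  Writes
   to unknown MSRs are silently ignored.  In user-mode emulation both MSR
   helpers are no-ops. */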
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(CPUX86State *env)
{
}

void helper_rdmsr(CPUX86State *env)
{
}
#else
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(x86_env_get_cpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
}

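/* RDMSR: read the MSR selected by ECX into EDX:EAX.  Unknown MSRs read
   back as zero. */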
void helper_rdmsr(CPUX86State *env)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(x86_env_get_cpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
#endif

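/* Common code for instructions that give up the CPU: do_pause() simply
   leaves the CPU loop so another vCPU can run, while do_hlt() also halts
   the CPU until the next interrupt. */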
static void do_pause(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* Just let another CPU run.  */
    cs->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit(cs);
}

static void do_hlt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_hlt(CPUX86State *env, int next_eip_addend)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
    env->eip += next_eip_addend;

    do_hlt(cpu);
}

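/* MONITOR/MWAIT/PAUSE.  MONITOR only validates ECX (the armed address is
   not recorded).  MWAIT behaves like PAUSE on an SMP guest and like HLT on
   a single-CPU guest, which is an approximation rather than a faithful
   implementation. */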
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}

void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs;
    X86CPU *cpu;

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    cpu = x86_env_get_cpu(env);
    cs = CPU(cpu);
    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        do_pause(cpu);
    } else {
        do_hlt(cpu);
    }
}

void helper_pause(CPUX86State *env, int next_eip_addend)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC());
    env->eip += next_eip_addend;

    do_pause(cpu);
}

void helper_debug(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

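/* Protection-key register access.  RDPKRU/WRPKRU raise #UD unless CR4.PKE
   is set and #GP if ECX is non-zero (or, for WRPKRU, if the upper 32 bits
   of the source are set).  Changing PKRU flushes the TLB because access
   permissions are folded into the cached TLB entries. */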
uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
{
    if ((env->cr[4] & CR4_PKE_MASK) == 0) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    if (ecx != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    return env->pkru;
}

void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if ((env->cr[4] & CR4_PKE_MASK) == 0) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    if (ecx != 0 || (val & 0xFFFFFFFF00000000ull)) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    env->pkru = val;
    tlb_flush(cs);
}