/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Helpers for inspecting the guest's descriptor tables and execution mode
 * under the macOS Hypervisor.framework (HVF) accelerator.  All guest state
 * is read through the VMCS via rvmcs() and guest memory via
 * vmx_read_mem()/vmx_write_mem().
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86_mmu.h"
#include "x86_descr.h"

/*
 * Kept for reference: packs a segment descriptor into the VMX
 * access-rights format (type/s/dpl/p/avl/l/db/g bit layout).
 * Currently unused; consider deleting if no caller materializes.
 */
/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
{
    uint32_t ar;

    if (!var->p) {
        ar = 1 << 16;
        return ar;
    }

    ar = var->type & 15;
    ar |= (var->s & 1) << 4;
    ar |= (var->dpl & 3) << 5;
    ar |= (var->p & 1) << 7;
    ar |= (var->avl & 1) << 12;
    ar |= (var->l & 1) << 13;
    ar |= (var->db & 1) << 14;
    ar |= (var->g & 1) << 15;
    return ar;
}*/

/*
 * Read the 8-byte segment descriptor selected by @sel from the guest's
 * GDT or LDT (chosen by sel.ti) into @desc.
 *
 * Returns false for the GDT null selector (index 0) or when the
 * descriptor lies outside the table limit; @desc is zeroed in either
 * case.  Returns true on success.
 *
 * NOTE(review): the "x68_" in x68_segment_selector is a typo inherited
 * from the public header; kept as-is since it is part of the interface.
 */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel)
{
    addr_t base;
    uint32_t limit;

    ZERO_INIT(*desc);
    /* valid gdt descriptors start from index 1 */
    if (!sel.index && GDT_SEL == sel.ti) {
        return false;
    }

    if (GDT_SEL == sel.ti) {
        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
    }

    /*
     * NOTE(review): the architectural check is index*8 + 7 <= limit
     * (limit is the last valid byte offset); this looser form matches
     * the write path below -- confirm against the Intel SDM if tightened.
     */
    if (sel.index * 8 >= limit) {
        return false;
    }

    vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
    return true;
}

/*
 * Write the 8-byte descriptor @desc back into the guest's GDT or LDT
 * slot selected by @sel (table chosen by sel.ti).
 *
 * Returns false (after logging) when the slot is outside the table
 * limit, true on success.  Unlike the read path, the null GDT selector
 * is not rejected here.
 */
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel)
{
    addr_t base;
    uint32_t limit;

    if (GDT_SEL == sel.ti) {
        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        printf("%s: gdt limit\n", __func__);
        return false;
    }
    vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
    return true;
}

/*
 * Read IDT entry @gate from the guest's IDT into @idt_desc.
 *
 * Returns false (after logging) when the entry is past the IDT limit;
 * @idt_desc is zeroed in that case.  Returns true on success.
 *
 * NOTE(review): entries are assumed to be 8 bytes (gate * 8), i.e. the
 * legacy/protected-mode gate size -- 64-bit IDT gates are 16 bytes;
 * confirm callers only use this outside long mode.
 */
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate)
{
    addr_t base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
    uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);

    ZERO_INIT(*idt_desc);
    if (gate * 8 >= limit) {
        printf("%s: idt limit\n", __func__);
        return false;
    }

    vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
    return true;
}

/* Guest is in protected mode: CR0.PE is set. */
bool x86_is_protected(struct CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PE;
}

/* Guest is in real mode: the complement of protected mode. */
bool x86_is_real(struct CPUState *cpu)
{
    return !x86_is_protected(cpu);
}

/* Guest is in virtual-8086 mode: protected mode with RFLAGS.VM set. */
bool x86_is_v8086(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);
}

/* Guest long mode is active: IA32_EFER.LMA is set. */
bool x86_is_long_mode(struct CPUState *cpu)
{
    return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & EFER_LMA;
}

/*
 * Guest is executing 64-bit code: long mode active AND the current CS
 * descriptor's L bit is set (bit 13 of the VMX access-rights word --
 * see the layout in the commented-out helper above).
 */
bool x86_is_long64_mode(struct CPUState *cpu)
{
    struct vmx_segment desc;
    vmx_read_segment_descriptor(cpu, &desc, REG_SEG_CS);

    return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
}

/* Paging is enabled: CR0.PG is set. */
bool x86_is_paging_mode(struct CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PG;
}

/* Physical Address Extension is enabled: CR4.PAE is set. */
bool x86_is_pae_enabled(struct CPUState *cpu)
{
    uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
    return cr4 & CR4_PAE;
}

/*
 * Translate a segment-relative offset to a linear address:
 * segment base (from the VMCS) + @addr.  No limit/canonical checks.
 */
addr_t linear_addr(struct CPUState *cpu, addr_t addr, x86_reg_segment seg)
{
    return vmx_read_segment_base(cpu, seg) + addr;
}

/*
 * Like linear_addr(), but first truncates @addr to the effective
 * address size: 16-bit (@size == 2) or 32-bit (@size == 4) wrap-around.
 * Any other @size (e.g. 8 for 64-bit) leaves the offset untouched.
 */
addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size,
                        x86_reg_segment seg)
{
    switch (size) {
    case 2:
        addr = (uint16_t)addr;
        break;
    case 4:
        addr = (uint32_t)addr;
        break;
    default:
        break;
    }
    return linear_addr(cpu, addr, seg);
}

/* Linear address of @rip: CS base + rip. */
addr_t linear_rip(struct CPUState *cpu, addr_t rip)
{
    return linear_addr(cpu, rip, REG_SEG_CS);
}