/*
 * x86 exception helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/helper-tcg.h"

typedef struct TranslateParams {
    target_ulong addr;
    target_ulong cr3;
    int pg_mode;
    int mmu_idx;
    int ptw_idx;
    MMUAccessType access_type;
} TranslateParams;

typedef struct TranslateResult {
    hwaddr paddr;
    int prot;
    int page_size;
} TranslateResult;

typedef enum TranslateFaultStage2 {
    S2_NONE,
    S2_GPA,
    S2_GPT,
} TranslateFaultStage2;

typedef struct TranslateFault {
    int exception_index;
    int error_code;
    target_ulong cr2;
    TranslateFaultStage2 stage2;
} TranslateFault;

typedef struct PTETranslate {
    CPUX86State *env;
    TranslateFault *err;
    int ptw_idx;
    void *haddr;
    hwaddr gaddr;
} PTETranslate;
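
/*
 * The helpers below implement the guest page-table walk:
 * ptw_translate() resolves the address of a paging-structure entry
 * (obtaining a host pointer via probe_access_full() when possible),
 * ptw_ldl()/ptw_ldq() read an entry, and ptw_setl() updates its
 * accessed/dirty bits atomically.  mmu_translate() drives the walk,
 * filling TranslateResult on success and TranslateFault on failure.
 */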

static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra)
{
    CPUTLBEntryFull *full;
    int flags;

    inout->gaddr = addr;
    flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE,
                              inout->ptw_idx, true, &inout->haddr, &full, ra);

    if (unlikely(flags & TLB_INVALID_MASK)) {
        TranslateFault *err = inout->err;

        assert(inout->ptw_idx == MMU_NESTED_IDX);
        *err = (TranslateFault){
            .error_code = inout->env->error_code,
            .cr2 = addr,
            .stage2 = S2_GPT,
        };
        return false;
    }
    return true;
}

static inline uint32_t ptw_ldl(const PTETranslate *in, uint64_t ra)
{
    if (likely(in->haddr)) {
        return ldl_p(in->haddr);
    }
    return cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}

static inline uint64_t ptw_ldq(const PTETranslate *in, uint64_t ra)
{
    if (likely(in->haddr)) {
        return ldq_p(in->haddr);
    }
    return cpu_ldq_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, ra);
}

/*
 * Note that we can use a 32-bit cmpxchg for all page table entries,
 * even 64-bit ones, because PG_PRESENT_MASK, PG_ACCESSED_MASK and
 * PG_DIRTY_MASK are all in the low 32 bits.
 */
static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new)
{
    uint32_t cmp;

    /* Does x86 really perform a rmw cycle on mmio for ptw? */
    start_exclusive();
    cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0);
    if (cmp == old) {
        cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0);
    }
    end_exclusive();
    return cmp == old;
}

static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
{
    if (set & ~old) {
        uint32_t new = old | set;
        if (likely(in->haddr)) {
            old = cpu_to_le32(old);
            new = cpu_to_le32(new);
            return qatomic_cmpxchg((uint32_t *)in->haddr, old, new) == old;
        }
        return ptw_setl_slow(in, old, new);
    }
    return true;
}

static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
                          TranslateResult *out, TranslateFault *err,
                          uint64_t ra)
{
    const target_ulong addr = in->addr;
    const int pg_mode = in->pg_mode;
    const bool is_user = is_mmu_index_user(in->mmu_idx);
    const MMUAccessType access_type = in->access_type;
    uint64_t ptep, pte, rsvd_mask;
    PTETranslate pte_trans = {
        .env = env,
        .err = err,
        .ptw_idx = in->ptw_idx,
    };
    hwaddr pte_addr, paddr;
    uint32_t pkr;
    int page_size;
    int error_code;

 restart_all:
    rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits);
    rsvd_mask &= PG_ADDRESS_MASK;
    if (!(pg_mode & PG_MODE_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }
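
    /*
     * Linear-address decomposition used by the walks below:
     *   4/5-level long mode: 9-bit indices at bits 47..39, 38..30, 29..21
     *                        and 20..12 (plus 56..48 with LA57), 8-byte entries.
     *   PAE without LMA:     2-bit PDPT index at bits 31..30, then 29..21
     *                        and 20..12, 8-byte entries.
     *   Legacy 32-bit:       10-bit indices at bits 31..22 and 21..12,
     *                        4-byte entries.
     */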

    if (pg_mode & PG_MODE_PAE) {
#ifdef TARGET_X86_64
        if (pg_mode & PG_MODE_LMA) {
            if (pg_mode & PG_MODE_LA57) {
                /*
                 * Page table level 5
                 */
                pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3);
                if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                    return false;
                }
            restart_5:
                pte = ptw_ldq(&pte_trans, ra);
                if (!(pte & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pte & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                    goto restart_5;
                }
                ptep = pte ^ PG_NX_MASK;
            } else {
                pte = in->cr3;
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            /*
             * Page table level 4
             */
            pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3);
            if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                return false;
            }
        restart_4:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_4;
            }
            ptep &= pte ^ PG_NX_MASK;

            /*
             * Page table level 3
             */
            pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3);
            if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                return false;
            }
        restart_3_lma:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & rsvd_mask) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_3_lma;
            }
            ptep &= pte ^ PG_NX_MASK;
            if (pte & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                goto do_check_protect;
            }
        } else
#endif
        {
            /*
             * Page table level 3
             */
            pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18);
            if (!ptw_translate(&pte_trans, pte_addr, ra)) {
                return false;
            }
            rsvd_mask |= PG_HI_USER_MASK;
        restart_3_nolma:
            pte = ptw_ldq(&pte_trans, ra);
            if (!(pte & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pte & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
                goto restart_3_nolma;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        /*
         * Page table level 2
         */
        pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
    restart_2_pae:
        pte = ptw_ldq(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        if (pte & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep &= pte ^ PG_NX_MASK;
            goto do_check_protect;
        }
        if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
            goto restart_2_pae;
        }
        ptep &= pte ^ PG_NX_MASK;

        /*
         * Page table level 1
         */
        pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
        pte = ptw_ldq(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        /*
         * Page table level 2
         */
        pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
    restart_2_nopae:
        pte = ptw_ldl(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pte | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pte & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
            page_size = 4096 * 1024;
            /*
             * Bits 20-13 provide bits 39-32 of the address; bit 21 is
             * reserved.  Leave bits 20-13 in place for setting
             * accessed/dirty bits below.
             */
            pte = (uint32_t)pte | ((pte & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }
        if (!ptw_setl(&pte_trans, pte, PG_ACCESSED_MASK)) {
            goto restart_2_nopae;
        }

        /*
         * Page table level 1
         */
        pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc);
        if (!ptw_translate(&pte_trans, pte_addr, ra)) {
            return false;
        }
        pte = ptw_ldl(&pte_trans, ra);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    int prot = 0;
    if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (is_user ||
         !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
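
    /*
     * Protection keys: the 4-bit key from the leaf PTE selects two bits
     * in PKRU (user pages) or PKRS (supervisor pages): bit 0 disables
     * all data access (AD), bit 1 disables writes (WD).
     */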

    if (ptep & PG_USER_MASK) {
        pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
    } else {
        pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
    }
    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
            pkr_prot &= ~PAGE_WRITE;
        }
        if ((pkr_prot & (1 << access_type)) == 0) {
            goto do_fault_pk_protect;
        }
        prot &= pkr_prot;
    }

    if ((prot & (1 << access_type)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    {
        uint32_t set = PG_ACCESSED_MASK;
        if (access_type == MMU_DATA_STORE) {
            set |= PG_DIRTY_MASK;
        } else if (!(pte & PG_DIRTY_MASK)) {
            /*
             * Only set write access if already dirty...
             * otherwise wait for dirty access.
             */
            prot &= ~PAGE_WRITE;
        }
        if (!ptw_setl(&pte_trans, pte, set)) {
            /*
             * We can arrive here from any of 3 levels and 2 formats.
             * The only safe thing is to restart the entire lookup.
             */
            goto restart_all;
        }
    }

    /* merge offset within page */
    paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
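
    /*
     * With NPT active (ptw_idx == MMU_NESTED_IDX), the paddr computed above
     * is still a guest-physical address and is translated once more through
     * the nested page tables below; a fault at that stage is reported to the
     * hypervisor as a #VMEXIT(NPF) via raise_stage2() rather than as a
     * guest page fault.
     */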

    /*
     * Note that NPT is walked (for both paging structures and final guest
     * addresses) using the address with the A20 bit set.
     */
    if (in->ptw_idx == MMU_NESTED_IDX) {
        CPUTLBEntryFull *full;
        int flags, nested_page_size;

        flags = probe_access_full(env, paddr, 0, access_type,
                                  MMU_NESTED_IDX, true,
                                  &pte_trans.haddr, &full, 0);
        if (unlikely(flags & TLB_INVALID_MASK)) {
            *err = (TranslateFault){
                .error_code = env->error_code,
                .cr2 = paddr,
                .stage2 = S2_GPA,
            };
            return false;
        }

        /* Merge stage1 & stage2 protection bits. */
        prot &= full->prot;

        /* Re-verify resulting protection. */
        if ((prot & (1 << access_type)) == 0) {
            goto do_fault_protect;
        }

        /* Merge stage1 & stage2 addresses to final physical address. */
        nested_page_size = 1 << full->lg_page_size;
        paddr = (full->phys_addr & ~(nested_page_size - 1))
              | (paddr & (nested_page_size - 1));

        /*
         * Use the larger of stage1 & stage2 page sizes, so that
         * invalidation works.
         */
        if (nested_page_size > page_size) {
            page_size = nested_page_size;
        }
    }

    out->paddr = paddr & x86_get_a20_mask(env);
    out->prot = prot;
    out->page_size = page_size;
    return true;

 do_fault_rsvd:
    error_code = PG_ERROR_RSVD_MASK;
    goto do_fault_cont;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
    goto do_fault_cont;
 do_fault_pk_protect:
    assert(access_type != MMU_INST_FETCH);
    error_code = PG_ERROR_PK_MASK | PG_ERROR_P_MASK;
    goto do_fault_cont;
 do_fault:
    error_code = 0;
 do_fault_cont:
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    switch (access_type) {
    case MMU_DATA_LOAD:
        break;
    case MMU_DATA_STORE:
        error_code |= PG_ERROR_W_MASK;
        break;
    case MMU_INST_FETCH:
        if (pg_mode & (PG_MODE_NXE | PG_MODE_SMEP)) {
            error_code |= PG_ERROR_I_D_MASK;
        }
        break;
    }
    *err = (TranslateFault){
        .exception_index = EXCP0E_PAGE,
        .error_code = error_code,
        .cr2 = addr,
    };
    return false;
}

static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err,
                                    uintptr_t retaddr)
{
    uint64_t exit_info_1 = err->error_code;

    switch (err->stage2) {
    case S2_GPT:
        exit_info_1 |= SVM_NPTEXIT_GPT;
        break;
    case S2_GPA:
        exit_info_1 |= SVM_NPTEXIT_GPA;
        break;
    default:
        g_assert_not_reached();
    }

    x86_stq_phys(env_cpu(env),
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 err->cr2);
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, retaddr);
}
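
/*
 * Dispatch on the MMU index: MMU_PHYS_IDX needs no translation,
 * MMU_NESTED_IDX is translated through the nested (NPT) page tables only,
 * and the remaining indexes use the regular guest walk when paging is
 * enabled, optionally routing page-table accesses through MMU_NESTED_IDX
 * when NPT is active.
 */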

static bool get_physical_address(CPUX86State *env, vaddr addr,
                                 MMUAccessType access_type, int mmu_idx,
                                 TranslateResult *out, TranslateFault *err,
                                 uint64_t ra)
{
    TranslateParams in;
    bool use_stage2 = env->hflags2 & HF2_NPT_MASK;

    in.addr = addr;
    in.access_type = access_type;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        break;

    case MMU_NESTED_IDX:
        if (likely(use_stage2)) {
            in.cr3 = env->nested_cr3;
            in.pg_mode = env->nested_pg_mode;
            in.mmu_idx =
                env->nested_pg_mode & PG_MODE_LMA ? MMU_USER64_IDX : MMU_USER32_IDX;
            in.ptw_idx = MMU_PHYS_IDX;

            if (!mmu_translate(env, &in, out, err, ra)) {
                err->stage2 = S2_GPA;
                return false;
            }
            return true;
        }
        break;

    default:
        if (is_mmu_index_32(mmu_idx)) {
            addr = (uint32_t)addr;
        }

        if (likely(env->cr[0] & CR0_PG_MASK)) {
            in.cr3 = env->cr[3];
            in.mmu_idx = mmu_idx;
            in.ptw_idx = use_stage2 ? MMU_NESTED_IDX : MMU_PHYS_IDX;
            in.pg_mode = get_pg_mode(env);

            if (in.pg_mode & PG_MODE_LMA) {
                /* test virtual address sign extension */
                int shift = in.pg_mode & PG_MODE_LA57 ? 56 : 47;
                int64_t sext = (int64_t)addr >> shift;
                if (sext != 0 && sext != -1) {
                    *err = (TranslateFault){
                        .exception_index = EXCP0D_GPF,
                        .cr2 = addr,
                    };
                    return false;
                }
            }
            return mmu_translate(env, &in, out, err, ra);
        }
        break;
    }

    /* No translation needed. */
    out->paddr = addr & x86_get_a20_mask(env);
    out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    out->page_size = TARGET_PAGE_SIZE;
    return true;
}

bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    CPUX86State *env = cpu_env(cs);
    TranslateResult out;
    TranslateFault err;

    if (get_physical_address(env, addr, access_type, mmu_idx, &out, &err,
                             retaddr)) {
        /*
         * Even with 4MB pages, we map only one 4KB page in the TLB to
         * avoid filling it too fast.
         */
        assert(out.prot & (1 << access_type));
        tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                                out.paddr & TARGET_PAGE_MASK,
                                cpu_get_mem_attrs(env),
                                out.prot, mmu_idx, out.page_size);
        return true;
    }

    if (probe) {
        /* This will be used if recursing for stage2 translation. */
        env->error_code = err.error_code;
        return false;
    }

    if (err.stage2 != S2_NONE) {
        raise_stage2(env, &err, retaddr);
    }

    if (env->intercept_exceptions & (1 << err.exception_index)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs, env->vm_vmcb +
                     offsetof(struct vmcb, control.exit_info_2),
                     err.cr2);
    } else {
        env->cr[2] = err.cr2;
    }
    raise_exception_err_ra(env, err.exception_index, err.error_code, retaddr);
}

G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
}