// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>

struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}
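
/*
 * Fold a nested guest hypervisor's fine-grained trap configuration for @reg
 * into a clear/set pair: positive trap bits it has set are accumulated into
 * @set, negative ("nXXX") trap bits it has left clear are accumulated into
 * @clr, after discarding the register's RES0 bits.
 */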
#define compute_clr_set(vcpu, reg, clr, set)				\
	do {								\
		u64 hfg;						\
		hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0;	\
		set |= hfg & __ ## reg ## _MASK;			\
		clr |= ~hfg & __ ## reg ## _nMASK;			\
	} while(0)

#define update_fgt_traps_cs(vcpu, reg, clr, set)			\
	do {								\
		struct kvm_cpu_context *hctxt =				\
			&this_cpu_ptr(&kvm_host_data)->host_ctxt;	\
		u64 c = 0, s = 0;					\
									\
		ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);	\
		compute_clr_set(vcpu, reg, c, s);			\
		s |= set;						\
		c |= clr;						\
		if (c || s) {						\
			u64 val = __ ## reg ## _nMASK;			\
			val |= s;					\
			val &= ~c;					\
			write_sysreg_s(val, SYS_ ## reg);		\
		}							\
	} while(0)

#define update_fgt_traps(vcpu, reg)		\
	update_fgt_traps_cs(vcpu, reg, 0, 0)

/*
 * Validate the fine grain trap masks.
 * Check that the masks do not overlap and that all bits are accounted for.
 */
#define CHECK_FGT_MASKS(reg)							\
	do {									\
		BUILD_BUG_ON((__ ## reg ## _MASK) & (__ ## reg ## _nMASK));	\
		BUILD_BUG_ON(~((__ ## reg ## _RES0) ^ (__ ## reg ## _MASK) ^	\
			       (__ ## reg ## _nMASK)));				\
	} while(0)

static inline bool cpu_has_amu(void)
{
	u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_unsigned_field(pfr0,
						    ID_AA64PFR0_EL1_AMU_SHIFT);
}

static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
	u64 r_val, w_val;

	CHECK_FGT_MASKS(HFGRTR_EL2);
	CHECK_FGT_MASKS(HFGWTR_EL2);
	CHECK_FGT_MASKS(HFGITR_EL2);
	CHECK_FGT_MASKS(HDFGRTR_EL2);
	CHECK_FGT_MASKS(HDFGWTR_EL2);
	CHECK_FGT_MASKS(HAFGRTR_EL2);
	CHECK_FGT_MASKS(HCRX_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
	ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);

	if (cpus_have_final_cap(ARM64_SME)) {
		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;

		r_clr |= tmp;
		w_clr |= tmp;
	}

	/*
	 * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		w_set |= HFGxTR_EL2_TCR_EL1_MASK;

	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
		compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
	}

	/* The default is to trap everything not handled or supported in KVM. */
	tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
	      HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;

	r_val = __HFGRTR_EL2_nMASK & ~tmp;
	r_val |= r_set;
	r_val &= ~r_clr;

	w_val = __HFGWTR_EL2_nMASK & ~tmp;
	w_val |= w_set;
	w_val &= ~w_clr;

	write_sysreg_s(r_val, SYS_HFGRTR_EL2);
	write_sysreg_s(w_val, SYS_HFGWTR_EL2);

	if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
		return;

	update_fgt_traps(vcpu, HFGITR_EL2);
	update_fgt_traps(vcpu, HDFGRTR_EL2);
	update_fgt_traps(vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		update_fgt_traps(vcpu, HAFGRTR_EL2);
}

static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);

	if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
		return;

	write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);

	if (cpu_has_amu())
		write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
}
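
/*
 * Activate the traps that are common to the VHE and nVHE worlds: AArch32
 * cp15 c15 accesses, EL0 PMU accesses, the debug configuration in MDCR_EL2,
 * HCRX_EL2 and the fine-grained trap registers.
 */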
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		write_sysreg(0, pmselr_el0);

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		u64 hcrx = HCRX_GUEST_FLAGS;
		if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
			u64 clr = 0, set = 0;

			compute_clr_set(vcpu, HCRX_EL2, clr, set);

			hcrx |= set;
			hcrx &= ~clr;
		}

		write_sysreg_s(hcrx, SYS_HCRX_EL2);
	}

	__activate_traps_hfgxtr(vcpu);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);

	__deactivate_traps_hfgxtr(vcpu);
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);

	/*
	 * Finish potential single step before executing the prologue
	 * instruction.
	 */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	return true;
}
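
/*
 * Restore the guest's SVE state: size ZCR_EL2 for the guest's maximum
 * vector length, reload the SVE registers and FPSR, then restore ZCR_EL1.
 */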
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here: */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		break;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap. Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	if (has_vhe() || has_hvhe()) {
		reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;

		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;

		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	/* Write out the host state if it's in the registers */
	if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;

	return true;
}
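
/*
 * Emulate a trapped write to one of the EL1 virtual-memory control registers.
 * These traps only exist because of the Cavium ThunderX2 erratum 219 TVM
 * workaround; forward the write to hardware and skip the instruction.
 */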
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

static inline bool esr_is_ptrauth_trap(u64 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while(0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
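
/*
 * Handle a trapped pointer authentication key access: save the host's keys
 * into the per-CPU hyp context, enable ptrauth for the vcpu and stop
 * trapping further key accesses by setting HCR_EL2.{API,APK}.
 */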
static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/*
	 * We only get here for 64bit guests, 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 */
	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			if (is_hyp_ctxt(vcpu)) {
				ctxt = vcpu_hptimer(vcpu);
				break;
			}

			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	default:
		return false;
	}

	val = arch_timer_read_cntpct_el0();

	if (ctxt->offset.vm_offset)
		val -= *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		val -= *kern_hyp_va(ctxt->offset.vcpu_offset);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
	 * control bits are still functional. The architecture requires these be
	 * RES0 on systems that do not implement FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest writes
	 * to TCR_EL1.{HA,HD} here.
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}
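
/*
 * Handle a trapped AArch64 system register access that can be resolved
 * entirely at hyp: errata workarounds, GICv3 CPU interface accesses,
 * ptrauth key switching and physical counter reads. Everything else is
 * left to the normal exit handling.
 */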
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return kvm_hyp_handle_ptrauth(vcpu, exit_code);

	if (kvm_hyp_handle_cntpct(vcpu))
		return true;

	return false;
}

static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);

static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	exit_handler_fn fn;

	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}
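
/*
 * Capture the guest's PSTATE from SPSR_EL2 into the vcpu context, working
 * around Cortex-A510 erratum #2077057 where SPSR_EL2 can't be trusted.
 */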
static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Save PSTATE early so that we can evaluate the vcpu mode
	 * early on.
	 */
	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Check whether we want to repaint the state one way or
	 * another.
	 */
	early_exit_filter(vcpu, exit_code);

	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code))
		goto guest;
exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */