/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
};
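
/*
 * Illustrative example (see kernel-parameters.txt): protected mode is
 * selected on the kernel command line with
 *
 *	kvm-arm.mode=protected
 *
 * which yields KVM_MODE_PROTECTED; with no parameter, KVM runs in
 * KVM_MODE_DEFAULT.
 */
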
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64	vmid_gen;
	u32	vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64	vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;
	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host
	 * and guest state during world switches. vcpu_debug_state are the
	 * debug registers of the vcpu as the guest sees them.
	 * host_debug_state are the host registers which are saved and
	 * restored during world switches. external_debug_state contains
	 * the debug register values with which we want to debug the
	 * guest; it is set via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be
	 * loaded onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU;
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
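
/*
 * Illustrative sketch (not part of this header): a caller sizing the
 * per-vcpu SVE register storage might do, assuming sve_max_vl has
 * already been configured and a GFP_KERNEL allocation context:
 *
 *	size_t sz = vcpu_sve_state_size(vcpu);
 *
 *	if (!sz)
 *		return -EINVAL;
 *	vcpu->arch.sve_state = kzalloc(sz, GFP_KERNEL);
 */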

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
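
/*
 * Illustrative sketch: an exception injection helper queues a
 * synchronous exception targeting the guest's EL1 by combining the
 * flags above, e.g.:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */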

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
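
/*
 * Illustrative sketch: userspace register access reads the
 * memory-backed copy directly, while emulation of a running vcpu goes
 * through the accessors above so that a register currently loaded on
 * the hardware (VHE) is not missed:
 *
 *	u64 stored = __vcpu_sys_reg(vcpu, SCTLR_EL1);	 (backing store)
 *	u64 live = vcpu_read_sys_reg(vcpu, SCTLR_EL1);	 (may read hw)
 */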

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
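
/*
 * Illustrative sketch: callers name the EL2 function symbol directly,
 * and the macros dispatch to a plain function call on VHE or an HVC
 * on nVHE, e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 */
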
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}
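
/*
 * Illustrative sketch: userspace opts in to stolen-time accounting by
 * setting the shared-page IPA through the vcpu device attribute
 * interface (see Documentation/virt/kvm/devices/vcpu.rst), e.g.:
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PVTIME_CTRL,
 *		.attr	= KVM_ARM_VCPU_PVTIME_IPA,
 *		.addr	= (u64)&ipa,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */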

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}
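
/*
 * Illustrative sketch: on nVHE, a host perf event created with
 * attr.exclude_host set (i.e. counting guest activity only) cannot be
 * programmed immediately; a caller might bail out with
 *
 *	if (kvm_pmu_counter_deferred(&event->attr))
 *		return;
 *
 * and have the counter toggled at guest entry/exit instead.
 */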

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_vcpu_has_pmu(vcpu)				\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */