// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
#define __ARM64_KVM_HYP_SYSREG_SR_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);

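/* State switched for both the host and the guest contexts. */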
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);

	// POR_EL0 can affect uaccess, so must be saved/restored early.
	if (ctxt_has_s1poe(ctxt))
		ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0);
}

static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}

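/*
 * A host context carries a pointer to the vCPU it is currently running in
 * __hyp_running_vcpu; a guest context is embedded in its vCPU, so fall back
 * to container_of() when that pointer is NULL.
 */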
static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;

	if (!vcpu)
		vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);

	return vcpu;
}

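/* MTE state is only switched when MTE has been enabled for the VM. */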
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);

	return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}

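/*
 * The stage-1 permission/translation extensions (S1PIE, TCR2, S1POE) are
 * only context-switched when the CPU implements them and the VM's
 * ID_AA64MMFR3_EL1 view exposes them to the guest.
 */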
static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu;

	if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
		return false;

	vcpu = ctxt_to_vcpu(ctxt);
	return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
}

static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu;

	if (!cpus_have_final_cap(ARM64_HAS_TCR2))
		return false;

	vcpu = ctxt_to_vcpu(ctxt);
	return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, TCRX, IMP);
}

static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu;

	if (!system_supports_poe())
		return false;

	vcpu = ctxt_to_vcpu(ctxt);
	return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1POE, IMP);
}

static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
	ctxt_sys_reg(ctxt, CPACR_EL1) = read_sysreg_el1(SYS_CPACR);
	ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
	ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
	ctxt_sys_reg(ctxt, TCR_EL1) = read_sysreg_el1(SYS_TCR);
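	/*
	 * FEAT_S1PIE and FEAT_S1POE build on FEAT_TCR2 (their enable bits
	 * live in TCR2_EL1), hence the nested checks below.
	 */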
	if (ctxt_has_tcrx(ctxt)) {
		ctxt_sys_reg(ctxt, TCR2_EL1) = read_sysreg_el1(SYS_TCR2);

		if (ctxt_has_s1pie(ctxt)) {
			ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
			ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
		}

		if (ctxt_has_s1poe(ctxt))
			ctxt_sys_reg(ctxt, POR_EL1) = read_sysreg_el1(SYS_POR);
	}
	ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
	ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
	ctxt_sys_reg(ctxt, AFSR1_EL1) = read_sysreg_el1(SYS_AFSR1);
	ctxt_sys_reg(ctxt, FAR_EL1) = read_sysreg_el1(SYS_FAR);
	ctxt_sys_reg(ctxt, MAIR_EL1) = read_sysreg_el1(SYS_MAIR);
	ctxt_sys_reg(ctxt, VBAR_EL1) = read_sysreg_el1(SYS_VBAR);
	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
	ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
	ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
		ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
	}

	ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
	ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
	ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
}

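/*
 * Capture the exception return state (PC/PSTATE) after a guest exit; with
 * RAS, the guest's DISR_EL1 view is backed by VDISR_EL2.
 */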
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
	/*
	 * Guest PSTATE gets saved at guest fixup time in all
	 * cases. We still need to handle the nVHE host side here.
	 */
	if (!has_vhe() && ctxt->__hyp_running_vcpu)
		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
}

static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);

	// POR_EL0 can affect uaccess, so must be saved/restored early.
	if (ctxt_has_s1poe(ctxt))
		write_sysreg_s(ctxt_sys_reg(ctxt, POR_EL0), SYS_POR_EL0);
}

static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
}

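/*
 * EL1 reads of MPIDR_EL1 return VMPIDR_EL2, which is why the saved MPIDR
 * value is restored via VMPIDR_EL2 below.
 */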
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1), vmpidr_el2);

	if (has_vhe() ||
	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	} else if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with nVHE's __activate_traps().
		 */
		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
	if (ctxt_has_tcrx(ctxt)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR2_EL1), SYS_TCR2);

		if (ctxt_has_s1pie(ctxt)) {
			write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
			write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
		}

		if (ctxt_has_s1poe(ctxt))
			write_sysreg_el1(ctxt_sys_reg(ctxt, POR_EL1), SYS_POR);
	}
	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1), SYS_FAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1), SYS_MAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1), SYS_VBAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
		write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
	}

	if (!has_vhe() &&
	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with nVHE's __deactivate_traps().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}

	write_sysreg(ctxt_sys_reg(ctxt, SP_EL1), sp_el1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1), SYS_SPSR);
}

/* Read the VCPU state's PSTATE, but translate (v)EL2 to EL1. */
static inline u64 to_hw_pstate(const struct kvm_cpu_context *ctxt)
{
	u64 mode = ctxt->regs.pstate & (PSR_MODE_MASK | PSR_MODE32_BIT);

	switch (mode) {
	case PSR_MODE_EL2t:
		mode = PSR_MODE_EL1t;
		break;
	case PSR_MODE_EL2h:
		mode = PSR_MODE_EL1h;
		break;
	}

	return (ctxt->regs.pstate & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
}

static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = to_hw_pstate(ctxt);
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest. Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
	write_sysreg_el2(pstate, SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
}

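/*
 * For an AArch32 EL1 guest, save the banked SPSRs and the 32-bit fault
 * registers; DBGVCR32_EL2 is only saved on VHE or when the vCPU's debug
 * state is dirty.
 */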
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.spsr_abt = read_sysreg(spsr_abt);
	vcpu->arch.ctxt.spsr_und = read_sysreg(spsr_und);
	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}

static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	write_sysreg(vcpu->arch.ctxt.spsr_abt, spsr_abt);
	write_sysreg(vcpu->arch.ctxt.spsr_und, spsr_und);
	write_sysreg(vcpu->arch.ctxt.spsr_irq, spsr_irq);
	write_sysreg(vcpu->arch.ctxt.spsr_fiq, spsr_fiq);

	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}

#endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */