arch/arm64/kvm/hyp/nvhe/sys_regs.c (revision 6c8c1406)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#include <hyp/adjust_pc.h>

#include <nvhe/fixed_config.h>

#include "../../sys_regs.h"

/*
 * Hyp's copies of the host's CPU feature registers, holding sanitized values.
 */
u64 id_aa64pfr0_el1_sys_val;
u64 id_aa64pfr1_el1_sys_val;
u64 id_aa64isar0_el1_sys_val;
u64 id_aa64isar1_el1_sys_val;
u64 id_aa64isar2_el1_sys_val;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;

/*
 * Inject an unknown/undefined exception into an AArch64 guest while most of
 * its sysregs are live.
 */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

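	/* Snapshot the guest's PC and PSTATE from the live EL2 registers. */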
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

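	/*
	 * Pend a synchronous exception to EL1 and resolve it immediately:
	 * __kvm_adjust_pc() rewrites the vCPU's PC and PSTATE for entry at
	 * the EL1 exception vector.
	 */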
	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	__kvm_adjust_pc(vcpu);

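	/*
	 * Commit the result to the live registers: the syndrome and preferred
	 * return address for EL1, and the new PC and PSTATE via EL2 for the
	 * return to the guest.
	 */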
	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}

/*
 * Returns the restricted feature values of the feature register, based on the
 * limitations in restrict_fields.
 * A feature id field value of 0b0000 does not impose any restrictions.
 * Note: Use only for unsigned feature field values.
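 *
 * Worked example with hypothetical 4-bit field values:
 *   sys_reg_val     = 0x21  (field1 = 2, field0 = 1)
 *   restrict_fields = 0x12  (field1 = 1, field0 = 2)
 *   result          = 0x11  (per-field unsigned minimum)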
 */
static u64 get_restricted_features_unsigned(u64 sys_reg_val,
					    u64 restrict_fields)
{
	u64 value = 0UL;
	u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	/*
	 * According to the Arm Architecture Reference Manual, feature fields
	 * use increasing values to indicate increases in functionality.
	 * Iterate over the restricted feature fields and calculate the minimum
	 * unsigned value between the one supported by the system and the one
	 * the value is being restricted to.
	 */
	while (sys_reg_val && restrict_fields) {
		value |= min(sys_reg_val & mask, restrict_fields & mask);
		sys_reg_val &= ~mask;
		restrict_fields &= ~mask;
		mask <<= ARM64_FEATURE_FIELD_BITS;
	}

	return value;
}

/*
 * Functions that return the value of feature id registers for protected VMs
 * based on allowed features, system features, and KVM support.
 */

static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 set_mask = 0;
	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;

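	/*
	 * Clamp each restricted (unsigned) field to the minimum of the host's
	 * value and the pKVM limit.
	 */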
	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);

	/* Spectre and Meltdown mitigation in KVM */
	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
			       (u64)kvm->arch.pfr0_csv2);
	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
			       (u64)kvm->arch.pfr0_csv3);

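	/* Allowed fields pass through from the host; set_mask is forced on top. */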
	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}

static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;

	if (!kvm_has_mte(kvm))
		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

	return id_aa64pfr1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for Scalable Vectors; therefore, hyp has no sanitized
	 * copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, including breakpoints and watchpoints;
	 * therefore, pKVM has no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug; therefore, hyp has no sanitized copy of the
	 * feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation-defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation-defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
{
	return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
}

static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));

	return id_aa64isar1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));

	return id_aa64isar2_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
{
	u64 set_mask;

	set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
		PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);

	return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
}

static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
}

static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
}

/* Read a sanitized cpufeature ID register by its encoding */
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_pvm_id_aa64pfr0(vcpu);
	case SYS_ID_AA64PFR1_EL1:
		return get_pvm_id_aa64pfr1(vcpu);
	case SYS_ID_AA64ZFR0_EL1:
		return get_pvm_id_aa64zfr0(vcpu);
	case SYS_ID_AA64DFR0_EL1:
		return get_pvm_id_aa64dfr0(vcpu);
	case SYS_ID_AA64DFR1_EL1:
		return get_pvm_id_aa64dfr1(vcpu);
	case SYS_ID_AA64AFR0_EL1:
		return get_pvm_id_aa64afr0(vcpu);
	case SYS_ID_AA64AFR1_EL1:
		return get_pvm_id_aa64afr1(vcpu);
	case SYS_ID_AA64ISAR0_EL1:
		return get_pvm_id_aa64isar0(vcpu);
	case SYS_ID_AA64ISAR1_EL1:
		return get_pvm_id_aa64isar1(vcpu);
	case SYS_ID_AA64ISAR2_EL1:
		return get_pvm_id_aa64isar2(vcpu);
	case SYS_ID_AA64MMFR0_EL1:
		return get_pvm_id_aa64mmfr0(vcpu);
	case SYS_ID_AA64MMFR1_EL1:
		return get_pvm_id_aa64mmfr1(vcpu);
	case SYS_ID_AA64MMFR2_EL1:
		return get_pvm_id_aa64mmfr2(vcpu);
	default:
		/* Unhandled ID register, RAZ */
		return 0;
	}
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r)
{
	return pvm_read_id_reg(vcpu, reg_to_encoding(r));
}

/* Handler for RAZ/WI sysregs */
static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (!p->is_write)
		p->regval = 0;

	return true;
}

/*
 * Accessor for AArch32 feature id registers.
 *
 * The value of these registers is "unknown" according to the spec if AArch32
 * isn't supported.
 */
static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	/*
	 * No support for AArch32 guests; therefore, pKVM has no sanitized copy
	 * of AArch32 feature id registers.
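	 *
	 * The BUILD_BUG_ON below pins this assumption: the build fails if
	 * pKVM's restricted value for the EL1 field ever allows AArch32.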
	 */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	return pvm_access_raz_wi(vcpu, p, r);
}

/*
 * Accessor for AArch64 feature id registers.
 *
 * If access is allowed, set the regval to the protected VM's view of the
 * register and return true.
 * Otherwise, inject an undefined exception and return false.
 */
static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = read_id_reg(vcpu, r);
	return true;
}

static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	/* pVMs only support GICv3. 'nuf said. */
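	/* Reads report the sysreg interface enabled, IRQ/FIQ bypass disabled. */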
	if (!p->is_write)
		p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;

	return true;
}

/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = pvm_access_id_aarch64,		\
}

/* Mark the specified system register as Read-As-Zero/Write-Ignored */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
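 * (find_reg() relies on this ordering when searching the table).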
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * accessing it will inject an undefined exception into the guest.
 */
static const struct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),

	/* CRm=2 */
	AARCH32(SYS_ID_ISAR0_EL1),
	AARCH32(SYS_ID_ISAR1_EL1),
	AARCH32(SYS_ID_ISAR2_EL1),
	AARCH32(SYS_ID_ISAR3_EL1),
	AARCH32(SYS_ID_ISAR4_EL1),
	AARCH32(SYS_ID_ISAR5_EL1),
	AARCH32(SYS_ID_MMFR4_EL1),
	AARCH32(SYS_ID_ISAR6_EL1),

	/* CRm=3 */
	AARCH32(SYS_MVFR0_EL1),
	AARCH32(SYS_MVFR1_EL1),
	AARCH32(SYS_MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AARCH32(SYS_ID_PFR2_EL1),
	AARCH32(SYS_ID_DFR1_EL1),
	AARCH32(SYS_ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	AARCH64(SYS_ID_AA64PFR0_EL1),
	AARCH64(SYS_ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	AARCH64(SYS_ID_AA64ZFR0_EL1),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),
	AARCH64(SYS_ID_AA64DFR0_EL1),
	AARCH64(SYS_ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	AARCH64(SYS_ID_AA64AFR0_EL1),
	AARCH64(SYS_ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),
	AARCH64(SYS_ID_AA64ISAR0_EL1),
	AARCH64(SYS_ID_AA64ISAR1_EL1),
	AARCH64(SYS_ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),
	AARCH64(SYS_ID_AA64MMFR0_EL1),
	AARCH64(SYS_ID_AA64MMFR1_EL1),
	AARCH64(SYS_ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	/* Scalable Vector Registers are restricted. */

	RAZ_WI(SYS_ERRIDR_EL1),
	RAZ_WI(SYS_ERRSELR_EL1),
	RAZ_WI(SYS_ERXFR_EL1),
	RAZ_WI(SYS_ERXCTLR_EL1),
	RAZ_WI(SYS_ERXSTATUS_EL1),
	RAZ_WI(SYS_ERXADDR_EL1),
	RAZ_WI(SYS_ERXMISC0_EL1),
	RAZ_WI(SYS_ERXMISC1_EL1),

	/* Performance Monitoring Registers are restricted. */

	/* Limited Ordering Regions Registers are restricted. */

	HOST_HANDLED(SYS_ICC_SGI1R_EL1),
	HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
	HOST_HANDLED(SYS_ICC_SGI0R_EL1),
	{ SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },

	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),

	/* Performance Monitoring Registers are restricted. */

	/* Activity Monitoring Registers are restricted. */

	HOST_HANDLED(SYS_CNTP_TVAL_EL0),
	HOST_HANDLED(SYS_CNTP_CTL_EL0),
	HOST_HANDLED(SYS_CNTP_CVAL_EL0),

	/* Performance Monitoring Registers are restricted. */
};

/*
 * Checks that the sysreg table is sorted and contains no duplicate entries.
 *
 * Returns 0 if the table is consistent, or 1 otherwise.
 */
int kvm_check_pvm_sysreg_table(void)
{
	unsigned int i;

	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
		if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
			return 1;
	}

	return 0;
}

/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit and control should
 * return to the guest, or false if the exit is to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

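	/* Decode the access from the syndrome and read the guest's Rt value. */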
	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED) */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip instruction if instructed to do so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

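	/* For reads, copy the emulated value back into the guest's Rt. */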
	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}

/*
 * Handler for protected VM restricted exceptions.
 *
 * Injects an undefined exception into the guest and returns true to indicate
 * that the hypervisor has handled the exit and control should return to the
 * guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);
	return true;
}
523