xref: /linux/arch/arm64/kvm/hyp/nvhe/sys_regs.c (revision 42b9fed3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#include <hyp/adjust_pc.h>

#include <nvhe/fixed_config.h>

#include "../../sys_regs.h"

/*
 * Copies of the host's CPU feature registers holding sanitized values at hyp.
 */
u64 id_aa64pfr0_el1_sys_val;
u64 id_aa64pfr1_el1_sys_val;
u64 id_aa64isar0_el1_sys_val;
u64 id_aa64isar1_el1_sys_val;
u64 id_aa64isar2_el1_sys_val;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;
u64 id_aa64smfr0_el1_sys_val;

/*
 * Inject an unknown/undefined exception into an AArch64 guest while most of
 * its sysregs are live.
 */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

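	/* Snapshot the faulting PC and PSTATE from the live EL2 registers. */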
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

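	/*
	 * Pend a synchronous exception to EL1; the PC adjustment code below
	 * resolves the target vector and the new PSTATE for the guest.
	 */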
	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	__kvm_adjust_pc(vcpu);

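	/*
	 * Commit the result: the guest's EL1 handler finds the faulting PC in
	 * ELR_EL1 and the new ESR_EL1, while ELR_EL2/SPSR_EL2 make the next
	 * exception return enter the guest at its exception vector.
	 */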
	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}

/*
 * Returns the restricted feature values of the feature register, based on the
 * limitations in restrict_fields.
 * A feature id field value of 0b0000 does not impose any restrictions.
 * Note: Use only for unsigned feature field values.
 */
static u64 get_restricted_features_unsigned(u64 sys_reg_val,
					    u64 restrict_fields)
{
	u64 value = 0UL;
	u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	/*
	 * According to the Arm Architecture Reference Manual, feature fields
	 * use increasing values to indicate increases in functionality.
	 * Iterate over the restricted feature fields and calculate the minimum
	 * unsigned value between the one supported by the system, and what the
	 * value is being restricted to.
	 */
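	/*
	 * Illustrative example: with 4-bit fields, a system field value of
	 * 0x2 capped by a restrict_fields value of 0x1 yields 0x1, while a
	 * cap at or above the system value leaves the field unchanged.
	 */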
	while (sys_reg_val && restrict_fields) {
		value |= min(sys_reg_val & mask, restrict_fields & mask);
		sys_reg_val &= ~mask;
		restrict_fields &= ~mask;
		mask <<= ARM64_FEATURE_FIELD_BITS;
	}

	return value;
}

/*
 * Functions that return the value of feature id registers for protected VMs
 * based on allowed features, system features, and KVM support.
 */

static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
	u64 set_mask = 0;
	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;

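	/*
	 * Fields in the allow mask pass through from the sanitized host
	 * value; restricted unsigned fields are capped at the pKVM maxima.
	 */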
	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);

	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}

static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;

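	/* Hide MTE from the guest unless MTE has been enabled for the VM. */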
	if (!kvm_has_mte(kvm))
		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

	return id_aa64pfr1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for Scalable Vectors; therefore, hyp has no sanitized
	 * copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, including breakpoints and watchpoints;
	 * therefore, pKVM has no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug; therefore, hyp has no sanitized copy of the
	 * feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation-defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation-defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
{
	return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
}

static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;

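	/*
	 * Hide address and generic pointer authentication when the vCPU was
	 * not configured with ptrauth support.
	 */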
	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));

	return id_aa64isar1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));

	return id_aa64isar2_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
{
	u64 set_mask;

	set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
		PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);

	return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
}

static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
}

static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
}

/* Read a sanitized cpufeature ID register by its encoding */
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_pvm_id_aa64pfr0(vcpu);
	case SYS_ID_AA64PFR1_EL1:
		return get_pvm_id_aa64pfr1(vcpu);
	case SYS_ID_AA64ZFR0_EL1:
		return get_pvm_id_aa64zfr0(vcpu);
	case SYS_ID_AA64DFR0_EL1:
		return get_pvm_id_aa64dfr0(vcpu);
	case SYS_ID_AA64DFR1_EL1:
		return get_pvm_id_aa64dfr1(vcpu);
	case SYS_ID_AA64AFR0_EL1:
		return get_pvm_id_aa64afr0(vcpu);
	case SYS_ID_AA64AFR1_EL1:
		return get_pvm_id_aa64afr1(vcpu);
	case SYS_ID_AA64ISAR0_EL1:
		return get_pvm_id_aa64isar0(vcpu);
	case SYS_ID_AA64ISAR1_EL1:
		return get_pvm_id_aa64isar1(vcpu);
	case SYS_ID_AA64ISAR2_EL1:
		return get_pvm_id_aa64isar2(vcpu);
	case SYS_ID_AA64MMFR0_EL1:
		return get_pvm_id_aa64mmfr0(vcpu);
	case SYS_ID_AA64MMFR1_EL1:
		return get_pvm_id_aa64mmfr1(vcpu);
	case SYS_ID_AA64MMFR2_EL1:
		return get_pvm_id_aa64mmfr2(vcpu);
	default:
		/* Unhandled ID register, RAZ */
		return 0;
	}
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r)
{
	return pvm_read_id_reg(vcpu, reg_to_encoding(r));
}

/* Handler to RAZ/WI sysregs */
static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (!p->is_write)
		p->regval = 0;

	return true;
}

/*
 * Accessor for AArch32 feature id registers.
 *
 * The value of these registers is "unknown" according to the spec if AArch32
 * isn't supported.
 */
static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	/*
	 * No support for AArch32 guests; therefore, pKVM has no sanitized
	 * copy of AArch32 feature id registers.
	 */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_EL1_IMP);

	return pvm_access_raz_wi(vcpu, p, r);
}

/*
 * Accessor for AArch64 feature id registers.
 *
 * If access is allowed, set the regval to the protected VM's view of the
 * register and return true.
 * Otherwise, inject an undefined exception and return false.
 */
static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = read_id_reg(vcpu, r);
	return true;
}

static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	/* pVMs only support GICv3. 'nuf said. */
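	/* DIB/DFB: IRQ/FIQ bypass disabled; SRE: sysreg interface enabled. */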
	if (!p->is_write)
		p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;

	return true;
}

/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = pvm_access_id_aarch64,		\
}

/* Mark the specified system register as Read-As-Zero/Write-Ignored */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * it will lead to injecting an exception into the guest.
 */
static const struct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),

	/* CRm=2 */
	AARCH32(SYS_ID_ISAR0_EL1),
	AARCH32(SYS_ID_ISAR1_EL1),
	AARCH32(SYS_ID_ISAR2_EL1),
	AARCH32(SYS_ID_ISAR3_EL1),
	AARCH32(SYS_ID_ISAR4_EL1),
	AARCH32(SYS_ID_ISAR5_EL1),
	AARCH32(SYS_ID_MMFR4_EL1),
	AARCH32(SYS_ID_ISAR6_EL1),

	/* CRm=3 */
	AARCH32(SYS_MVFR0_EL1),
	AARCH32(SYS_MVFR1_EL1),
	AARCH32(SYS_MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AARCH32(SYS_ID_PFR2_EL1),
	AARCH32(SYS_ID_DFR1_EL1),
	AARCH32(SYS_ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	AARCH64(SYS_ID_AA64PFR0_EL1),
	AARCH64(SYS_ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	AARCH64(SYS_ID_AA64ZFR0_EL1),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),
	AARCH64(SYS_ID_AA64DFR0_EL1),
	AARCH64(SYS_ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	AARCH64(SYS_ID_AA64AFR0_EL1),
	AARCH64(SYS_ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),
	AARCH64(SYS_ID_AA64ISAR0_EL1),
	AARCH64(SYS_ID_AA64ISAR1_EL1),
	AARCH64(SYS_ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),
	AARCH64(SYS_ID_AA64MMFR0_EL1),
	AARCH64(SYS_ID_AA64MMFR1_EL1),
	AARCH64(SYS_ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	/* Scalable Vector Registers are restricted. */

	RAZ_WI(SYS_ERRIDR_EL1),
	RAZ_WI(SYS_ERRSELR_EL1),
	RAZ_WI(SYS_ERXFR_EL1),
	RAZ_WI(SYS_ERXCTLR_EL1),
	RAZ_WI(SYS_ERXSTATUS_EL1),
	RAZ_WI(SYS_ERXADDR_EL1),
	RAZ_WI(SYS_ERXMISC0_EL1),
	RAZ_WI(SYS_ERXMISC1_EL1),

	/* Performance Monitoring Registers are restricted. */

	/* Limited Ordering Regions Registers are restricted. */

	HOST_HANDLED(SYS_ICC_SGI1R_EL1),
	HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
	HOST_HANDLED(SYS_ICC_SGI0R_EL1),
	{ SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },

	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),

	/* Performance Monitoring Registers are restricted. */

	/* Activity Monitoring Registers are restricted. */

	HOST_HANDLED(SYS_CNTP_TVAL_EL0),
	HOST_HANDLED(SYS_CNTP_CTL_EL0),
	HOST_HANDLED(SYS_CNTP_CVAL_EL0),

	/* Performance Monitoring Registers are restricted. */
};

/*
 * Checks that the sysreg table is unique and in-order.
 *
 * Returns 0 if the table is consistent, or 1 otherwise.
 */
int kvm_check_pvm_sysreg_table(void)
{
	unsigned int i;

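	/*
	 * cmp_sys_reg() orders entries by Op0, Op1, CRn, CRm, Op2; a
	 * non-negative comparison means an out-of-order or duplicate entry.
	 */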
	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
		if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
			return 1;
	}

	return 0;
}

/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't, to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

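	/* Decode the trapped instruction's ESR into a sysreg access descriptor. */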
	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED) */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip instruction if instructed to do so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}

/*
 * Handler for protected VM restricted exceptions.
 *
 * Inject an undefined exception into the guest and return true to indicate
 * that the hypervisor has handled the exit, and control should go back to the
 * guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);
	return true;
}