xref: /linux/arch/riscv/kvm/vcpu_onereg.c (revision e91c37f1)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
};

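/*
 * Map a host ISA extension ID back to the corresponding KVM ISA
 * extension ID; returns KVM_RISCV_ISA_EXT_MAX when no mapping exists.
 */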
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

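/*
 * Whether an extension can be enabled for the guest: H is never exposed
 * to guests, and V is only allowed when userspace vector state control
 * permits it.
 */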
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

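/*
 * Whether an extension can be hidden from the guest: many extensions
 * have no trap-and-disable mechanism at all, while Ssaia can be masked
 * through the Smstateen state-enable bits when the host has Smstateen.
 */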
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	default:
		break;
	}

	return true;
}

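/* Populate the initial guest ISA bitmap from the extensions the host supports. */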
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

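/*
 * Read one CONFIG ONE_REG register: the base ISA bitmap, CBO block
 * sizes, machine ID values, or the host satp mode.
 */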
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

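/*
 * Write one CONFIG ONE_REG register. Mutable values may only change
 * before the vCPU first runs; host-derived values (block sizes, satp
 * mode) must be written back unchanged.
 */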
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

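/*
 * CORE registers index directly into struct kvm_riscv_core, whose GPR
 * layout matches the saved guest context, so most registers are copied
 * with plain array indexing.
 */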
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

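/*
 * For the sip CSR the guest-visible value lives in hvip, so pending
 * interrupts are flushed into hvip before reading; every other general
 * CSR is read straight out of struct kvm_vcpu_csr.
 */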
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

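/*
 * The CSR ONE_REG accessors dispatch on the register subtype: general
 * CSRs, AIA CSRs, and (only when the host implements Smstateen) the
 * state-enable CSRs.
 */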
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

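/*
 * A SINGLE ISA extension register reads back 1 when the extension is
 * enabled for this vCPU, 0 when the host has it but the guest does not,
 * and fails with -ENOENT when the host lacks it entirely.
 */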
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

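/*
 * MULTI ISA extension registers pack BITS_PER_LONG single-extension
 * flags into one register; reg_num selects which block of KVM extension
 * IDs the register covers.
 */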
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

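/*
 * The copy_*_reg_indices() helpers below do double duty: called with a
 * NULL uindices pointer they merely count registers, otherwise they
 * also copy each register ID out to userspace for KVM_GET_REG_LIST.
 */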
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting a config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

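/*
 * Vector data registers are reported with their true size of vlenb
 * bytes; vlenb is a power of two, so __builtin_ctzl() yields log2 of
 * the size for the KVM_REG_SIZE field of the register ID.
 */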
static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

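/*
 * Illustrative userspace usage (a sketch, not part of this file):
 * reading the base ISA config register through the ONE_REG interface on
 * RV64, where "vcpu_fd" is a hypothetical vCPU file descriptor:
 *
 *	unsigned long isa;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("base ISA bitmap: 0x%lx\n", isa);
 */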
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}