xref: /linux/arch/loongarch/kvm/vcpu.c (revision 021bc4b9)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

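/*
 * Resulting layout of the binary stats blob exposed to userspace, as
 * implied by the offsets above: the header comes first, followed by the
 * KVM_STATS_NAME_SIZE-byte id string, then the descriptor array, then
 * the stat values themselves.
 */
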
/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	return RESUME_GUEST;
}

/*
 * Check and handle pending signals and vCPU requests.
 * Called with IRQs enabled and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	ret = kvm_check_requests(vcpu);

	return ret;
}

/*
 * Called with IRQs enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest, with IRQs disabled
 *         Anything else if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Deliver pending timer and interrupt events, and check the
		 * VPID, before the vCPU enters the guest.
		 */
		local_irq_disable();
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
		kvm_check_vpid(vcpu);
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
		/* Clear KVM_LARCH_SWCSR_LATEST as CSR registers will change when entering the guest */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			/* Make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}

/*
 * Return RESUME_GUEST (1) to resume the guest, or "<= 0" to resume the host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & 0x1fff; /* Ignore NMI */
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_disable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Protect from TOD sync and vcpu_load/put() */
	preempt_disable();
	ret = kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
	preempt_enable();

	return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 are read from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	int ret = 0, gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 are injected through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return ret;
	}

	kvm_write_sw_gcsr(csr, id, val);

	return ret;
}

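/*
 * For illustration: writing ESTAT with only IP2 pending (val = BIT(4))
 * stores BIT(2) into the software GINTC copy and clears the IP0~IP7
 * bits from the software ESTAT copy; _kvm_getcsr() above reassembles
 * the original value by shifting GINTC back into place.
 */
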
static int _kvm_get_cpucfg(int id, u64 *v)
{
	int ret = 0;

	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
		return -EINVAL;

	switch (id) {
	case 2:
		/* Return CPUCFG2 features supported by KVM */
		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
		     CPUCFG2_LAM;
		/*
		 * If LSX is supported by the CPU, it is also supported
		 * by KVM, as we implement it.
		 */
		if (cpu_has_lsx)
			*v |= CPUCFG2_LSX;
		/*
		 * If LASX is supported by the CPU, it is also supported
		 * by KVM, as we implement it.
		 */
		if (cpu_has_lasx)
			*v |= CPUCFG2_LASX;

		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int kvm_check_cpucfg(int id, u64 val)
{
	u64 mask;
	int ret = 0;

	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
		return -EINVAL;

	ret = _kvm_get_cpucfg(id, &mask);
	if (ret)
		return ret;

	switch (id) {
	case 2:
		/* CPUCFG2 features checking */
		if (val & ~mask)
			/* Unsupported features must not be set */
			ret = -EINVAL;
		else if (!(val & CPUCFG2_LLFTP))
			/* LLFTP must be set, as the guest must have a constant timer */
			ret = -EINVAL;
		else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
			/* Single and double precision must both be set when FP is enabled */
			ret = -EINVAL;
		else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
			/* FP must be set when LSX is enabled */
			ret = -EINVAL;
		else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
			/* LSX must be set when LASX is enabled (FP was checked above) */
			ret = -EINVAL;
		break;
	default:
		break;
	}
	return ret;
}

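/*
 * For illustration: against the mask returned by _kvm_get_cpucfg(),
 * val = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_FPVERS |
 * CPUCFG2_LLFTP | CPUCFG2_LLFTPREV passes kvm_check_cpucfg(2, val),
 * while dropping CPUCFG2_LLFTP, or setting CPUCFG2_LSX without
 * CPUCFG2_FP, makes it return -EINVAL.
 */
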
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			*v = drdtime() + vcpu->kvm->arch.time_offset;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
			const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		ret = kvm_check_cpucfg(id, v);
		if (ret)
			break;
		vcpu->arch.cpucfg[id] = (u32)v;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * The guest counter offset is board-wide, not
			 * per-vCPU, so only vCPU 0 sets it on SMP systems.
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
			kvm_reset_timer(vcpu);
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret = 0;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}

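/*
 * Minimal userspace sketch of the ONE_REG interface handled above
 * (illustrative; assumes a vCPU fd obtained via KVM_CREATE_VCPU and
 * elides error handling):
 *
 *	struct kvm_one_reg reg;
 *	__u64 counter;
 *
 *	reg.id   = KVM_REG_LOONGARCH_COUNTER;
 *	reg.addr = (__u64)(unsigned long)&counter;
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */
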
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default, will support LSX/LASX later. */
	return -EINVAL;
}

static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case 2:
		return 0;
	default:
		return -ENXIO;
	}

	return -ENXIO;
}

static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int ret;
	uint64_t val;
	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

	ret = _kvm_get_cpucfg(attr->attr, &val);
	if (ret)
		return ret;

	return put_user(val, uaddr);
}

static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

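/*
 * Illustrative userspace flow for the attribute handlers above (assumes
 * a vCPU fd; error handling elided): probe for CPUCFG2 support with
 * KVM_HAS_DEVICE_ATTR, then fetch the supported-feature mask with
 * KVM_GET_DEVICE_ATTR:
 *
 *	__u64 mask;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_CPUCFG,
 *		.attr  = 2,
 *		.addr  = (__u64)(unsigned long)&mask,
 *	};
 *	if (!ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
 *		ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);
 */
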
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	struct kvm_device_attr attr;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only software CSR state should be modified here.
	 *
	 * If any hardware CSR register were modified directly, a
	 * vcpu_load/vcpu_put pair would be needed: the hardware CSR
	 * registers are owned by this vCPU, so after switching to another
	 * vCPU, that vCPU would need to reload them.
	 *
	 * When software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
	 * bit must be cleared in vcpu->arch.aux_inuse, so that vcpu_load
	 * sees the flag and reloads the hardware CSR registers from the
	 * software state.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i = 0;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/* Enable FPU */
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

	preempt_enable();
}

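/*
 * The low 64 bits of each LSX/LASX vector register alias the scalar FP
 * registers, which is why the paths below restore only the upper halves
 * when narrower state is already live.
 */
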
#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	/* Enable LSX for guest */
	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
	case KVM_LARCH_FPU:
		/*
		 * Guest FPU state already loaded,
		 * only restore upper LSX state
		 */
		_restore_lsx_upper(&vcpu->arch.fpu);
		break;
	default:
		/*
		 * Neither FP nor LSX already active,
		 * restore full LSX state
		 */
		kvm_restore_lsx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
	case KVM_LARCH_LSX:
	case KVM_LARCH_LSX | KVM_LARCH_FPU:
		/* Guest LSX state already loaded, only restore upper LASX state */
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	case KVM_LARCH_FPU:
		/* Guest FP state already loaded, only restore upper LSX & LASX state */
		_restore_lsx_upper(&vcpu->arch.fpu);
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	default:
		/* Neither FP nor LSX already active, restore full LASX state */
		kvm_restore_lasx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
		kvm_save_lasx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

		/* Disable LASX & LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
		kvm_save_lsx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

		/* Disable LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}

	preempt_enable();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}

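/*
 * For illustration: the sign of kvm_interrupt.irq selects the operation
 * handled above, so irq = 3 queues guest interrupt 3 and irq = -3
 * dequeues it again; irq = 0 is rejected with -EINVAL.
 */
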
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;

	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All KVM exceptions share one exception entry, and the host <->
	 * guest switch also switches the ECFG.VS field, so keep the host
	 * ECFG.VS value here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Start the guest in direct address (DA) mode */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}

static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
	context->last_vcpu = vcpu;

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);

	/* Control guest page CCA attribute */
	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	/* Restore Root.GINTC from unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next vCPU from succeeding by matching a LL on
	 * the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}

static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update software CSR state from hardware if it is stale. Most CSR
	 * registers are kept unchanged across a process context switch,
	 * except for registers such as the remaining timer tick value and
	 * the injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu;
	unsigned long flags;

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
	}

	if (run->immediate_exit)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);
	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * Guest exit accounting was already done in kvm_handle_exit(),
	 * and its return value must not be RESUME_GUEST here.
	 */
	local_irq_enable();
out:
	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);

	return r;
}
1177