// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq.c: API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 */

#include <linux/export.h>
#include <linux/kvm_host.h>

#include "irq.h"
#include "i8254.h"
#include "x86.h"
#include "xen.h"

/*
 * check if there are pending timer events
 * to be processed.
 */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return apic_has_pending_timer(vcpu);

	return 0;
}
EXPORT_SYMBOL(kvm_cpu_has_pending_timer);

/*
 * check if there is a pending userspace external interrupt
 */
static int pending_userspace_extint(struct kvm_vcpu *v)
{
	return v->arch.pending_external_vector != -1;
}

/*
 * check if there is a pending interrupt from a
 * non-APIC source without intack.
 */
int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
	/*
	 * FIXME: interrupt.injected represents an interrupt whose
	 * side-effects have already been applied (e.g. the bit has
	 * already been moved from IRR to ISR). Therefore, it is
	 * incorrect to rely on interrupt.injected to know if there
	 * is a pending interrupt in the user-mode LAPIC.
	 * As a result, nVMX/nSVM cannot distinguish whether it should
	 * exit from L2 to L1 on EXTERNAL_INTERRUPT because of a
	 * pending interrupt, or re-inject an already injected
	 * interrupt.
	 */
	if (!lapic_in_kernel(v))
		return v->arch.interrupt.injected;

	if (kvm_xen_has_interrupt(v))
		return 1;

	if (!kvm_apic_accept_pic_intr(v))
		return 0;

	if (irqchip_split(v->kvm))
		return pending_userspace_extint(v);
	else
		return v->kvm->arch.vpic->output;
}

/*
 * check if there is an injectable interrupt:
 * when virtual interrupt delivery is enabled,
 * interrupts from the APIC are handled by hardware,
 * so we don't need to check them here.
 */
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
{
	if (kvm_cpu_has_extint(v))
		return 1;

	if (!is_guest_mode(v) && kvm_vcpu_apicv_active(v))
		return 0;

	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);

/*
 * check if there is a pending interrupt without
 * intack.
 */
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
	if (kvm_cpu_has_extint(v))
		return 1;

	return kvm_apic_has_interrupt(v) != -1;	/* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);

/*
 * Read the pending interrupt vector (from a non-APIC
 * source) and intack.
 */
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
	if (!kvm_cpu_has_extint(v)) {
		WARN_ON(!lapic_in_kernel(v));
		return -1;
	}

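	/*
	 * With a userspace local APIC, the only extint KVM knows about
	 * is the vector userspace queued via KVM_INTERRUPT; hand that
	 * back directly.
	 */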
	if (!lapic_in_kernel(v))
		return v->arch.interrupt.nr;

	if (kvm_xen_has_interrupt(v))
		return v->kvm->arch.xen.upcall_vector;

	if (irqchip_split(v->kvm)) {
		int vector = v->arch.pending_external_vector;

		v->arch.pending_external_vector = -1;
		return vector;
	} else
		return kvm_pic_read_irq(v->kvm); /* PIC */
}

/*
 * Read the pending interrupt vector and intack.
 */
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
	int vector = kvm_cpu_get_extint(v);
	if (vector != -1)
		return vector;			/* PIC */

	return kvm_get_apic_interrupt(v);	/* APIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);

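/*
 * Deliver any pending in-kernel LAPIC timer interrupts to the guest.
 */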
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_inject_apic_timer_irqs(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);

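/*
 * Move the in-kernel timers (LAPIC timer, PIT, and any vendor-specific
 * timers via the migrate_timers hook) to the physical CPU this vCPU is
 * now running on.
 */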
void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	__kvm_migrate_apic_timer(vcpu);
	__kvm_migrate_pit_timer(vcpu);
	static_call_cond(kvm_x86_migrate_timers)(vcpu);
}

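/*
 * Resampling irqfds need the EOI to be seen by the in-kernel PIC/IOAPIC,
 * so they require the full in-kernel irqchip; a plain irqfd works with
 * any in-kernel irqchip, including the split model.
 */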
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;

	return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
}