xref: /freebsd/sys/amd64/vmm/vmm_lapic.c (revision fdafd315)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/smp.h>
32 
33 #include <x86/specialreg.h>
34 #include <x86/apicreg.h>
35 
36 #include <machine/vmm.h>
37 #include "vmm_ktr.h"
38 #include "vmm_lapic.h"
39 #include "vlapic.h"
40 
41 /*
42  * Some MSI message definitions
43  */
44 #define	MSI_X86_ADDR_MASK	0xfff00000
45 #define	MSI_X86_ADDR_BASE	0xfee00000
46 #define	MSI_X86_ADDR_RH		0x00000008	/* Redirection Hint */
47 #define	MSI_X86_ADDR_LOG	0x00000004	/* Destination Mode */
48 
int
lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
{
	/*
	 * Per the "Maskable Hardware Interrupts" section of the Intel
	 * SDM, only vectors 16 through 255 may be delivered through the
	 * local APIC; reject anything outside that range.
	 */
	if (vector < 16 || vector > 255)
		return (EINVAL);

	/* Mark the interrupt pending and kick the vcpu if required. */
	if (vlapic_set_intr_ready(vm_lapic(vcpu), vector, level))
		vcpu_notify_event(vcpu, true);

	return (0);
}
66 
67 int
lapic_set_local_intr(struct vm * vm,struct vcpu * vcpu,int vector)68 lapic_set_local_intr(struct vm *vm, struct vcpu *vcpu, int vector)
69 {
70 	struct vlapic *vlapic;
71 	cpuset_t dmask;
72 	int cpu, error;
73 
74 	if (vcpu == NULL) {
75 		error = 0;
76 		dmask = vm_active_cpus(vm);
77 		CPU_FOREACH_ISSET(cpu, &dmask) {
78 			vlapic = vm_lapic(vm_vcpu(vm, cpu));
79 			error = vlapic_trigger_lvt(vlapic, vector);
80 			if (error)
81 				break;
82 		}
83 	} else {
84 		vlapic = vm_lapic(vcpu);
85 		error = vlapic_trigger_lvt(vlapic, vector);
86 	}
87 
88 	return (error);
89 }
90 
91 int
lapic_intr_msi(struct vm * vm,uint64_t addr,uint64_t msg)92 lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
93 {
94 	int delmode, vec;
95 	uint32_t dest;
96 	bool phys;
97 
98 	VM_CTR2(vm, "lapic MSI addr: %#lx msg: %#lx", addr, msg);
99 
100 	if ((addr & MSI_X86_ADDR_MASK) != MSI_X86_ADDR_BASE) {
101 		VM_CTR1(vm, "lapic MSI invalid addr %#lx", addr);
102 		return (-1);
103 	}
104 
105 	/*
106 	 * Extract the x86-specific fields from the MSI addr/msg
107 	 * params according to the Intel Arch spec, Vol3 Ch 10.
108 	 *
109 	 * The PCI specification does not support level triggered
110 	 * MSI/MSI-X so ignore trigger level in 'msg'.
111 	 *
112 	 * The 'dest' is interpreted as a logical APIC ID if both
113 	 * the Redirection Hint and Destination Mode are '1' and
114 	 * physical otherwise.
115 	 */
116 	dest = (addr >> 12) & 0xff;
117 	phys = ((addr & (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG)) !=
118 	    (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG));
119 	delmode = msg & APIC_DELMODE_MASK;
120 	vec = msg & 0xff;
121 
122 	VM_CTR3(vm, "lapic MSI %s dest %#x, vec %d",
123 	    phys ? "physical" : "logical", dest, vec);
124 
125 	vlapic_deliver_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec);
126 	return (0);
127 }
128 
129 static bool
x2apic_msr(u_int msr)130 x2apic_msr(u_int msr)
131 {
132 	return (msr >= 0x800 && msr <= 0xBFF);
133 }
134 
135 static u_int
x2apic_msr_to_regoff(u_int msr)136 x2apic_msr_to_regoff(u_int msr)
137 {
138 
139 	return ((msr - 0x800) << 4);
140 }
141 
142 bool
lapic_msr(u_int msr)143 lapic_msr(u_int msr)
144 {
145 
146 	return (x2apic_msr(msr) || msr == MSR_APICBASE);
147 }
148 
149 int
lapic_rdmsr(struct vcpu * vcpu,u_int msr,uint64_t * rval,bool * retu)150 lapic_rdmsr(struct vcpu *vcpu, u_int msr, uint64_t *rval, bool *retu)
151 {
152 	int error;
153 	u_int offset;
154 	struct vlapic *vlapic;
155 
156 	vlapic = vm_lapic(vcpu);
157 
158 	if (msr == MSR_APICBASE) {
159 		*rval = vlapic_get_apicbase(vlapic);
160 		error = 0;
161 	} else {
162 		offset = x2apic_msr_to_regoff(msr);
163 		error = vlapic_read(vlapic, 0, offset, rval, retu);
164 	}
165 
166 	return (error);
167 }
168 
169 int
lapic_wrmsr(struct vcpu * vcpu,u_int msr,uint64_t val,bool * retu)170 lapic_wrmsr(struct vcpu *vcpu, u_int msr, uint64_t val, bool *retu)
171 {
172 	int error;
173 	u_int offset;
174 	struct vlapic *vlapic;
175 
176 	vlapic = vm_lapic(vcpu);
177 
178 	if (msr == MSR_APICBASE) {
179 		error = vlapic_set_apicbase(vlapic, val);
180 	} else {
181 		offset = x2apic_msr_to_regoff(msr);
182 		error = vlapic_write(vlapic, 0, offset, val, retu);
183 	}
184 
185 	return (error);
186 }
187 
188 int
lapic_mmio_write(struct vcpu * vcpu,uint64_t gpa,uint64_t wval,int size,void * arg)189 lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size,
190 		 void *arg)
191 {
192 	int error;
193 	uint64_t off;
194 	struct vlapic *vlapic;
195 
196 	off = gpa - DEFAULT_APIC_BASE;
197 
198 	/*
199 	 * Memory mapped local apic accesses must be 4 bytes wide and
200 	 * aligned on a 16-byte boundary.
201 	 */
202 	if (size != 4 || off & 0xf)
203 		return (EINVAL);
204 
205 	vlapic = vm_lapic(vcpu);
206 	error = vlapic_write(vlapic, 1, off, wval, arg);
207 	return (error);
208 }
209 
210 int
lapic_mmio_read(struct vcpu * vcpu,uint64_t gpa,uint64_t * rval,int size,void * arg)211 lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
212 		void *arg)
213 {
214 	int error;
215 	uint64_t off;
216 	struct vlapic *vlapic;
217 
218 	off = gpa - DEFAULT_APIC_BASE;
219 
220 	/*
221 	 * Memory mapped local apic accesses should be aligned on a
222 	 * 16-byte boundary.  They are also suggested to be 4 bytes
223 	 * wide, alas not all OSes follow suggestions.
224 	 */
225 	off &= ~3;
226 	if (off & 0xf)
227 		return (EINVAL);
228 
229 	vlapic = vm_lapic(vcpu);
230 	error = vlapic_read(vlapic, 1, off, rval, arg);
231 	return (error);
232 }
233