xref: /freebsd/sys/x86/iommu/intel_intrmap.c (revision 1f474190)
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <x86/include/apicreg.h>
#include <x86/include/apicvar.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/intel_dmar.h>
#include <x86/iommu/iommu_intrmap.h>

static struct dmar_unit *dmar_ir_find(device_t src, uint16_t *rid,
    int *is_dmar);
static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
    uint64_t low, uint16_t rid);
static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);

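/*
 * Allocate 'count' consecutive interrupt remapping table entries for the
 * MSI/MSI-X interrupts of device 'src' and return their indices as
 * cookies.  If the device is not behind a unit with remapping enabled,
 * all cookies are set to -1 and EOPNOTSUPP is returned.
 */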
int
iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	u_int idx, i;
	int error;

	unit = dmar_ir_find(src, NULL, NULL);
	if (unit == NULL || !unit->ir_enabled) {
		for (i = 0; i < count; i++)
			cookies[i] = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, count, M_FIRSTFIT | M_NOWAIT,
	    &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	for (i = 0; i < count; i++)
		cookies[i] = idx + i;
	return (0);
}

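/*
 * Program the IRTE selected by 'cookie' to deliver 'vector' to 'cpu' and
 * return the MSI address/data pair that makes the device signal the
 * interrupt in remappable format.  The DMAR unit itself is a special
 * case: its interrupts are never remapped, so the compatibility-format
 * address/data are returned instead.
 */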
int
iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
    uint64_t *addr, uint32_t *data)
{
	struct dmar_unit *unit;
	uint64_t low;
	uint16_t rid;
	int is_dmar;

	unit = dmar_ir_find(src, &rid, &is_dmar);
	if (is_dmar) {
		KASSERT(unit == NULL, ("DMAR cannot translate itself"));

		/*
		 * See VT-d specification, 5.1.6 Remapping Hardware -
		 * Interrupt Programming.
		 */
		*data = vector;
		*addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
		if (x2apic_mode)
			*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
		else
			KASSERT(cpu <= 0xff, ("cpu id too big %d", cpu));
		return (0);
	}
	if (unit == NULL || !unit->ir_enabled || cookie == -1)
		return (EOPNOTSUPP);

	low = (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) | DMAR_IRTE1_V(vector) |
	    DMAR_IRTE1_DLM_FM | DMAR_IRTE1_TM_EDGE | DMAR_IRTE1_RH_DIRECT |
	    DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, cookie, low, rid);

	if (addr != NULL) {
		/*
		 * See VT-d specification, 5.1.5.2 MSI and MSI-X
		 * Register Programming.
		 */
		*addr = MSI_INTEL_ADDR_BASE | ((cookie & 0x7fff) << 5) |
		    ((cookie & 0x8000) << 2) | 0x18;
		*data = 0;
	}
	return (0);
}

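/*
 * Release the IRTE previously allocated for the MSI interrupt identified
 * by 'cookie'.
 */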
int
iommu_unmap_msi_intr(device_t src, u_int cookie)
{
	struct dmar_unit *unit;

	if (cookie == -1)
		return (0);
	unit = dmar_ir_find(src, NULL, NULL);
	return (dmar_ir_free_irte(unit, cookie));
}

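/*
 * Allocate and program an IRTE for an I/O APIC pin, returning the entry
 * index as a cookie together with the remappable-format redirection
 * entry words in 'hi' and 'lo'.
 */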
int
iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
    bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	uint64_t low, iorte;
	u_int idx;
	int error;
	uint16_t rid;

	unit = dmar_find_ioapic(ioapic_id, &rid);
	if (unit == NULL || !unit->ir_enabled) {
		*cookie = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, 1, M_FIRSTFIT | M_NOWAIT, &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	low = 0;
	switch (irq) {
	case IRQ_EXTINT:
		low |= DMAR_IRTE1_DLM_ExtINT;
		break;
	case IRQ_NMI:
		low |= DMAR_IRTE1_DLM_NMI;
		break;
	case IRQ_SMI:
		low |= DMAR_IRTE1_DLM_SMI;
		break;
	default:
		KASSERT(vector != 0, ("No vector for IRQ %u", irq));
		low |= DMAR_IRTE1_DLM_FM | DMAR_IRTE1_V(vector);
		break;
	}
	low |= (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) |
	    (edge ? DMAR_IRTE1_TM_EDGE : DMAR_IRTE1_TM_LEVEL) |
	    DMAR_IRTE1_RH_DIRECT | DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, idx, low, rid);

	if (hi != NULL) {
		/*
		 * See VT-d specification, 5.1.5.1 I/OxAPIC
		 * Programming.
		 */
		iorte = (1ULL << 48) | ((uint64_t)(idx & 0x7fff) << 49) |
		    ((idx & 0x8000) != 0 ? (1 << 11) : 0) |
		    (edge ? IOART_TRGREDG : IOART_TRGRLVL) |
		    (activehi ? IOART_INTAHI : IOART_INTALO) |
		    IOART_DELFIXED | vector;
		*hi = iorte >> 32;
		*lo = iorte;
	}
	*cookie = idx;
	return (0);
}

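/*
 * Release the IRTE allocated for an I/O APIC pin and invalidate the
 * caller's cookie.
 */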
int
iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
	struct dmar_unit *unit;
	u_int idx;

	idx = *cookie;
	if (idx == -1)
		return (0);
	*cookie = -1;
	unit = dmar_find_ioapic(ioapic_id, NULL);
	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %d unit %p", idx, unit));
	return (dmar_ir_free_irte(unit, idx));
}

static struct dmar_unit *
dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
	devclass_t src_class;
	struct dmar_unit *unit;

	/*
	 * Determine whether the interrupt source generates FSB
	 * interrupts.  If so, it is either the DMAR itself, in which
	 * case its interrupts are not remapped, or an HPET, whose
	 * interrupts are remapped.  For the HPET, the source id is
	 * reported by the HPET record in the DMAR ACPI table.
	 */
	if (is_dmar != NULL)
		*is_dmar = FALSE;
	src_class = device_get_devclass(src);
	if (src_class == devclass_find("dmar")) {
		unit = NULL;
		if (is_dmar != NULL)
			*is_dmar = TRUE;
	} else if (src_class == devclass_find("hpet")) {
		unit = dmar_find_hpet(src, rid);
	} else {
		unit = dmar_find(src, bootverbose);
		if (unit != NULL && rid != NULL)
			iommu_get_requester(src, rid);
	}
	return (unit);
}

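/*
 * Write the low and high words of the IRTE at 'idx' and invalidate the
 * interrupt entry cache.  If the entry is already present, only the low
 * word is updated, which handles rebalancing an interrupt to another
 * CPU.
 */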
static void
dmar_ir_program_irte(struct dmar_unit *unit, u_int idx, uint64_t low,
    uint16_t rid)
{
	dmar_irte_t *irte;
	uint64_t high;

	KASSERT(idx < unit->irte_cnt,
	    ("bad cookie %d %d", idx, unit->irte_cnt));
	irte = &(unit->irt[idx]);
	high = DMAR_IRTE2_SVT_RID | DMAR_IRTE2_SQ_RID |
	    DMAR_IRTE2_SID_RID(rid);
	if (bootverbose) {
		device_printf(unit->dev,
		    "programming irte[%d] rid %#x high %#jx low %#jx\n",
		    idx, rid, (uintmax_t)high, (uintmax_t)low);
	}
	DMAR_LOCK(unit);
	if ((irte->irte1 & DMAR_IRTE1_P) != 0) {
		/*
		 * The rte is already valid.  Assume that the request
		 * is to remap the interrupt for balancing.  Only the
		 * low word of the rte needs to be changed.  Assert
		 * that the high word contains the expected value.
		 */
		KASSERT(irte->irte2 == high,
		    ("irte2 mismatch, %jx %jx", (uintmax_t)irte->irte2,
		    (uintmax_t)high));
		dmar_pte_update(&irte->irte1, low);
	} else {
		dmar_pte_store(&irte->irte2, high);
		dmar_pte_store(&irte->irte1, low);
	}
	dmar_qi_invalidate_iec(unit, idx, 1);
	DMAR_UNLOCK(unit);
}

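/*
 * Clear the IRTE at 'cookie', invalidate the interrupt entry cache and
 * return the index to the allocator.
 */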
static int
dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie)
{
	dmar_irte_t *irte;

	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %d unit %p", cookie, unit));
	KASSERT(cookie < unit->irte_cnt,
	    ("bad cookie %u %u", cookie, unit->irte_cnt));
	irte = &(unit->irt[cookie]);
	dmar_pte_clear(&irte->irte1);
	dmar_pte_clear(&irte->irte2);
	DMAR_LOCK(unit);
	dmar_qi_invalidate_iec(unit, cookie, 1);
	DMAR_UNLOCK(unit);
	vmem_free(unit->irtids, cookie, 1);
	return (0);
}

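/* Round v up to the next power of two. */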
static u_int
clp2(u_int v)
{

	return (powerof2(v) ? v : 1 << fls(v));
}

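/*
 * Set up interrupt remapping for the unit: allocate the remapping table
 * and its index allocator, point the hardware at the table, reprogram
 * already configured interrupt pins and finally enable remapping.
 * Remapping is skipped when the hardware lacks the capability, when it
 * is disabled by the hw.dmar.ir tunable, or when queued invalidation is
 * not available.
 */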
int
dmar_init_irt(struct dmar_unit *unit)
{

	if ((unit->hw_ecap & DMAR_ECAP_IR) == 0)
		return (0);
	unit->ir_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.ir", &unit->ir_enabled);
	if (!unit->ir_enabled)
		return (0);
	if (!unit->qi_enabled) {
		unit->ir_enabled = 0;
		if (bootverbose)
			device_printf(unit->dev,
	     "QI disabled, disabling interrupt remapping\n");
		return (0);
	}
	unit->irte_cnt = clp2(num_io_irqs);
	unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(
	    unit->irte_cnt * sizeof(dmar_irte_t), M_ZERO | M_WAITOK, 0,
	    dmar_high, PAGE_SIZE, 0, DMAR_IS_COHERENT(unit) ?
	    VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
	if (unit->irt == NULL)
		return (ENOMEM);
	unit->irt_phys = pmap_kextract((vm_offset_t)unit->irt);
	unit->irtids = vmem_create("dmarirt", 0, unit->irte_cnt, 1, 0,
	    M_FIRSTFIT | M_NOWAIT);
	DMAR_LOCK(unit);
	dmar_load_irt_ptr(unit);
	dmar_qi_invalidate_iec_glob(unit);
	DMAR_UNLOCK(unit);

	/*
	 * Initialize mappings for already configured interrupt pins.
	 * Required because, once remapping is enabled below, those
	 * interrupts would fault without IRTEs.
	 */
	intr_reprogram();

	DMAR_LOCK(unit);
	dmar_enable_ir(unit);
	DMAR_UNLOCK(unit);
	return (0);
}

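/*
 * Tear down interrupt remapping for the unit: disable remapping in
 * hardware, flush the interrupt entry cache and free the table and its
 * index allocator.
 */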
void
dmar_fini_irt(struct dmar_unit *unit)
{

	unit->ir_enabled = 0;
	if (unit->irt != NULL) {
		dmar_disable_ir(unit);
		dmar_qi_invalidate_iec_glob(unit);
		vmem_destroy(unit->irtids);
		kmem_free((vm_offset_t)unit->irt, unit->irte_cnt *
		    sizeof(dmar_irte_t));
	}
}
386