xref: /freebsd/sys/x86/iommu/intel_intrmap.c (revision c697fb7f)
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <x86/include/apicreg.h>
#include <x86/include/apicvar.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <dev/pci/pcireg.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>
#include <x86/iommu/iommu_intrmap.h>

static struct dmar_unit *dmar_ir_find(device_t src, uint16_t *rid,
    int *is_dmar);
static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
    uint64_t low, uint16_t rid);
static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);

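/*
 * Allocate 'count' consecutive interrupt remapping table entries for an
 * MSI/MSI-X capable requester.  The returned cookies are the IRTE
 * indexes; they are set to -1 and EOPNOTSUPP is returned if remapping
 * is not enabled for the unit covering the source device.
 */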
int
iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	u_int idx, i;
	int error;

	unit = dmar_ir_find(src, NULL, NULL);
	if (unit == NULL || !unit->ir_enabled) {
		for (i = 0; i < count; i++)
			cookies[i] = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, count, M_FIRSTFIT | M_NOWAIT,
	    &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	for (i = 0; i < count; i++)
		cookies[i] = idx + i;
	return (0);
}

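/*
 * Program the IRTE identified by the cookie and compute the MSI
 * address/data pair in the remappable format.  Interrupts generated by
 * the DMAR unit itself are not remapped; for that case the plain
 * address/data encoding from the VT-d specification is returned.
 */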
int
iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
    uint64_t *addr, uint32_t *data)
{
	struct dmar_unit *unit;
	uint64_t low;
	uint16_t rid;
	int is_dmar;

	unit = dmar_ir_find(src, &rid, &is_dmar);
	if (is_dmar) {
		KASSERT(unit == NULL, ("DMAR cannot translate itself"));

		/*
		 * See VT-d specification, 5.1.6 Remapping Hardware -
		 * Interrupt Programming.
		 */
		*data = vector;
		*addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
		if (x2apic_mode)
			*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
		else
			KASSERT(cpu <= 0xff, ("cpu id too big %d", cpu));
		return (0);
	}
	if (unit == NULL || !unit->ir_enabled || cookie == -1)
		return (EOPNOTSUPP);

	low = (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) | DMAR_IRTE1_V(vector) |
	    DMAR_IRTE1_DLM_FM | DMAR_IRTE1_TM_EDGE | DMAR_IRTE1_RH_DIRECT |
	    DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, cookie, low, rid);

	if (addr != NULL) {
		/*
		 * See VT-d specification, 5.1.5.2 MSI and MSI-X
		 * Register Programming.
		 */
		*addr = MSI_INTEL_ADDR_BASE | ((cookie & 0x7fff) << 5) |
		    ((cookie & 0x8000) << 2) | 0x18;
		*data = 0;
	}
	return (0);
}

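/*
 * Release the IRTE referenced by the cookie after the MSI interrupt is
 * torn down.  A cookie of -1 means that no entry was allocated.
 */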
int
iommu_unmap_msi_intr(device_t src, u_int cookie)
{
	struct dmar_unit *unit;

	if (cookie == -1)
		return (0);
	unit = dmar_ir_find(src, NULL, NULL);
	return (dmar_ir_free_irte(unit, cookie));
}

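/*
 * Allocate and program an IRTE for an I/O APIC pin, returning the
 * redirection entry words in the remappable format together with the
 * cookie identifying the IRTE.
 */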
int
iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
    bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	uint64_t low, iorte;
	u_int idx;
	int error;
	uint16_t rid;

	unit = dmar_find_ioapic(ioapic_id, &rid);
	if (unit == NULL || !unit->ir_enabled) {
		*cookie = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, 1, M_FIRSTFIT | M_NOWAIT, &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	low = 0;
	switch (irq) {
	case IRQ_EXTINT:
		low |= DMAR_IRTE1_DLM_ExtINT;
		break;
	case IRQ_NMI:
		low |= DMAR_IRTE1_DLM_NMI;
		break;
	case IRQ_SMI:
		low |= DMAR_IRTE1_DLM_SMI;
		break;
	default:
		KASSERT(vector != 0, ("No vector for IRQ %u", irq));
		low |= DMAR_IRTE1_DLM_FM | DMAR_IRTE1_V(vector);
		break;
	}
	low |= (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) |
	    (edge ? DMAR_IRTE1_TM_EDGE : DMAR_IRTE1_TM_LEVEL) |
	    DMAR_IRTE1_RH_DIRECT | DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, idx, low, rid);

	if (hi != NULL) {
		/*
		 * See VT-d specification, 5.1.5.1 I/OxAPIC
		 * Programming.
		 */
		iorte = (1ULL << 48) | ((uint64_t)(idx & 0x7fff) << 49) |
		    ((idx & 0x8000) != 0 ? (1 << 11) : 0) |
		    (edge ? IOART_TRGREDG : IOART_TRGRLVL) |
		    (activehi ? IOART_INTAHI : IOART_INTALO) |
		    IOART_DELFIXED | vector;
		*hi = iorte >> 32;
		*lo = iorte;
	}
	*cookie = idx;
	return (0);
}

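/*
 * Release the IRTE for an I/O APIC pin and invalidate the cookie.
 */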
int
iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
	struct dmar_unit *unit;
	u_int idx;

	idx = *cookie;
	if (idx == -1)
		return (0);
	*cookie = -1;
	unit = dmar_find_ioapic(ioapic_id, NULL);
	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %d unit %p", idx, unit));
	return (dmar_ir_free_irte(unit, idx));
}

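/*
 * Find the DMAR unit responsible for remapping interrupts from the
 * given source device, optionally reporting the requester id and
 * whether the source is a DMAR unit itself.
 */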
static struct dmar_unit *
dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
	devclass_t src_class;
	struct dmar_unit *unit;

	/*
	 * Determine whether the interrupt source generates FSB
	 * interrupts.  If it does, the source is either the DMAR
	 * itself, whose interrupts are not remapped, or an HPET,
	 * whose interrupts are remapped.  For the HPET, the source
	 * id is reported by the HPET record in the DMAR ACPI table.
	 */
	if (is_dmar != NULL)
		*is_dmar = FALSE;
	src_class = device_get_devclass(src);
	if (src_class == devclass_find("dmar")) {
		unit = NULL;
		if (is_dmar != NULL)
			*is_dmar = TRUE;
	} else if (src_class == devclass_find("hpet")) {
		unit = dmar_find_hpet(src, rid);
	} else {
		unit = dmar_find(src, bootverbose);
		if (unit != NULL && rid != NULL)
			dmar_get_requester(src, rid);
	}
	return (unit);
}

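/*
 * Write both words of the IRTE at index 'idx' and invalidate the
 * corresponding interrupt entry cache entry.  If the entry is already
 * present, only the low word is updated.
 */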
static void
dmar_ir_program_irte(struct dmar_unit *unit, u_int idx, uint64_t low,
    uint16_t rid)
{
	dmar_irte_t *irte;
	uint64_t high;

	KASSERT(idx < unit->irte_cnt,
	    ("bad cookie %d %d", idx, unit->irte_cnt));
	irte = &(unit->irt[idx]);
	high = DMAR_IRTE2_SVT_RID | DMAR_IRTE2_SQ_RID |
	    DMAR_IRTE2_SID_RID(rid);
	if (bootverbose) {
		device_printf(unit->dev,
		    "programming irte[%d] rid %#x high %#jx low %#jx\n",
		    idx, rid, (uintmax_t)high, (uintmax_t)low);
	}
	DMAR_LOCK(unit);
	if ((irte->irte1 & DMAR_IRTE1_P) != 0) {
		/*
		 * The rte is already valid.  Assume that the request
		 * is to remap the interrupt for balancing.  Only the
		 * low word of the rte needs to be changed.  Assert
		 * that the high word contains the expected value.
		 */
		KASSERT(irte->irte2 == high,
		    ("irte2 mismatch, %jx %jx", (uintmax_t)irte->irte2,
		    (uintmax_t)high));
		dmar_pte_update(&irte->irte1, low);
	} else {
		dmar_pte_store(&irte->irte2, high);
		dmar_pte_store(&irte->irte1, low);
	}
	dmar_qi_invalidate_iec(unit, idx, 1);
	DMAR_UNLOCK(unit);
}

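/*
 * Clear the IRTE, invalidate the interrupt entry cache and return the
 * index to the unit's allocator.
 */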
static int
dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie)
{
	dmar_irte_t *irte;

	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %d unit %p", cookie, unit));
	KASSERT(cookie < unit->irte_cnt,
	    ("bad cookie %u %u", cookie, unit->irte_cnt));
	irte = &(unit->irt[cookie]);
	dmar_pte_clear(&irte->irte1);
	dmar_pte_clear(&irte->irte2);
	DMAR_LOCK(unit);
	dmar_qi_invalidate_iec(unit, cookie, 1);
	DMAR_UNLOCK(unit);
	vmem_free(unit->irtids, cookie, 1);
	return (0);
}

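/*
 * Round v up to the nearest power of two ("ceiling power of 2").
 */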
static u_int
clp2(u_int v)
{

	return (powerof2(v) ? v : 1 << fls(v));
}

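/*
 * Set up interrupt remapping for the unit: allocate the interrupt
 * remapping table and its index allocator, load the table pointer
 * into the hardware, reprogram already configured interrupt pins and
 * finally enable remapping.  Remapping is skipped if the hardware
 * lacks the capability, if it is disabled by the hw.dmar.ir tunable,
 * or if QI is not enabled.
 */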
int
dmar_init_irt(struct dmar_unit *unit)
{

	if ((unit->hw_ecap & DMAR_ECAP_IR) == 0)
		return (0);
	unit->ir_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.ir", &unit->ir_enabled);
	if (!unit->ir_enabled)
		return (0);
	if (!unit->qi_enabled) {
		unit->ir_enabled = 0;
		if (bootverbose)
			device_printf(unit->dev,
	     "QI disabled, disabling interrupt remapping\n");
		return (0);
	}
	unit->irte_cnt = clp2(num_io_irqs);
	unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(
	    unit->irte_cnt * sizeof(dmar_irte_t), M_ZERO | M_WAITOK, 0,
	    dmar_high, PAGE_SIZE, 0, DMAR_IS_COHERENT(unit) ?
	    VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
	if (unit->irt == NULL)
		return (ENOMEM);
	unit->irt_phys = pmap_kextract((vm_offset_t)unit->irt);
	unit->irtids = vmem_create("dmarirt", 0, unit->irte_cnt, 1, 0,
	    M_FIRSTFIT | M_NOWAIT);
	DMAR_LOCK(unit);
	dmar_load_irt_ptr(unit);
	dmar_qi_invalidate_iec_glob(unit);
	DMAR_UNLOCK(unit);

	/*
	 * Initialize mappings for already configured interrupt pins.
	 * This is required because interrupts for pins without
	 * programmed irtes would fault once remapping is enabled.
	 */
	intr_reprogram();

	DMAR_LOCK(unit);
	dmar_enable_ir(unit);
	DMAR_UNLOCK(unit);
	return (0);
}

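/*
 * Tear down interrupt remapping: disable it in hardware, flush the
 * interrupt entry cache and free the remapping table and its index
 * allocator.
 */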
void
dmar_fini_irt(struct dmar_unit *unit)
{

	unit->ir_enabled = 0;
	if (unit->irt != NULL) {
		dmar_disable_ir(unit);
		dmar_qi_invalidate_iec_glob(unit);
		vmem_destroy(unit->irtids);
		kmem_free((vm_offset_t)unit->irt, unit->irte_cnt *
		    sizeof(dmar_irte_t));
	}
}