/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work was supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define	IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define	IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define	IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

#define dprintf(fmt, ...)

static struct sx iommu_sx;

struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

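/*
 * Unmap a range of the domain's I/O virtual address space by forwarding
 * the request to the controller driver's IOMMU_UNMAP method.
 */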
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct iommu_unit *iommu;
	int error;

	iommu = iodom->iommu;

	error = IOMMU_UNMAP(iommu->dev, iodom, base, size);

	return (error);
}

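/*
 * Map the physical pages ma into the domain at base, translating the
 * map-entry eflags into a VM protection mask and forwarding the request
 * to the controller driver's IOMMU_MAP method.
 */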
static int
iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct iommu_unit *iommu;
	vm_prot_t prot;
	vm_offset_t va;
	int error;

	dprintf("%s: base %lx, size %lx\n", __func__, base, size);

	prot = 0;
	if (eflags & IOMMU_MAP_ENTRY_READ)
		prot |= VM_PROT_READ;
	if (eflags & IOMMU_MAP_ENTRY_WRITE)
		prot |= VM_PROT_WRITE;

	va = base;

	iommu = iodom->iommu;

	error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);

	return (error);
}

static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};

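/*
 * Allocate a translation domain from the controller driver and initialize
 * the generic framework state, including the domain's guest address space
 * (GAS) allocator.
 */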
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	KASSERT(iodom->end != 0, ("domain end is not set"));

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}

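/*
 * Tear down a domain: release its GAS state if it was initialized, then
 * hand the domain back to the controller driver.  Called with the unit
 * unlocked; the unit lock is taken internally.
 */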
static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}

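/*
 * Initialize the busdma tag embedded in a context.  The tag imposes no
 * restrictions of its own beyond the domain's address-space limit, since
 * DMA for the device is remapped through the domain anyway.
 */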
static void
iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);

	t->common.impl = &bus_dma_iommu_impl;
	t->common.alignment = 1;
	t->common.boundary = 0;
	t->common.lowaddr = maxaddr;
	t->common.highaddr = maxaddr;
	t->common.maxsize = maxaddr;
	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
	t->common.maxsegsz = maxaddr;
}

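/*
 * Allocate a device context within the given domain via the controller
 * driver's IOMMU_CTX_ALLOC method.
 */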
static struct iommu_ctx *
iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
{
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;

	iommu = iodom->iommu;

	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
	if (ioctx == NULL)
		return (NULL);

	ioctx->domain = iodom;

	return (ioctx);
}

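/*
 * Initialize a freshly allocated context: let the controller driver set
 * up translation for it, then allocate and fill the busdma tag used by
 * the requesting device.
 */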
static int
iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	error = IOMMU_CTX_INIT(iommu->dev, ioctx);
	if (error)
		return (error);

	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(iodom, tag);

	return (error);
}

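/*
 * Find the registered IOMMU unit whose controller device matches dev.
 */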
static struct iommu_unit *
iommu_lookup(device_t dev)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		if (iommu->dev == dev) {
			IOMMU_LIST_UNLOCK();
			return (iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

#ifdef FDT
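/*
 * Look up the IOMMU controller referenced by entry "channel" of the
 * "iommus" property of the device's FDT node, then allocate a domain and
 * a context for the device on that unit.
 */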
struct iommu_ctx *
iommu_get_ctx_ofw(device_t dev, int channel)
{
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;
	phandle_t node, parent;
	device_t iommu_dev;
	pcell_t *cells;
	int niommus;
	int ncells;
	int error;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "iommus", "#iommu-cells", &niommus);
	if (error) {
		device_printf(dev, "%s can't get iommu list.\n", __func__);
		return (NULL);
	}

	if (niommus == 0) {
		device_printf(dev, "%s iommu list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
	    channel, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev, "%s can't get iommu device xref.\n",
		    __func__);
		return (NULL);
	}

	iommu_dev = OF_device_from_xref(parent);
	if (iommu_dev == NULL) {
		device_printf(dev, "%s can't get iommu device.\n", __func__);
		return (NULL);
	}

	iommu = iommu_lookup(iommu_dev);
	if (iommu == NULL) {
		device_printf(dev, "%s can't lookup iommu.\n", __func__);
		return (NULL);
	}

	/*
	 * In our current configuration we have a domain per ctx, so
	 * allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL) {
		device_printf(dev, "%s can't allocate domain.\n", __func__);
		return (NULL);
	}

	ioctx = iommu_ctx_alloc(dev, iodom, false);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	ioctx->domain = iodom;

	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
	if (error) {
		device_printf(dev, "%s can't set MD data\n", __func__);
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(dev, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}
#endif

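/*
 * Return the context for the requesting device, allocating a new domain
 * and context on first use.  An existing context is returned as-is.
 */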
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_domain *iodom;
	struct iommu_ctx *ioctx;
	int error;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration we have a domain per ctx, so
	 * allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(requester, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}

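/*
 * Release a context and its busdma tag.  The caller must hold the unit
 * lock.
 */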
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;

	IOMMU_ASSERT_LOCKED(iommu);

	tag = ioctx->tag;

	IOMMU_CTX_FREE(iommu->dev, ioctx);

	free(tag, M_IOMMU);
}

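/*
 * Release a context together with its domain.  Since this framework
 * allocates a domain per context, freeing the context also frees the
 * domain.
 */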
void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
	struct iommu_unit *iommu;
	struct iommu_domain *iodom;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);
	iommu_free_ctx_locked(iommu, ioctx);
	IOMMU_UNLOCK(iommu);

	/* Since we have a domain per ctx, remove the domain too. */
	error = iommu_domain_free(iodom);
	if (error)
		device_printf(iommu->dev, "Could not free a domain\n");
}

static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	iommu_gas_free_space(entry);

	if (free)
		iommu_gas_free_entry(entry);
	else
		entry->flags = 0;
}

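/*
 * Unmap every entry on the queue and release the address space it
 * occupied.  The queue must drain completely; a left-over entry is a bug
 * in the caller and triggers a panic.
 */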
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", iodom, entry));
		error = iodom->ops->unmap(iodom, entry->start, entry->end -
		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_domain_free_entry(entry, true);
	}

	if (TAILQ_EMPTY(entries))
		return;

	panic("entries map is not empty");
}

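/*
 * Register a controller driver instance with the framework: initialize
 * its lock, put it on the global unit list, and set up its busdma state.
 */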
int
iommu_register(struct iommu_unit *iommu)
{
	struct iommu_entry *entry;

	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
	entry->iommu = iommu;

	IOMMU_LIST_LOCK();
	LIST_INSERT_HEAD(&iommu_list, entry, next);
	IOMMU_LIST_UNLOCK();

	iommu_init_busdma(iommu);

	return (0);
}

int
iommu_unregister(struct iommu_unit *iommu)
{
	struct iommu_entry *entry, *tmp;

	IOMMU_LIST_LOCK();
	LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
		if (entry->iommu == iommu) {
			LIST_REMOVE(entry, next);
			free(entry, M_IOMMU);
		}
	}
	IOMMU_LIST_UNLOCK();

	iommu_fini_busdma(iommu);

	mtx_destroy(&iommu->lock);

	return (0);
}

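/*
 * Ask each registered unit, via the driver's IOMMU_FIND method, whether
 * it translates for dev, and return the first unit that claims the
 * device.
 */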
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;
	int error;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		error = IOMMU_FIND(iommu->dev, dev);
		if (error == 0) {
			IOMMU_LIST_UNLOCK();
			return (entry->iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep __unused)
{

	dprintf("%s\n", __func__);

	iommu_domain_free_entry(entry, free);
}

static void
iommu_init(void)
{

	sx_init(&iommu_sx, "IOMMU list");
}

SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);
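
/*
 * Illustrative sketch (not part of this file): how an FDT-attached DMA
 * device driver might obtain and later release a translation context from
 * this framework.  The driver, its softc layout, and the function names
 * below are hypothetical.
 */
#if 0
static int
mydma_attach(device_t dev)
{
	struct mydma_softc *sc;

	sc = device_get_softc(dev);

	/* Take a context for entry 0 of our node's "iommus" property. */
	sc->ioctx = iommu_get_ctx_ofw(dev, 0);
	if (sc->ioctx == NULL)
		return (ENXIO);

	/* Busdma operations for this device now go through the IOMMU. */
	return (0);
}

static int
mydma_detach(device_t dev)
{
	struct mydma_softc *sc;

	sc = device_get_softc(dev);

	/* Releases the context and, with it, the per-ctx domain. */
	iommu_free_ctx(sc->ioctx);

	return (0);
}
#endif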