xref: /freebsd/sys/arm64/iommu/iommu.c (revision 41ce5498)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work were supported by Innovate UK project 105694,
12  * "Digital Security by Design (DSbD) Technology Platform Prototype".
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define	IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define	IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define	IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

#define dprintf(fmt, ...)

static struct sx iommu_sx;

struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

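/*
 * Unmap a range of the domain's I/O address space.  This is the .unmap
 * callback installed in domain_map_ops below; it simply forwards the request
 * to the hardware driver via the IOMMU_UNMAP() interface method.  The flags
 * argument is currently unused here.
 */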
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct iommu_unit *iommu;
	int error;

	iommu = iodom->iommu;

	error = IOMMU_UNMAP(iommu->dev, iodom, base, size);

	return (error);
}

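/*
 * Map a range of the domain's I/O address space.  This is the .map callback
 * installed in domain_map_ops below: translate the map entry flags into a
 * VM protection mask and hand the page array to the hardware driver via the
 * IOMMU_MAP() interface method.
 */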
static int
iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct iommu_unit *iommu;
	vm_prot_t prot;
	vm_offset_t va;
	int error;

	dprintf("%s: base %lx, size %lx\n", __func__, base, size);

	prot = 0;
	if (eflags & IOMMU_MAP_ENTRY_READ)
		prot |= VM_PROT_READ;
	if (eflags & IOMMU_MAP_ENTRY_WRITE)
		prot |= VM_PROT_WRITE;

	va = base;

	iommu = iodom->iommu;

	error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);

	return (error);
}

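/*
 * Map/unmap callbacks handed to the generic domain code through
 * iommu_domain_init() below.
 */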
static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};

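/*
 * Allocate a new domain on the given IOMMU unit: ask the hardware driver for
 * a domain via IOMMU_DOMAIN_ALLOC(), then initialize the generic domain
 * state and the I/O address space allocator for it.  The address space is
 * limited to VM_MAXUSER_ADDRESS.
 */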
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->end = VM_MAXUSER_ADDRESS;
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}

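/*
 * Tear down a domain: release the address space allocator state if it was
 * initialized, finalize the generic domain state, and hand the domain back
 * to the hardware driver via IOMMU_DOMAIN_FREE().
 */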
static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}

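/*
 * Initialize the busdma tag that is handed out to a context's owner.  The
 * tag imposes essentially no restrictions: byte alignment, no boundary, an
 * unrestricted segment count, and the full bus address range.
 */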
static void
iommu_tag_init(struct bus_dma_tag_iommu *t)
{
	bus_addr_t maxaddr;

	maxaddr = BUS_SPACE_MAXADDR;

	t->common.ref_count = 0;
	t->common.impl = &bus_dma_iommu_impl;
	t->common.alignment = 1;
	t->common.boundary = 0;
	t->common.lowaddr = maxaddr;
	t->common.highaddr = maxaddr;
	t->common.maxsize = maxaddr;
	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
	t->common.maxsegsz = maxaddr;
}

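/*
 * Allocate a device context within the given domain.  The allocation itself
 * is done by the hardware driver via IOMMU_CTX_ALLOC(); "disabled" requests
 * a context with translation disabled, and its interpretation is left to
 * the driver.
 */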
static struct iommu_ctx *
iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
{
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;

	iommu = iodom->iommu;

	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
	if (ioctx == NULL)
		return (NULL);

	ioctx->domain = iodom;

	return (ioctx);
}

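/*
 * Finish setting up a freshly allocated context: let the hardware driver
 * initialize it via IOMMU_CTX_INIT(), then allocate the busdma tag the
 * requesting device will use for DMA through this context.
 */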
static int
iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	error = IOMMU_CTX_INIT(iommu->dev, ioctx);
	if (error)
		return (error);

	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(tag);

	return (error);
}

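/*
 * Find the registered iommu_unit backed by the given IOMMU device.
 * Returns NULL if the device has not been registered.
 */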
static struct iommu_unit *
iommu_lookup(device_t dev)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		if (iommu->dev == dev) {
			IOMMU_LIST_UNLOCK();
			return (iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

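/*
 * Look up the IOMMU protecting an FDT/OFW device and create a context for
 * it.  The device node's "iommus" property is parsed to find the IOMMU
 * device and the per-master specifier cells, which are handed to the driver
 * as machine-dependent data.  "channel" selects which entry of the "iommus"
 * list to use.
 */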
struct iommu_ctx *
iommu_get_ctx_ofw(device_t dev, int channel)
{
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;
	phandle_t node, parent;
	device_t iommu_dev;
	pcell_t *cells;
	int niommus;
	int ncells;
	int error;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "iommus", "#iommu-cells", &niommus);
	if (error) {
		device_printf(dev, "%s can't get iommu list.\n", __func__);
		return (NULL);
	}

	if (niommus == 0) {
		device_printf(dev, "%s iommu list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
	    channel, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev, "%s can't get iommu device xref.\n",
		    __func__);
		return (NULL);
	}

	iommu_dev = OF_device_from_xref(parent);
	if (iommu_dev == NULL) {
		device_printf(dev, "%s can't get iommu device.\n", __func__);
		return (NULL);
	}

	iommu = iommu_lookup(iommu_dev);
	if (iommu == NULL) {
		device_printf(dev, "%s can't lookup iommu.\n", __func__);
		return (NULL);
	}

	/*
	 * In our current configuration we have a domain per ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL) {
		device_printf(dev, "%s can't allocate domain.\n", __func__);
		return (NULL);
	}

	ioctx = iommu_ctx_alloc(dev, iodom, false);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	ioctx->domain = iodom;

	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
	if (error) {
		device_printf(dev, "%s can't set MD data\n", __func__);
		/* Release the context and domain we just created. */
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(dev, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}

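/*
 * Return an existing context for the requesting device if the driver
 * already has one; otherwise create a new domain and context for it.
 * The rid and rmrr arguments are not used by this implementation.
 */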
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_domain *iodom;
	struct iommu_ctx *ioctx;
	int error;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration we have a domain per ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(requester, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}

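/*
 * Release a context with the IOMMU unit lock already held: the hardware
 * driver frees the context itself and we free the busdma tag allocated in
 * iommu_ctx_init().
 */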
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;

	IOMMU_ASSERT_LOCKED(iommu);

	tag = ioctx->tag;

	IOMMU_CTX_FREE(iommu->dev, ioctx);

	free(tag, M_IOMMU);
}

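/*
 * Release a context and, since every context owns a private domain in this
 * configuration, release the domain along with it.
 */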
void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
	struct iommu_unit *iommu;
	struct iommu_domain *iodom;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);
	iommu_free_ctx_locked(iommu, ioctx);
	IOMMU_UNLOCK(iommu);

	/* Since we have a domain per ctx, remove the domain too. */
	error = iommu_domain_free(iodom);
	if (error)
		device_printf(iommu->dev, "Could not free a domain\n");
}

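/*
 * Return a map entry's range to the domain's address space allocator and
 * either free the entry or just clear its flags so it can be reused.
 */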
static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	struct iommu_domain *iodom;

	iodom = entry->domain;

	IOMMU_DOMAIN_LOCK(iodom);
	iommu_gas_free_space(iodom, entry);
	IOMMU_DOMAIN_UNLOCK(iodom);

	if (free)
		iommu_gas_free_entry(iodom, entry);
	else
		entry->flags = 0;
}

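/*
 * Unload a queue of map entries from a domain: unmap each entry's range via
 * the domain's unmap callback and return the entries to the address space
 * allocator.  The queue must be empty once the loop completes.
 */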
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", iodom, entry));
		error = iodom->ops->unmap(iodom, entry->start, entry->end -
		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_domain_free_entry(entry, true);
	}

	if (TAILQ_EMPTY(entries))
		return;

	panic("entries map is not empty");
}

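/*
 * Register a new IOMMU unit with the framework: initialize its lock, put it
 * on the global list and set up its busdma state.  Called by hardware
 * drivers once the unit is ready to serve requests.
 */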
int
iommu_register(struct iommu_unit *iommu)
{
	struct iommu_entry *entry;

	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
	entry->iommu = iommu;

	IOMMU_LIST_LOCK();
	LIST_INSERT_HEAD(&iommu_list, entry, next);
	IOMMU_LIST_UNLOCK();

	iommu_init_busdma(iommu);

	return (0);
}

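/*
 * Remove an IOMMU unit from the framework: drop it from the global list,
 * tear down its busdma state and destroy its lock.
 */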
int
iommu_unregister(struct iommu_unit *iommu)
{
	struct iommu_entry *entry, *tmp;

	IOMMU_LIST_LOCK();
	LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
		if (entry->iommu == iommu) {
			LIST_REMOVE(entry, next);
			free(entry, M_IOMMU);
		}
	}
	IOMMU_LIST_UNLOCK();

	iommu_fini_busdma(iommu);

	mtx_destroy(&iommu->lock);

	return (0);
}

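/*
 * Ask every registered IOMMU unit, via the IOMMU_FIND() interface method,
 * whether it translates DMA for the given device and return the first unit
 * that claims it.  The "verbose" argument is currently unused.
 */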
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;
	int error;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		error = IOMMU_FIND(iommu->dev, dev);
		if (error == 0) {
			IOMMU_LIST_UNLOCK();
			return (entry->iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

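/*
 * Unload a single map entry; "free" selects whether the entry itself is
 * released or merely reset for reuse.
 */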
void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{

	dprintf("%s\n", __func__);

	iommu_domain_free_entry(entry, free);
}

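/*
 * Framework initialization: set up the lock protecting the global list of
 * IOMMU units.  Run at SI_SUB_DRIVERS (SI_ORDER_FIRST) via the SYSINIT
 * below.
 */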
static void
iommu_init(void)
{

	sx_init(&iommu_sx, "IOMMU list");
}

SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);