/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work was supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
35
36 #include "opt_platform.h"
37
38 #include <sys/param.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/malloc.h>
42 #include <sys/memdesc.h>
43 #include <sys/tree.h>
44 #include <sys/taskqueue.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/sx.h>
48 #include <sys/sysctl.h>
49 #include <vm/vm.h>
50
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #include <machine/bus.h>
54 #include <dev/iommu/busdma_iommu.h>
55 #include <machine/vmparam.h>
56
57 #ifdef FDT
58 #include <dev/fdt/fdt_common.h>
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
61 #endif
62
63 #include "iommu.h"
64 #include "iommu_if.h"
65
/* Allocation tag for all framework-internal allocations in this file. */
static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

/* Serializes access to the global list of registered IOMMU units. */
#define IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

/* Debug printing is compiled out; redefine to printf(9) when debugging. */
#define dprintf(fmt, ...)

static struct sx iommu_sx;

/* One list node per registered IOMMU hardware unit. */
struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);
81
82 static int
iommu_domain_unmap_buf(struct iommu_domain * iodom,iommu_gaddr_t base,iommu_gaddr_t size,int flags)83 iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
84 iommu_gaddr_t size, int flags)
85 {
86 struct iommu_unit *iommu;
87 int error;
88
89 iommu = iodom->iommu;
90
91 error = IOMMU_UNMAP(iommu->dev, iodom, base, size);
92
93 return (error);
94 }
95
96 static int
iommu_domain_map_buf(struct iommu_domain * iodom,iommu_gaddr_t base,iommu_gaddr_t size,vm_page_t * ma,uint64_t eflags,int flags)97 iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
98 iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
99 {
100 struct iommu_unit *iommu;
101 vm_prot_t prot;
102 vm_offset_t va;
103 int error;
104
105 dprintf("%s: base %lx, size %lx\n", __func__, base, size);
106
107 prot = 0;
108 if (eflags & IOMMU_MAP_ENTRY_READ)
109 prot |= VM_PROT_READ;
110 if (eflags & IOMMU_MAP_ENTRY_WRITE)
111 prot |= VM_PROT_WRITE;
112
113 va = base;
114
115 iommu = iodom->iommu;
116
117 error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);
118
119 return (error);
120 }
121
/* Callbacks through which the generic GAS allocator drives this driver. */
static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};
126
127 static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit * iommu)128 iommu_domain_alloc(struct iommu_unit *iommu)
129 {
130 struct iommu_domain *iodom;
131
132 iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
133 if (iodom == NULL)
134 return (NULL);
135
136 KASSERT(iodom->end != 0, ("domain end is not set"));
137
138 iommu_domain_init(iommu, iodom, &domain_map_ops);
139 iodom->iommu = iommu;
140 iommu_gas_init_domain(iodom);
141
142 return (iodom);
143 }
144
145 static int
iommu_domain_free(struct iommu_domain * iodom)146 iommu_domain_free(struct iommu_domain *iodom)
147 {
148 struct iommu_unit *iommu;
149
150 iommu = iodom->iommu;
151
152 IOMMU_LOCK(iommu);
153
154 if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
155 IOMMU_DOMAIN_LOCK(iodom);
156 iommu_gas_fini_domain(iodom);
157 IOMMU_DOMAIN_UNLOCK(iodom);
158 }
159
160 iommu_domain_fini(iodom);
161
162 IOMMU_DOMAIN_FREE(iommu->dev, iodom);
163 IOMMU_UNLOCK(iommu);
164
165 return (0);
166 }
167
168 static void
iommu_tag_init(struct iommu_domain * iodom,struct bus_dma_tag_iommu * t)169 iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
170 {
171 bus_addr_t maxaddr;
172
173 maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);
174
175 t->common.ref_count = 0;
176 t->common.impl = &bus_dma_iommu_impl;
177 t->common.alignment = 1;
178 t->common.boundary = 0;
179 t->common.lowaddr = maxaddr;
180 t->common.highaddr = maxaddr;
181 t->common.maxsize = maxaddr;
182 t->common.nsegments = BUS_SPACE_UNRESTRICTED;
183 t->common.maxsegsz = maxaddr;
184 }
185
186 static struct iommu_ctx *
iommu_ctx_alloc(device_t requester,struct iommu_domain * iodom,bool disabled)187 iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
188 {
189 struct iommu_unit *iommu;
190 struct iommu_ctx *ioctx;
191
192 iommu = iodom->iommu;
193
194 ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
195 if (ioctx == NULL)
196 return (NULL);
197
198 ioctx->domain = iodom;
199
200 return (ioctx);
201 }
202
203 static int
iommu_ctx_init(device_t requester,struct iommu_ctx * ioctx)204 iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
205 {
206 struct bus_dma_tag_iommu *tag;
207 struct iommu_domain *iodom;
208 struct iommu_unit *iommu;
209 int error;
210
211 iodom = ioctx->domain;
212 iommu = iodom->iommu;
213
214 error = IOMMU_CTX_INIT(iommu->dev, ioctx);
215 if (error)
216 return (error);
217
218 tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
219 M_IOMMU, M_WAITOK | M_ZERO);
220 tag->owner = requester;
221 tag->ctx = ioctx;
222 tag->ctx->domain = iodom;
223
224 iommu_tag_init(iodom, tag);
225
226 return (error);
227 }
228
229 static struct iommu_unit *
iommu_lookup(device_t dev)230 iommu_lookup(device_t dev)
231 {
232 struct iommu_entry *entry;
233 struct iommu_unit *iommu;
234
235 IOMMU_LIST_LOCK();
236 LIST_FOREACH(entry, &iommu_list, next) {
237 iommu = entry->iommu;
238 if (iommu->dev == dev) {
239 IOMMU_LIST_UNLOCK();
240 return (iommu);
241 }
242 }
243 IOMMU_LIST_UNLOCK();
244
245 return (NULL);
246 }
247
#ifdef FDT
/*
 * Create an IOMMU context for the OFW-described device "dev" using the
 * "channel"-th entry of its "iommus" property.  A fresh domain is
 * allocated for the context (one domain per ctx in this framework).
 * Returns the new context or NULL on any failure.
 *
 * Fixes two leaks present in the previous version: the "cells" array
 * allocated by ofw_bus_parse_xref_list_alloc() was never released (on
 * success or on error), and the context/domain leaked when
 * IOMMU_OFW_MD_DATA() failed.
 */
struct iommu_ctx *
iommu_get_ctx_ofw(device_t dev, int channel)
{
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;
	phandle_t node, parent;
	device_t iommu_dev;
	pcell_t *cells;
	int niommus;
	int ncells;
	int error;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on not ofw based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "iommus", "#iommu-cells", &niommus);
	if (error) {
		device_printf(dev, "%s can't get iommu list.\n", __func__);
		return (NULL);
	}

	if (niommus == 0) {
		device_printf(dev, "%s iommu list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
	    channel, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev, "%s can't get iommu device xref.\n",
		    __func__);
		return (NULL);
	}

	/* From here on, "cells" must be freed before returning. */
	iodom = NULL;
	ioctx = NULL;

	iommu_dev = OF_device_from_xref(parent);
	if (iommu_dev == NULL) {
		device_printf(dev, "%s can't get iommu device.\n", __func__);
		goto fail;
	}

	iommu = iommu_lookup(iommu_dev);
	if (iommu == NULL) {
		device_printf(dev, "%s can't lookup iommu.\n", __func__);
		goto fail;
	}

	/*
	 * In our current configuration we have a domain per each ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL) {
		device_printf(dev, "%s can't allocate domain.\n", __func__);
		goto fail;
	}

	ioctx = iommu_ctx_alloc(dev, iodom, false);
	if (ioctx == NULL)
		goto fail;

	ioctx->domain = iodom;

	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
	if (error) {
		device_printf(dev, "%s can't set MD data\n", __func__);
		goto fail;
	}

	error = iommu_ctx_init(dev, ioctx);
	if (error)
		goto fail;

	OF_prop_free(cells);

	return (ioctx);

fail:
	/* ioctx/iodom are non-NULL only after iommu was found. */
	if (ioctx != NULL)
		IOMMU_CTX_FREE(iommu->dev, ioctx);
	if (iodom != NULL)
		iommu_domain_free(iodom);
	OF_prop_free(cells);

	return (NULL);
}
#endif
335
336 struct iommu_ctx *
iommu_get_ctx(struct iommu_unit * iommu,device_t requester,uint16_t rid,bool disabled,bool rmrr)337 iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
338 uint16_t rid, bool disabled, bool rmrr)
339 {
340 struct iommu_domain *iodom;
341 struct iommu_ctx *ioctx;
342 int error;
343
344 IOMMU_LOCK(iommu);
345 ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
346 if (ioctx) {
347 IOMMU_UNLOCK(iommu);
348 return (ioctx);
349 }
350 IOMMU_UNLOCK(iommu);
351
352 /*
353 * In our current configuration we have a domain per each ctx.
354 * So allocate a domain first.
355 */
356 iodom = iommu_domain_alloc(iommu);
357 if (iodom == NULL)
358 return (NULL);
359
360 ioctx = iommu_ctx_alloc(requester, iodom, disabled);
361 if (ioctx == NULL) {
362 iommu_domain_free(iodom);
363 return (NULL);
364 }
365
366 error = iommu_ctx_init(requester, ioctx);
367 if (error) {
368 IOMMU_CTX_FREE(iommu->dev, ioctx);
369 iommu_domain_free(iodom);
370 return (NULL);
371 }
372
373 return (ioctx);
374 }
375
376 void
iommu_free_ctx_locked(struct iommu_unit * iommu,struct iommu_ctx * ioctx)377 iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
378 {
379 struct bus_dma_tag_iommu *tag;
380
381 IOMMU_ASSERT_LOCKED(iommu);
382
383 tag = ioctx->tag;
384
385 IOMMU_CTX_FREE(iommu->dev, ioctx);
386
387 free(tag, M_IOMMU);
388 }
389
390 void
iommu_free_ctx(struct iommu_ctx * ioctx)391 iommu_free_ctx(struct iommu_ctx *ioctx)
392 {
393 struct iommu_unit *iommu;
394 struct iommu_domain *iodom;
395 int error;
396
397 iodom = ioctx->domain;
398 iommu = iodom->iommu;
399
400 IOMMU_LOCK(iommu);
401 iommu_free_ctx_locked(iommu, ioctx);
402 IOMMU_UNLOCK(iommu);
403
404 /* Since we have a domain per each ctx, remove the domain too. */
405 error = iommu_domain_free(iodom);
406 if (error)
407 device_printf(iommu->dev, "Could not free a domain\n");
408 }
409
410 static void
iommu_domain_free_entry(struct iommu_map_entry * entry,bool free)411 iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
412 {
413 iommu_gas_free_space(entry);
414
415 if (free)
416 iommu_gas_free_entry(entry);
417 else
418 entry->flags = 0;
419 }
420
421 void
iommu_domain_unload(struct iommu_domain * iodom,struct iommu_map_entries_tailq * entries,bool cansleep)422 iommu_domain_unload(struct iommu_domain *iodom,
423 struct iommu_map_entries_tailq *entries, bool cansleep)
424 {
425 struct iommu_map_entry *entry, *entry1;
426 int error __diagused;
427
428 TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
429 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
430 ("not mapped entry %p %p", iodom, entry));
431 error = iodom->ops->unmap(iodom, entry->start, entry->end -
432 entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
433 KASSERT(error == 0, ("unmap %p error %d", iodom, error));
434 TAILQ_REMOVE(entries, entry, dmamap_link);
435 iommu_domain_free_entry(entry, true);
436 }
437
438 if (TAILQ_EMPTY(entries))
439 return;
440
441 panic("entries map is not empty");
442 }
443
444 int
iommu_register(struct iommu_unit * iommu)445 iommu_register(struct iommu_unit *iommu)
446 {
447 struct iommu_entry *entry;
448
449 mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);
450
451 entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
452 entry->iommu = iommu;
453
454 IOMMU_LIST_LOCK();
455 LIST_INSERT_HEAD(&iommu_list, entry, next);
456 IOMMU_LIST_UNLOCK();
457
458 iommu_init_busdma(iommu);
459
460 return (0);
461 }
462
463 int
iommu_unregister(struct iommu_unit * iommu)464 iommu_unregister(struct iommu_unit *iommu)
465 {
466 struct iommu_entry *entry, *tmp;
467
468 IOMMU_LIST_LOCK();
469 LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
470 if (entry->iommu == iommu) {
471 LIST_REMOVE(entry, next);
472 free(entry, M_IOMMU);
473 }
474 }
475 IOMMU_LIST_UNLOCK();
476
477 iommu_fini_busdma(iommu);
478
479 mtx_destroy(&iommu->lock);
480
481 return (0);
482 }
483
484 struct iommu_unit *
iommu_find(device_t dev,bool verbose)485 iommu_find(device_t dev, bool verbose)
486 {
487 struct iommu_entry *entry;
488 struct iommu_unit *iommu;
489 int error;
490
491 IOMMU_LIST_LOCK();
492 LIST_FOREACH(entry, &iommu_list, next) {
493 iommu = entry->iommu;
494 error = IOMMU_FIND(iommu->dev, dev);
495 if (error == 0) {
496 IOMMU_LIST_UNLOCK();
497 return (entry->iommu);
498 }
499 }
500 IOMMU_LIST_UNLOCK();
501
502 return (NULL);
503 }
504
505 void
iommu_domain_unload_entry(struct iommu_map_entry * entry,bool free,bool cansleep __unused)506 iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
507 bool cansleep __unused)
508 {
509
510 dprintf("%s\n", __func__);
511
512 iommu_domain_free_entry(entry, free);
513 }
514
515 static void
iommu_init(void)516 iommu_init(void)
517 {
518
519 sx_init(&iommu_sx, "IOMMU list");
520 }
521
522 SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);
523