1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013 The FreeBSD Foundation
5  *
6  * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/domainset.h>
37 #include <sys/malloc.h>
38 #include <sys/bus.h>
39 #include <sys/conf.h>
40 #include <sys/interrupt.h>
41 #include <sys/kernel.h>
42 #include <sys/ktr.h>
43 #include <sys/lock.h>
44 #include <sys/proc.h>
45 #include <sys/memdesc.h>
46 #include <sys/msan.h>
47 #include <sys/mutex.h>
48 #include <sys/sysctl.h>
49 #include <sys/rman.h>
50 #include <sys/taskqueue.h>
51 #include <sys/tree.h>
52 #include <sys/uio.h>
53 #include <sys/vmem.h>
54 #include <dev/pci/pcireg.h>
55 #include <dev/pci/pcivar.h>
56 #include <vm/vm.h>
57 #include <vm/vm_extern.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_object.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_map.h>
62 #include <dev/iommu/iommu.h>
63 #include <machine/atomic.h>
64 #include <machine/bus.h>
65 #include <machine/md_var.h>
66 #include <machine/iommu.h>
67 #include <dev/iommu/busdma_iommu.h>
68 
69 /*
70  * busdma_iommu.c, the implementation of the busdma(9) interface using
71  * IOMMU units from Intel VT-d.
72  */
73 
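/*
 * Decide whether a device should use bounce (conventional) busdma or
 * IOMMU-backed busdma.  The hw.busdma.default tunable selects the
 * global default ("bounce" or "iommu"), and the per-device
 * hw.busdma.pci<domain>.<bus>.<slot>.<func> tunable overrides it;
 * "dmar" is accepted as a compatibility alias for "iommu".
 */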
74 static bool
75 iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
76 {
77 	char str[128], *env;
78 	int default_bounce;
79 	bool ret;
80 	static const char bounce_str[] = "bounce";
81 	static const char iommu_str[] = "iommu";
82 	static const char dmar_str[] = "dmar"; /* compatibility */
83 
84 	default_bounce = 0;
85 	env = kern_getenv("hw.busdma.default");
86 	if (env != NULL) {
87 		if (strcmp(env, bounce_str) == 0)
88 			default_bounce = 1;
89 		else if (strcmp(env, iommu_str) == 0 ||
90 		    strcmp(env, dmar_str) == 0)
91 			default_bounce = 0;
92 		freeenv(env);
93 	}
94 
95 	snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d",
96 	    domain, bus, slot, func);
97 	env = kern_getenv(str);
98 	if (env == NULL)
99 		return (default_bounce != 0);
100 	if (strcmp(env, bounce_str) == 0)
101 		ret = true;
102 	else if (strcmp(env, iommu_str) == 0 ||
103 	    strcmp(env, dmar_str) == 0)
104 		ret = false;
105 	else
106 		ret = default_bounce != 0;
107 	freeenv(env);
108 	return (ret);
109 }
110 
111 /*
112  * Given original device, find the requester ID that will be seen by
113  * the IOMMU unit and used for page table lookup.  PCI bridges may take
114  * ownership of transactions from downstream devices, so it may not be
115  * the same as the BSF of the target device.  In those cases, all
116  * devices downstream of the bridge must share a single mapping
117  * domain, and must collectively be assigned to use either IOMMU or
118  * bounce mapping.
119  */
120 device_t
121 iommu_get_requester(device_t dev, uint16_t *rid)
122 {
123 	devclass_t pci_class;
124 	device_t l, pci, pcib, pcip, pcibp, requester;
125 	int cap_offset;
126 	uint16_t pcie_flags;
127 	bool bridge_is_pcie;
128 
129 	pci_class = devclass_find("pci");
130 	l = requester = dev;
131 
132 	*rid = pci_get_rid(dev);
133 
134 	/*
135 	 * Walk the bridge hierarchy from the target device to the
136 	 * host port to find the translating bridge nearest the IOMMU
137 	 * unit.
138 	 */
139 	for (;;) {
140 		pci = device_get_parent(l);
141 		KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
142 		    "for %s", device_get_name(dev), device_get_name(l)));
143 		KASSERT(device_get_devclass(pci) == pci_class,
144 		    ("iommu_get_requester(%s): non-pci parent %s for %s",
145 		    device_get_name(dev), device_get_name(pci),
146 		    device_get_name(l)));
147 
148 		pcib = device_get_parent(pci);
149 		KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
150 		    "for %s", device_get_name(dev), device_get_name(pci)));
151 
152 		/*
153 		 * The parent of our "bridge" isn't another PCI bus,
154 		 * so pcib isn't a PCI->PCI bridge but rather a host
155 		 * port, and the requester ID won't be translated
156 		 * further.
157 		 */
158 		pcip = device_get_parent(pcib);
159 		if (device_get_devclass(pcip) != pci_class)
160 			break;
161 		pcibp = device_get_parent(pcip);
162 
163 		if (pci_find_cap(l, PCIY_EXPRESS, &cap_offset) == 0) {
164 			/*
165 			 * Do not stop the loop even if the target
166 			 * device is PCIe, because it is possible (but
167 			 * unlikely) to have a PCI->PCIe bridge
168 			 * somewhere in the hierarchy.
169 			 */
170 			l = pcib;
171 		} else {
172 			/*
173 			 * The device is not PCIe, so it cannot be seen
174 			 * as a requester by the IOMMU unit.  Check whether the
175 			 * bridge is PCIe.
176 			 */
177 			bridge_is_pcie = pci_find_cap(pcib, PCIY_EXPRESS,
178 			    &cap_offset) == 0;
179 			requester = pcib;
180 
181 			/*
182 			 * Check for a buggy PCIe/PCI bridge that
183 			 * doesn't report the express capability.  If
184 			 * the bridge above it is express but isn't a
185 			 * PCI bridge, then we know pcib is actually a
186 			 * PCIe/PCI bridge.
187 			 */
188 			if (!bridge_is_pcie && pci_find_cap(pcibp,
189 			    PCIY_EXPRESS, &cap_offset) == 0) {
190 				pcie_flags = pci_read_config(pcibp,
191 				    cap_offset + PCIER_FLAGS, 2);
192 				if ((pcie_flags & PCIEM_FLAGS_TYPE) !=
193 				    PCIEM_TYPE_PCI_BRIDGE)
194 					bridge_is_pcie = true;
195 			}
196 
197 			if (bridge_is_pcie) {
198 				/*
199 				 * The current device is not PCIe, but
200 				 * the bridge above it is.  This is a
201 				 * PCIe->PCI bridge.  Assume that the
202 				 * requester ID will be the secondary
203 				 * bus number with slot and function
204 				 * set to zero.
205 				 *
206 				 * XXX: Doesn't handle the case where
207 				 * the bridge is PCIe->PCI-X, and the
208 				 * bridge will only take ownership of
209 				 * requests in some cases.  We should
210 				 * provide context entries with the
211 				 * same page tables for taken and
212 				 * non-taken transactions.
213 				 */
214 				*rid = PCI_RID(pci_get_bus(l), 0, 0);
215 				l = pcibp;
216 			} else {
217 				/*
218 				 * Neither the device nor the bridge
219 				 * above it is PCIe.  This is a
220 				 * conventional PCI->PCI bridge, which
221 				 * will use the bridge's BSF as the
222 				 * requester ID.
223 				 */
224 				*rid = pci_get_rid(pcib);
225 				l = pcib;
226 			}
227 		}
228 	}
229 	return (requester);
230 }
231 
232 struct iommu_ctx *
233 iommu_instantiate_ctx(struct iommu_unit *unit, device_t dev, bool rmrr)
234 {
235 	device_t requester;
236 	struct iommu_ctx *ctx;
237 	bool disabled;
238 	uint16_t rid;
239 
240 	requester = iommu_get_requester(dev, &rid);
241 
242 	/*
243 	 * If the user requested that the IOMMU be disabled for the
244 	 * device, we cannot disable the whole IOMMU unit, because
245 	 * other devices on the same unit may still require
246 	 * translation.  Instead, provide an identity mapping for the
247 	 * device context.
248 	 */
249 	disabled = iommu_bus_dma_is_dev_disabled(pci_get_domain(requester),
250 	    pci_get_bus(requester), pci_get_slot(requester),
251 	    pci_get_function(requester));
252 	ctx = iommu_get_ctx(unit, requester, rid, disabled, rmrr);
253 	if (ctx == NULL)
254 		return (NULL);
255 	if (disabled) {
256 		/*
257 		 * Keep the first reference on context, release the
258 		 * later refs.
259 		 */
260 		IOMMU_LOCK(unit);
261 		if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) {
262 			ctx->flags |= IOMMU_CTX_DISABLED;
263 			IOMMU_UNLOCK(unit);
264 		} else {
265 			iommu_free_ctx_locked(unit, ctx);
266 		}
267 		ctx = NULL;
268 	}
269 	return (ctx);
270 }
271 
272 struct iommu_ctx *
273 iommu_get_dev_ctx(device_t dev)
274 {
275 	struct iommu_unit *unit;
276 
277 	unit = iommu_find(dev, bootverbose);
278 	/* Not in scope of any IOMMU? */
279 	if (unit == NULL)
280 		return (NULL);
281 	if (!unit->dma_enabled)
282 		return (NULL);
283 
284 #if defined(__amd64__) || defined(__i386__)
285 	dmar_quirks_pre_use(unit);
286 	dmar_instantiate_rmrr_ctxs(unit);
287 #endif
288 
289 	return (iommu_instantiate_ctx(unit, dev, false));
290 }
291 
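/*
 * Return the IOMMU-backed busdma tag for the child device, or NULL
 * when the device is not covered by an enabled IOMMU unit, in which
 * case the regular busdma implementation remains in use.
 */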
292 bus_dma_tag_t
293 iommu_get_dma_tag(device_t dev, device_t child)
294 {
295 	struct iommu_ctx *ctx;
296 	bus_dma_tag_t res;
297 
298 	ctx = iommu_get_dev_ctx(child);
299 	if (ctx == NULL)
300 		return (NULL);
301 
302 	res = (bus_dma_tag_t)ctx->tag;
303 	return (res);
304 }
305 
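/*
 * Request that all devices on the caller's PCI bus share a single
 * busdma context.  Only the device at slot 0, function 0 may ask for
 * this; the request is refused (returning false) otherwise, or when
 * the device is not behind an IOMMU.
 */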
306 bool
307 bus_dma_iommu_set_buswide(device_t dev)
308 {
309 	struct iommu_unit *unit;
310 	device_t parent;
311 	u_int busno, slot, func;
312 
313 	parent = device_get_parent(dev);
314 	if (device_get_devclass(parent) != devclass_find("pci"))
315 		return (false);
316 	unit = iommu_find(dev, bootverbose);
317 	if (unit == NULL)
318 		return (false);
319 	busno = pci_get_bus(dev);
320 	slot = pci_get_slot(dev);
321 	func = pci_get_function(dev);
322 	if (slot != 0 || func != 0) {
323 		if (bootverbose) {
324 			device_printf(dev,
325 			    "iommu%d pci%d:%d:%d requested buswide busdma\n",
326 			    unit->unit, busno, slot, func);
327 		}
328 		return (false);
329 	}
330 	iommu_set_buswide_ctx(unit, busno);
331 	return (true);
332 }
333 
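/*
 * Record busno in the unit's buswide_ctxs bitmap;
 * iommu_is_buswide_ctx() below reports whether a bus has been marked
 * this way.
 */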
334 void
335 iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno)
336 {
337 
338 	MPASS(busno <= PCI_BUSMAX);
339 	IOMMU_LOCK(unit);
340 	unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
341 	    1U << (busno % (NBBY * sizeof(uint32_t)));
342 	IOMMU_UNLOCK(unit);
343 }
344 
345 bool
346 iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno)
347 {
348 
349 	MPASS(busno <= PCI_BUSMAX);
350 	return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] &
351 	    (1U << (busno % (NBBY * sizeof(uint32_t))))) != 0);
352 }
353 
354 static MALLOC_DEFINE(M_IOMMU_DMAMAP, "iommu_dmamap", "IOMMU DMA Map");
355 
356 static void iommu_bus_schedule_dmamap(struct iommu_unit *unit,
357     struct bus_dmamap_iommu *map);
358 
359 static int
360 iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
361     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
362     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
363     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
364     void *lockfuncarg, bus_dma_tag_t *dmat)
365 {
366 	struct bus_dma_tag_iommu *newtag, *oldtag;
367 	int error;
368 
369 	*dmat = NULL;
370 	error = common_bus_dma_tag_create(parent != NULL ?
371 	    &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
372 	    boundary, lowaddr, highaddr, filter, filterarg, maxsize,
373 	    nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
374 	    sizeof(struct bus_dma_tag_iommu), (void **)&newtag);
375 	if (error != 0)
376 		goto out;
377 
378 	oldtag = (struct bus_dma_tag_iommu *)parent;
379 	newtag->common.impl = &bus_dma_iommu_impl;
380 	newtag->ctx = oldtag->ctx;
381 	newtag->owner = oldtag->owner;
382 
383 	*dmat = (bus_dma_tag_t)newtag;
384 out:
385 	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
386 	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
387 	    error);
388 	return (error);
389 }
390 
391 static int
392 iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
393 {
394 
395 	return (0);
396 }
397 
398 static int
399 iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
400 {
401 	struct bus_dma_tag_iommu *dmat, *parent;
402 	struct bus_dma_tag_iommu *dmat_copy __unused;
403 	int error;
404 
405 	error = 0;
406 	dmat_copy = dmat = (struct bus_dma_tag_iommu *)dmat1;
407 
408 	if (dmat != NULL) {
409 		if (dmat->map_count != 0) {
410 			error = EBUSY;
411 			goto out;
412 		}
413 		while (dmat != NULL) {
414 			parent = (struct bus_dma_tag_iommu *)dmat->common.parent;
415 			if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
416 			    1) {
417 				if (dmat == dmat->ctx->tag)
418 					iommu_free_ctx(dmat->ctx);
419 				free(dmat->segments, M_IOMMU_DMAMAP);
420 				free(dmat, M_DEVBUF);
421 				dmat = parent;
422 			} else
423 				dmat = NULL;
424 		}
425 	}
426 out:
427 	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
428 	return (error);
429 }
430 
431 static bool
432 iommu_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
433 {
434 
435 	return (false);
436 }
437 
438 static int
439 iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
440 {
441 	struct bus_dma_tag_iommu *tag;
442 	struct bus_dmamap_iommu *map;
443 
444 	tag = (struct bus_dma_tag_iommu *)dmat;
445 	map = malloc_domainset(sizeof(*map), M_IOMMU_DMAMAP,
446 	    DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
447 	if (map == NULL) {
448 		*mapp = NULL;
449 		return (ENOMEM);
450 	}
451 	if (tag->segments == NULL) {
452 		tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
453 		    tag->common.nsegments, M_IOMMU_DMAMAP,
454 		    DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
455 		if (tag->segments == NULL) {
456 			free(map, M_IOMMU_DMAMAP);
457 			*mapp = NULL;
458 			return (ENOMEM);
459 		}
460 	}
461 	TAILQ_INIT(&map->map_entries);
462 	map->tag = tag;
463 	map->locked = true;
464 	map->cansleep = false;
465 	tag->map_count++;
466 	*mapp = (bus_dmamap_t)map;
467 
468 	return (0);
469 }
470 
471 static int
472 iommu_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
473 {
474 	struct bus_dma_tag_iommu *tag;
475 	struct bus_dmamap_iommu *map;
476 	struct iommu_domain *domain;
477 
478 	tag = (struct bus_dma_tag_iommu *)dmat;
479 	map = (struct bus_dmamap_iommu *)map1;
480 	if (map != NULL) {
481 		domain = tag->ctx->domain;
482 		IOMMU_DOMAIN_LOCK(domain);
483 		if (!TAILQ_EMPTY(&map->map_entries)) {
484 			IOMMU_DOMAIN_UNLOCK(domain);
485 			return (EBUSY);
486 		}
487 		IOMMU_DOMAIN_UNLOCK(domain);
488 		free(map, M_IOMMU_DMAMAP);
489 	}
490 	tag->map_count--;
491 	return (0);
492 }
493 
494 
495 static int
496 iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
497     bus_dmamap_t *mapp)
498 {
499 	struct bus_dma_tag_iommu *tag;
500 	struct bus_dmamap_iommu *map;
501 	int error, mflags;
502 	vm_memattr_t attr;
503 
504 	error = iommu_bus_dmamap_create(dmat, flags, mapp);
505 	if (error != 0)
506 		return (error);
507 
508 	mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK;
509 	mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0;
510 	attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
511 	    VM_MEMATTR_DEFAULT;
512 
513 	tag = (struct bus_dma_tag_iommu *)dmat;
514 	map = (struct bus_dmamap_iommu *)*mapp;
515 
516 	if (tag->common.maxsize < PAGE_SIZE &&
517 	    tag->common.alignment <= tag->common.maxsize &&
518 	    attr == VM_MEMATTR_DEFAULT) {
519 		*vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
520 		    DOMAINSET_PREF(tag->common.domain), mflags);
521 		map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
522 	} else {
523 		*vaddr = (void *)kmem_alloc_attr_domainset(
524 		    DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
525 		    mflags, 0ul, BUS_SPACE_MAXADDR, attr);
526 		map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
527 	}
528 	if (*vaddr == NULL) {
529 		iommu_bus_dmamap_destroy(dmat, *mapp);
530 		*mapp = NULL;
531 		return (ENOMEM);
532 	}
533 	return (0);
534 }
535 
536 static void
537 iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
538 {
539 	struct bus_dma_tag_iommu *tag;
540 	struct bus_dmamap_iommu *map;
541 
542 	tag = (struct bus_dma_tag_iommu *)dmat;
543 	map = (struct bus_dmamap_iommu *)map1;
544 
545 	if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
546 		free(vaddr, M_DEVBUF);
547 		map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
548 	} else {
549 		KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
550 		    ("iommu_bus_dmamem_free for non-allocated map %p", map));
551 		kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
552 		map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
553 	}
554 
555 	iommu_bus_dmamap_destroy(dmat, map1);
556 }
557 
558 static int
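/*
 * Map the pages in ma[], starting at offset into the first page and
 * covering buflen bytes, into the domain address space and record the
 * resulting bus addresses in segs[].  Each created map entry is
 * appended to both the map's entry list and unroll_list, so that a
 * failed load can be rolled back by the caller.
 */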
559 iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
560     struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
561     int flags, bus_dma_segment_t *segs, int *segp,
562     struct iommu_map_entries_tailq *unroll_list)
563 {
564 	struct iommu_ctx *ctx;
565 	struct iommu_domain *domain;
566 	struct iommu_map_entry *entry;
567 	bus_size_t buflen1;
568 	int error, e_flags, idx, gas_flags, seg;
569 
570 	KASSERT(offset < IOMMU_PAGE_SIZE, ("offset %d", offset));
571 	if (segs == NULL)
572 		segs = tag->segments;
573 	ctx = tag->ctx;
574 	domain = ctx->domain;
575 	e_flags = IOMMU_MAP_ENTRY_READ |
576 	    ((flags & BUS_DMA_NOWRITE) == 0 ? IOMMU_MAP_ENTRY_WRITE : 0);
577 	seg = *segp;
578 	error = 0;
579 	idx = 0;
580 	while (buflen > 0) {
581 		seg++;
582 		if (seg >= tag->common.nsegments) {
583 			error = EFBIG;
584 			break;
585 		}
586 		buflen1 = buflen > tag->common.maxsegsz ?
587 		    tag->common.maxsegsz : buflen;
588 
589 		/*
590 		 * (Too) optimistically allow a split if there is more
591 		 * than one segment left.
592 		 */
593 		gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
594 		if (seg + 1 < tag->common.nsegments)
595 			gas_flags |= IOMMU_MF_CANSPLIT;
596 
597 		error = iommu_gas_map(domain, &tag->common, buflen1,
598 		    offset, e_flags, gas_flags, ma + idx, &entry);
599 		if (error != 0)
600 			break;
601 		/* Update buflen1 in case the buffer was split. */
602 		if (buflen1 > entry->end - entry->start - offset)
603 			buflen1 = entry->end - entry->start - offset;
604 
605 		KASSERT(vm_addr_align_ok(entry->start + offset,
606 		    tag->common.alignment),
607 		    ("alignment failed: ctx %p start 0x%jx offset %x "
608 		    "align 0x%jx", ctx, (uintmax_t)entry->start, offset,
609 		    (uintmax_t)tag->common.alignment));
610 		KASSERT(entry->end <= tag->common.lowaddr ||
611 		    entry->start >= tag->common.highaddr,
612 		    ("entry placement failed: ctx %p start 0x%jx end 0x%jx "
613 		    "lowaddr 0x%jx highaddr 0x%jx", ctx,
614 		    (uintmax_t)entry->start, (uintmax_t)entry->end,
615 		    (uintmax_t)tag->common.lowaddr,
616 		    (uintmax_t)tag->common.highaddr));
617 		KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1,
618 		    tag->common.boundary),
619 		    ("boundary failed: ctx %p start 0x%jx end 0x%jx "
620 		    "boundary 0x%jx", ctx, (uintmax_t)entry->start,
621 		    (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
622 		KASSERT(buflen1 <= tag->common.maxsegsz,
623 		    ("segment too large: ctx %p start 0x%jx end 0x%jx "
624 		    "buflen1 0x%jx maxsegsz 0x%jx", ctx,
625 		    (uintmax_t)entry->start, (uintmax_t)entry->end,
626 		    (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
627 
628 		IOMMU_DOMAIN_LOCK(domain);
629 		TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
630 		entry->flags |= IOMMU_MAP_ENTRY_MAP;
631 		IOMMU_DOMAIN_UNLOCK(domain);
632 		TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
633 
634 		segs[seg].ds_addr = entry->start + offset;
635 		segs[seg].ds_len = buflen1;
636 
637 		idx += OFF_TO_IDX(offset + buflen1);
638 		offset += buflen1;
639 		offset &= IOMMU_PAGE_MASK;
640 		buflen -= buflen1;
641 	}
642 	if (error == 0)
643 		*segp = seg;
644 	return (error);
645 }
646 
647 static int
648 iommu_bus_dmamap_load_something(struct bus_dma_tag_iommu *tag,
649     struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
650     int flags, bus_dma_segment_t *segs, int *segp)
651 {
652 	struct iommu_ctx *ctx;
653 	struct iommu_domain *domain;
654 	struct iommu_map_entry *entry, *entry1;
655 	struct iommu_map_entries_tailq unroll_list;
656 	int error;
657 
658 	ctx = tag->ctx;
659 	domain = ctx->domain;
660 	atomic_add_long(&ctx->loads, 1);
661 
662 	TAILQ_INIT(&unroll_list);
663 	error = iommu_bus_dmamap_load_something1(tag, map, ma, offset,
664 	    buflen, flags, segs, segp, &unroll_list);
665 	if (error != 0) {
666 		/*
667 		 * The busdma interface does not allow us to report a
668 		 * partial buffer load, so unfortunately we have to
669 		 * revert all work done.
670 		 */
671 		IOMMU_DOMAIN_LOCK(domain);
672 		TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
673 		    entry1) {
674 			/*
675 			 * No entries other than those we created during
676 			 * the failed run could have been inserted in
677 			 * the meantime, since we hold the ctx pglock.
679 			 */
680 			TAILQ_REMOVE(&map->map_entries, entry, dmamap_link);
681 			TAILQ_REMOVE(&unroll_list, entry, unroll_link);
682 			TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
683 			    dmamap_link);
684 		}
685 		IOMMU_DOMAIN_UNLOCK(domain);
686 		taskqueue_enqueue(domain->iommu->delayed_taskqueue,
687 		    &domain->unload_task);
688 	}
689 
690 	if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 &&
691 	    !map->cansleep)
692 		error = EINPROGRESS;
693 	if (error == EINPROGRESS)
694 		iommu_bus_schedule_dmamap(domain->iommu, map);
695 	return (error);
696 }
697 
698 static int
699 iommu_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
700     struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
701     bus_dma_segment_t *segs, int *segp)
702 {
703 	struct bus_dma_tag_iommu *tag;
704 	struct bus_dmamap_iommu *map;
705 
706 	tag = (struct bus_dma_tag_iommu *)dmat;
707 	map = (struct bus_dmamap_iommu *)map1;
708 	return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
709 	    flags, segs, segp));
710 }
711 
712 static int
713 iommu_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
714     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
715     int *segp)
716 {
717 	struct bus_dma_tag_iommu *tag;
718 	struct bus_dmamap_iommu *map;
719 	vm_page_t *ma, fma;
720 	vm_paddr_t pstart, pend, paddr;
721 	int error, i, ma_cnt, mflags, offset;
722 
723 	tag = (struct bus_dma_tag_iommu *)dmat;
724 	map = (struct bus_dmamap_iommu *)map1;
725 	pstart = trunc_page(buf);
726 	pend = round_page(buf + buflen);
727 	offset = buf & PAGE_MASK;
728 	ma_cnt = OFF_TO_IDX(pend - pstart);
729 	mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
730 	ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags);
731 	if (ma == NULL)
732 		return (ENOMEM);
733 	fma = NULL;
734 	for (i = 0; i < ma_cnt; i++) {
735 		paddr = pstart + ptoa(i);
736 		ma[i] = PHYS_TO_VM_PAGE(paddr);
737 		if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
738 			/*
739 			 * If PHYS_TO_VM_PAGE() returned NULL or the
740 			 * vm_page was not initialized we'll use a
741 			 * fake page.
742 			 */
743 			if (fma == NULL) {
744 				fma = malloc(sizeof(struct vm_page) * ma_cnt,
745 				    M_DEVBUF, M_ZERO | mflags);
746 				if (fma == NULL) {
747 					free(ma, M_DEVBUF);
748 					return (ENOMEM);
749 				}
750 			}
751 			vm_page_initfake(&fma[i], pstart + ptoa(i),
752 			    VM_MEMATTR_DEFAULT);
753 			ma[i] = &fma[i];
754 		}
755 	}
756 	error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
757 	    flags, segs, segp);
758 	free(fma, M_DEVBUF);
759 	free(ma, M_DEVBUF);
760 	return (error);
761 }
762 
763 static int
764 iommu_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
765     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
766     int *segp)
767 {
768 	struct bus_dma_tag_iommu *tag;
769 	struct bus_dmamap_iommu *map;
770 	vm_page_t *ma, fma;
771 	vm_paddr_t pstart, pend, paddr;
772 	int error, i, ma_cnt, mflags, offset;
773 
774 	tag = (struct bus_dma_tag_iommu *)dmat;
775 	map = (struct bus_dmamap_iommu *)map1;
776 	pstart = trunc_page((vm_offset_t)buf);
777 	pend = round_page((vm_offset_t)buf + buflen);
778 	offset = (vm_offset_t)buf & PAGE_MASK;
779 	ma_cnt = OFF_TO_IDX(pend - pstart);
780 	mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
781 	ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, mflags);
782 	if (ma == NULL)
783 		return (ENOMEM);
784 	fma = NULL;
785 	for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
786 		if (pmap == kernel_pmap)
787 			paddr = pmap_kextract(pstart);
788 		else
789 			paddr = pmap_extract(pmap, pstart);
790 		ma[i] = PHYS_TO_VM_PAGE(paddr);
791 		if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
792 			/*
793 			 * If PHYS_TO_VM_PAGE() returned NULL or the
794 			 * vm_page was not initialized we'll use a
795 			 * fake page.
796 			 */
797 			if (fma == NULL) {
798 				fma = malloc(sizeof(struct vm_page) * ma_cnt,
799 				    M_DEVBUF, M_ZERO | mflags);
800 				if (fma == NULL) {
801 					free(ma, M_DEVBUF);
802 					return (ENOMEM);
803 				}
804 			}
805 			vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT);
806 			ma[i] = &fma[i];
807 		}
808 	}
809 	error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
810 	    flags, segs, segp);
811 	free(ma, M_DEVBUF);
812 	free(fma, M_DEVBUF);
813 	return (error);
814 }
815 
816 static void
817 iommu_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
818     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
819 {
820 	struct bus_dmamap_iommu *map;
821 
822 	if (map1 == NULL)
823 		return;
824 	map = (struct bus_dmamap_iommu *)map1;
825 	map->mem = *mem;
826 	map->tag = (struct bus_dma_tag_iommu *)dmat;
827 	map->callback = callback;
828 	map->callback_arg = callback_arg;
829 }
830 
831 static bus_dma_segment_t *
832 iommu_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
833     bus_dma_segment_t *segs, int nsegs, int error)
834 {
835 	struct bus_dma_tag_iommu *tag;
836 	struct bus_dmamap_iommu *map;
837 
838 	tag = (struct bus_dma_tag_iommu *)dmat;
839 	map = (struct bus_dmamap_iommu *)map1;
840 
841 	if (!map->locked) {
842 		KASSERT(map->cansleep,
843 		    ("map not locked and not sleepable context %p", map));
844 
845 		/*
846 		 * We are called from the delayed context.  Relock the
847 		 * driver.
848 		 */
849 		(tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
850 		map->locked = true;
851 	}
852 
853 	if (segs == NULL)
854 		segs = tag->segments;
855 	return (segs);
856 }
857 
858 /*
859  * The limitations of the busdma KPI force the IOMMU to perform the
860  * actual unload, consisting of the unmapping of the map entries from
861  * the page tables, from a delayed context on i386, since mapping a
862  * page table page might require a sleep to be successful.  The
863  * unfortunate consequence is that DMA requests can still be served
864  * for some time after the bus_dmamap_unload() call has returned.
865  *
866  * On amd64, we assume that sf allocation cannot fail.
867  */
868 static void
869 iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
870 {
871 	struct bus_dma_tag_iommu *tag;
872 	struct bus_dmamap_iommu *map;
873 	struct iommu_ctx *ctx;
874 	struct iommu_domain *domain;
875 #ifndef IOMMU_DOMAIN_UNLOAD_SLEEP
876 	struct iommu_map_entries_tailq entries;
877 #endif
878 
879 	tag = (struct bus_dma_tag_iommu *)dmat;
880 	map = (struct bus_dmamap_iommu *)map1;
881 	ctx = tag->ctx;
882 	domain = ctx->domain;
883 	atomic_add_long(&ctx->unloads, 1);
884 
885 #if defined(IOMMU_DOMAIN_UNLOAD_SLEEP)
886 	IOMMU_DOMAIN_LOCK(domain);
887 	TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
888 	IOMMU_DOMAIN_UNLOCK(domain);
889 	taskqueue_enqueue(domain->iommu->delayed_taskqueue,
890 	    &domain->unload_task);
891 #else
892 	TAILQ_INIT(&entries);
893 	IOMMU_DOMAIN_LOCK(domain);
894 	TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
895 	IOMMU_DOMAIN_UNLOCK(domain);
896 	THREAD_NO_SLEEPING();
897 	iommu_domain_unload(domain, &entries, false);
898 	THREAD_SLEEPING_OK();
899 	KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_ctx_unload %p", ctx));
900 #endif
901 }
902 
903 static void
904 iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map1,
905     bus_dmasync_op_t op)
906 {
907 	struct bus_dmamap_iommu *map __unused;
908 
909 	map = (struct bus_dmamap_iommu *)map1;
910 	kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
911 }
912 
913 #ifdef KMSAN
914 static void
915 iommu_bus_dmamap_load_kmsan(bus_dmamap_t map1, struct memdesc *mem)
916 {
917 	struct bus_dmamap_iommu *map;
918 
919 	map = (struct bus_dmamap_iommu *)map1;
920 	if (map == NULL)
921 		return;
922 	memcpy(&map->kmsan_mem, mem, sizeof(struct memdesc));
923 }
924 #endif
925 
926 struct bus_dma_impl bus_dma_iommu_impl = {
927 	.tag_create = iommu_bus_dma_tag_create,
928 	.tag_destroy = iommu_bus_dma_tag_destroy,
929 	.tag_set_domain = iommu_bus_dma_tag_set_domain,
930 	.id_mapped = iommu_bus_dma_id_mapped,
931 	.map_create = iommu_bus_dmamap_create,
932 	.map_destroy = iommu_bus_dmamap_destroy,
933 	.mem_alloc = iommu_bus_dmamem_alloc,
934 	.mem_free = iommu_bus_dmamem_free,
935 	.load_phys = iommu_bus_dmamap_load_phys,
936 	.load_buffer = iommu_bus_dmamap_load_buffer,
937 	.load_ma = iommu_bus_dmamap_load_ma,
938 	.map_waitok = iommu_bus_dmamap_waitok,
939 	.map_complete = iommu_bus_dmamap_complete,
940 	.map_unload = iommu_bus_dmamap_unload,
941 	.map_sync = iommu_bus_dmamap_sync,
942 #ifdef KMSAN
943 	.load_kmsan = iommu_bus_dmamap_load_kmsan,
944 #endif
945 };
946 
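/*
 * Taskqueue handler that retries dmamap loads which could not
 * complete in their original, non-sleepable context.  The load is
 * redone with sleeping allowed, using the memory descriptor and
 * callback saved by iommu_bus_dmamap_waitok().
 */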
947 static void
948 iommu_bus_task_dmamap(void *arg, int pending)
949 {
950 	struct bus_dma_tag_iommu *tag;
951 	struct bus_dmamap_iommu *map;
952 	struct iommu_unit *unit;
953 
954 	unit = arg;
955 	IOMMU_LOCK(unit);
956 	while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
957 		TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
958 		IOMMU_UNLOCK(unit);
959 		tag = map->tag;
960 		map->cansleep = true;
961 		map->locked = false;
962 		bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
963 		    &map->mem, map->callback, map->callback_arg,
964 		    BUS_DMA_WAITOK);
965 		map->cansleep = false;
966 		if (map->locked) {
967 			(tag->common.lockfunc)(tag->common.lockfuncarg,
968 			    BUS_DMA_UNLOCK);
969 		} else
970 			map->locked = true;
971 		map->cansleep = false;
972 		IOMMU_LOCK(unit);
973 	}
974 	IOMMU_UNLOCK(unit);
975 }
976 
977 static void
978 iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map)
979 {
980 
981 	map->locked = false;
982 	IOMMU_LOCK(unit);
983 	TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
984 	IOMMU_UNLOCK(unit);
985 	taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
986 }
987 
988 int
989 iommu_init_busdma(struct iommu_unit *unit)
990 {
991 	int error;
992 
993 	unit->dma_enabled = 1;
994 	error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
995 	if (error == 0) /* compat: fall back to the old dmar name */
996 		TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
997 	TAILQ_INIT(&unit->delayed_maps);
998 	TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
999 	unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK,
1000 	    taskqueue_thread_enqueue, &unit->delayed_taskqueue);
1001 	taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
1002 	    "iommu%d busdma taskq", unit->unit);
1003 	return (0);
1004 }
1005 
1006 void
1007 iommu_fini_busdma(struct iommu_unit *unit)
1008 {
1009 
1010 	if (unit->delayed_taskqueue == NULL)
1011 		return;
1012 
1013 	taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
1014 	taskqueue_free(unit->delayed_taskqueue);
1015 	unit->delayed_taskqueue = NULL;
1016 }
1017 
1018 int
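/*
 * Establish an identity mapping for the physical range [start, start +
 * length) in the device's IOMMU domain, so that the range is reachable
 * by the device at bus addresses equal to the physical addresses.
 * Fake vm_page structures describe the range to iommu_gas_map_region().
 */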
1019 bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1,
1020     vm_paddr_t start, vm_size_t length, int flags)
1021 {
1022 	struct bus_dma_tag_common *tc;
1023 	struct bus_dma_tag_iommu *tag;
1024 	struct bus_dmamap_iommu *map;
1025 	struct iommu_ctx *ctx;
1026 	struct iommu_domain *domain;
1027 	struct iommu_map_entry *entry;
1028 	vm_page_t *ma;
1029 	vm_size_t i;
1030 	int error;
1031 	bool waitok;
1032 
1033 	MPASS((start & PAGE_MASK) == 0);
1034 	MPASS((length & PAGE_MASK) == 0);
1035 	MPASS(length > 0);
1036 	MPASS(start + length >= start);
1037 	MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0);
1038 
1039 	tc = (struct bus_dma_tag_common *)dmat;
1040 	if (tc->impl != &bus_dma_iommu_impl)
1041 		return (0);
1042 
1043 	tag = (struct bus_dma_tag_iommu *)dmat;
1044 	ctx = tag->ctx;
1045 	domain = ctx->domain;
1046 	map = (struct bus_dmamap_iommu *)map1;
1047 	waitok = (flags & BUS_DMA_NOWAIT) == 0;
1048 
1049 	entry = iommu_gas_alloc_entry(domain, waitok ? IOMMU_PGF_WAITOK : 0);
1050 	if (entry == NULL)
1051 		return (ENOMEM);
1052 	entry->start = start;
1053 	entry->end = start + length;
1054 	ma = malloc(sizeof(vm_page_t) * atop(length), M_TEMP, waitok ?
1055 	    M_WAITOK : M_NOWAIT);
1056 	if (ma == NULL) {
1057 		iommu_gas_free_entry(domain, entry);
1058 		return (ENOMEM);
1059 	}
1060 	for (i = 0; i < atop(length); i++) {
1061 		ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
1062 		    VM_MEMATTR_DEFAULT);
1063 	}
1064 	error = iommu_gas_map_region(domain, entry, IOMMU_MAP_ENTRY_READ |
1065 	    ((flags & BUS_DMA_NOWRITE) ? 0 : IOMMU_MAP_ENTRY_WRITE),
1066 	    waitok ? IOMMU_MF_CANWAIT : 0, ma);
1067 	if (error == 0) {
1068 		IOMMU_DOMAIN_LOCK(domain);
1069 		TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
1070 		entry->flags |= IOMMU_MAP_ENTRY_MAP;
1071 		IOMMU_DOMAIN_UNLOCK(domain);
1072 	} else {
1073 		iommu_domain_unload_entry(entry, true);
1074 	}
1075 	for (i = 0; i < atop(length); i++)
1076 		vm_page_putfake(ma[i]);
1077 	free(ma, M_TEMP);
1078 	return (error);
1079 }
1080 
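/*
 * Taskqueue handler that performs deferred unmapping of the entries
 * queued on the domain's unload_entries list.
 */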
1081 static void
1082 iommu_domain_unload_task(void *arg, int pending)
1083 {
1084 	struct iommu_domain *domain;
1085 	struct iommu_map_entries_tailq entries;
1086 
1087 	domain = arg;
1088 	TAILQ_INIT(&entries);
1089 
1090 	for (;;) {
1091 		IOMMU_DOMAIN_LOCK(domain);
1092 		TAILQ_SWAP(&domain->unload_entries, &entries,
1093 		    iommu_map_entry, dmamap_link);
1094 		IOMMU_DOMAIN_UNLOCK(domain);
1095 		if (TAILQ_EMPTY(&entries))
1096 			break;
1097 		iommu_domain_unload(domain, &entries, true);
1098 	}
1099 }
1100 
1101 void
1102 iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
1103     const struct iommu_domain_map_ops *ops)
1104 {
1105 
1106 	domain->ops = ops;
1107 	domain->iommu = unit;
1108 
1109 	TASK_INIT(&domain->unload_task, 0, iommu_domain_unload_task, domain);
1110 	RB_INIT(&domain->rb_root);
1111 	TAILQ_INIT(&domain->unload_entries);
1112 	mtx_init(&domain->lock, "iodom", NULL, MTX_DEF);
1113 }
1114 
1115 void
1116 iommu_domain_fini(struct iommu_domain *domain)
1117 {
1118 
1119 	mtx_destroy(&domain->lock);
1120 }
1121