/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <dev/pci/pcireg.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/intel_dmar.h>

static int domain_unmap_buf_locked(struct dmar_domain *domain,
    iommu_gaddr_t base, iommu_gaddr_t size, int flags);

/*
 * The cache of the identity mapping page tables for the DMARs.  Using
 * the cache saves a significant amount of memory for page tables by
 * reusing them, since DMARs are usually identical and have the same
 * capabilities.  Still, the cache records the information needed to
 * match DMAR capabilities and page table format, to correctly handle
 * different DMARs.
 */

struct idpgtbl {
        iommu_gaddr_t maxaddr;  /* Page table covers the guest address
                                   range [0..maxaddr) */
        int pglvl;              /* Total page table levels ignoring
                                   superpages */
        int leaf;               /* The last materialized page table
                                   level, it is non-zero if superpages
                                   are supported */
        vm_object_t pgtbl_obj;  /* The page table pages */
        LIST_ENTRY(idpgtbl) link;
};

static struct sx idpgtbl_lock;
SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
    "Intel DMAR Identity mappings cache elements");

/*
 * Build the next level of the page tables for the identity mapping.
 * - lvl is the level to build;
 * - idx is the index of the page table page in the pgtbl_obj, which is
 *   being allocated and filled now;
 * - addr is the starting address in the bus address space which is
 *   mapped by the page table page.
 */
static void
domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
    iommu_gaddr_t addr)
{
        vm_page_t m1;
        iommu_pte_t *pte;
        struct sf_buf *sf;
        iommu_gaddr_t f, pg_sz;
        vm_pindex_t base;
        int i;

        VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
        if (addr >= tbl->maxaddr)
                return;
        (void)iommu_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL |
            IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
        base = idx * IOMMU_NPTEPG + 1; /* Index of the first child page of idx */
        pg_sz = pglvl_page_size(tbl->pglvl, lvl);
        if (lvl != tbl->leaf) {
                for (i = 0, f = addr; i < IOMMU_NPTEPG; i++, f += pg_sz)
                        domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
        }
        VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
        pte = iommu_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
        if (lvl == tbl->leaf) {
                for (i = 0, f = addr; i < IOMMU_NPTEPG; i++, f += pg_sz) {
                        if (f >= tbl->maxaddr)
                                break;
                        pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
                            DMAR_PTE_R | DMAR_PTE_W;
                }
        } else {
                for (i = 0, f = addr; i < IOMMU_NPTEPG; i++, f += pg_sz) {
                        if (f >= tbl->maxaddr)
                                break;
                        m1 = iommu_pgalloc(tbl->pgtbl_obj, base + i,
                            IOMMU_PGF_NOALLOC);
                        KASSERT(m1 != NULL, ("lost page table page"));
                        pte[i].pte = (DMAR_PTE_ADDR_MASK &
                            VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
                }
        }
        /* domain_get_idmap_pgtbl flushes CPU cache if needed. */
        iommu_unmap_pgtbl(sf);
        VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}
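
/*
 * Note that domain_idmap_nextlvl() drops the object lock around the
 * sf_buf mapping of the page table page.  This is safe during the
 * identity table construction because the new idpgtbl is not yet
 * linked into the idpgtbls cache, so no other thread can observe the
 * half-built table.
 */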

/*
 * Find a ready and compatible identity-mapping page table in the
 * cache.  If not found, populate the identity-mapping page table for
 * the context, up to maxaddr.  The byte at maxaddr is allowed to be
 * left unmapped, which is consistent with the definition of Maxmem as
 * the highest usable physical address plus one.  If superpages are
 * used, maxaddr is typically mapped.
 */
vm_object_t
domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
{
        struct dmar_unit *unit;
        struct idpgtbl *tbl;
        vm_object_t res;
        vm_page_t m;
        int leaf, i;

        leaf = 0; /* silence gcc */

        /*
         * First, determine where to stop the paging structures.
         */
        for (i = 0; i < domain->pglvl; i++) {
                if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
                        leaf = i;
                        break;
                }
        }

        /*
         * Search the cache for a compatible page table.  A qualifying
         * page table must map up to maxaddr, its level must be
         * supported by the DMAR, and its leaf must equal the
         * calculated value.  The latter restriction could be lifted,
         * but I believe it is currently impossible to have any
         * deviations for existing hardware.
         */
        sx_slock(&idpgtbl_lock);
        LIST_FOREACH(tbl, &idpgtbls, link) {
                if (tbl->maxaddr >= maxaddr &&
                    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
                    tbl->leaf == leaf) {
                        res = tbl->pgtbl_obj;
                        vm_object_reference(res);
                        sx_sunlock(&idpgtbl_lock);
                        domain->pglvl = tbl->pglvl; /* XXXKIB ? */
                        goto end;
                }
        }

        /*
         * Not found in the cache, relock the cache into exclusive
         * mode to be able to add an element, and recheck the cache
         * after the relock.
         */
        sx_sunlock(&idpgtbl_lock);
        sx_xlock(&idpgtbl_lock);
        LIST_FOREACH(tbl, &idpgtbls, link) {
                if (tbl->maxaddr >= maxaddr &&
                    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
                    tbl->leaf == leaf) {
                        res = tbl->pgtbl_obj;
                        vm_object_reference(res);
                        sx_xunlock(&idpgtbl_lock);
                        domain->pglvl = tbl->pglvl; /* XXXKIB ? */
                        return (res);
                }
        }

        /*
         * Still not found, create a new page table.
         */
        tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
        tbl->pglvl = domain->pglvl;
        tbl->leaf = leaf;
        tbl->maxaddr = maxaddr;
        tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
            IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
        VM_OBJECT_WLOCK(tbl->pgtbl_obj);
        domain_idmap_nextlvl(tbl, 0, 0, 0);
        VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
        LIST_INSERT_HEAD(&idpgtbls, tbl, link);
        res = tbl->pgtbl_obj;
        vm_object_reference(res);
        sx_xunlock(&idpgtbl_lock);

end:
        /*
         * Table was found or created.
         *
         * If the DMAR does not snoop paging structure accesses, flush
         * the CPU cache to memory.  Note that the dmar_unmap_pgtbl()
         * coherent argument may have been wrong at the time of the
         * identity page table creation, since the DMAR passed at
         * creation time could be coherent while the current DMAR is
         * not.
         *
         * If the DMAR cannot look into the chipset write buffer,
         * flush it as well.
         */
        unit = domain->dmar;
        if (!DMAR_IS_COHERENT(unit)) {
                VM_OBJECT_WLOCK(res);
                for (m = vm_page_lookup(res, 0); m != NULL;
                    m = vm_page_next(m))
                        pmap_invalidate_cache_pages(&m, 1);
                VM_OBJECT_WUNLOCK(res);
        }
        if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
                DMAR_LOCK(unit);
                dmar_flush_write_bufs(unit);
                DMAR_UNLOCK(unit);
        }

        return (res);
}

/*
 * Return a reference to the identity mapping page table to the cache.
 */
void
put_idmap_pgtbl(vm_object_t obj)
{
        struct idpgtbl *tbl, *tbl1;
        vm_object_t rmobj;

        sx_slock(&idpgtbl_lock);
        KASSERT(obj->ref_count >= 2, ("lost cache reference"));
        vm_object_deallocate(obj);

        /*
         * The cache always owns one last reference on the page table
         * object.  If there is an additional reference, the object
         * must stay.
         */
        if (obj->ref_count > 1) {
                sx_sunlock(&idpgtbl_lock);
                return;
        }

        /*
         * The cache reference is the last one, remove the cache
         * element and free the page table object, returning the page
         * table pages to the system.
         */
        sx_sunlock(&idpgtbl_lock);
        sx_xlock(&idpgtbl_lock);
        LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
                rmobj = tbl->pgtbl_obj;
                if (rmobj->ref_count == 1) {
                        LIST_REMOVE(tbl, link);
                        atomic_subtract_int(&iommu_tbl_pagecnt,
                            rmobj->resident_page_count);
                        vm_object_deallocate(rmobj);
                        free(tbl, M_DMAR_IDPGTBL);
                }
        }
        sx_xunlock(&idpgtbl_lock);
}

/*
 * The core routines to map and unmap host pages at the given guest
 * address.  Superpages are supported.
 */

/*
 * Index of the pte for the guest address base in the page table at
 * the level lvl.
 */
static int
domain_pgtbl_pte_off(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{

        base >>= IOMMU_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
            IOMMU_NPTEPGSHIFT;
        return (base & IOMMU_PTEMASK);
}
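
/*
 * For instance, with a 4-level page table (pglvl == 4) and 512 ptes
 * per page table page (IOMMU_NPTEPG == 512, IOMMU_NPTEPGSHIFT == 9),
 * the leaf level lvl == 3 selects bits 20:12 of base, level 2 selects
 * bits 29:21, and so on; each step up the tree shifts the index
 * window by another 9 bits.
 */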

/*
 * Returns the page index of the page table page in the page table
 * object, which maps the given address base at the page table level
 * lvl.
 */
static vm_pindex_t
domain_pgtbl_get_pindex(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{
        vm_pindex_t idx, pidx;
        int i;

        KASSERT(lvl >= 0 && lvl < domain->pglvl,
            ("wrong lvl %p %d", domain, lvl));

        for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) {
                idx = domain_pgtbl_pte_off(domain, base, i) +
                    pidx * IOMMU_NPTEPG + 1;
        }
        return (idx);
}
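
/*
 * The page table pages are kept in the object as a linearized tree:
 * the root is at pindex 0, and the IOMMU_NPTEPG children of the page
 * at pindex i occupy pindexes i * IOMMU_NPTEPG + 1 up to
 * (i + 1) * IOMMU_NPTEPG.  The loop above walks this layout from the
 * root, as does the "base = idx * IOMMU_NPTEPG + 1" step in
 * domain_idmap_nextlvl().
 */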

static iommu_pte_t *
domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
        vm_page_t m;
        struct sf_buf *sfp;
        iommu_pte_t *pte, *ptep;
        vm_pindex_t idx, idx1;

        DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
        KASSERT((flags & IOMMU_PGF_OBJL) != 0, ("lost PGF_OBJL"));

        idx = domain_pgtbl_get_pindex(domain, base, lvl);
        if (*sf != NULL && idx == *idxp) {
                pte = (iommu_pte_t *)sf_buf_kva(*sf);
        } else {
                if (*sf != NULL)
                        iommu_unmap_pgtbl(*sf);
                *idxp = idx;
retry:
                pte = iommu_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
                if (pte == NULL) {
                        KASSERT(lvl > 0,
                            ("lost root page table page %p", domain));
                        /*
                         * The page table page does not exist, allocate
                         * it and create a pte in the preceding page
                         * level to reference the allocated page table
                         * page.
                         */
                        m = iommu_pgalloc(domain->pgtbl_obj, idx, flags |
                            IOMMU_PGF_ZERO);
                        if (m == NULL)
                                return (NULL);

                        /*
                         * Prevent a potential free while pgtbl_obj is
                         * unlocked in the recursive call to
                         * domain_pgtbl_map_pte(), if another thread did
                         * a pte write and clear while the lock was
                         * dropped.
                         */
                        m->ref_count++;

                        sfp = NULL;
                        ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
                            flags, &idx1, &sfp);
                        if (ptep == NULL) {
                                KASSERT(m->pindex != 0,
                                    ("losing root page %p", domain));
                                m->ref_count--;
                                iommu_pgfree(domain->pgtbl_obj, m->pindex,
                                    flags);
                                return (NULL);
                        }
                        dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
                            VM_PAGE_TO_PHYS(m));
                        dmar_flush_pte_to_ram(domain->dmar, ptep);
                        sf_buf_page(sfp)->ref_count += 1;
                        m->ref_count--;
                        iommu_unmap_pgtbl(sfp);
                        /* Only executed once. */
                        goto retry;
                }
        }
        pte += domain_pgtbl_pte_off(domain, base, lvl);
        return (pte);
}
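
/*
 * The ref_count of a page table page is used here to count the valid
 * ptes stored in it: the count is incremented whenever a pte in the
 * page is set (here and in domain_map_buf_locked()) and decremented
 * in domain_unmap_clear_pte() when a pte is cleared, so a page with
 * no remaining ptes can be freed.  The root page gets an extra
 * reference in domain_alloc_pgtbl() and is never implicitly freed.
 */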

static int
domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
        iommu_pte_t *pte;
        struct sf_buf *sf;
        iommu_gaddr_t pg_sz, base1;
        vm_pindex_t pi, c, idx, run_sz;
        int lvl;
        bool superpage;

        DMAR_DOMAIN_ASSERT_PGLOCKED(domain);

        base1 = base;
        flags |= IOMMU_PGF_OBJL;
        TD_PREP_PINNED_ASSERT;

        for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
            pi += run_sz) {
                for (lvl = 0, c = 0, superpage = false;; lvl++) {
                        pg_sz = domain_page_size(domain, lvl);
                        run_sz = pg_sz >> IOMMU_PAGE_SHIFT;
                        if (lvl == domain->pglvl - 1)
                                break;
                        /*
                         * Check if the current base is suitable for a
                         * superpage mapping.  First, verify the level.
                         */
                        if (!domain_is_sp_lvl(domain, lvl))
                                continue;
                        /*
                         * Next, look at the size of the mapping and
                         * the alignment of both guest and host
                         * addresses.
                         */
                        if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
                            (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
                                continue;
                        /* All passed, check host pages contiguity. */
                        if (c == 0) {
                                for (c = 1; c < run_sz; c++) {
                                        if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
                                            VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
                                            PAGE_SIZE)
                                                break;
                                }
                        }
                        if (c >= run_sz) {
                                superpage = true;
                                break;
                        }
                }
                KASSERT(size >= pg_sz,
                    ("mapping loop overflow %p %jx %jx %jx", domain,
                    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
                KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
                pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
                if (pte == NULL) {
                        KASSERT((flags & IOMMU_PGF_WAITOK) == 0,
                            ("failed waitable pte alloc %p", domain));
                        if (sf != NULL)
                                iommu_unmap_pgtbl(sf);
                        domain_unmap_buf_locked(domain, base1, base - base1,
                            flags);
                        TD_PINNED_ASSERT;
                        return (ENOMEM);
                }
                dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
                    (superpage ? DMAR_PTE_SP : 0));
                dmar_flush_pte_to_ram(domain->dmar, pte);
                sf_buf_page(sf)->ref_count += 1;
        }
        if (sf != NULL)
                iommu_unmap_pgtbl(sf);
        TD_PINNED_ASSERT;
        return (0);
}
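
/*
 * For example, assuming a DMAR with 2MB superpage support: a 2MB
 * mapping whose guest address is 2MB-aligned and whose backing host
 * pages are physically contiguous and 2MB-aligned is entered as a
 * single pte with DMAR_PTE_SP set at the superpage level, instead of
 * 512 leaf ptes.
 */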

static int
domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
        struct dmar_domain *domain;
        struct dmar_unit *unit;
        uint64_t pflags;
        int error;

        pflags = ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
            ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
            ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
            ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0);

        domain = IODOM2DOM(iodom);
        unit = domain->dmar;

        KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
            ("modifying idmap pagetable domain %p", domain));
        KASSERT((base & IOMMU_PAGE_MASK) == 0,
            ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((size & IOMMU_PAGE_MASK) == 0,
            ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT(base < (1ULL << domain->agaw),
            ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
            (uintmax_t)size, domain->agaw));
        KASSERT(base + size < (1ULL << domain->agaw),
            ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
            (uintmax_t)size, domain->agaw));
        KASSERT(base + size > base,
            ("size overflow %p %jx %jx", domain, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
            ("neither read nor write %jx", (uintmax_t)pflags));
        KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
            DMAR_PTE_TM)) == 0,
            ("invalid pte flags %jx", (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
            (unit->hw_ecap & DMAR_ECAP_SC) != 0,
            ("PTE_SNP for dmar without snoop control %p %jx",
            domain, (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_TM) == 0 ||
            (unit->hw_ecap & DMAR_ECAP_DI) != 0,
            ("PTE_TM for dmar without DIOTLB %p %jx",
            domain, (uintmax_t)pflags));
        KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

        DMAR_DOMAIN_PGLOCK(domain);
        error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
        DMAR_DOMAIN_PGUNLOCK(domain);
        if (error != 0)
                return (error);

        if ((unit->hw_cap & DMAR_CAP_CM) != 0)
                domain_flush_iotlb_sync(domain, base, size);
        else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
                /* See 11.1 Write Buffer Flushing. */
                DMAR_LOCK(unit);
                dmar_flush_write_bufs(unit);
                DMAR_UNLOCK(unit);
        }
        return (0);
}

static void domain_unmap_clear_pte(struct dmar_domain *domain,
    iommu_gaddr_t base, int lvl, int flags, iommu_pte_t *pte,
    struct sf_buf **sf, bool free_sf);

static void
domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
    int lvl, int flags)
{
        struct sf_buf *sf;
        iommu_pte_t *pde;
        vm_pindex_t idx;

        sf = NULL;
        pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
        domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
}

static void
domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, iommu_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
        vm_page_t m;

        dmar_pte_clear(&pte->pte);
        dmar_flush_pte_to_ram(domain->dmar, pte);
        m = sf_buf_page(*sf);
        if (free_sf) {
                iommu_unmap_pgtbl(*sf);
                *sf = NULL;
        }
        m->ref_count--;
        if (m->ref_count != 0)
                return;
        KASSERT(lvl != 0,
            ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
            domain, (uintmax_t)base, lvl));
        KASSERT(m->pindex != 0,
            ("lost reference (idx) on root pg domain %p base %jx lvl %d",
            domain, (uintmax_t)base, lvl));
        iommu_pgfree(domain->pgtbl_obj, m->pindex, flags);
        domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
}
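
/*
 * Clearing the last pte in a page table page drops that page's last
 * reference, so the page is freed and the pde referencing it in the
 * parent level is cleared in turn by domain_free_pgtbl_pde().  The
 * mutual recursion therefore trims empty interior pages all the way
 * up, stopping before the root page.
 */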

/*
 * Assumes that the unmap is never partial.
 */
static int
domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
        iommu_pte_t *pte;
        struct sf_buf *sf;
        vm_pindex_t idx;
        iommu_gaddr_t pg_sz;
        int lvl;

        DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
        if (size == 0)
                return (0);

        KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
            ("modifying idmap pagetable domain %p", domain));
        KASSERT((base & IOMMU_PAGE_MASK) == 0,
            ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((size & IOMMU_PAGE_MASK) == 0,
            ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT(base < (1ULL << domain->agaw),
            ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
            (uintmax_t)size, domain->agaw));
        KASSERT(base + size < (1ULL << domain->agaw),
            ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
            (uintmax_t)size, domain->agaw));
        KASSERT(base + size > base,
            ("size overflow %p %jx %jx", domain, (uintmax_t)base,
            (uintmax_t)size));
        KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

        pg_sz = 0; /* silence gcc */
        flags |= IOMMU_PGF_OBJL;
        TD_PREP_PINNED_ASSERT;

        for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
                for (lvl = 0; lvl < domain->pglvl; lvl++) {
                        if (lvl != domain->pglvl - 1 &&
                            !domain_is_sp_lvl(domain, lvl))
                                continue;
                        pg_sz = domain_page_size(domain, lvl);
                        if (pg_sz > size)
                                continue;
                        pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
                            &idx, &sf);
                        KASSERT(pte != NULL,
                            ("sleeping or page missed %p %jx %d 0x%x",
                            domain, (uintmax_t)base, lvl, flags));
                        if ((pte->pte & DMAR_PTE_SP) != 0 ||
                            lvl == domain->pglvl - 1) {
                                domain_unmap_clear_pte(domain, base, lvl,
                                    flags, pte, &sf, false);
                                break;
                        }
                }
                KASSERT(size >= pg_sz,
                    ("unmapping loop overflow %p %jx %jx %jx", domain,
                    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
        }
        if (sf != NULL)
                iommu_unmap_pgtbl(sf);
        /*
         * See 11.1 Write Buffer Flushing for an explanation why RWBF
         * can be ignored there.
         */

        TD_PINNED_ASSERT;
        return (0);
}

static int
domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
        struct dmar_domain *domain;
        int error;

        domain = IODOM2DOM(iodom);

        DMAR_DOMAIN_PGLOCK(domain);
        error = domain_unmap_buf_locked(domain, base, size, flags);
        DMAR_DOMAIN_PGUNLOCK(domain);
        return (error);
}

int
domain_alloc_pgtbl(struct dmar_domain *domain)
{
        vm_page_t m;

        KASSERT(domain->pgtbl_obj == NULL,
            ("already initialized %p", domain));

        domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
            IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
        DMAR_DOMAIN_PGLOCK(domain);
        m = iommu_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
            IOMMU_PGF_ZERO | IOMMU_PGF_OBJL);
        /* No implicit free of the top level page table page. */
        m->ref_count = 1;
        DMAR_DOMAIN_PGUNLOCK(domain);
        DMAR_LOCK(domain->dmar);
        domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
        DMAR_UNLOCK(domain->dmar);
        return (0);
}

void
domain_free_pgtbl(struct dmar_domain *domain)
{
        vm_object_t obj;
        vm_page_t m;

        obj = domain->pgtbl_obj;
        if (obj == NULL) {
                KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
                    (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0,
                    ("lost pagetable object domain %p", domain));
                return;
        }
        DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
        domain->pgtbl_obj = NULL;

        if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
                put_idmap_pgtbl(obj);
                domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
                return;
        }

        /* Obliterate ref_counts */
        VM_OBJECT_ASSERT_WLOCKED(obj);
        for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
                m->ref_count = 0;
        VM_OBJECT_WUNLOCK(obj);
        vm_object_deallocate(obj);
}

static inline uint64_t
domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
{
        uint64_t iotlbr;

        dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
            DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
        for (;;) {
                iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
                if ((iotlbr & DMAR_IOTLB_IVT) == 0)
                        break;
                cpu_spinwait();
        }
        return (iotlbr);
}
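
/*
 * domain_wait_iotlb_flush() queues the requested invalidation by
 * writing the IOTLB register with DMAR_IOTLB_IVT set, then busy-waits
 * until the hardware clears the bit.  The raw register value is
 * returned so that callers can inspect the DMAR_IOTLB_IAIG_* field to
 * learn the actual invalidation granularity performed.
 */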

void
domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size)
{
        struct dmar_unit *unit;
        iommu_gaddr_t isize;
        uint64_t iotlbr;
        int am, iro;

        unit = domain->dmar;
        KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
            unit->iommu.unit));
        iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
        DMAR_LOCK(unit);
        if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
                iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
                    DMAR_IOTLB_DID(domain->domain), iro);
                KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                    DMAR_IOTLB_IAIG_INVLD,
                    ("dmar%d: invalidation failed %jx", unit->iommu.unit,
                    (uintmax_t)iotlbr));
        } else {
                for (; size > 0; base += isize, size -= isize) {
                        am = calc_am(unit, base, size, &isize);
                        dmar_write8(unit, iro, base | am);
                        iotlbr = domain_wait_iotlb_flush(unit,
                            DMAR_IOTLB_IIRG_PAGE |
                            DMAR_IOTLB_DID(domain->domain), iro);
                        KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                            DMAR_IOTLB_IAIG_INVLD,
                            ("dmar%d: PSI invalidation failed "
                            "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
                            unit->iommu.unit, (uintmax_t)iotlbr,
                            (uintmax_t)base, (uintmax_t)size, am));
                        /*
                         * Any non-page granularity covers the whole
                         * guest address space for the domain.
                         */
                        if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
                            DMAR_IOTLB_IAIG_PAGE)
                                break;
                }
        }
        DMAR_UNLOCK(unit);
}
811
812 const struct iommu_domain_map_ops dmar_domain_map_ops = {
813 .map = domain_map_buf,
814 .unmap = domain_unmap_buf,
815 };
816