/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014-2021 Andrew Turner
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 *	Manages physical address maps for ARM SMMUv3 and ARM Mali GPU.
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>

#include <machine/machdep.h>

#include <arm64/iommu/iommu_pmap.h>
#include <arm64/iommu/iommu_pte.h>

#define	IOMMU_PAGE_SIZE		4096

#define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		IOMMU_L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

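/*
 * Page table pages are named by pindex: indices [0, NUL2E) refer to
 * L3 page table pages, [NUL2E, NUL2E + NUL1E) to L2 pages and
 * [NUL2E + NUL1E, NUL2E + NUL1E + NUL0E) to L1 pages.
 */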
#define	smmu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
#define	smmu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
#define	smmu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)

#define	smmu_l0_index(va)	(((va) >> IOMMU_L0_SHIFT) & IOMMU_L0_ADDR_MASK)
#define	smmu_l1_index(va)	(((va) >> IOMMU_L1_SHIFT) & IOMMU_Ln_ADDR_MASK)
#define	smmu_l2_index(va)	(((va) >> IOMMU_L2_SHIFT) & IOMMU_Ln_ADDR_MASK)
#define	smmu_l3_index(va)	(((va) >> IOMMU_L3_SHIFT) & IOMMU_Ln_ADDR_MASK)

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex);
static void _smmu_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	smmu_pmap_load(table)		(*table)
#define	smmu_pmap_clear(table)		atomic_store_64(table, 0)
#define	smmu_pmap_store(table, entry)	atomic_store_64(table, entry)

/********************/
/* Inline functions */
/********************/

static __inline pd_entry_t *
smmu_pmap_l0(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_l0[smmu_l0_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) & ~ATTR_MASK);
	return (&l1[smmu_l1_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l1(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = smmu_pmap_l0(pmap, va);
	if ((smmu_pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
		return (NULL);

	return (smmu_pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
smmu_pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	l1 = smmu_pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[smmu_l2_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = smmu_pmap_l1(pmap, va);
	if ((smmu_pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
		return (NULL);

	return (smmu_pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
smmu_pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	l2 = smmu_pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[smmu_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
static __inline pd_entry_t *
smmu_pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = smmu_pmap_l0(pmap, va);
	desc = smmu_pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = smmu_pmap_l0_to_l1(l0, va);
	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = smmu_pmap_l1_to_l2(l1, va);
	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
smmu_pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = smmu_pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != IOMMU_L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = smmu_pmap_l1_to_l2(l1, va);
	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != IOMMU_L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = smmu_pmap_l2_to_l3(l2, va);
	if ((smmu_pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
		return (NULL);

	return (l3);
}

static __inline int
smmu_pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
}

CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);

static __inline void
smmu_pmap_resident_count_inc(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;
}

static __inline void
smmu_pmap_resident_count_dec(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
smmu_pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page.  If the reference count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
smmu_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_smmu_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_smmu_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = smmu_pmap_l0(pmap, va);
		smmu_pmap_clear(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = smmu_pmap_l1(pmap, va);
		smmu_pmap_clear(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = smmu_pmap_l2(pmap, va);
		smmu_pmap_clear(l2);
	}
	smmu_pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = smmu_pmap_l1(pmap, va);
		tl1 = smmu_pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		smmu_pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = smmu_pmap_l0(pmap, va);
		tl0 = smmu_pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		smmu_pmap_unwire_l3(pmap, va, l1pg, free);
	}

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	smmu_pmap_add_delayed_free_list(m, free, TRUE);
}

int
smmu_pmap_pinit(pmap_t pmap)
{
	vm_page_t m;

	/*
	 * allocate the l0 page
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);

	vm_radix_init(&pmap->pm_root);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));

	pmap->pm_levels = 4;
	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);

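	/*
	 * The VM_ALLOC_WAITOK allocation above cannot fail, so pinit
	 * always reports success.
	 */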
	return (1);
}

/*
 * This routine is called if the desired page table page does not exist.
 *
 * The page table page is allocated without sleeping; if the allocation
 * fails, this routine returns NULL and the caller may retry.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held briefly, only to be released again when
 * a subsequent allocation fails.  This conservative approach is easily
 * argued to avoid race conditions.
 */
static vm_page_t
_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex)
{
	vm_page_t m, l1pg, l2pg;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Because of AArch64's weak memory consistency model, we must have a
	 * barrier here to ensure that the stores for zeroing "m", whether by
	 * pmap_zero_page() or an earlier function, are visible before adding
	 * "m" to the page table.  Otherwise, a page table walk by another
	 * processor's MMU could see the mapping to "m" and a stale, non-zero
	 * PTE within "m".
	 */
	dmb(ishst);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
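		/* The new page is an L1 table; link it from the L0 entry. */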
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->pm_l0[l0index];
		smmu_pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
	} else if (ptepindex >= NUL2E) {
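		/*
		 * The new page is an L2 table; link it from the parent L1
		 * entry, allocating the L1 table page first if necessary.
		 */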
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1;
		pd_entry_t tl0;

		l1index = ptepindex - NUL2E;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = smmu_pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
			    == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
			l1pg->ref_count++;
		}

		l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) &~ATTR_MASK);
		l1 = &l1[ptepindex & Ln_ADDR_MASK];
		smmu_pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
	} else {
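		/*
		 * The new page is an L3 table; link it from the parent L2
		 * entry, allocating the L1 and L2 table pages first if
		 * necessary.
		 */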
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1, *l2;
		pd_entry_t tl0, tl1;

		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = smmu_pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
			tl0 = smmu_pmap_load(l0);
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
		} else {
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
			tl1 = smmu_pmap_load(l1);
			if (tl1 == 0) {
				/* recurse for allocating page dir */
				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
				    == NULL) {
					vm_page_unwire_noq(m);
					vm_page_free_zero(m);
					return (NULL);
				}
			} else {
				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
				l2pg->ref_count++;
			}
		}

		l2 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l1) &~ATTR_MASK);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		smmu_pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
	}

	smmu_pmap_resident_count_inc(pmap, 1);

	return (m);
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
smmu_pmap_release(pmap_t pmap)
{
	vm_page_t m;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	KASSERT(vm_radix_is_empty(&pmap->pm_root),
	    ("pmap_release: pmap has reserved page table page(s)"));

	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free_zero(m);
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Add a single Mali GPU entry. This function does not sleep.
 */
int
pmap_gpu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	pd_entry_t *l1p;
	pd_entry_t *l2p;
	int lvl;
	int rv;

	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));
	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));

	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);

	if ((prot & VM_PROT_WRITE) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
	if ((prot & VM_PROT_READ) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
	if ((prot & VM_PROT_EXECUTE) == 0)
		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);

	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = smmu_pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}

		/*
		 * Ensure the newly created l1 and l2 entries are visible
		 * to the GPU.  The l0 entry is already visible due to a
		 * similar call in the panfrost driver.  The cache entry
		 * for l3 is handled below.
		 */

		l1p = smmu_pmap_l1(pmap, va);
		l2p = smmu_pmap_l2(pmap, va);
		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));

		goto retry;
	}

	orig_l3 = smmu_pmap_load(l3);
	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	smmu_pmap_store(l3, new_l3);

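	/* Write the new l3 entry back so it is visible to the GPU. */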
	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));

	smmu_pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single Mali GPU entry.
 */
int
pmap_gpu_remove(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	int lvl;
	int rc;

	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));

	PMAP_LOCK(pmap);

	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde == NULL || lvl != 2) {
		rc = KERN_FAILURE;
		goto out;
	}

	pte = smmu_pmap_l2_to_l3(pde, va);

	smmu_pmap_resident_count_dec(pmap, 1);
	smmu_pmap_clear(pte);
	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
	rc = KERN_SUCCESS;

out:
	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Add a single SMMU entry. This function does not sleep.
 */
int
smmu_pmap_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN; /* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG; /* Non global. */

	CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = smmu_pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		goto retry;
	}

	orig_l3 = smmu_pmap_load(l3);
	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	smmu_pmap_store(l3, new_l3);
	smmu_pmap_resident_count_inc(pmap, 1);
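	/* Ensure the table update is visible before the mapping is used. */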
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single SMMU entry.
 */
int
smmu_pmap_remove(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;
	int rc;

	PMAP_LOCK(pmap);

	pte = smmu_pmap_pte(pmap, va, &lvl);
	KASSERT(lvl == 3,
	    ("Invalid SMMU pagetable level: %d != 3", lvl));

	if (pte != NULL) {
		smmu_pmap_resident_count_dec(pmap, 1);
		smmu_pmap_clear(pte);
		rc = KERN_SUCCESS;
	} else
		rc = KERN_FAILURE;

	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Remove all the allocated L1 and L2 pages from the SMMU pmap.
 * All L3 entries must have been cleared in advance; otherwise
 * this function panics.
 */
void
smmu_pmap_remove_pages(pmap_t pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_offset_t sva;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	PMAP_LOCK(pmap);

	for (sva = VM_MINUSER_ADDRESS, i = smmu_l0_index(sva);
	    (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) {
		l0e = pmap->pm_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			sva += IOMMU_L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = smmu_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = smmu_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += IOMMU_L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);

				for (l = smmu_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += IOMMU_L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic("%s: l3e found for va %jx\n",
					    __func__, sva);
				}

				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				smmu_pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				smmu_pmap_clear(&l2[k]);
			}

			vm_page_unwire_noq(m0);
			smmu_pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			smmu_pmap_clear(&l1[j]);
		}

		smmu_pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		smmu_pmap_clear(&pmap->pm_l0[i]);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("Invalid resident count %jd", pmap->pm_stats.resident_count));

	PMAP_UNLOCK(pmap);
}
859