/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014-2021 Andrew Turner
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 *	Manages physical address maps for ARM SMMUv3 and ARM Mali GPU.
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>

#include <machine/machdep.h>

#include <arm64/iommu/iommu_pmap.h>
#include <arm64/iommu/iommu_pte.h>

#define	IOMMU_PAGE_SIZE		4096

#define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		IOMMU_L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

#define	iommu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
#define	iommu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
#define	iommu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)
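
/*
 * Page table pages are distinguished by their pindex: [0, NUL2E) are L3
 * table pages, [NUL2E, NUL2E + NUL1E) are L2 table pages, and indices of
 * NUL2E + NUL1E and above are L1 table pages.  _pmap_alloc_l3() and
 * _pmap_unwire_l3() below rely on this layout to determine which level a
 * given page table page belongs to.
 */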

/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS  & ~IOMMU_L0_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS  & ~IOMMU_L0_OFFSET) == DMAP_MAX_ADDRESS);

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex);
static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	pmap_load(table)		(*table)
#define	pmap_clear(table)		atomic_store_64(table, 0)
#define	pmap_store(table, entry)	atomic_store_64(table, entry)
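
/*
 * A plain dereference suffices for pmap_load() because page table entries
 * are naturally aligned 64-bit words, which AArch64 reads and writes
 * single-copy atomically, so neither the CPU nor the SMMU can observe a
 * torn entry.
 */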

/********************/
/* Inline functions */
/********************/

static __inline pd_entry_t *
pmap_l0(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_l0[iommu_l0_index(va)]);
}

static __inline pd_entry_t *
pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
	return (&l1[iommu_l1_index(va)]);
}

static __inline pd_entry_t *
pmap_l1(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = pmap_l0(pmap, va);
	if ((pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
		return (NULL);

	return (pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	l1 = pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[iommu_l2_index(va)]);
}

static __inline pd_entry_t *
pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = pmap_l1(pmap, va);
	if ((pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
		return (NULL);

	return (pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	l2 = pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[iommu_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = pmap_l0(pmap, va);
	desc = pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = pmap_l0_to_l1(l0, va);
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}
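
/*
 * For example, looking up a va that is mapped by an L3 page yields the L2
 * table entry with *level == 2, while a va whose L0 entry is not a valid
 * table yields NULL with *level == -1.
 */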

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != IOMMU_L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != IOMMU_L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = pmap_l2_to_l3(l2, va);
	if ((pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
		return (NULL);

	return (l3);
}

static __inline int
pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
}

CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);

static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;
}

static __inline void
pmap_resident_count_dec(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page.  If the reference count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = pmap_l0(pmap, va);
		pmap_clear(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = pmap_l1(pmap, va);
		pmap_clear(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = pmap_l2(pmap, va);
		pmap_clear(l2);
	}
	pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = pmap_l1(pmap, va);
		tl1 = pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = pmap_l0(pmap, va);
		tl0 = pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l1pg, free);
	}

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	pmap_add_delayed_free_list(m, free, TRUE);
}
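
/*
 * Note that the unwire may cascade: freeing the last L3 table page under an
 * L2 table page drops that L2 page's reference, which in turn may free it
 * and drop the L1 page's reference, releasing the whole branch in one call.
 */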

static int
iommu_pmap_pinit_levels(pmap_t pmap, int levels)
{
	vm_page_t m;

	/*
	 * allocate the l0 page
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);

	vm_radix_init(&pmap->pm_root);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));

	MPASS(levels == 3 || levels == 4);
	pmap->pm_levels = levels;

	/*
	 * Allocate the level 1 entry to use as the root. This will increase
	 * the refcount on the level 1 page so it won't be removed until
	 * pmap_release() is called.
	 */
	if (pmap->pm_levels == 3) {
		PMAP_LOCK(pmap);
		m = _pmap_alloc_l3(pmap, NUL2E + NUL1E);
		PMAP_UNLOCK(pmap);
	}
	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);

	return (1);
}
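
/*
 * With 4 levels pm_ttbr is the physical address of the L0 table itself;
 * with 3 levels it is the preallocated L1 table page, which a 3-level
 * consumer can use as its translation root, while pm_l0 still anchors the
 * full hierarchy so that iommu_pmap_release() can tear it down.
 */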

int
iommu_pmap_pinit(pmap_t pmap)
{

	return (iommu_pmap_pinit_levels(pmap, 4));
}

/*
 * This routine is called if the desired page table page does not exist.
 *
 * The page table page is allocated without VM_ALLOC_WAITOK, so this
 * routine does not sleep.  If the allocation fails it returns NULL and
 * the caller is expected to retry or fail the mapping.
 */
static vm_page_t
_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex)
{
	vm_page_t m, l1pg, l2pg;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Because of AArch64's weak memory consistency model, we must have a
	 * barrier here to ensure that the stores for zeroing "m", whether by
	 * pmap_zero_page() or an earlier function, are visible before adding
	 * "m" to the page table.  Otherwise, a page table walk by another
	 * processor's MMU could see the mapping to "m" and a stale, non-zero
	 * PTE within "m".
	 */
	dmb(ishst);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->pm_l0[l0index];
		pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
	} else if (ptepindex >= NUL2E) {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1;
		pd_entry_t tl0;

		l1index = ptepindex - NUL2E;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
			    == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
			l1pg->ref_count++;
		}

		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
		l1 = &l1[ptepindex & Ln_ADDR_MASK];
		pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
	} else {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1, *l2;
		pd_entry_t tl0, tl1;

		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
			tl0 = pmap_load(l0);
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
		} else {
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
			tl1 = pmap_load(l1);
			if (tl1 == 0) {
				/* recurse for allocating page dir */
				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
				    == NULL) {
					vm_page_unwire_noq(m);
					vm_page_free_zero(m);
					return (NULL);
				}
			} else {
				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
				l2pg->ref_count++;
			}
		}

		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
	}

	pmap_resident_count_inc(pmap, 1);

	return (m);
}
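
/*
 * For example, _pmap_alloc_l3(pmap, iommu_l2_pindex(va)) allocates the L3
 * table page that maps "va", first recursing to allocate any missing L1 and
 * L2 table pages; an intermediate page that already exists instead gains a
 * reference for the newly linked child.
 */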

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
iommu_pmap_release(pmap_t pmap)
{
	boolean_t rv;
	struct spglist free;
	vm_page_t m;

	if (pmap->pm_levels != 4) {
		KASSERT(pmap->pm_stats.resident_count == 1,
		    ("pmap_release: pmap resident count %ld != 1",
		    pmap->pm_stats.resident_count));
		KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID,
		    ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0]));

		SLIST_INIT(&free);
		m = PHYS_TO_VM_PAGE(pmap->pm_ttbr);
		PMAP_LOCK(pmap);
		rv = pmap_unwire_l3(pmap, 0, m, &free);
		PMAP_UNLOCK(pmap);
		MPASS(rv == TRUE);
		vm_page_free_pages_toq(&free, true);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	KASSERT(vm_radix_is_empty(&pmap->pm_root),
	    ("pmap_release: pmap has reserved page table page(s)"));

	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free_zero(m);
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Add a single Mali GPU entry. This function does not sleep.
 */
int
pmap_gpu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	vm_page_t mpte;
	pd_entry_t *l1p;
	pd_entry_t *l2p;
	int lvl;
	int rv;

	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));
	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));

	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);

	if ((prot & VM_PROT_WRITE) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
	if ((prot & VM_PROT_READ) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
	if ((prot & VM_PROT_EXECUTE) == 0)
		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);

	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_gpu_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}

		/*
		 * Ensure the newly created l1 and l2 entries are visible
		 * to the GPU; l0 is already visible due to a similar call
		 * in the panfrost driver.  The cache line for l3 is
		 * handled below.
		 */

		l1p = pmap_l1(pmap, va);
		l2p = pmap_l2(pmap, va);
		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));

		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);

	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));

	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}
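
/*
 * Example (hypothetical caller): a GPU driver mapping a physically
 * contiguous buffer would typically invoke this one page at a time, e.g.
 *
 *	for (off = 0; off < size; off += IOMMU_PAGE_SIZE)
 *		(void)pmap_gpu_enter(pmap, va + off, pa + off,
 *		    VM_PROT_READ | VM_PROT_WRITE, 0);
 */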

/*
 * Remove a single Mali GPU entry.
 */
int
pmap_gpu_remove(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	int lvl;
	int rc;

	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));

	PMAP_LOCK(pmap);

	pde = pmap_pde(pmap, va, &lvl);
	if (pde == NULL || lvl != 2) {
		rc = KERN_FAILURE;
		goto out;
	}

	pte = pmap_l2_to_l3(pde, va);

	pmap_resident_count_dec(pmap, 1);
	pmap_clear(pte);
	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
	rc = KERN_SUCCESS;

out:
	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Add a single SMMU entry. This function does not sleep.
 */
int
pmap_smmu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN; /* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG; /* Non global. */
	CTR2(KTR_PMAP, "pmap_smmu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_smmu_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);
	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}
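
/*
 * Unlike the Mali GPU path above, no explicit data-cache maintenance is
 * done here; the SMMU is assumed to perform coherent (cache-snooping)
 * table walks, so the dsb(ishst) alone is enough to publish the new entry.
 */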

/*
 * Remove a single SMMU entry.
 */
int
pmap_smmu_remove(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;
	int rc;

	PMAP_LOCK(pmap);

	pte = pmap_pte(pmap, va, &lvl);
	KASSERT(lvl == 3,
	    ("Invalid SMMU pagetable level: %d != 3", lvl));

	if (pte != NULL) {
		pmap_resident_count_dec(pmap, 1);
		pmap_clear(pte);
		rc = KERN_SUCCESS;
	} else
		rc = KERN_FAILURE;

	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Remove all the allocated L1 and L2 pages from the SMMU pmap.
 * All L3 entries must already have been cleared; otherwise this
 * function panics.
 */
void
iommu_pmap_remove_pages(pmap_t pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_offset_t sva;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	PMAP_LOCK(pmap);

	for (sva = VM_MINUSER_ADDRESS, i = iommu_l0_index(sva);
	    (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) {
		l0e = pmap->pm_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			sva += IOMMU_L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = iommu_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = iommu_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += IOMMU_L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);

				for (l = iommu_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += IOMMU_L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic("%s: l3e found for va %jx\n",
					    __func__, sva);
				}

				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				pmap_clear(&l2[k]);
			}

			vm_page_unwire_noq(m0);
			pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			pmap_clear(&l1[j]);
		}

		pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		pmap_clear(&pmap->pm_l0[i]);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("Invalid resident count %jd", pmap->pm_stats.resident_count));

	PMAP_UNLOCK(pmap);
}