/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014-2021 Andrew Turner
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 *	Manages physical address maps for ARM SMMUv3 and ARM Mali GPU.
 */
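
/*
 * Overview of the interface provided below: a translation context is backed
 * by a struct smmu_pmap, set up with smmu_pmap_pinit() and torn down with
 * smmu_pmap_release().  Single 4K mappings are added and removed with
 * smmu_pmap_enter()/smmu_pmap_remove() (SMMU, stage 1 attributes) and
 * pmap_gpu_enter()/pmap_gpu_remove() (Mali GPU, stage 2 attributes), while
 * smmu_pmap_remove_pages() frees the remaining page table pages once all
 * leaf (L3) entries have been removed.
 */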

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>

#include <machine/machdep.h>

#include <arm64/iommu/iommu_pmap.h>
#include <arm64/iommu/iommu_pte.h>

#define	IOMMU_PAGE_SIZE		4096

#define	SMMU_PMAP_LOCK(pmap)	mtx_lock(&(pmap)->sp_mtx)
#define	SMMU_PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->sp_mtx)
#define	SMMU_PMAP_LOCK_ASSERT(pmap, type) \
    mtx_assert(&(pmap)->sp_mtx, (type))

#define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		IOMMU_L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

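/*
 * Page table pages are tagged with a synthetic pindex encoding the level
 * they back: L3 pages use [0, NUL2E), L2 pages use [NUL2E, NUL2E + NUL1E),
 * and L1 pages use NUL2E + NUL1E and above.  _smmu_pmap_unwire_l3() relies
 * on these ranges to tell the levels apart.
 */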
#define	smmu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
#define	smmu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
#define	smmu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)

#define	smmu_l0_index(va)	(((va) >> IOMMU_L0_SHIFT) & IOMMU_L0_ADDR_MASK)
#define	smmu_l1_index(va)	(((va) >> IOMMU_L1_SHIFT) & IOMMU_Ln_ADDR_MASK)
#define	smmu_l2_index(va)	(((va) >> IOMMU_L2_SHIFT) & IOMMU_Ln_ADDR_MASK)
#define	smmu_l3_index(va)	(((va) >> IOMMU_L3_SHIFT) & IOMMU_Ln_ADDR_MASK)

static vm_page_t _pmap_alloc_l3(struct smmu_pmap *pmap, vm_pindex_t ptepindex);
static void _smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va,
    vm_page_t m, struct spglist *free);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	smmu_pmap_load(table)		(*table)
#define	smmu_pmap_clear(table)		atomic_store_64(table, 0)
#define	smmu_pmap_store(table, entry)	atomic_store_64(table, entry)
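
/*
 * Note: smmu_pmap_load() is a plain dereference; this presumably relies on
 * aligned 64-bit loads being single-copy atomic on AArch64, while updates
 * go through atomic_store_64().
 */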

/********************/
/* Inline functions */
/********************/

static __inline pd_entry_t *
smmu_pmap_l0(struct smmu_pmap *pmap, vm_offset_t va)
{

	return (&pmap->sp_l0[smmu_l0_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) & ~ATTR_MASK);
	return (&l1[smmu_l1_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l1(struct smmu_pmap *pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = smmu_pmap_l0(pmap, va);
	if ((smmu_pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
		return (NULL);

	return (smmu_pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
smmu_pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	l1 = smmu_pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[smmu_l2_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l2(struct smmu_pmap *pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = smmu_pmap_l1(pmap, va);
	if ((smmu_pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
		return (NULL);

	return (smmu_pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
smmu_pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	l2 = smmu_pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[smmu_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
static __inline pd_entry_t *
smmu_pmap_pde(struct smmu_pmap *pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = smmu_pmap_l0(pmap, va);
	desc = smmu_pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = smmu_pmap_l0_to_l1(l0, va);
	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = smmu_pmap_l1_to_l2(l1, va);
	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
smmu_pmap_pte(struct smmu_pmap *pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = smmu_pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != IOMMU_L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = smmu_pmap_l1_to_l2(l1, va);
	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != IOMMU_L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = smmu_pmap_l2_to_l3(l2, va);
	if ((smmu_pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
		return (NULL);

	return (l3);
}

static __inline int
smmu_pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
}

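/*
 * The code below treats L1 and L2 block descriptors as having the same
 * encoding (see smmu_pmap_pte()); the assertion below guards that assumption.
 */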
CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);

#ifdef INVARIANTS
static __inline void
smmu_pmap_resident_count_inc(struct smmu_pmap *pmap, int count)
{

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->sp_resident_count += count;
}

static __inline void
smmu_pmap_resident_count_dec(struct smmu_pmap *pmap, int count)
{

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->sp_resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->sp_resident_count, count));
	pmap->sp_resident_count -= count;
}
#else
static __inline void
smmu_pmap_resident_count_inc(struct smmu_pmap *pmap, int count)
{
}

static __inline void
smmu_pmap_resident_count_dec(struct smmu_pmap *pmap, int count)
{
}
#endif

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
smmu_pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page.  If the reference count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_smmu_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free)
{

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = smmu_pmap_l0(pmap, va);
		smmu_pmap_clear(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = smmu_pmap_l1(pmap, va);
		smmu_pmap_clear(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = smmu_pmap_l2(pmap, va);
		smmu_pmap_clear(l2);
	}
	smmu_pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = smmu_pmap_l1(pmap, va);
		tl1 = smmu_pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		smmu_pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = smmu_pmap_l0(pmap, va);
		tl0 = smmu_pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		smmu_pmap_unwire_l3(pmap, va, l1pg, free);
	}

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	smmu_pmap_add_delayed_free_list(m, free, TRUE);
}

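/*
 * Initialize an SMMU pmap: allocate the level 0 table page and set up the
 * pmap lock.  The allocation may sleep (VM_ALLOC_WAITOK); the function
 * always returns 1.
 */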
int
smmu_pmap_pinit(struct smmu_pmap *pmap)
{
	vm_page_t m;

	/*
	 * allocate the l0 page
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	pmap->sp_l0_paddr = VM_PAGE_TO_PHYS(m);
	pmap->sp_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->sp_l0_paddr);

#ifdef INVARIANTS
	pmap->sp_resident_count = 0;
#endif
	mtx_init(&pmap->sp_mtx, "smmu pmap", NULL, MTX_DEF);

	return (1);
}

/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * returning NULL.  It sleeps only if a lock pointer was given.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid
 * race conditions.
 */
static vm_page_t
_pmap_alloc_l3(struct smmu_pmap *pmap, vm_pindex_t ptepindex)
{
	vm_page_t m, l1pg, l2pg;

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Because of AArch64's weak memory consistency model, we must have a
	 * barrier here to ensure that the stores for zeroing "m", whether by
	 * pmap_zero_page() or an earlier function, are visible before adding
	 * "m" to the page table.  Otherwise, a page table walk by another
	 * processor's MMU could see the mapping to "m" and a stale, non-zero
	 * PTE within "m".
	 */
	dmb(ishst);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->sp_l0[l0index];
		smmu_pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
	} else if (ptepindex >= NUL2E) {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1;
		pd_entry_t tl0;

		l1index = ptepindex - NUL2E;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->sp_l0[l0index];
		tl0 = smmu_pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
			    == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
			l1pg->ref_count++;
		}

		l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) &~ATTR_MASK);
		l1 = &l1[ptepindex & Ln_ADDR_MASK];
		smmu_pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
	} else {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1, *l2;
		pd_entry_t tl0, tl1;

		l1index = ptepindex >> IOMMU_Ln_ENTRIES_SHIFT;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->sp_l0[l0index];
		tl0 = smmu_pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
			tl0 = smmu_pmap_load(l0);
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
		} else {
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
			tl1 = smmu_pmap_load(l1);
			if (tl1 == 0) {
				/* recurse for allocating page dir */
				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
				    == NULL) {
					vm_page_unwire_noq(m);
					vm_page_free_zero(m);
					return (NULL);
				}
			} else {
				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
				l2pg->ref_count++;
			}
		}

		l2 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l1) &~ATTR_MASK);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		smmu_pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
	}

	smmu_pmap_resident_count_inc(pmap, 1);

	return (m);
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by smmu_pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
smmu_pmap_release(struct smmu_pmap *pmap)
{
	vm_page_t m;

	KASSERT(pmap->sp_resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->sp_resident_count));

	m = PHYS_TO_VM_PAGE(pmap->sp_l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free_zero(m);
	mtx_destroy(&pmap->sp_mtx);
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Add a single Mali GPU entry. This function does not sleep.
 */
int
pmap_gpu_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	pd_entry_t *l1p;
	pd_entry_t *l2p;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));

	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);

	if ((prot & VM_PROT_WRITE) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
	if ((prot & VM_PROT_READ) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
	if ((prot & VM_PROT_EXECUTE) == 0)
		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);

	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);

	SMMU_PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = smmu_pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}

		/*
		 * Ensure newly created l1, l2 are visible to GPU.
		 * l0 is already visible by similar call in panfrost driver.
		 * The cache entry for l3 handled below.
		 */

		l1p = smmu_pmap_l1(pmap, va);
		l2p = smmu_pmap_l2(pmap, va);
		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));

		goto retry;
	}

	orig_l3 = smmu_pmap_load(l3);
	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	smmu_pmap_store(l3, new_l3);

	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));

	smmu_pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	SMMU_PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single Mali GPU entry.
 */
int
pmap_gpu_remove(struct smmu_pmap *pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	int lvl;
	int rc;

	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));

	SMMU_PMAP_LOCK(pmap);

	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde == NULL || lvl != 2) {
		rc = KERN_FAILURE;
		goto out;
	}

	pte = smmu_pmap_l2_to_l3(pde, va);

	smmu_pmap_resident_count_dec(pmap, 1);
	smmu_pmap_clear(pte);
	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
	rc = KERN_SUCCESS;

out:
	SMMU_PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Add a single SMMU entry. This function does not sleep.
 */
int
smmu_pmap_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN; /* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG; /* Non global. */

	CTR2(KTR_PMAP, "smmu_pmap_enter: %.16lx -> %.16lx", va, pa);

	SMMU_PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = smmu_pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		goto retry;
	}

	orig_l3 = smmu_pmap_load(l3);
	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	smmu_pmap_store(l3, new_l3);
	smmu_pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	SMMU_PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single SMMU entry.
 */
int
smmu_pmap_remove(struct smmu_pmap *pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;
	int rc;

	SMMU_PMAP_LOCK(pmap);

	pte = smmu_pmap_pte(pmap, va, &lvl);
	KASSERT(lvl == 3,
	    ("Invalid SMMU pagetable level: %d != 3", lvl));

	if (pte != NULL) {
		smmu_pmap_resident_count_dec(pmap, 1);
		smmu_pmap_clear(pte);
		rc = KERN_SUCCESS;
	} else
		rc = KERN_FAILURE;

	SMMU_PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Remove all the allocated L1, L2 pages from SMMU pmap.
 * All the L3 entries must be cleared in advance, otherwise
 * this function panics.
 */
void
smmu_pmap_remove_pages(struct smmu_pmap *pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	SMMU_PMAP_LOCK(pmap);

	for (i = 0; i < IOMMU_L0_ENTRIES; i++) {
		l0e = pmap->sp_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = 0; j < IOMMU_Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = 0; k < IOMMU_Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);

				for (l = 0; l < IOMMU_Ln_ENTRIES; l++) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic(
					  "%s: l3e found (indexes %d %d %d %d)",
					    __func__, i, j, k, l);
				}

				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				smmu_pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				smmu_pmap_clear(&l2[k]);
			}

			vm_page_unwire_noq(m0);
			smmu_pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			smmu_pmap_clear(&l1[j]);
		}

		smmu_pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		smmu_pmap_clear(&pmap->sp_l0[i]);
	}

	KASSERT(pmap->sp_resident_count == 0,
	    ("Invalid resident count %jd", pmap->sp_resident_count));

	SMMU_PMAP_UNLOCK(pmap);
}