/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014-2021 Andrew Turner
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps for ARM SMMUv3 and ARM Mali GPU.
 */
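/*
 * Both users share the same 4-level, 4 KB-granule page table layout.
 * SMMU mappings are installed with stage 1 attributes (smmu_pmap_enter()),
 * while Mali GPU mappings use stage 2 access attributes (pmap_gpu_enter()).
 */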

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>

#include <machine/machdep.h>

#include <arm64/iommu/iommu_pmap.h>
#include <arm64/iommu/iommu_pte.h>

#define	IOMMU_PAGE_SIZE		4096

#define	SMMU_PMAP_LOCK(pmap)	mtx_lock(&(pmap)->sp_mtx)
#define	SMMU_PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->sp_mtx)
#define	SMMU_PMAP_LOCK_ASSERT(pmap, type) \
    mtx_assert(&(pmap)->sp_mtx, (type))

#define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		IOMMU_L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

#define	smmu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
#define	smmu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
#define	smmu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)

#define	smmu_l0_index(va)	(((va) >> IOMMU_L0_SHIFT) & IOMMU_L0_ADDR_MASK)
#define	smmu_l1_index(va)	(((va) >> IOMMU_L1_SHIFT) & IOMMU_Ln_ADDR_MASK)
#define	smmu_l2_index(va)	(((va) >> IOMMU_L2_SHIFT) & IOMMU_Ln_ADDR_MASK)
#define	smmu_l3_index(va)	(((va) >> IOMMU_L3_SHIFT) & IOMMU_Ln_ADDR_MASK)

static vm_page_t _pmap_alloc_l3(struct smmu_pmap *pmap, vm_pindex_t ptepindex);
static void _smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va,
    vm_page_t m, struct spglist *free);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
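 * smmu_pmap_load() is a plain dereference: an aligned 64-bit load is
 * already single-copy atomic on arm64.  The writers use atomic_store_64()
 * so a concurrent table walk never observes a torn entry.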
 */
#define	smmu_pmap_load(table)		(*table)
#define	smmu_pmap_clear(table)		atomic_store_64(table, 0)
#define	smmu_pmap_store(table, entry)	atomic_store_64(table, entry)

/********************/
/* Inline functions */
/********************/

static __inline pd_entry_t *
smmu_pmap_l0(struct smmu_pmap *pmap, vm_offset_t va)
{

	return (&pmap->sp_l0[smmu_l0_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) & ~ATTR_MASK);
	return (&l1[smmu_l1_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l1(struct smmu_pmap *pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = smmu_pmap_l0(pmap, va);
	if ((smmu_pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
		return (NULL);

	return (smmu_pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
smmu_pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	l1 = smmu_pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
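	 * (In this pmap all mapped virtual addresses are expected to be
	 * below VM_MAX_USER_ADDRESS, so the validity check below applies.)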
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[smmu_l2_index(va)]);
}

static __inline pd_entry_t *
smmu_pmap_l2(struct smmu_pmap *pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = smmu_pmap_l1(pmap, va);
	if ((smmu_pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
		return (NULL);

	return (smmu_pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
smmu_pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	l2 = smmu_pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[smmu_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
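 * *level is set to the level of the returned entry: -1 when even the
 * L0 entry is invalid (NULL is returned), otherwise 0, 1 or 2 for the
 * deepest valid table entry.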
 */
static __inline pd_entry_t *
smmu_pmap_pde(struct smmu_pmap *pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = smmu_pmap_l0(pmap, va);
	desc = smmu_pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = smmu_pmap_l0_to_l1(l0, va);
	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = smmu_pmap_l1_to_l2(l1, va);
	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address.  If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
smmu_pmap_pte(struct smmu_pmap *pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = smmu_pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != IOMMU_L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = smmu_pmap_l1_to_l2(l1, va);
	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != IOMMU_L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = smmu_pmap_l2_to_l3(l2, va);
	if ((smmu_pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
		return (NULL);

	return (l3);
}

static __inline int
smmu_pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
}

CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);

#ifdef INVARIANTS
static __inline void
smmu_pmap_resident_count_inc(struct smmu_pmap *pmap, int count)
{

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->sp_resident_count += count;
}

static __inline void
smmu_pmap_resident_count_dec(struct smmu_pmap *pmap, int count)
{

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->sp_resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->sp_resident_count, count));
	pmap->sp_resident_count -= count;
}
#else
static __inline void
smmu_pmap_resident_count_inc(struct smmu_pmap *pmap, int count)
{
}

static __inline void
smmu_pmap_resident_count_dec(struct smmu_pmap *pmap, int count)
{
}
#endif

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
smmu_pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page.  If the reference count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
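 * The pmap lock must be held throughout; the unmap path asserts this.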
 */
static inline boolean_t
smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_smmu_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free)
{

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = smmu_pmap_l0(pmap, va);
		smmu_pmap_clear(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = smmu_pmap_l1(pmap, va);
		smmu_pmap_clear(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = smmu_pmap_l2(pmap, va);
		smmu_pmap_clear(l2);
	}
	smmu_pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = smmu_pmap_l1(pmap, va);
		tl1 = smmu_pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		smmu_pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = smmu_pmap_l0(pmap, va);
		tl0 = smmu_pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		smmu_pmap_unwire_l3(pmap, va, l1pg, free);
	}

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	smmu_pmap_add_delayed_free_list(m, free, TRUE);
}

int
smmu_pmap_pinit(struct smmu_pmap *pmap)
{
	vm_page_t m;

	/*
	 * allocate the l0 page
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
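	/* VM_ALLOC_WAITOK sleeps until a page is available, so m != NULL. */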
	pmap->sp_l0_paddr = VM_PAGE_TO_PHYS(m);
	pmap->sp_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->sp_l0_paddr);

	pmap->sp_resident_count = 0;
	mtx_init(&pmap->sp_mtx, "smmu pmap", NULL, MTX_DEF);

	return (1);
}

/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * returning NULL.  It sleeps only if a lock pointer was given.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid
 * race conditions.
 */
static vm_page_t
_pmap_alloc_l3(struct smmu_pmap *pmap, vm_pindex_t ptepindex)
{
	vm_page_t m, l1pg, l2pg;

	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Because of AArch64's weak memory consistency model, we must have a
	 * barrier here to ensure that the stores for zeroing "m", whether by
	 * pmap_zero_page() or an earlier function, are visible before adding
	 * "m" to the page table.  Otherwise, a page table walk by another
	 * processor's MMU could see the mapping to "m" and a stale, non-zero
	 * PTE within "m".
	 */
	dmb(ishst);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
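	 * The ptepindex encodes the level of the new page: indices at or
	 * above NUL2E + NUL1E select an L1 page linked from the L0 table,
	 * indices at or above NUL2E select an L2 page, and lower indices
	 * select an L3 page.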
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->sp_l0[l0index];
		smmu_pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
	} else if (ptepindex >= NUL2E) {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1;
		pd_entry_t tl0;

		l1index = ptepindex - NUL2E;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->sp_l0[l0index];
		tl0 = smmu_pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
			    == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
			l1pg->ref_count++;
		}

		l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) &~ATTR_MASK);
		l1 = &l1[ptepindex & Ln_ADDR_MASK];
		smmu_pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
	} else {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1, *l2;
		pd_entry_t tl0, tl1;

		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->sp_l0[l0index];
		tl0 = smmu_pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
			tl0 = smmu_pmap_load(l0);
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
		} else {
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
			tl1 = smmu_pmap_load(l1);
			if (tl1 == 0) {
				/* recurse for allocating page dir */
				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
				    == NULL) {
					vm_page_unwire_noq(m);
					vm_page_free_zero(m);
					return (NULL);
				}
			} else {
				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
				l2pg->ref_count++;
			}
		}

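		/* Point the matching L2 entry at the new L3 page. */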
		l2 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l1) &~ATTR_MASK);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		smmu_pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
	}

	smmu_pmap_resident_count_inc(pmap, 1);

	return (m);
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by smmu_pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
smmu_pmap_release(struct smmu_pmap *pmap)
{
	vm_page_t m;

	KASSERT(pmap->sp_resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->sp_resident_count));

	m = PHYS_TO_VM_PAGE(pmap->sp_l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free_zero(m);
	mtx_destroy(&pmap->sp_mtx);
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Add a single Mali GPU entry.  This function does not sleep.
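 * GPU mappings use stage 2 style access attributes, and because the Mali
 * page table walker reads the tables from memory rather than the CPU
 * caches, each updated entry is written back with cpu_dcache_wb_range().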
 */
int
pmap_gpu_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	pd_entry_t *l1p;
	pd_entry_t *l2p;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));

	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);

	if ((prot & VM_PROT_WRITE) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
	if ((prot & VM_PROT_READ) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
	if ((prot & VM_PROT_EXECUTE) == 0)
		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);

	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);

	SMMU_PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = smmu_pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}

		/*
		 * Ensure the newly created l1, l2 are visible to the GPU.
		 * l0 is already visible via a similar call in the panfrost
		 * driver.  The cache entry for l3 is handled below.
		 */

		l1p = smmu_pmap_l1(pmap, va);
		l2p = smmu_pmap_l2(pmap, va);
		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));

		goto retry;
	}

	orig_l3 = smmu_pmap_load(l3);
	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	smmu_pmap_store(l3, new_l3);

	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));

	smmu_pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	SMMU_PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single Mali GPU entry.
 */
int
pmap_gpu_remove(struct smmu_pmap *pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	int lvl;
	int rc;

	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));

	SMMU_PMAP_LOCK(pmap);

	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde == NULL || lvl != 2) {
		rc = KERN_FAILURE;
		goto out;
	}

	pte = smmu_pmap_l2_to_l3(pde, va);

	smmu_pmap_resident_count_dec(pmap, 1);
	smmu_pmap_clear(pte);
	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
	rc = KERN_SUCCESS;

out:
	SMMU_PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Add a single SMMU entry.  This function does not sleep.
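 * Mappings are created execute-never, user-accessible and non-global,
 * with the device memory attribute; write access is granted only when
 * VM_PROT_WRITE is requested.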
 */
int
smmu_pmap_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN;		/* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG;		/* Non global. */

	CTR2(KTR_PMAP, "smmu_pmap_enter: %.16lx -> %.16lx", va, pa);

	SMMU_PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = smmu_pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = smmu_pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		goto retry;
	}

	orig_l3 = smmu_pmap_load(l3);
	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	smmu_pmap_store(l3, new_l3);
	smmu_pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	SMMU_PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single SMMU entry.
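 * Returns KERN_SUCCESS if a valid L3 entry was found and cleared,
 * KERN_FAILURE otherwise.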
 */
int
smmu_pmap_remove(struct smmu_pmap *pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;
	int rc;

	SMMU_PMAP_LOCK(pmap);

	pte = smmu_pmap_pte(pmap, va, &lvl);
	KASSERT(lvl == 3,
	    ("Invalid SMMU pagetable level: %d != 3", lvl));

	if (pte != NULL) {
		smmu_pmap_resident_count_dec(pmap, 1);
		smmu_pmap_clear(pte);
		rc = KERN_SUCCESS;
	} else
		rc = KERN_FAILURE;

	SMMU_PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Remove all the allocated L1, L2 pages from the SMMU pmap.
 * All the L3 entries must be cleared in advance, otherwise
 * this function panics.
 */
void
smmu_pmap_remove_pages(struct smmu_pmap *pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_offset_t sva;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	SMMU_PMAP_LOCK(pmap);

	for (sva = VM_MINUSER_ADDRESS, i = smmu_l0_index(sva);
	    (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) {
		l0e = pmap->sp_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			sva += IOMMU_L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = smmu_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = smmu_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += IOMMU_L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
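
				/*
				 * Any L3 entry still valid here was not
				 * removed by the caller; that is a bug.
				 */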
				for (l = smmu_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += IOMMU_L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic("%s: l3e found for va %jx\n",
					    __func__, sva);
				}

				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				smmu_pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				smmu_pmap_clear(&l2[k]);
			}

			vm_page_unwire_noq(m0);
			smmu_pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			smmu_pmap_clear(&l1[j]);
		}

		smmu_pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		smmu_pmap_clear(&pmap->sp_l0[i]);
	}

	KASSERT(pmap->sp_resident_count == 0,
	    ("Invalid resident count %jd", pmap->sp_resident_count));

	SMMU_PMAP_UNLOCK(pmap);
}