1f17c4e38SRuslan Bukin /*- 2f17c4e38SRuslan Bukin * SPDX-License-Identifier: BSD-2-Clause 3f17c4e38SRuslan Bukin * 4f17c4e38SRuslan Bukin * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com> 5f17c4e38SRuslan Bukin * Copyright (c) 2014-2021 Andrew Turner 6f17c4e38SRuslan Bukin * Copyright (c) 2014-2016 The FreeBSD Foundation 7f17c4e38SRuslan Bukin * All rights reserved. 8f17c4e38SRuslan Bukin * 9f17c4e38SRuslan Bukin * This work was supported by Innovate UK project 105694, "Digital Security 10f17c4e38SRuslan Bukin * by Design (DSbD) Technology Platform Prototype". 11f17c4e38SRuslan Bukin * 12f17c4e38SRuslan Bukin * Redistribution and use in source and binary forms, with or without 13f17c4e38SRuslan Bukin * modification, are permitted provided that the following conditions 14f17c4e38SRuslan Bukin * are met: 15f17c4e38SRuslan Bukin * 1. Redistributions of source code must retain the above copyright 16f17c4e38SRuslan Bukin * notice, this list of conditions and the following disclaimer. 17f17c4e38SRuslan Bukin * 2. Redistributions in binary form must reproduce the above copyright 18f17c4e38SRuslan Bukin * notice, this list of conditions and the following disclaimer in the 19f17c4e38SRuslan Bukin * documentation and/or other materials provided with the distribution. 20f17c4e38SRuslan Bukin * 21f17c4e38SRuslan Bukin * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22f17c4e38SRuslan Bukin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23f17c4e38SRuslan Bukin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24f17c4e38SRuslan Bukin * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25f17c4e38SRuslan Bukin * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26f17c4e38SRuslan Bukin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27f17c4e38SRuslan Bukin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28f17c4e38SRuslan Bukin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29f17c4e38SRuslan Bukin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30f17c4e38SRuslan Bukin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31f17c4e38SRuslan Bukin * SUCH DAMAGE. 32f17c4e38SRuslan Bukin */ 33f17c4e38SRuslan Bukin 34f17c4e38SRuslan Bukin #include <sys/cdefs.h> 35f17c4e38SRuslan Bukin __FBSDID("$FreeBSD$"); 36f17c4e38SRuslan Bukin 37f17c4e38SRuslan Bukin /* 38f17c4e38SRuslan Bukin * Manages physical address maps for ARM SMMUv3 and ARM Mali GPU. 39f17c4e38SRuslan Bukin */ 40f17c4e38SRuslan Bukin 41f17c4e38SRuslan Bukin #include "opt_vm.h" 42f17c4e38SRuslan Bukin 43f17c4e38SRuslan Bukin #include <sys/param.h> 44ea07ba11SJesper Schmitz Mouridsen #include <sys/systm.h> 45f17c4e38SRuslan Bukin #include <sys/ktr.h> 46ea07ba11SJesper Schmitz Mouridsen #include <sys/lock.h> 47f17c4e38SRuslan Bukin #include <sys/mutex.h> 48f17c4e38SRuslan Bukin #include <sys/rwlock.h> 49f17c4e38SRuslan Bukin 50f17c4e38SRuslan Bukin #include <vm/vm.h> 51f17c4e38SRuslan Bukin #include <vm/vm_param.h> 52f17c4e38SRuslan Bukin #include <vm/vm_page.h> 53f17c4e38SRuslan Bukin #include <vm/vm_map.h> 54f17c4e38SRuslan Bukin #include <vm/vm_object.h> 55f17c4e38SRuslan Bukin #include <vm/vm_pageout.h> 56f17c4e38SRuslan Bukin #include <vm/vm_radix.h> 57f17c4e38SRuslan Bukin 58f17c4e38SRuslan Bukin #include <machine/machdep.h> 59f17c4e38SRuslan Bukin 60f17c4e38SRuslan Bukin #include <arm64/iommu/iommu_pmap.h> 61f17c4e38SRuslan Bukin #include <arm64/iommu/iommu_pte.h> 62f17c4e38SRuslan Bukin 
/* The SMMU page tables use a fixed 4 KiB granule. */
#define	IOMMU_PAGE_SIZE		4096

/* Number of entries in one 4 KiB table page at each level. */
#define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))

/* Total number of entries reachable below each level. */
#define	NUL0E		IOMMU_L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

/*
 * Map a virtual address to the vm_pindex_t of the page table page that
 * maps it at each level.  The pindex space is partitioned: L3 table
 * pages occupy [0, NUL2E), L2 table pages [NUL2E, NUL2E + NUL1E), and
 * L1 table pages start at NUL2E + NUL1E.  _pmap_unwire_l3() relies on
 * this partitioning to tell which level a page belongs to.
 */
#define	iommu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
#define	iommu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
#define	iommu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)

/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS & ~IOMMU_L0_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~IOMMU_L0_OFFSET) == DMAP_MAX_ADDRESS);

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex);
static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	pmap_load(table)		(*table)
#define	pmap_clear(table)		atomic_store_64(table, 0)
#define	pmap_store(table, entry)	atomic_store_64(table, entry)

/********************/
/* Inline functions */
/********************/

/* Return a pointer to the L0 (root) table entry covering "va". */
static __inline pd_entry_t *
pmap_l0(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_l0[iommu_l0_index(va)]);
}

/*
 * Given a pointer to an L0 table entry, return a pointer to the L1
 * entry covering "va" inside the table page the L0 entry points to.
 * The table page is accessed through the direct map.
 */
static __inline pd_entry_t *
pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
	return (&l1[iommu_l1_index(va)]);
}

/*
 * Return a pointer to the L1 entry covering "va", or NULL if the L0
 * entry is not a valid table descriptor.
 */
static __inline pd_entry_t *
pmap_l1(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = pmap_l0(pmap, va);
	if ((pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
		return (NULL);

	return (pmap_l0_to_l1(l0, va));
}

/*
 * Given a pointer to an L1 table entry, return a pointer to the L2
 * entry covering "va".  The L1 entry must be a table descriptor; this
 * is asserted, not checked.
 */
static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	l1 = pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[iommu_l2_index(va)]);
}

/*
 * Return a pointer to the L2 entry covering "va", or NULL if the L1
 * entry is not a valid table descriptor.
 *
 * NOTE(review): pmap_l1() can return NULL (invalid L0 entry), which
 * would be dereferenced by pmap_load() below; callers appear to invoke
 * this only when the upper levels are known valid — confirm.
 */
static __inline pd_entry_t *
pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = pmap_l1(pmap, va);
	if ((pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
		return (NULL);

	return (pmap_l1_to_l2(l1, va));
}

/*
 * Given a pointer to an L2 table entry, return a pointer to the L3
 * entry covering "va".  The L2 entry must be a table descriptor; this
 * is asserted, not checked.
 */
static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	l2 = pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[iommu_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 * On failure *level is set to the first invalid level (-1 means even
 * the L0 entry was not a table descriptor).
 */
static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = pmap_l0(pmap, va);
	desc = pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = pmap_l0_to_l1(l0, va);
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L1_BLOCK) {
		/* 1 GiB block mapping terminates the walk at L1. */
		*level = 1;
		return (l1);
	}

	if (desc != IOMMU_L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L2_BLOCK) {
		/* 2 MiB block mapping terminates the walk at L2. */
		*level = 2;
		return (l2);
	}

	if (desc != IOMMU_L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = pmap_l2_to_l3(l2, va);
	if ((pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
		return (NULL);

	return (l3);
}

/* Return non-zero if "l3" is a valid L3 page descriptor. */
static __inline int
pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
}

CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);

/* Bump the pmap's resident page count; the pmap lock must be held. */
static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;
}

/* Drop the pmap's resident page count; asserts against underflow. */
static __inline void
pmap_resident_count_dec(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed. Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	/* Record whether the page is known to still be zero-filled. */
	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page.  If the reference count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

/*
 * Unmap the page table page "m" (its reference count has reached zero),
 * recursively unwire the parent table page that mapped it, and queue
 * "m" for deferred freeing.  The level of "m" is deduced from its
 * pindex (see the iommu_lN_pindex() partitioning above).
 */
static void
_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = pmap_l0(pmap, va);
		pmap_clear(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = pmap_l1(pmap, va);
		pmap_clear(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = pmap_l2(pmap, va);
		pmap_clear(l2);
	}
	pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = pmap_l1(pmap, va);
		tl1 = pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = pmap_l0(pmap, va);
		tl0 = pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l1pg, free);
	}

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	pmap_add_delayed_free_list(m, free, TRUE);
}

/*
 * Initialize the pmap with the requested number of translation levels.
 * With levels == 3 an L1 table page is pre-allocated and used as the
 * translation table root (pm_ttbr); with levels == 4 the L0 page is the
 * root.  Always returns 1.
 */
static int
iommu_pmap_pinit_levels(pmap_t pmap, int levels)
{
	vm_page_t m;

	/*
	 * allocate the l0 page
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);

	vm_radix_init(&pmap->pm_root);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));

	MPASS(levels == 3 || levels == 4);
	pmap->pm_levels = levels;

	/*
	 * Allocate the level 1 entry to use as the root. This will increase
	 * the refcount on the level 1 page so it won't be removed until
	 * pmap_release() is called.
	 */
	if (pmap->pm_levels == 3) {
		PMAP_LOCK(pmap);
		m = _pmap_alloc_l3(pmap, NUL2E + NUL1E);
		PMAP_UNLOCK(pmap);
	}
	/* "m" is the L1 page for 3 levels, the L0 page for 4 levels. */
	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);

	return (1);
}

/* Initialize a pmap with the default four translation levels. */
int
iommu_pmap_pinit(pmap_t pmap)
{

	return (iommu_pmap_pinit_levels(pmap, 4));
}

/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * returning NULL.  It sleeps only if a lock pointer was given.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid
 * race conditions.
 */
static vm_page_t
_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex)
{
	vm_page_t m, l1pg, l2pg;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Because of AArch64's weak memory consistency model, we must have a
	 * barrier here to ensure that the stores for zeroing "m", whether by
	 * pmap_zero_page() or an earlier function, are visible before adding
	 * "m" to the page table. Otherwise, a page table walk by another
	 * processor's MMU could see the mapping to "m" and a stale, non-zero
	 * PTE within "m".
	 */
	dmb(ishst);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
		/* L1 table page: link it from the L0 (root) table. */
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->pm_l0[l0index];
		pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
	} else if (ptepindex >= NUL2E) {
		/* L2 table page: ensure the parent L1 exists, then link. */
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1;
		pd_entry_t tl0;

		l1index = ptepindex - NUL2E;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
			    == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			/* Parent exists; account this new child in it. */
			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
			l1pg->ref_count++;
		}

		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
		l1 = &l1[ptepindex & Ln_ADDR_MASK];
		pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
	} else {
		/* L3 table page: ensure L1 and L2 parents exist, then link. */
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1, *l2;
		pd_entry_t tl0, tl1;

		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
			tl0 = pmap_load(l0);
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
		} else {
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
			tl1 = pmap_load(l1);
			if (tl1 == 0) {
				/* recurse for allocating page dir */
				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
				    == NULL) {
					vm_page_unwire_noq(m);
					vm_page_free_zero(m);
					return (NULL);
				}
			} else {
				/* Parent exists; account this new child. */
				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
				l2pg->ref_count++;
			}
		}

		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
	}

	pmap_resident_count_inc(pmap, 1);

	return (m);
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
iommu_pmap_release(pmap_t pmap)
{
	boolean_t rv;
	struct spglist free;
	vm_page_t m;

	if (pmap->pm_levels != 4) {
		/*
		 * A 3-level pmap holds an extra reference on the L1 root
		 * page (see iommu_pmap_pinit_levels()); drop it here so
		 * the table tree is torn down.
		 */
		KASSERT(pmap->pm_stats.resident_count == 1,
		    ("pmap_release: pmap resident count %ld != 0",
		    pmap->pm_stats.resident_count));
		KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID,
		    ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0]));

		SLIST_INIT(&free);
		m = PHYS_TO_VM_PAGE(pmap->pm_ttbr);
		PMAP_LOCK(pmap);
		rv = pmap_unwire_l3(pmap, 0, m, &free);
		PMAP_UNLOCK(pmap);
		MPASS(rv == TRUE);
		vm_page_free_pages_toq(&free, true);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	KASSERT(vm_radix_is_empty(&pmap->pm_root),
	    ("pmap_release: pmap has reserved page table page(s)"));

	/* Finally free the L0 (root) page itself. */
	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free_zero(m);
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Add a single Mali GPU entry. This function does not sleep.
 *
 * Unlike the CPU MMU, the GPU reads the tables through the data cache,
 * so every modified descriptor is written back with
 * cpu_dcache_wb_range() before it can be observed.
 */
int
pmap_gpu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	vm_page_t mpte;
	pd_entry_t *l1p;
	pd_entry_t *l2p;
	int lvl;
	int rv;

	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));
	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));

	/* Build the stage-2 L3 descriptor with inner-shareable attrs. */
	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);

	if ((prot & VM_PROT_WRITE) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
	if ((prot & VM_PROT_READ) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
	if ((prot & VM_PROT_EXECUTE) == 0)
		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);

	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}

		/*
		 * Ensure newly created l1, l2 are visible to GPU.
		 * l0 is already visible by similar call in panfrost driver.
		 * The cache entry for l3 handled below.
		 */

		l1p = pmap_l1(pmap, va);
		l2p = pmap_l2(pmap, va);
		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));

		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);

	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));

	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single Mali GPU entry.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE when "va" is not mapped by a
 * full table walk down to L3.
 */
int
pmap_gpu_remove(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	int lvl;
	int rc;

	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));

	PMAP_LOCK(pmap);

	pde = pmap_pde(pmap, va, &lvl);
	if (pde == NULL || lvl != 2) {
		rc = KERN_FAILURE;
		goto out;
	}

	pte = pmap_l2_to_l3(pde, va);

	pmap_resident_count_dec(pmap, 1);
	pmap_clear(pte);
	/* Write the cleared descriptor back so the GPU observes it. */
	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
	rc = KERN_SUCCESS;

out:
	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Add a single SMMU entry. This function does not sleep.
 */
int
pmap_smmu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	/*
	 * Stage-1 page entry: device memory attribute index,
	 * user-accessible, execute-never, non-global; read-only unless
	 * VM_PROT_WRITE was requested.
	 */
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN; /* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG; /* Non global. */

	CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		/* Re-walk: the table page now exists. */
		goto retry;
	}

	orig_l3 = pmap_load(l3);
	/* Overwriting an existing valid mapping would be a caller bug. */
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);
	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single SMMU entry.
779f17c4e38SRuslan Bukin */ 780f17c4e38SRuslan Bukin int 781f17c4e38SRuslan Bukin pmap_smmu_remove(pmap_t pmap, vm_offset_t va) 782f17c4e38SRuslan Bukin { 783f17c4e38SRuslan Bukin pt_entry_t *pte; 784f17c4e38SRuslan Bukin int lvl; 785f17c4e38SRuslan Bukin int rc; 786f17c4e38SRuslan Bukin 787f17c4e38SRuslan Bukin PMAP_LOCK(pmap); 788f17c4e38SRuslan Bukin 789f17c4e38SRuslan Bukin pte = pmap_pte(pmap, va, &lvl); 790f17c4e38SRuslan Bukin KASSERT(lvl == 3, 791f17c4e38SRuslan Bukin ("Invalid SMMU pagetable level: %d != 3", lvl)); 792f17c4e38SRuslan Bukin 793f17c4e38SRuslan Bukin if (pte != NULL) { 794f17c4e38SRuslan Bukin pmap_resident_count_dec(pmap, 1); 795f17c4e38SRuslan Bukin pmap_clear(pte); 796f17c4e38SRuslan Bukin rc = KERN_SUCCESS; 797f17c4e38SRuslan Bukin } else 798f17c4e38SRuslan Bukin rc = KERN_FAILURE; 799f17c4e38SRuslan Bukin 800f17c4e38SRuslan Bukin PMAP_UNLOCK(pmap); 801f17c4e38SRuslan Bukin 802f17c4e38SRuslan Bukin return (rc); 803f17c4e38SRuslan Bukin } 804f17c4e38SRuslan Bukin 805f17c4e38SRuslan Bukin /* 806f17c4e38SRuslan Bukin * Remove all the allocated L1, L2 pages from SMMU pmap. 807f17c4e38SRuslan Bukin * All the L3 entires must be cleared in advance, otherwise 808f17c4e38SRuslan Bukin * this function panics. 
 */
void
iommu_pmap_remove_pages(pmap_t pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_offset_t sva;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	PMAP_LOCK(pmap);

	/*
	 * Walk every valid L0 slot of the user address range, descending
	 * through L1 and L2 to free each page-table page.  'sva' tracks
	 * the virtual address covered so the per-level index helpers
	 * start at the right slot after a skip.
	 */
	for (sva = VM_MINUSER_ADDRESS, i = iommu_l0_index(sva);
	    (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) {
		l0e = pmap->pm_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			sva += IOMMU_L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = iommu_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			/* L1 block mappings carry no lower-level table. */
			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = iommu_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += IOMMU_L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);

				/*
				 * The caller must already have removed all
				 * leaf mappings; any surviving valid L3
				 * entry is a fatal leak.
				 */
				for (l = iommu_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += IOMMU_L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic("%s: l3e found for va %jx\n",
					    __func__, sva);
				}

				/*
				 * Free the empty L3 table page.
				 * NOTE(review): m1 is unwired once per L2
				 * entry freed here — presumably its wire
				 * count tracks the number of valid entries
				 * in the L2 page; confirm against
				 * _pmap_alloc_l3().
				 */
				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				pmap_clear(&l2[k]);
			}

			/* Free the now-empty L2 table page. */
			vm_page_unwire_noq(m0);
			pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			pmap_clear(&l1[j]);
		}

		/* Free the now-empty L1 table page and clear the L0 slot. */
		pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		pmap_clear(&pmap->pm_l0[i]);
	}

	/* Everything mapped by this pmap must be gone now. */
	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("Invalid resident count %jd", pmap->pm_stats.resident_count));

	PMAP_UNLOCK(pmap);
}