Lines matching references to va

81 #define smmu_l0_index(va) (((va) >> IOMMU_L0_SHIFT) & IOMMU_L0_ADDR_MASK) argument
82 #define smmu_l1_index(va) (((va) >> IOMMU_L1_SHIFT) & IOMMU_Ln_ADDR_MASK) argument
83 #define smmu_l2_index(va) (((va) >> IOMMU_L2_SHIFT) & IOMMU_Ln_ADDR_MASK) argument
84 #define smmu_l3_index(va) (((va) >> IOMMU_L3_SHIFT) & IOMMU_Ln_ADDR_MASK) argument
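
The four index macros above each pull one table-index field out of a virtual address: shift the VA down to that level's field, then mask off the index bits. The standalone sketch below shows the same decomposition with assumed shift values (39/30/21/12) and a 9-bit per-level mask, as is typical of a 4 KiB AArch64 translation granule; the EX_* names are illustrative stand-ins, not the IOMMU_* constants from the real headers.

	/*
	 * Standalone sketch of what smmu_l0_index()..smmu_l3_index() compute:
	 * shift the VA down to the level's field and mask off the index bits.
	 * The EX_* shifts and the 9-bit mask are assumed values for a 4 KiB
	 * granule; they are stand-ins, not the real IOMMU_* constants.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define	EX_L0_SHIFT	39
	#define	EX_L1_SHIFT	30
	#define	EX_L2_SHIFT	21
	#define	EX_L3_SHIFT	12
	#define	EX_Ln_ADDR_MASK	0x1ffUL		/* 9 index bits per level */

	#define	ex_l0_index(va)	(((va) >> EX_L0_SHIFT) & EX_Ln_ADDR_MASK)
	#define	ex_l1_index(va)	(((va) >> EX_L1_SHIFT) & EX_Ln_ADDR_MASK)
	#define	ex_l2_index(va)	(((va) >> EX_L2_SHIFT) & EX_Ln_ADDR_MASK)
	#define	ex_l3_index(va)	(((va) >> EX_L3_SHIFT) & EX_Ln_ADDR_MASK)

	int
	main(void)
	{
		uint64_t va = 0x40201000UL;	/* arbitrary page-aligned example VA */

		printf("va %#jx -> L0 %ju L1 %ju L2 %ju L3 %ju\n", (uintmax_t)va,
		    (uintmax_t)ex_l0_index(va), (uintmax_t)ex_l1_index(va),
		    (uintmax_t)ex_l2_index(va), (uintmax_t)ex_l3_index(va));
		return (0);
	}
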
87 static void _smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va,
104 smmu_pmap_l0(struct smmu_pmap *pmap, vm_offset_t va) in smmu_pmap_l0() argument
107 return (&pmap->sp_l0[smmu_l0_index(va)]); in smmu_pmap_l0()
111 smmu_pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va) in smmu_pmap_l0_to_l1() argument
116 return (&l1[smmu_l1_index(va)]); in smmu_pmap_l0_to_l1()
120 smmu_pmap_l1(struct smmu_pmap *pmap, vm_offset_t va) in smmu_pmap_l1() argument
124 l0 = smmu_pmap_l0(pmap, va); in smmu_pmap_l1()
128 return (smmu_pmap_l0_to_l1(l0, va)); in smmu_pmap_l1()
132 smmu_pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va) in smmu_pmap_l1_to_l2() argument
142 KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0, in smmu_pmap_l1_to_l2()
143 ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va)); in smmu_pmap_l1_to_l2()
145 ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va)); in smmu_pmap_l1_to_l2()
147 return (&l2p[smmu_l2_index(va)]); in smmu_pmap_l1_to_l2()
151 smmu_pmap_l2(struct smmu_pmap *pmap, vm_offset_t va) in smmu_pmap_l2() argument
155 l1 = smmu_pmap_l1(pmap, va); in smmu_pmap_l2()
159 return (smmu_pmap_l1_to_l2(l1, va)); in smmu_pmap_l2()
163 smmu_pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va) in smmu_pmap_l2_to_l3() argument
174 KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0, in smmu_pmap_l2_to_l3()
175 ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va)); in smmu_pmap_l2_to_l3()
177 ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va)); in smmu_pmap_l2_to_l3()
179 return (&l3p[smmu_l3_index(va)]); in smmu_pmap_l2_to_l3()
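
Each helper above performs one step of the table walk: smmu_pmap_l0() indexes the top-level table held in the pmap, and each _lX_to_lY() helper reads the entry at the current level, follows it to the next-level table (in the kernel the stored address is physical and is reached through the direct map), and indexes that table with the next slice of the VA. The sketch below is a minimal user-space model of that chained descent; the table geometry, the EX_* names, and the single "valid" bit are assumptions for illustration only.

	/*
	 * Toy model of the chained walk done by smmu_pmap_l0() ->
	 * smmu_pmap_l0_to_l1() -> smmu_pmap_l1_to_l2() -> smmu_pmap_l2_to_l3().
	 * Here a "physical address" is simply the table's pointer value, and
	 * tables are 4 KiB-aligned so the low descriptor bits stay free.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>

	#define	EX_ENTRIES	512		/* assumed: 9-bit index per level */
	#define	EX_VALID	1UL		/* assumed "descriptor valid" bit */
	#define	EX_ADDR_MASK	(~0xfffUL)	/* assumed next-table address mask */

	typedef uint64_t ex_pd_entry_t;

	static ex_pd_entry_t *
	ex_alloc_table(void)
	{
		ex_pd_entry_t *t = aligned_alloc(4096, EX_ENTRIES * sizeof(*t));

		memset(t, 0, EX_ENTRIES * sizeof(*t));
		return (t);
	}

	static ex_pd_entry_t *
	ex_next_table(ex_pd_entry_t *table, unsigned idx)
	{
		ex_pd_entry_t pde = table[idx];

		if ((pde & EX_VALID) == 0)
			return (NULL);		/* walk stops: entry not present */
		return ((ex_pd_entry_t *)(uintptr_t)(pde & EX_ADDR_MASK));
	}

	int
	main(void)
	{
		ex_pd_entry_t *l0, *l1, *l2, *l3, *t;
		uint64_t va = 0x40201000UL;	/* arbitrary example VA */

		/* Build a minimal four-level table mapping just this one VA. */
		l0 = ex_alloc_table();
		l1 = ex_alloc_table();
		l2 = ex_alloc_table();
		l3 = ex_alloc_table();
		l0[(va >> 39) & 0x1ff] = (uint64_t)(uintptr_t)l1 | EX_VALID;
		l1[(va >> 30) & 0x1ff] = (uint64_t)(uintptr_t)l2 | EX_VALID;
		l2[(va >> 21) & 0x1ff] = (uint64_t)(uintptr_t)l3 | EX_VALID;
		l3[(va >> 12) & 0x1ff] = 0x80000000UL | EX_VALID;	/* fake leaf PTE */

		/* Descend one level per step, as the chained helpers do. */
		t = ex_next_table(l0, (va >> 39) & 0x1ff);
		t = ex_next_table(t, (va >> 30) & 0x1ff);
		t = ex_next_table(t, (va >> 21) & 0x1ff);
		printf("leaf entry for %#jx: %#jx\n", (uintmax_t)va,
		    (uintmax_t)t[(va >> 12) & 0x1ff]);

		free(l3); free(l2); free(l1); free(l0);
		return (0);
	}
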
187 smmu_pmap_pde(struct smmu_pmap *pmap, vm_offset_t va, int *level) in smmu_pmap_pde() argument
191 l0 = smmu_pmap_l0(pmap, va); in smmu_pmap_pde()
198 l1 = smmu_pmap_l0_to_l1(l0, va); in smmu_pmap_pde()
205 l2 = smmu_pmap_l1_to_l2(l1, va); in smmu_pmap_pde()
222 smmu_pmap_pte(struct smmu_pmap *pmap, vm_offset_t va, int *level) in smmu_pmap_pte() argument
227 l1 = smmu_pmap_l1(pmap, va); in smmu_pmap_pte()
243 l2 = smmu_pmap_l1_to_l2(l1, va); in smmu_pmap_pte()
256 l3 = smmu_pmap_l2_to_l3(l2, va); in smmu_pmap_pte()
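
smmu_pmap_pde() and smmu_pmap_pte() compose those per-level steps into a single lookup that also reports, through the level out-parameter, how deep the walk got before hitting an invalid entry or the leaf level. The toy two-level sketch below shows only that return-the-entry-plus-its-level pattern; the ex_* names and table sizes are hypothetical.

	/*
	 * Sketch of the "entry pointer plus level" out-parameter pattern used
	 * by smmu_pmap_pde() and smmu_pmap_pte(), on a hypothetical two-level
	 * table: if the first-level entry is invalid the walk stops there and
	 * reports level 1, otherwise it returns the leaf slot at level 2.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define	EX_VALID	1UL

	static uint64_t ex_l1[4];	/* hypothetical first-level table */
	static uint64_t ex_l2[4][4];	/* hypothetical leaf tables */

	static uint64_t *
	ex_lookup(unsigned i1, unsigned i2, int *level)
	{
		if ((ex_l1[i1] & EX_VALID) == 0) {
			*level = 1;	/* stopped early: no leaf table below */
			return (&ex_l1[i1]);
		}
		*level = 2;		/* reached the leaf level */
		return (&ex_l2[i1][i2]);
	}

	int
	main(void)
	{
		uint64_t *pte;
		int level;

		ex_l1[2] = EX_VALID;	/* pretend one leaf table exists */
		pte = ex_lookup(2, 3, &level);
		printf("entry %p found at level %d\n", (void *)pte, level);
		pte = ex_lookup(0, 0, &level);
		printf("entry %p found at level %d\n", (void *)pte, level);
		return (0);
	}
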
334 smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m, in smmu_pmap_unwire_l3() argument
340 _smmu_pmap_unwire_l3(pmap, va, m, free); in smmu_pmap_unwire_l3()
347 _smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m, in _smmu_pmap_unwire_l3() argument
359 l0 = smmu_pmap_l0(pmap, va); in _smmu_pmap_unwire_l3()
365 l1 = smmu_pmap_l1(pmap, va); in _smmu_pmap_unwire_l3()
371 l2 = smmu_pmap_l2(pmap, va); in _smmu_pmap_unwire_l3()
380 l1 = smmu_pmap_l1(pmap, va); in _smmu_pmap_unwire_l3()
383 smmu_pmap_unwire_l3(pmap, va, l2pg, free); in _smmu_pmap_unwire_l3()
389 l0 = smmu_pmap_l0(pmap, va); in _smmu_pmap_unwire_l3()
392 smmu_pmap_unwire_l3(pmap, va, l1pg, free); in _smmu_pmap_unwire_l3()
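
The unwire path works in the opposite direction: _smmu_pmap_unwire_l3() drops the reference (wire) count of the page-table page backing a mapping and, once it reaches zero, releases that page, clears the entry in the level above, and unwires the parent page in turn, which is why the function re-reads the L2/L1/L0 entries and calls smmu_pmap_unwire_l3() on the parent pages. The sketch below models only that cascading-release idea; struct ex_ptpage is a toy stand-in for the kernel's vm_page bookkeeping.

	/*
	 * Toy model of the cascading free behind _smmu_pmap_unwire_l3():
	 * each page-table page counts the live entries below it; when the
	 * count drops to zero the page is released and the parent's count
	 * is dropped too, possibly freeing the parent as well.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct ex_ptpage {
		struct ex_ptpage *parent;	/* next level up, NULL at the root */
		int wire_count;			/* number of live entries below */
	};

	static void
	ex_unwire(struct ex_ptpage *pg)
	{

		while (pg != NULL && --pg->wire_count == 0) {
			struct ex_ptpage *parent = pg->parent;

			printf("freeing page-table page %p\n", (void *)pg);
			free(pg);	/* kernel: release the backing vm_page */
			pg = parent;	/* drop the parent's reference in turn */
		}
	}

	int
	main(void)
	{
		struct ex_ptpage *l1 = calloc(1, sizeof(*l1));
		struct ex_ptpage *l2 = calloc(1, sizeof(*l2));

		l1->wire_count = 1;	/* one L2 table hangs off this L1 page */
		l2->parent = l1;
		l2->wire_count = 1;	/* one L3 mapping hangs off this L2 page */

		ex_unwire(l2);		/* frees l2, then cascades up to free l1 */
		return (0);
	}
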
581 pmap_gpu_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa, in pmap_gpu_enter() argument
594 KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space")); in pmap_gpu_enter()
595 KASSERT((va & PAGE_MASK) == 0, ("va is misaligned")); in pmap_gpu_enter()
607 CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa); in pmap_gpu_enter()
616 pde = smmu_pmap_pde(pmap, va, &lvl); in pmap_gpu_enter()
618 l3 = smmu_pmap_l2_to_l3(pde, va); in pmap_gpu_enter()
620 mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va)); in pmap_gpu_enter()
633 l1p = smmu_pmap_l1(pmap, va); in pmap_gpu_enter()
634 l2p = smmu_pmap_l2(pmap, va); in pmap_gpu_enter()
663 pmap_gpu_remove(struct smmu_pmap *pmap, vm_offset_t va) in pmap_gpu_remove() argument
670 KASSERT((va & PAGE_MASK) == 0, ("va is misaligned")); in pmap_gpu_remove()
674 pde = smmu_pmap_pde(pmap, va, &lvl); in pmap_gpu_remove()
680 pte = smmu_pmap_l2_to_l3(pde, va); in pmap_gpu_remove()
697 smmu_pmap_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa, in smmu_pmap_enter() argument
708 KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space")); in smmu_pmap_enter()
710 va = trunc_page(va); in smmu_pmap_enter()
719 CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa); in smmu_pmap_enter()
728 pde = smmu_pmap_pde(pmap, va, &lvl); in smmu_pmap_enter()
730 l3 = smmu_pmap_l2_to_l3(pde, va); in smmu_pmap_enter()
732 mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va)); in smmu_pmap_enter()
760 smmu_pmap_remove(struct smmu_pmap *pmap, vm_offset_t va) in smmu_pmap_remove() argument
768 pte = smmu_pmap_pte(pmap, va, &lvl); in smmu_pmap_remove()
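
smmu_pmap_enter() (and pmap_gpu_enter() above, which follows the same shape) truncates the VA to a page boundary, walks for the page-directory entry, allocates a missing leaf (L3) table with _pmap_alloc_l3() when the walk does not reach an L2 entry, and then writes the leaf PTE; smmu_pmap_remove() looks the PTE up with smmu_pmap_pte() and clears it. The sketch below reduces this to a two-level allocate-on-demand model with assumed geometry and hypothetical ex_* names; the kernel version additionally handles locking, TLB/IOTLB invalidation, and the vm_page allocator.

	/*
	 * Toy allocate-on-demand model of the enter/remove pair: truncate the
	 * VA to a page boundary, allocate the leaf table if it is missing,
	 * then write (or clear) the leaf entry.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	#define	EX_PAGE_MASK	0xfffUL
	#define	EX_VALID	1UL

	struct ex_leaf_table {
		uint64_t pte[512];
	};

	static struct ex_leaf_table *ex_l2[512];	/* second-to-last level */

	static void
	ex_enter(uint64_t va, uint64_t pa)
	{
		unsigned i2, i3;

		va &= ~EX_PAGE_MASK;			/* cf. trunc_page(va) */
		i2 = (va >> 21) & 0x1ff;
		i3 = (va >> 12) & 0x1ff;
		if (ex_l2[i2] == NULL)			/* leaf table missing: allocate */
			ex_l2[i2] = calloc(1, sizeof(struct ex_leaf_table));
		ex_l2[i2]->pte[i3] = (pa & ~EX_PAGE_MASK) | EX_VALID;
	}

	static void
	ex_remove(uint64_t va)
	{
		unsigned i2 = (va >> 21) & 0x1ff, i3 = (va >> 12) & 0x1ff;

		if (ex_l2[i2] != NULL)
			ex_l2[i2]->pte[i3] = 0;		/* clear the leaf entry */
	}

	int
	main(void)
	{
		uint64_t va = 0x40201234UL;		/* arbitrary example VA */

		ex_enter(va, 0x80000000UL);
		printf("pte = %#jx\n",
		    (uintmax_t)ex_l2[(va >> 21) & 0x1ff]->pte[(va >> 12) & 0x1ff]);
		ex_remove(va);
		return (0);
	}
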