xref: /freebsd/sys/arm64/arm64/pmap.c (revision 53b70c86)
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2003 Peter Wemm
9  * All rights reserved.
10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11  * All rights reserved.
12  * Copyright (c) 2014 Andrew Turner
13  * All rights reserved.
14  * Copyright (c) 2014-2016 The FreeBSD Foundation
15  * All rights reserved.
16  *
17  * This code is derived from software contributed to Berkeley by
18  * the Systems Programming Group of the University of Utah Computer
19  * Science Department and William Jolitz of UUNET Technologies Inc.
20  *
21  * This software was developed by Andrew Turner under sponsorship from
22  * the FreeBSD Foundation.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  * 3. All advertising materials mentioning features or use of this software
33  *    must display the following acknowledgement:
34  *	This product includes software developed by the University of
35  *	California, Berkeley and its contributors.
36  * 4. Neither the name of the University nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  *
52  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
53  */
54 /*-
55  * Copyright (c) 2003 Networks Associates Technology, Inc.
56  * All rights reserved.
57  *
58  * This software was developed for the FreeBSD Project by Jake Burkholder,
59  * Safeport Network Services, and Network Associates Laboratories, the
60  * Security Research Division of Network Associates, Inc. under
61  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
62  * CHATS research program.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83  * SUCH DAMAGE.
84  */
85 
86 #include <sys/cdefs.h>
87 __FBSDID("$FreeBSD$");
88 
89 /*
90  *	Manages physical address maps.
91  *
92  *	Since the information managed by this module is
93  *	also stored by the logical address mapping module,
94  *	this module may throw away valid virtual-to-physical
95  *	mappings at almost any time.  However, invalidations
96  *	of virtual-to-physical mappings must be done as
97  *	requested.
98  *
99  *	In order to cope with hardware architectures which
100  *	make virtual-to-physical map invalidations expensive,
101  *	this module may delay invalidation or reduced-protection
102  *	operations until such time as they are actually
103  *	necessary.  This module is given full information as
104  *	to which processors are currently using which maps,
105  *	and as to when physical maps must be made correct.
106  */
107 
108 #include "opt_vm.h"
109 
110 #include <sys/param.h>
111 #include <sys/bitstring.h>
112 #include <sys/bus.h>
113 #include <sys/systm.h>
114 #include <sys/kernel.h>
115 #include <sys/ktr.h>
116 #include <sys/limits.h>
117 #include <sys/lock.h>
118 #include <sys/malloc.h>
119 #include <sys/mman.h>
120 #include <sys/msgbuf.h>
121 #include <sys/mutex.h>
122 #include <sys/physmem.h>
123 #include <sys/proc.h>
124 #include <sys/rwlock.h>
125 #include <sys/sbuf.h>
126 #include <sys/sx.h>
127 #include <sys/vmem.h>
128 #include <sys/vmmeter.h>
129 #include <sys/sched.h>
130 #include <sys/sysctl.h>
131 #include <sys/_unrhdr.h>
132 #include <sys/smp.h>
133 
134 #include <vm/vm.h>
135 #include <vm/vm_param.h>
136 #include <vm/vm_kern.h>
137 #include <vm/vm_page.h>
138 #include <vm/vm_map.h>
139 #include <vm/vm_object.h>
140 #include <vm/vm_extern.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_pager.h>
143 #include <vm/vm_phys.h>
144 #include <vm/vm_radix.h>
145 #include <vm/vm_reserv.h>
146 #include <vm/vm_dumpset.h>
147 #include <vm/uma.h>
148 
149 #include <machine/machdep.h>
150 #include <machine/md_var.h>
151 #include <machine/pcb.h>
152 
153 #define	PMAP_ASSERT_STAGE1(pmap)	MPASS((pmap)->pm_stage == PM_STAGE1)
154 #define	PMAP_ASSERT_STAGE2(pmap)	MPASS((pmap)->pm_stage == PM_STAGE2)
155 
156 #define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
157 #define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
158 #define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
159 #define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))
160 
161 #define	NUL0E		L0_ENTRIES
162 #define	NUL1E		(NUL0E * NL1PG)
163 #define	NUL2E		(NUL1E * NL2PG)
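
/*
 * For reference, a worked example assuming the usual 4 KB granule with
 * 8-byte table entries: each table page holds PAGE_SIZE / 8 = 512 entries,
 * so NL0PG = NL1PG = NL2PG = NL3PG = 512.  With L0_ENTRIES = 512 this gives
 * NUL0E = 512, NUL1E = 512 * 512 = 262144 and NUL2E = 512^3 = 134217728,
 * i.e. the number of L1, L2 and L3 page table pages, respectively, needed
 * to map a full 48-bit address space.
 */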
164 
165 #if !defined(DIAGNOSTIC)
166 #ifdef __GNUC_GNU_INLINE__
167 #define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
168 #else
169 #define PMAP_INLINE	extern inline
170 #endif
171 #else
172 #define PMAP_INLINE
173 #endif
174 
175 #ifdef PV_STATS
176 #define PV_STAT(x)	do { x ; } while (0)
177 #else
178 #define PV_STAT(x)	do { } while (0)
179 #endif
180 
181 #define	pmap_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> L0_SHIFT))
182 #define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
183 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
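
/*
 * A sketch of how these indices are used (compare the range checks in
 * _pmap_alloc_l3() and _pmap_unwire_l3() below): every page table page gets
 * a pindex in one flat namespace.  Indices [0, NUL2E) name L3 table pages
 * (one per L2_SIZE, i.e. 2 MB with the 4 KB granule), [NUL2E, NUL2E + NUL1E)
 * name L2 table pages, and [NUL2E + NUL1E, NUL2E + NUL1E + NUL0E) name L1
 * table pages.  For example, pmap_l2_pindex(va) == va >> L2_SHIFT is the
 * pindex of the L3 table page that maps va.
 */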
184 
185 static struct md_page *
186 pa_to_pvh(vm_paddr_t pa)
187 {
188 	struct vm_phys_seg *seg;
189 	int segind;
190 
191 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
192 		seg = &vm_phys_segs[segind];
193 		if (pa >= seg->start && pa < seg->end)
194 			return ((struct md_page *)seg->md_first +
195 			    pmap_l2_pindex(pa) - pmap_l2_pindex(seg->start));
196 	}
197 	panic("pa 0x%jx not within vm_phys_segs", (uintmax_t)pa);
198 }
199 
200 static struct md_page *
201 page_to_pvh(vm_page_t m)
202 {
203 	struct vm_phys_seg *seg;
204 
205 	seg = &vm_phys_segs[m->segind];
206 	return ((struct md_page *)seg->md_first +
207 	    pmap_l2_pindex(VM_PAGE_TO_PHYS(m)) - pmap_l2_pindex(seg->start));
208 }
209 
210 #define	NPV_LIST_LOCKS	MAXCPU
211 
212 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
213 			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
214 
215 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
216 	struct rwlock **_lockp = (lockp);		\
217 	struct rwlock *_new_lock;			\
218 							\
219 	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
220 	if (_new_lock != *_lockp) {			\
221 		if (*_lockp != NULL)			\
222 			rw_wunlock(*_lockp);		\
223 		*_lockp = _new_lock;			\
224 		rw_wlock(*_lockp);			\
225 	}						\
226 } while (0)
227 
228 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
229 			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
230 
231 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
232 	struct rwlock **_lockp = (lockp);		\
233 							\
234 	if (*_lockp != NULL) {				\
235 		rw_wunlock(*_lockp);			\
236 		*_lockp = NULL;				\
237 	}						\
238 } while (0)
239 
240 #define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
241 			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
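
/*
 * A minimal usage sketch (not a complete function) of how callers in this
 * file juggle these locks while walking mappings; the variable names are
 * illustrative only:
 *
 *	struct rwlock *lock = NULL;
 *	...
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	... manipulate m's pv list with the matching lock write-held ...
 *	if (lock != NULL)
 *		rw_wunlock(lock);
 */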
242 
243 /*
244  * The presence of this flag indicates that the mapping is writeable.
245  * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
246  * it is dirty.  This flag may only be set on managed mappings.
247  *
248  * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
249  * as a software managed bit.
250  */
251 #define	ATTR_SW_DBM	ATTR_DBM
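
/*
 * Concretely, combining the rule above with pmap_pte_dirty() below, a
 * managed, writeable stage 1 mapping is encoded as:
 *
 *	clean:	ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)	(first write faults)
 *	dirty:	ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RW)
 *
 * so the permission fault that flips the AP bits is what records the
 * modification.
 */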
252 
253 struct pmap kernel_pmap_store;
254 
255 /* Used for mapping ACPI memory before VM is initialized */
256 #define	PMAP_PREINIT_MAPPING_COUNT	32
257 #define	PMAP_PREINIT_MAPPING_SIZE	(PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
258 static vm_offset_t preinit_map_va;	/* Start VA of pre-init mapping space */
259 static int vm_initialized = 0;		/* No need to use pre-init maps when set */
260 
261 /*
262  * Reserve a few L2 blocks starting from 'preinit_map_va' pointer.
263  * Always map entire L2 block for simplicity.
264  * VA of L2 block = preinit_map_va + i * L2_SIZE
265  */
266 static struct pmap_preinit_mapping {
267 	vm_paddr_t	pa;
268 	vm_offset_t	va;
269 	vm_size_t	size;
270 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
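
/*
 * With the values above and the 4 KB granule (L2_SIZE == 2 MB), the pre-init
 * window spans 32 * 2 MB == 64 MB of KVA; slot i covers the range
 * [preinit_map_va + i * L2_SIZE, preinit_map_va + (i + 1) * L2_SIZE).
 */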
271 
272 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
273 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
274 vm_offset_t kernel_vm_end = 0;
275 
276 /*
277  * Data for the pv entry allocation mechanism.
278  */
279 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
280 static struct mtx pv_chunks_mutex;
281 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
282 static struct md_page *pv_table;
283 static struct md_page pv_dummy;
284 
285 vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
286 vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
287 vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */
288 
289 /* This code assumes all L1 DMAP entries will be used */
290 CTASSERT((DMAP_MIN_ADDRESS  & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
291 CTASSERT((DMAP_MAX_ADDRESS  & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
292 
293 #define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
294 extern pt_entry_t pagetable_dmap[];
295 
296 #define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
297 static vm_paddr_t physmap[PHYSMAP_SIZE];
298 static u_int physmap_idx;
299 
300 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
301     "VM/pmap parameters");
302 
303 /*
304  * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
305  * it has currently allocated to a pmap, a cursor ("asid_next") to
306  * optimize its search for a free ASID in the bit vector, and an epoch number
307  * ("asid_epoch") to indicate when it has reclaimed all previously allocated
308  * ASIDs that are not currently active on a processor.
309  *
310  * The current epoch number is always in the range [0, INT_MAX).  Negative
311  * numbers and INT_MAX are reserved for special cases that are described
312  * below.
313  */
314 struct asid_set {
315 	int asid_bits;
316 	bitstr_t *asid_set;
317 	int asid_set_size;
318 	int asid_next;
319 	int asid_epoch;
320 	struct mtx asid_set_mutex;
321 };
322 
323 static struct asid_set asids;
324 static struct asid_set vmids;
325 
326 static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
327     "ASID allocator");
328 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asids.asid_bits, 0,
329     "The number of bits in an ASID");
330 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asids.asid_next, 0,
331     "The last allocated ASID plus one");
332 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asids.asid_epoch, 0,
333     "The current epoch number");
334 
335 static SYSCTL_NODE(_vm_pmap, OID_AUTO, vmid, CTLFLAG_RD, 0, "VMID allocator");
336 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, bits, CTLFLAG_RD, &vmids.asid_bits, 0,
337     "The number of bits in a VMID");
338 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, next, CTLFLAG_RD, &vmids.asid_next, 0,
339     "The last allocated VMID plus one");
340 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0,
341     "The current epoch number");
342 
343 void (*pmap_clean_stage2_tlbi)(void);
344 void (*pmap_invalidate_vpipt_icache)(void);
345 
346 /*
347  * A pmap's cookie encodes an ASID and epoch number.  Cookies for reserved
348  * ASIDs have a negative epoch number, specifically, INT_MIN.  Cookies for
349  * dynamically allocated ASIDs have a non-negative epoch number.
350  *
351  * An invalid ASID is represented by -1.
352  *
353  * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN),
354  * which indicates that an ASID should never be allocated to the pmap, and
355  * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be
356  * allocated when the pmap is next activated.
357  */
358 #define	COOKIE_FROM(asid, epoch)	((long)((u_int)(asid) |	\
359 					    ((u_long)(epoch) << 32)))
360 #define	COOKIE_TO_ASID(cookie)		((int)(cookie))
361 #define	COOKIE_TO_EPOCH(cookie)		((int)((u_long)(cookie) >> 32))
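
/*
 * A worked example, with values chosen purely for illustration:
 * COOKIE_FROM(42, 7) packs the ASID into the low 32 bits and the epoch into
 * the high 32 bits, so COOKIE_TO_ASID() recovers 42 and COOKIE_TO_EPOCH()
 * recovers 7.  The kernel pmap is initialized with COOKIE_FROM(-1, INT_MIN)
 * in pmap_bootstrap() ("never allocate an ASID"), while pmap_pinit_stage()
 * starts a pmap with COOKIE_FROM(-1, INT_MAX) ("allocate on next
 * activation").
 */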
362 
363 static int superpages_enabled = 1;
364 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
365     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
366     "Are large page mappings enabled?");
367 
368 /*
369  * Internal flags for pmap_enter()'s helper functions.
370  */
371 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
372 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
373 
374 static void	free_pv_chunk(struct pv_chunk *pc);
375 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
376 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
377 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
378 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
379 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
380 		    vm_offset_t va);
381 
382 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
383 static bool pmap_activate_int(pmap_t pmap);
384 static void pmap_alloc_asid(pmap_t pmap);
385 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
386 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
387 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
388     vm_offset_t va, struct rwlock **lockp);
389 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
390 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
391     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
392 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
393     u_int flags, vm_page_t m, struct rwlock **lockp);
394 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
395     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
396 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
397     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
398 static void pmap_reset_asid_set(pmap_t pmap);
399 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
400     vm_page_t m, struct rwlock **lockp);
401 
402 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
403 		struct rwlock **lockp);
404 
405 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
406     struct spglist *free);
407 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
408 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
409 
410 /*
411  * These load the old table data and store the new value.
412  * They need to be atomic as the System MMU may write to the table at
413  * the same time as the CPU.
414  */
415 #define	pmap_clear(table)		atomic_store_64(table, 0)
416 #define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
417 #define	pmap_load(table)		(*table)
418 #define	pmap_load_clear(table)		atomic_swap_64(table, 0)
419 #define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
420 #define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
421 #define	pmap_store(table, entry)	atomic_store_64(table, entry)
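
/*
 * A minimal sketch of the break-before-make sequence these helpers support
 * (compare pmap_update_entry() later in this file); it is not a drop-in
 * routine:
 *
 *	pmap_clear(pte);		old entry is removed
 *	pmap_invalidate_page(pmap, va);	no stale TLB entry remains
 *	pmap_store(pte, newpte);	publish the new entry
 */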
422 
423 /********************/
424 /* Inline functions */
425 /********************/
426 
427 static __inline void
428 pagecopy(void *s, void *d)
429 {
430 
431 	memcpy(d, s, PAGE_SIZE);
432 }
433 
434 static __inline pd_entry_t *
435 pmap_l0(pmap_t pmap, vm_offset_t va)
436 {
437 
438 	return (&pmap->pm_l0[pmap_l0_index(va)]);
439 }
440 
441 static __inline pd_entry_t *
442 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
443 {
444 	pd_entry_t *l1;
445 
446 	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
447 	return (&l1[pmap_l1_index(va)]);
448 }
449 
450 static __inline pd_entry_t *
451 pmap_l1(pmap_t pmap, vm_offset_t va)
452 {
453 	pd_entry_t *l0;
454 
455 	l0 = pmap_l0(pmap, va);
456 	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
457 		return (NULL);
458 
459 	return (pmap_l0_to_l1(l0, va));
460 }
461 
462 static __inline pd_entry_t *
463 pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
464 {
465 	pd_entry_t l1, *l2p;
466 
467 	l1 = pmap_load(l1p);
468 
469 	KASSERT(ADDR_IS_CANONICAL(va),
470 	    ("%s: Address not in canonical form: %lx", __func__, va));
471 	/*
472 	 * The valid bit may be clear if pmap_update_entry() is concurrently
473 	 * modifying the entry, so for KVA only the entry type may be checked.
474 	 */
475 	KASSERT(ADDR_IS_KERNEL(va) || (l1 & ATTR_DESCR_VALID) != 0,
476 	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
477 	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
478 	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
479 	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
480 	return (&l2p[pmap_l2_index(va)]);
481 }
482 
483 static __inline pd_entry_t *
484 pmap_l2(pmap_t pmap, vm_offset_t va)
485 {
486 	pd_entry_t *l1;
487 
488 	l1 = pmap_l1(pmap, va);
489 	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
490 		return (NULL);
491 
492 	return (pmap_l1_to_l2(l1, va));
493 }
494 
495 static __inline pt_entry_t *
496 pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
497 {
498 	pd_entry_t l2;
499 	pt_entry_t *l3p;
500 
501 	l2 = pmap_load(l2p);
502 
503 	KASSERT(ADDR_IS_CANONICAL(va),
504 	    ("%s: Address not in canonical form: %lx", __func__, va));
505 	/*
506 	 * The valid bit may be clear if pmap_update_entry() is concurrently
507 	 * modifying the entry, so for KVA only the entry type may be checked.
508 	 */
509 	KASSERT(ADDR_IS_KERNEL(va) || (l2 & ATTR_DESCR_VALID) != 0,
510 	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
511 	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
512 	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
513 	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
514 	return (&l3p[pmap_l3_index(va)]);
515 }
516 
517 /*
518  * Returns the lowest valid pde for a given virtual address.
519  * The next level may or may not point to a valid page or block.
520  */
521 static __inline pd_entry_t *
522 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
523 {
524 	pd_entry_t *l0, *l1, *l2, desc;
525 
526 	l0 = pmap_l0(pmap, va);
527 	desc = pmap_load(l0) & ATTR_DESCR_MASK;
528 	if (desc != L0_TABLE) {
529 		*level = -1;
530 		return (NULL);
531 	}
532 
533 	l1 = pmap_l0_to_l1(l0, va);
534 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
535 	if (desc != L1_TABLE) {
536 		*level = 0;
537 		return (l0);
538 	}
539 
540 	l2 = pmap_l1_to_l2(l1, va);
541 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
542 	if (desc != L2_TABLE) {
543 		*level = 1;
544 		return (l1);
545 	}
546 
547 	*level = 2;
548 	return (l2);
549 }
550 
551 /*
552  * Returns the lowest valid pte block or table entry for a given virtual
553  * address. If there are no valid entries return NULL and set the level to
554  * the first invalid level.
555  */
556 static __inline pt_entry_t *
557 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
558 {
559 	pd_entry_t *l1, *l2, desc;
560 	pt_entry_t *l3;
561 
562 	l1 = pmap_l1(pmap, va);
563 	if (l1 == NULL) {
564 		*level = 0;
565 		return (NULL);
566 	}
567 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
568 	if (desc == L1_BLOCK) {
569 		*level = 1;
570 		return (l1);
571 	}
572 
573 	if (desc != L1_TABLE) {
574 		*level = 1;
575 		return (NULL);
576 	}
577 
578 	l2 = pmap_l1_to_l2(l1, va);
579 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
580 	if (desc == L2_BLOCK) {
581 		*level = 2;
582 		return (l2);
583 	}
584 
585 	if (desc != L2_TABLE) {
586 		*level = 2;
587 		return (NULL);
588 	}
589 
590 	*level = 3;
591 	l3 = pmap_l2_to_l3(l2, va);
592 	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
593 		return (NULL);
594 
595 	return (l3);
596 }
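
/*
 * A minimal usage sketch of the (entry, level) convention; pmap_extract()
 * below is the canonical consumer:
 *
 *	int lvl;
 *	pt_entry_t *pte = pmap_pte(pmap, va, &lvl);
 *
 * On success, lvl == 1 or 2 means pte points at an L1/L2 block entry and
 * lvl == 3 means it points at an L3 page entry; the physical address is then
 * (pmap_load(pte) & ~ATTR_MASK) plus the offset bits for that level.
 */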
597 
598 bool
599 pmap_ps_enabled(pmap_t pmap __unused)
600 {
601 
602 	return (superpages_enabled != 0);
603 }
604 
605 bool
606 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
607     pd_entry_t **l2, pt_entry_t **l3)
608 {
609 	pd_entry_t *l0p, *l1p, *l2p;
610 
611 	if (pmap->pm_l0 == NULL)
612 		return (false);
613 
614 	l0p = pmap_l0(pmap, va);
615 	*l0 = l0p;
616 
617 	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
618 		return (false);
619 
620 	l1p = pmap_l0_to_l1(l0p, va);
621 	*l1 = l1p;
622 
623 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
624 		*l2 = NULL;
625 		*l3 = NULL;
626 		return (true);
627 	}
628 
629 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
630 		return (false);
631 
632 	l2p = pmap_l1_to_l2(l1p, va);
633 	*l2 = l2p;
634 
635 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
636 		*l3 = NULL;
637 		return (true);
638 	}
639 
640 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
641 		return (false);
642 
643 	*l3 = pmap_l2_to_l3(l2p, va);
644 
645 	return (true);
646 }
647 
648 static __inline int
649 pmap_l3_valid(pt_entry_t l3)
650 {
651 
652 	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
653 }
654 
655 CTASSERT(L1_BLOCK == L2_BLOCK);
656 
657 static pt_entry_t
658 pmap_pte_memattr(pmap_t pmap, vm_memattr_t memattr)
659 {
660 	pt_entry_t val;
661 
662 	if (pmap->pm_stage == PM_STAGE1) {
663 		val = ATTR_S1_IDX(memattr);
664 		if (memattr == VM_MEMATTR_DEVICE)
665 			val |= ATTR_S1_XN;
666 		return (val);
667 	}
668 
669 	val = 0;
670 
671 	switch (memattr) {
672 	case VM_MEMATTR_DEVICE:
673 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_DEVICE_nGnRnE) |
674 		    ATTR_S2_XN(ATTR_S2_XN_ALL));
675 	case VM_MEMATTR_UNCACHEABLE:
676 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_NC));
677 	case VM_MEMATTR_WRITE_BACK:
678 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WB));
679 	case VM_MEMATTR_WRITE_THROUGH:
680 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WT));
681 	default:
682 		panic("%s: invalid memory attribute %x", __func__, memattr);
683 	}
684 }
685 
686 static pt_entry_t
687 pmap_pte_prot(pmap_t pmap, vm_prot_t prot)
688 {
689 	pt_entry_t val;
690 
691 	val = 0;
692 	if (pmap->pm_stage == PM_STAGE1) {
693 		if ((prot & VM_PROT_EXECUTE) == 0)
694 			val |= ATTR_S1_XN;
695 		if ((prot & VM_PROT_WRITE) == 0)
696 			val |= ATTR_S1_AP(ATTR_S1_AP_RO);
697 	} else {
698 		if ((prot & VM_PROT_WRITE) != 0)
699 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
700 		if ((prot & VM_PROT_READ) != 0)
701 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
702 		if ((prot & VM_PROT_EXECUTE) == 0)
703 			val |= ATTR_S2_XN(ATTR_S2_XN_ALL);
704 	}
705 
706 	return (val);
707 }
708 
709 /*
710  * Checks if the PTE is dirty.
711  */
712 static inline int
713 pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
714 {
715 
716 	KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
717 
718 	if (pmap->pm_stage == PM_STAGE1) {
719 		KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
720 		    ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
721 
722 		return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
723 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
724 	}
725 
726 	return ((pte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
727 	    ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE));
728 }
729 
730 static __inline void
731 pmap_resident_count_inc(pmap_t pmap, int count)
732 {
733 
734 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
735 	pmap->pm_stats.resident_count += count;
736 }
737 
738 static __inline void
739 pmap_resident_count_dec(pmap_t pmap, int count)
740 {
741 
742 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
743 	KASSERT(pmap->pm_stats.resident_count >= count,
744 	    ("pmap %p resident count underflow %ld %d", pmap,
745 	    pmap->pm_stats.resident_count, count));
746 	pmap->pm_stats.resident_count -= count;
747 }
748 
749 static vm_paddr_t
750 pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
751 {
752 	vm_paddr_t pa_page;
753 
754 	pa_page = arm64_address_translate_s1e1r(va) & PAR_PA_MASK;
755 	return (pa_page | (va & PAR_LOW_MASK));
756 }
757 
758 static vm_offset_t
759 pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
760     vm_offset_t freemempos)
761 {
762 	pt_entry_t *l2;
763 	vm_offset_t va;
764 	vm_paddr_t l2_pa, pa;
765 	u_int l1_slot, l2_slot, prev_l1_slot;
766 	int i;
767 
768 	dmap_phys_base = min_pa & ~L1_OFFSET;
769 	dmap_phys_max = 0;
770 	dmap_max_addr = 0;
771 	l2 = NULL;
772 	prev_l1_slot = -1;
773 
775 	memset(pagetable_dmap, 0, PAGE_SIZE * DMAP_TABLES);
776 
777 	for (i = 0; i < (physmap_idx * 2); i += 2) {
778 		pa = physmap[i] & ~L2_OFFSET;
779 		va = pa - dmap_phys_base + DMAP_MIN_ADDRESS;
780 
781 		/* Create L2 mappings at the start of the region */
782 		if ((pa & L1_OFFSET) != 0) {
783 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
784 			if (l1_slot != prev_l1_slot) {
785 				prev_l1_slot = l1_slot;
786 				l2 = (pt_entry_t *)freemempos;
787 				l2_pa = pmap_early_vtophys(kern_l1,
788 				    (vm_offset_t)l2);
789 				freemempos += PAGE_SIZE;
790 
791 				pmap_store(&pagetable_dmap[l1_slot],
792 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
793 
794 				memset(l2, 0, PAGE_SIZE);
795 			}
796 			KASSERT(l2 != NULL,
797 			    ("pmap_bootstrap_dmap: NULL l2 map"));
798 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
799 			    pa += L2_SIZE, va += L2_SIZE) {
800 				/*
801 				 * We are on a boundary, stop to
802 				 * create a level 1 block
803 				 */
804 				if ((pa & L1_OFFSET) == 0)
805 					break;
806 
807 				l2_slot = pmap_l2_index(va);
808 				KASSERT(l2_slot != 0, ("pmap_bootstrap_dmap: invalid l2_slot"));
809 				pmap_store(&l2[l2_slot],
810 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
811 				    ATTR_S1_XN |
812 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
813 				    L2_BLOCK);
814 			}
815 			KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
816 			    ("pmap_bootstrap_dmap: va/pa mismatch"));
817 		}
818 
819 		for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
820 		    (physmap[i + 1] - pa) >= L1_SIZE;
821 		    pa += L1_SIZE, va += L1_SIZE) {
822 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
823 			pmap_store(&pagetable_dmap[l1_slot],
824 			    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_S1_XN |
825 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
826 		}
827 
828 		/* Create L2 mappings at the end of the region */
829 		if (pa < physmap[i + 1]) {
830 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
831 			if (l1_slot != prev_l1_slot) {
832 				prev_l1_slot = l1_slot;
833 				l2 = (pt_entry_t *)freemempos;
834 				l2_pa = pmap_early_vtophys(kern_l1,
835 				    (vm_offset_t)l2);
836 				freemempos += PAGE_SIZE;
837 
838 				pmap_store(&pagetable_dmap[l1_slot],
839 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
840 
841 				memset(l2, 0, PAGE_SIZE);
842 			}
843 			KASSERT(l2 != NULL,
844 			    ("pmap_bootstrap_dmap: NULL l2 map"));
845 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
846 			    pa += L2_SIZE, va += L2_SIZE) {
847 				l2_slot = pmap_l2_index(va);
848 				pmap_store(&l2[l2_slot],
849 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
850 				    ATTR_S1_XN |
851 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
852 				    L2_BLOCK);
853 			}
854 		}
855 
856 		if (pa > dmap_phys_max) {
857 			dmap_phys_max = pa;
858 			dmap_max_addr = va;
859 		}
860 	}
861 
862 	cpu_tlb_flushID();
863 
864 	return (freemempos);
865 }
866 
867 static vm_offset_t
868 pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
869 {
870 	vm_offset_t l2pt;
871 	vm_paddr_t pa;
872 	pd_entry_t *l1;
873 	u_int l1_slot;
874 
875 	KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
876 
877 	l1 = (pd_entry_t *)l1pt;
878 	l1_slot = pmap_l1_index(va);
879 	l2pt = l2_start;
880 
881 	for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
882 		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
883 
884 		pa = pmap_early_vtophys(l1pt, l2pt);
885 		pmap_store(&l1[l1_slot],
886 		    (pa & ~Ln_TABLE_MASK) | L1_TABLE);
887 		l2pt += PAGE_SIZE;
888 	}
889 
890 	/* Clean the L2 page table */
891 	memset((void *)l2_start, 0, l2pt - l2_start);
892 
893 	return (l2pt);
894 }
895 
896 static vm_offset_t
897 pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
898 {
899 	vm_offset_t l3pt;
900 	vm_paddr_t pa;
901 	pd_entry_t *l2;
902 	u_int l2_slot;
903 
904 	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
905 
906 	l2 = pmap_l2(kernel_pmap, va);
907 	l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
908 	l2_slot = pmap_l2_index(va);
909 	l3pt = l3_start;
910 
911 	for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
912 		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
913 
914 		pa = pmap_early_vtophys(l1pt, l3pt);
915 		pmap_store(&l2[l2_slot],
916 		    (pa & ~Ln_TABLE_MASK) | ATTR_S1_UXN | L2_TABLE);
917 		l3pt += PAGE_SIZE;
918 	}
919 
920 	/* Clean the L3 page table */
921 	memset((void *)l3_start, 0, l3pt - l3_start);
922 
923 	return (l3pt);
924 }
925 
926 /*
927  *	Bootstrap the system enough to run with virtual memory.
928  */
929 void
930 pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
931     vm_size_t kernlen)
932 {
933 	vm_offset_t freemempos;
934 	vm_offset_t dpcpu, msgbufpv;
935 	vm_paddr_t start_pa, pa, min_pa;
936 	uint64_t kern_delta;
937 	int i;
938 
939 	/* Verify that the ASID is set through TTBR0. */
940 	KASSERT((READ_SPECIALREG(tcr_el1) & TCR_A1) == 0,
941 	    ("pmap_bootstrap: TCR_EL1.A1 != 0"));
942 
943 	kern_delta = KERNBASE - kernstart;
944 
945 	printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
946 	printf("%lx\n", l1pt);
947 	printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
948 
949 	/* Set this early so we can use the pagetable walking functions */
950 	kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
951 	PMAP_LOCK_INIT(kernel_pmap);
952 	kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
953 	kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
954 	kernel_pmap->pm_stage = PM_STAGE1;
955 	kernel_pmap->pm_levels = 4;
956 	kernel_pmap->pm_ttbr = kernel_pmap->pm_l0_paddr;
957 	kernel_pmap->pm_asid_set = &asids;
958 
959 	/* Assume the address we were loaded to is a valid physical address */
960 	min_pa = KERNBASE - kern_delta;
961 
962 	physmap_idx = physmem_avail(physmap, nitems(physmap));
963 	physmap_idx /= 2;
964 
965 	/*
966 	 * Find the minimum physical address. physmap is sorted,
967 	 * but may contain empty ranges.
968 	 */
969 	for (i = 0; i < physmap_idx * 2; i += 2) {
970 		if (physmap[i] == physmap[i + 1])
971 			continue;
972 		if (physmap[i] <= min_pa)
973 			min_pa = physmap[i];
974 	}
975 
976 	freemempos = KERNBASE + kernlen;
977 	freemempos = roundup2(freemempos, PAGE_SIZE);
978 
979 	/* Create a direct map region early so we can use it for pa -> va */
980 	freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
981 
982 	start_pa = pa = KERNBASE - kern_delta;
983 
984 	/*
985 	 * Create the l2 tables up to VM_MAX_KERNEL_ADDRESS.  We assume that the
986 	 * loader allocated the first and only l2 page table page used to map
987 	 * the kernel, preloaded files and module metadata.
988 	 */
989 	freemempos = pmap_bootstrap_l2(l1pt, KERNBASE + L1_SIZE, freemempos);
990 	/* And the l3 tables for the early devmap */
991 	freemempos = pmap_bootstrap_l3(l1pt,
992 	    VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
993 
994 	cpu_tlb_flushID();
995 
996 #define alloc_pages(var, np)						\
997 	(var) = freemempos;						\
998 	freemempos += (np * PAGE_SIZE);					\
999 	memset((char *)(var), 0, ((np) * PAGE_SIZE));
1000 
1001 	/* Allocate dynamic per-cpu area. */
1002 	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
1003 	dpcpu_init((void *)dpcpu, 0);
1004 
1005 	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
1006 	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
1007 	msgbufp = (void *)msgbufpv;
1008 
1009 	/* Reserve some VA space for early BIOS/ACPI mapping */
1010 	preinit_map_va = roundup2(freemempos, L2_SIZE);
1011 
1012 	virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
1013 	virtual_avail = roundup2(virtual_avail, L1_SIZE);
1014 	virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
1015 	kernel_vm_end = virtual_avail;
1016 
1017 	pa = pmap_early_vtophys(l1pt, freemempos);
1018 
1019 	physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
1020 
1021 	cpu_tlb_flushID();
1022 }
1023 
1024 /*
1025  *	Initialize a vm_page's machine-dependent fields.
1026  */
1027 void
1028 pmap_page_init(vm_page_t m)
1029 {
1030 
1031 	TAILQ_INIT(&m->md.pv_list);
1032 	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
1033 }
1034 
1035 static void
1036 pmap_init_asids(struct asid_set *set, int bits)
1037 {
1038 	int i;
1039 
1040 	set->asid_bits = bits;
1041 
1042 	/*
1043 	 * We may be too early in the overall initialization process to use
1044 	 * bit_alloc().
1045 	 */
1046 	set->asid_set_size = 1 << set->asid_bits;
1047 	set->asid_set = (bitstr_t *)kmem_malloc(bitstr_size(set->asid_set_size),
1048 	    M_WAITOK | M_ZERO);
1049 	for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
1050 		bit_set(set->asid_set, i);
1051 	set->asid_next = ASID_FIRST_AVAILABLE;
1052 	mtx_init(&set->asid_set_mutex, "asid set", NULL, MTX_SPIN);
1053 }
1054 
1055 /*
1056  *	Initialize the pmap module.
1057  *	Called by vm_init, to initialize any structures that the pmap
1058  *	system needs to map virtual memory.
1059  */
1060 void
1061 pmap_init(void)
1062 {
1063 	struct vm_phys_seg *seg, *next_seg;
1064 	struct md_page *pvh;
1065 	vm_size_t s;
1066 	uint64_t mmfr1;
1067 	int i, pv_npg, vmid_bits;
1068 
1069 	/*
1070 	 * Are large page mappings enabled?
1071 	 */
1072 	TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
1073 	if (superpages_enabled) {
1074 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1075 		    ("pmap_init: can't assign to pagesizes[1]"));
1076 		pagesizes[1] = L2_SIZE;
1077 		KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
1078 		    ("pmap_init: can't assign to pagesizes[2]"));
1079 		pagesizes[2] = L1_SIZE;
1080 	}
1081 
1082 	/*
1083 	 * Initialize the ASID allocator.
1084 	 */
1085 	pmap_init_asids(&asids,
1086 	    (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8);
1087 
1088 	if (has_hyp()) {
1089 		mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1090 		vmid_bits = 8;
1091 
1092 		if (ID_AA64MMFR1_VMIDBits_VAL(mmfr1) ==
1093 		    ID_AA64MMFR1_VMIDBits_16)
1094 			vmid_bits = 16;
1095 		pmap_init_asids(&vmids, vmid_bits);
1096 	}
1097 
1098 	/*
1099 	 * Initialize the pv chunk list mutex.
1100 	 */
1101 	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
1102 
1103 	/*
1104 	 * Initialize the pool of pv list locks.
1105 	 */
1106 	for (i = 0; i < NPV_LIST_LOCKS; i++)
1107 		rw_init(&pv_list_locks[i], "pmap pv list");
1108 
1109 	/*
1110 	 * Calculate the size of the pv head table for superpages.
1111 	 */
1112 	pv_npg = 0;
1113 	for (i = 0; i < vm_phys_nsegs; i++) {
1114 		seg = &vm_phys_segs[i];
1115 		pv_npg += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1116 		    pmap_l2_pindex(seg->start);
1117 	}
1118 
1119 	/*
1120 	 * Allocate memory for the pv head table for superpages.
1121 	 */
1122 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1123 	s = round_page(s);
1124 	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
1125 	for (i = 0; i < pv_npg; i++)
1126 		TAILQ_INIT(&pv_table[i].pv_list);
1127 	TAILQ_INIT(&pv_dummy.pv_list);
1128 
1129 	/*
1130 	 * Set pointers from vm_phys_segs to pv_table.
1131 	 */
1132 	for (i = 0, pvh = pv_table; i < vm_phys_nsegs; i++) {
1133 		seg = &vm_phys_segs[i];
1134 		seg->md_first = pvh;
1135 		pvh += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1136 		    pmap_l2_pindex(seg->start);
1137 
1138 		/*
1139 		 * If there is a following segment, and the final
1140 		 * superpage of this segment and the initial superpage
1141 		 * of the next segment are the same, then adjust the
1142 		 * pv_table entry for that next segment down by one so
1143 		 * that the pv_table entries will be shared.
1144 		 */
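		/*
		 * For example (illustrative addresses only): if one segment
		 * ends at 0x80100000 and the next begins at 0x80180000, both
		 * boundaries fall within the 2 MB superpage starting at
		 * 0x80000000, so the two segments share that superpage's
		 * pv_table entry.
		 */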
1145 		if (i + 1 < vm_phys_nsegs) {
1146 			next_seg = &vm_phys_segs[i + 1];
1147 			if (pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - 1 ==
1148 			    pmap_l2_pindex(next_seg->start)) {
1149 				pvh--;
1150 			}
1151 		}
1152 	}
1153 
1154 	vm_initialized = 1;
1155 }
1156 
1157 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1158     "2MB page mapping counters");
1159 
1160 static u_long pmap_l2_demotions;
1161 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
1162     &pmap_l2_demotions, 0, "2MB page demotions");
1163 
1164 static u_long pmap_l2_mappings;
1165 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
1166     &pmap_l2_mappings, 0, "2MB page mappings");
1167 
1168 static u_long pmap_l2_p_failures;
1169 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
1170     &pmap_l2_p_failures, 0, "2MB page promotion failures");
1171 
1172 static u_long pmap_l2_promotions;
1173 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
1174     &pmap_l2_promotions, 0, "2MB page promotions");
1175 
1176 /*
1177  * Invalidate a single TLB entry.
1178  */
1179 static __inline void
1180 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1181 {
1182 	uint64_t r;
1183 
1184 	PMAP_ASSERT_STAGE1(pmap);
1185 
1186 	dsb(ishst);
1187 	if (pmap == kernel_pmap) {
1188 		r = atop(va);
1189 		__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1190 	} else {
1191 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
1192 		__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1193 	}
1194 	dsb(ish);
1195 	isb();
1196 }
1197 
1198 static __inline void
1199 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1200 {
1201 	uint64_t end, r, start;
1202 
1203 	PMAP_ASSERT_STAGE1(pmap);
1204 
1205 	dsb(ishst);
1206 	if (pmap == kernel_pmap) {
1207 		start = atop(sva);
1208 		end = atop(eva);
1209 		for (r = start; r < end; r++)
1210 			__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1211 	} else {
1212 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1213 		start |= atop(sva);
1214 		end |= atop(eva);
1215 		for (r = start; r < end; r++)
1216 			__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1217 	}
1218 	dsb(ish);
1219 	isb();
1220 }
1221 
1222 static __inline void
1223 pmap_invalidate_all(pmap_t pmap)
1224 {
1225 	uint64_t r;
1226 
1227 	PMAP_ASSERT_STAGE1(pmap);
1228 
1229 	dsb(ishst);
1230 	if (pmap == kernel_pmap) {
1231 		__asm __volatile("tlbi vmalle1is");
1232 	} else {
1233 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1234 		__asm __volatile("tlbi aside1is, %0" : : "r" (r));
1235 	}
1236 	dsb(ish);
1237 	isb();
1238 }
1239 
1240 /*
1241  *	Routine:	pmap_extract
1242  *	Function:
1243  *		Extract the physical page address associated
1244  *		with the given map/virtual_address pair.
1245  */
1246 vm_paddr_t
1247 pmap_extract(pmap_t pmap, vm_offset_t va)
1248 {
1249 	pt_entry_t *pte, tpte;
1250 	vm_paddr_t pa;
1251 	int lvl;
1252 
1253 	pa = 0;
1254 	PMAP_LOCK(pmap);
1255 	/*
1256 	 * Find the block or page map for this virtual address. pmap_pte
1257 	 * will return either a valid block/page entry, or NULL.
1258 	 */
1259 	pte = pmap_pte(pmap, va, &lvl);
1260 	if (pte != NULL) {
1261 		tpte = pmap_load(pte);
1262 		pa = tpte & ~ATTR_MASK;
1263 		switch(lvl) {
1264 		case 1:
1265 			KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1266 			    ("pmap_extract: Invalid L1 pte found: %lx",
1267 			    tpte & ATTR_DESCR_MASK));
1268 			pa |= (va & L1_OFFSET);
1269 			break;
1270 		case 2:
1271 			KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1272 			    ("pmap_extract: Invalid L2 pte found: %lx",
1273 			    tpte & ATTR_DESCR_MASK));
1274 			pa |= (va & L2_OFFSET);
1275 			break;
1276 		case 3:
1277 			KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1278 			    ("pmap_extract: Invalid L3 pte found: %lx",
1279 			    tpte & ATTR_DESCR_MASK));
1280 			pa |= (va & L3_OFFSET);
1281 			break;
1282 		}
1283 	}
1284 	PMAP_UNLOCK(pmap);
1285 	return (pa);
1286 }
1287 
1288 /*
1289  *	Routine:	pmap_extract_and_hold
1290  *	Function:
1291  *		Atomically extract and hold the physical page
1292  *		with the given pmap and virtual address pair
1293  *		if that mapping permits the given protection.
1294  */
1295 vm_page_t
1296 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1297 {
1298 	pt_entry_t *pte, tpte;
1299 	vm_offset_t off;
1300 	vm_page_t m;
1301 	int lvl;
1302 	bool use;
1303 
1304 	m = NULL;
1305 	PMAP_LOCK(pmap);
1306 	pte = pmap_pte(pmap, va, &lvl);
1307 	if (pte != NULL) {
1308 		tpte = pmap_load(pte);
1309 
1310 		KASSERT(lvl > 0 && lvl <= 3,
1311 		    ("pmap_extract_and_hold: Invalid level %d", lvl));
1312 		CTASSERT(L1_BLOCK == L2_BLOCK);
1313 		KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1314 		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1315 		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1316 		     tpte & ATTR_DESCR_MASK));
1317 
1318 		use = false;
1319 		if ((prot & VM_PROT_WRITE) == 0)
1320 			use = true;
1321 		else if (pmap->pm_stage == PM_STAGE1 &&
1322 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))
1323 			use = true;
1324 		else if (pmap->pm_stage == PM_STAGE2 &&
1325 		    ((tpte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
1326 		     ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)))
1327 			use = true;
1328 
1329 		if (use) {
1330 			switch (lvl) {
1331 			case 1:
1332 				off = va & L1_OFFSET;
1333 				break;
1334 			case 2:
1335 				off = va & L2_OFFSET;
1336 				break;
1337 			case 3:
1338 			default:
1339 				off = 0;
1340 			}
1341 			m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
1342 			if (m != NULL && !vm_page_wire_mapped(m))
1343 				m = NULL;
1344 		}
1345 	}
1346 	PMAP_UNLOCK(pmap);
1347 	return (m);
1348 }
1349 
1350 /*
1351  * Walks the page tables to translate a kernel virtual address to a
1352  * physical address. Returns true if the kva is valid and stores the
1353  * physical address in pa if it is not NULL.
1354  */
1355 bool
1356 pmap_klookup(vm_offset_t va, vm_paddr_t *pa)
1357 {
1358 	pt_entry_t *pte, tpte;
1359 	register_t intr;
1360 	uint64_t par;
1361 
1362 	/*
1363 	 * Disable interrupts so we don't get interrupted between asking
1364 	 * for address translation, and getting the result back.
1365 	 */
1366 	intr = intr_disable();
1367 	par = arm64_address_translate_s1e1r(va);
1368 	intr_restore(intr);
1369 
1370 	if (PAR_SUCCESS(par)) {
1371 		if (pa != NULL)
1372 			*pa = (par & PAR_PA_MASK) | (va & PAR_LOW_MASK);
1373 		return (true);
1374 	}
1375 
1376 	/*
1377 	 * Fall back to walking the page table. The address translation
1378 	 * instruction may fail when the page is in a break-before-make
1379 	 * sequence. As we only clear the valid bit in said sequence we
1380 	 * can walk the page table to find the physical address.
1381 	 */
1382 
1383 	pte = pmap_l1(kernel_pmap, va);
1384 	if (pte == NULL)
1385 		return (false);
1386 
1387 	/*
1388 	 * A concurrent pmap_update_entry() will clear the entry's valid bit
1389 	 * but leave the rest of the entry unchanged.  Therefore, we treat a
1390 	 * non-zero entry as being valid, and we ignore the valid bit when
1391 	 * determining whether the entry maps a block, page, or table.
1392 	 */
1393 	tpte = pmap_load(pte);
1394 	if (tpte == 0)
1395 		return (false);
1396 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
1397 		if (pa != NULL)
1398 			*pa = (tpte & ~ATTR_MASK) | (va & L1_OFFSET);
1399 		return (true);
1400 	}
1401 	pte = pmap_l1_to_l2(&tpte, va);
1402 	tpte = pmap_load(pte);
1403 	if (tpte == 0)
1404 		return (false);
1405 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
1406 		if (pa != NULL)
1407 			*pa = (tpte & ~ATTR_MASK) | (va & L2_OFFSET);
1408 		return (true);
1409 	}
1410 	pte = pmap_l2_to_l3(&tpte, va);
1411 	tpte = pmap_load(pte);
1412 	if (tpte == 0)
1413 		return (false);
1414 	if (pa != NULL)
1415 		*pa = (tpte & ~ATTR_MASK) | (va & L3_OFFSET);
1416 	return (true);
1417 }
1418 
1419 vm_paddr_t
1420 pmap_kextract(vm_offset_t va)
1421 {
1422 	vm_paddr_t pa;
1423 
1424 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
1425 		return (DMAP_TO_PHYS(va));
1426 
1427 	if (pmap_klookup(va, &pa) == false)
1428 		return (0);
1429 	return (pa);
1430 }
1431 
1432 /***************************************************
1433  * Low level mapping routines.....
1434  ***************************************************/
1435 
1436 void
1437 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
1438 {
1439 	pd_entry_t *pde;
1440 	pt_entry_t *pte, attr;
1441 	vm_offset_t va;
1442 	int lvl;
1443 
1444 	KASSERT((pa & L3_OFFSET) == 0,
1445 	   ("pmap_kenter: Invalid physical address"));
1446 	KASSERT((sva & L3_OFFSET) == 0,
1447 	   ("pmap_kenter: Invalid virtual address"));
1448 	KASSERT((size & PAGE_MASK) == 0,
1449 	    ("pmap_kenter: Mapping is not page-sized"));
1450 
1451 	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
1452 	    ATTR_S1_IDX(mode) | L3_PAGE;
1453 	va = sva;
1454 	while (size != 0) {
1455 		pde = pmap_pde(kernel_pmap, va, &lvl);
1456 		KASSERT(pde != NULL,
1457 		    ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
1458 		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
1459 
1460 		pte = pmap_l2_to_l3(pde, va);
1461 		pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
1462 
1463 		va += PAGE_SIZE;
1464 		pa += PAGE_SIZE;
1465 		size -= PAGE_SIZE;
1466 	}
1467 	pmap_invalidate_range(kernel_pmap, sva, va);
1468 }
1469 
1470 void
1471 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
1472 {
1473 
1474 	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
1475 }
1476 
1477 /*
1478  * Remove a page from the kernel pagetables.
1479  */
1480 PMAP_INLINE void
1481 pmap_kremove(vm_offset_t va)
1482 {
1483 	pt_entry_t *pte;
1484 	int lvl;
1485 
1486 	pte = pmap_pte(kernel_pmap, va, &lvl);
1487 	KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
1488 	KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
1489 
1490 	pmap_clear(pte);
1491 	pmap_invalidate_page(kernel_pmap, va);
1492 }
1493 
1494 void
1495 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
1496 {
1497 	pt_entry_t *pte;
1498 	vm_offset_t va;
1499 	int lvl;
1500 
1501 	KASSERT((sva & L3_OFFSET) == 0,
1502 	   ("pmap_kremove_device: Invalid virtual address"));
1503 	KASSERT((size & PAGE_MASK) == 0,
1504 	    ("pmap_kremove_device: Mapping is not page-sized"));
1505 
1506 	va = sva;
1507 	while (size != 0) {
1508 		pte = pmap_pte(kernel_pmap, va, &lvl);
1509 		KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
1510 		KASSERT(lvl == 3,
1511 		    ("Invalid device pagetable level: %d != 3", lvl));
1512 		pmap_clear(pte);
1513 
1514 		va += PAGE_SIZE;
1515 		size -= PAGE_SIZE;
1516 	}
1517 	pmap_invalidate_range(kernel_pmap, sva, va);
1518 }
1519 
1520 /*
1521  *	Used to map a range of physical addresses into kernel
1522  *	virtual address space.
1523  *
1524  *	The value passed in '*virt' is a suggested virtual address for
1525  *	the mapping. Architectures which can support a direct-mapped
1526  *	physical to virtual region can return the appropriate address
1527  *	within that region, leaving '*virt' unchanged. Other
1528  *	architectures should map the pages starting at '*virt' and
1529  *	update '*virt' with the first usable address after the mapped
1530  *	region.
1531  */
1532 vm_offset_t
1533 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1534 {
1535 	return (PHYS_TO_DMAP(start));
1536 }
1537 
1538 /*
1539  * Add a list of wired pages to the kva.
1540  * This routine is only used for temporary
1541  * kernel mappings that do not need to have
1542  * page modification or references recorded.
1543  * Note that old mappings are simply written
1544  * over.  The page *must* be wired.
1545  * Note: SMP coherent.  Uses a ranged broadcast TLB invalidation.
1546  */
1547 void
1548 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1549 {
1550 	pd_entry_t *pde;
1551 	pt_entry_t *pte, pa;
1552 	vm_offset_t va;
1553 	vm_page_t m;
1554 	int i, lvl;
1555 
1556 	va = sva;
1557 	for (i = 0; i < count; i++) {
1558 		pde = pmap_pde(kernel_pmap, va, &lvl);
1559 		KASSERT(pde != NULL,
1560 		    ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
1561 		KASSERT(lvl == 2,
1562 		    ("pmap_qenter: Invalid level %d", lvl));
1563 
1564 		m = ma[i];
1565 		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
1566 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
1567 		    ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
1568 		pte = pmap_l2_to_l3(pde, va);
1569 		pmap_load_store(pte, pa);
1570 
1571 		va += L3_SIZE;
1572 	}
1573 	pmap_invalidate_range(kernel_pmap, sva, va);
1574 }
1575 
1576 /*
1577  * This routine tears out page mappings from the
1578  * kernel -- it is meant only for temporary mappings.
1579  */
1580 void
1581 pmap_qremove(vm_offset_t sva, int count)
1582 {
1583 	pt_entry_t *pte;
1584 	vm_offset_t va;
1585 	int lvl;
1586 
1587 	KASSERT(ADDR_IS_CANONICAL(sva),
1588 	    ("%s: Address not in canonical form: %lx", __func__, sva));
1589 	KASSERT(ADDR_IS_KERNEL(sva), ("usermode va %lx", sva));
1590 
1591 	va = sva;
1592 	while (count-- > 0) {
1593 		pte = pmap_pte(kernel_pmap, va, &lvl);
1594 		KASSERT(lvl == 3,
1595 		    ("pmap_qremove: Invalid pte level: %d != 3", lvl));
1596 		if (pte != NULL) {
1597 			pmap_clear(pte);
1598 		}
1599 
1600 		va += PAGE_SIZE;
1601 	}
1602 	pmap_invalidate_range(kernel_pmap, sva, va);
1603 }
1604 
1605 /***************************************************
1606  * Page table page management routines.....
1607  ***************************************************/
1608 /*
1609  * Schedule the specified unused page table page to be freed.  Specifically,
1610  * add the page to the specified list of pages that will be released to the
1611  * physical memory manager after the TLB has been updated.
1612  */
1613 static __inline void
1614 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1615     boolean_t set_PG_ZERO)
1616 {
1617 
1618 	if (set_PG_ZERO)
1619 		m->flags |= PG_ZERO;
1620 	else
1621 		m->flags &= ~PG_ZERO;
1622 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1623 }
1624 
1625 /*
1626  * Decrements a page table page's reference count, which is used to record the
1627  * number of valid page table entries within the page.  If the reference count
1628  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1629  * page table page was unmapped and FALSE otherwise.
1630  */
1631 static inline boolean_t
1632 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1633 {
1634 
1635 	--m->ref_count;
1636 	if (m->ref_count == 0) {
1637 		_pmap_unwire_l3(pmap, va, m, free);
1638 		return (TRUE);
1639 	} else
1640 		return (FALSE);
1641 }
1642 
1643 static void
1644 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1645 {
1646 
1647 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1648 	/*
1649 	 * unmap the page table page
1650 	 */
1651 	if (m->pindex >= (NUL2E + NUL1E)) {
1652 		/* l1 page */
1653 		pd_entry_t *l0;
1654 
1655 		l0 = pmap_l0(pmap, va);
1656 		pmap_clear(l0);
1657 	} else if (m->pindex >= NUL2E) {
1658 		/* l2 page */
1659 		pd_entry_t *l1;
1660 
1661 		l1 = pmap_l1(pmap, va);
1662 		pmap_clear(l1);
1663 	} else {
1664 		/* l3 page */
1665 		pd_entry_t *l2;
1666 
1667 		l2 = pmap_l2(pmap, va);
1668 		pmap_clear(l2);
1669 	}
1670 	pmap_resident_count_dec(pmap, 1);
1671 	if (m->pindex < NUL2E) {
1672 		/* We just released an l3, unhold the matching l2 */
1673 		pd_entry_t *l1, tl1;
1674 		vm_page_t l2pg;
1675 
1676 		l1 = pmap_l1(pmap, va);
1677 		tl1 = pmap_load(l1);
1678 		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1679 		pmap_unwire_l3(pmap, va, l2pg, free);
1680 	} else if (m->pindex < (NUL2E + NUL1E)) {
1681 		/* We just released an l2, unhold the matching l1 */
1682 		pd_entry_t *l0, tl0;
1683 		vm_page_t l1pg;
1684 
1685 		l0 = pmap_l0(pmap, va);
1686 		tl0 = pmap_load(l0);
1687 		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1688 		pmap_unwire_l3(pmap, va, l1pg, free);
1689 	}
1690 	pmap_invalidate_page(pmap, va);
1691 
1692 	/*
1693 	 * Put page on a list so that it is released after
1694 	 * *ALL* TLB shootdown is done
1695 	 */
1696 	pmap_add_delayed_free_list(m, free, TRUE);
1697 }
1698 
1699 /*
1700  * After removing a page table entry, this routine is used to
1701  * conditionally free the page, and manage the reference count.
1702  */
1703 static int
1704 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1705     struct spglist *free)
1706 {
1707 	vm_page_t mpte;
1708 
1709 	KASSERT(ADDR_IS_CANONICAL(va),
1710 	    ("%s: Address not in canonical form: %lx", __func__, va));
1711 	if (ADDR_IS_KERNEL(va))
1712 		return (0);
1713 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1714 	mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
1715 	return (pmap_unwire_l3(pmap, va, mpte, free));
1716 }
1717 
1718 /*
1719  * Release a page table page reference after a failed attempt to create a
1720  * mapping.
1721  */
1722 static void
1723 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
1724 {
1725 	struct spglist free;
1726 
1727 	SLIST_INIT(&free);
1728 	if (pmap_unwire_l3(pmap, va, mpte, &free)) {
1729 		/*
1730 		 * Although "va" was never mapped, the TLB could nonetheless
1731 		 * have intermediate entries that refer to the freed page
1732 		 * table pages.  Invalidate those entries.
1733 		 *
1734 		 * XXX redundant invalidation (See _pmap_unwire_l3().)
1735 		 */
1736 		pmap_invalidate_page(pmap, va);
1737 		vm_page_free_pages_toq(&free, true);
1738 	}
1739 }
1740 
1741 void
1742 pmap_pinit0(pmap_t pmap)
1743 {
1744 
1745 	PMAP_LOCK_INIT(pmap);
1746 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1747 	pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
1748 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1749 	pmap->pm_root.rt_root = 0;
1750 	pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
1751 	pmap->pm_stage = PM_STAGE1;
1752 	pmap->pm_levels = 4;
1753 	pmap->pm_ttbr = pmap->pm_l0_paddr;
1754 	pmap->pm_asid_set = &asids;
1755 
1756 	PCPU_SET(curpmap, pmap);
1757 }
1758 
1759 int
1760 pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage, int levels)
1761 {
1762 	vm_page_t m;
1763 
1764 	/*
1765 	 * allocate the l0 page
1766 	 */
1767 	while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
1768 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1769 		vm_wait(NULL);
1770 
1771 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
1772 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1773 
1774 	if ((m->flags & PG_ZERO) == 0)
1775 		pagezero(pmap->pm_l0);
1776 
1777 	pmap->pm_root.rt_root = 0;
1778 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1779 	pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
1780 
1781 	MPASS(levels == 3 || levels == 4);
1782 	pmap->pm_levels = levels;
1783 	pmap->pm_stage = stage;
1784 	switch (stage) {
1785 	case PM_STAGE1:
1786 		pmap->pm_asid_set = &asids;
1787 		break;
1788 	case PM_STAGE2:
1789 		pmap->pm_asid_set = &vmids;
1790 		break;
1791 	default:
1792 		panic("%s: Invalid pmap type %d", __func__, stage);
1793 		break;
1794 	}
1795 
1796 	/* XXX Temporarily disable deferred ASID allocation. */
1797 	pmap_alloc_asid(pmap);
1798 
1799 	/*
	 * Allocate the level 1 page table page to use as the root.  This
	 * will increase the refcount on the level 1 page so it won't be
	 * removed until pmap_release() is called.
1803 	 */
1804 	if (pmap->pm_levels == 3) {
1805 		PMAP_LOCK(pmap);
1806 		m = _pmap_alloc_l3(pmap, NUL2E + NUL1E, NULL);
1807 		PMAP_UNLOCK(pmap);
1808 	}
1809 	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);
1810 
1811 	return (1);
1812 }
1813 
1814 int
1815 pmap_pinit(pmap_t pmap)
1816 {
1817 
1818 	return (pmap_pinit_stage(pmap, PM_STAGE1, 4));
1819 }
1820 
1821 /*
1822  * This routine is called if the desired page table page does not exist.
1823  *
1824  * If page table page allocation fails, this routine may sleep before
1825  * returning NULL.  It sleeps only if a lock pointer was given.
1826  *
1827  * Note: If a page allocation fails at page table level two or three,
1828  * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach makes it easy to argue that
 * no race conditions can arise.
1831  */
1832 static vm_page_t
1833 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1834 {
1835 	vm_page_t m, l1pg, l2pg;
1836 
1837 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1838 
1839 	/*
1840 	 * Allocate a page table page.
1841 	 */
1842 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1843 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1844 		if (lockp != NULL) {
1845 			RELEASE_PV_LIST_LOCK(lockp);
1846 			PMAP_UNLOCK(pmap);
1847 			vm_wait(NULL);
1848 			PMAP_LOCK(pmap);
1849 		}
1850 
1851 		/*
1852 		 * Indicate the need to retry.  While waiting, the page table
1853 		 * page may have been allocated.
1854 		 */
1855 		return (NULL);
1856 	}
1857 	if ((m->flags & PG_ZERO) == 0)
1858 		pmap_zero_page(m);
1859 
1860 	/*
1861 	 * Because of AArch64's weak memory consistency model, we must have a
1862 	 * barrier here to ensure that the stores for zeroing "m", whether by
1863 	 * pmap_zero_page() or an earlier function, are visible before adding
1864 	 * "m" to the page table.  Otherwise, a page table walk by another
1865 	 * processor's MMU could see the mapping to "m" and a stale, non-zero
1866 	 * PTE within "m".
1867 	 */
1868 	dmb(ishst);
1869 
1870 	/*
	 * Map the page table page into the process address space, if
1872 	 * it isn't already there.
1873 	 */
1874 
1875 	if (ptepindex >= (NUL2E + NUL1E)) {
1876 		pd_entry_t *l0;
1877 		vm_pindex_t l0index;
1878 
1879 		l0index = ptepindex - (NUL2E + NUL1E);
1880 		l0 = &pmap->pm_l0[l0index];
1881 		KASSERT((pmap_load(l0) & ATTR_DESCR_VALID) == 0,
1882 		    ("%s: L0 entry %#lx is valid", __func__, pmap_load(l0)));
1883 		pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1884 	} else if (ptepindex >= NUL2E) {
1885 		vm_pindex_t l0index, l1index;
1886 		pd_entry_t *l0, *l1;
1887 		pd_entry_t tl0;
1888 
1889 		l1index = ptepindex - NUL2E;
1890 		l0index = l1index >> L0_ENTRIES_SHIFT;
1891 
1892 		l0 = &pmap->pm_l0[l0index];
1893 		tl0 = pmap_load(l0);
1894 		if (tl0 == 0) {
			/* Recurse to allocate the L1 page table page. */
1896 			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1897 			    lockp) == NULL) {
1898 				vm_page_unwire_noq(m);
1899 				vm_page_free_zero(m);
1900 				return (NULL);
1901 			}
1902 		} else {
1903 			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1904 			l1pg->ref_count++;
1905 		}
1906 
1907 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1908 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
1909 		KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0,
1910 		    ("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
1911 		pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1912 	} else {
1913 		vm_pindex_t l0index, l1index;
1914 		pd_entry_t *l0, *l1, *l2;
1915 		pd_entry_t tl0, tl1;
1916 
1917 		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1918 		l0index = l1index >> L0_ENTRIES_SHIFT;
1919 
1920 		l0 = &pmap->pm_l0[l0index];
1921 		tl0 = pmap_load(l0);
1922 		if (tl0 == 0) {
			/* Recurse to allocate the L2 page table page. */
1924 			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1925 			    lockp) == NULL) {
1926 				vm_page_unwire_noq(m);
1927 				vm_page_free_zero(m);
1928 				return (NULL);
1929 			}
1930 			tl0 = pmap_load(l0);
1931 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1932 			l1 = &l1[l1index & Ln_ADDR_MASK];
1933 		} else {
1934 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1935 			l1 = &l1[l1index & Ln_ADDR_MASK];
1936 			tl1 = pmap_load(l1);
1937 			if (tl1 == 0) {
				/* Recurse to allocate the L2 page table page. */
1939 				if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1940 				    lockp) == NULL) {
1941 					vm_page_unwire_noq(m);
1942 					vm_page_free_zero(m);
1943 					return (NULL);
1944 				}
1945 			} else {
1946 				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1947 				l2pg->ref_count++;
1948 			}
1949 		}
1950 
1951 		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1952 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
1953 		KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0,
1954 		    ("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
1955 		pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1956 	}
1957 
1958 	pmap_resident_count_inc(pmap, 1);
1959 
1960 	return (m);
1961 }
1962 
1963 static pd_entry_t *
1964 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
1965     struct rwlock **lockp)
1966 {
1967 	pd_entry_t *l1, *l2;
1968 	vm_page_t l2pg;
1969 	vm_pindex_t l2pindex;
1970 
1971 	KASSERT(ADDR_IS_CANONICAL(va),
1972 	    ("%s: Address not in canonical form: %lx", __func__, va));
1973 
1974 retry:
1975 	l1 = pmap_l1(pmap, va);
1976 	if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
1977 		l2 = pmap_l1_to_l2(l1, va);
1978 		if (!ADDR_IS_KERNEL(va)) {
1979 			/* Add a reference to the L2 page. */
1980 			l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
1981 			l2pg->ref_count++;
1982 		} else
1983 			l2pg = NULL;
1984 	} else if (!ADDR_IS_KERNEL(va)) {
		/* Allocate an L2 page table page. */
1986 		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
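		/*
		 * Dividing the L3 page table page index by Ln_ENTRIES gives
		 * the index of the L2 page table page that maps "va"; adding
		 * NUL2E moves it into the L2 range of the page table page
		 * index space used by _pmap_alloc_l3().
		 */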
1987 		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
1988 		if (l2pg == NULL) {
1989 			if (lockp != NULL)
1990 				goto retry;
1991 			else
1992 				return (NULL);
1993 		}
1994 		l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
1995 		l2 = &l2[pmap_l2_index(va)];
1996 	} else
1997 		panic("pmap_alloc_l2: missing page table page for va %#lx",
1998 		    va);
1999 	*l2pgp = l2pg;
2000 	return (l2);
2001 }
2002 
2003 static vm_page_t
2004 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
2005 {
2006 	vm_pindex_t ptepindex;
2007 	pd_entry_t *pde, tpde;
2008 #ifdef INVARIANTS
2009 	pt_entry_t *pte;
2010 #endif
2011 	vm_page_t m;
2012 	int lvl;
2013 
2014 	/*
	 * Calculate the page table page index.
2016 	 */
2017 	ptepindex = pmap_l2_pindex(va);
2018 retry:
2019 	/*
2020 	 * Get the page directory entry
2021 	 */
2022 	pde = pmap_pde(pmap, va, &lvl);
2023 
2024 	/*
	 * If the page table page is mapped, we just increment its reference
	 * count and return it.  If we get a level 2 pde, it points to a
	 * level 3 table.
2028 	 */
2029 	switch (lvl) {
2030 	case -1:
2031 		break;
2032 	case 0:
2033 #ifdef INVARIANTS
2034 		pte = pmap_l0_to_l1(pde, va);
2035 		KASSERT(pmap_load(pte) == 0,
2036 		    ("pmap_alloc_l3: TODO: l0 superpages"));
2037 #endif
2038 		break;
2039 	case 1:
2040 #ifdef INVARIANTS
2041 		pte = pmap_l1_to_l2(pde, va);
2042 		KASSERT(pmap_load(pte) == 0,
2043 		    ("pmap_alloc_l3: TODO: l1 superpages"));
2044 #endif
2045 		break;
2046 	case 2:
2047 		tpde = pmap_load(pde);
2048 		if (tpde != 0) {
2049 			m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
2050 			m->ref_count++;
2051 			return (m);
2052 		}
2053 		break;
2054 	default:
2055 		panic("pmap_alloc_l3: Invalid level %d", lvl);
2056 	}
2057 
2058 	/*
	 * We get here if the page table page isn't mapped, or if it has
	 * been deallocated.
2060 	 */
2061 	m = _pmap_alloc_l3(pmap, ptepindex, lockp);
2062 	if (m == NULL && lockp != NULL)
2063 		goto retry;
2064 
2065 	return (m);
2066 }
2067 
2068 /***************************************************
2069  * Pmap allocation/deallocation routines.
2070  ***************************************************/
2071 
2072 /*
2073  * Release any resources held by the given physical map.
2074  * Called when a pmap initialized by pmap_pinit is being released.
2075  * Should only be called if the map contains no valid mappings.
2076  */
2077 void
2078 pmap_release(pmap_t pmap)
2079 {
2080 	boolean_t rv;
2081 	struct spglist free;
2082 	struct asid_set *set;
2083 	vm_page_t m;
2084 	int asid;
2085 
2086 	if (pmap->pm_levels != 4) {
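		/*
		 * For a 3-level pmap, pmap_pinit_stage() allocated an extra
		 * L1 page table page to serve as the root; release it here
		 * before freeing the L0 page below.
		 */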
2087 		PMAP_ASSERT_STAGE2(pmap);
2088 		KASSERT(pmap->pm_stats.resident_count == 1,
2089 		    ("pmap_release: pmap resident count %ld != 0",
2090 		    pmap->pm_stats.resident_count));
2091 		KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID,
2092 		    ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0]));
2093 
2094 		SLIST_INIT(&free);
2095 		m = PHYS_TO_VM_PAGE(pmap->pm_ttbr);
2096 		PMAP_LOCK(pmap);
2097 		rv = pmap_unwire_l3(pmap, 0, m, &free);
2098 		PMAP_UNLOCK(pmap);
2099 		MPASS(rv == TRUE);
2100 		vm_page_free_pages_toq(&free, true);
2101 	}
2102 
2103 	KASSERT(pmap->pm_stats.resident_count == 0,
2104 	    ("pmap_release: pmap resident count %ld != 0",
2105 	    pmap->pm_stats.resident_count));
2106 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
2107 	    ("pmap_release: pmap has reserved page table page(s)"));
2108 
2109 	set = pmap->pm_asid_set;
2110 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
2111 
2112 	/*
	 * Allow the ASID to be reused.  For stage 2 VMIDs we don't invalidate
	 * the TLB entries when removing them, so we rely on a later TLB
	 * invalidation, which happens when the VMID generation is updated.
	 * Because of this we don't reuse VMIDs within a generation.
2117 	 */
2118 	if (pmap->pm_stage == PM_STAGE1) {
2119 		mtx_lock_spin(&set->asid_set_mutex);
2120 		if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) {
2121 			asid = COOKIE_TO_ASID(pmap->pm_cookie);
2122 			KASSERT(asid >= ASID_FIRST_AVAILABLE &&
2123 			    asid < set->asid_set_size,
2124 			    ("pmap_release: pmap cookie has out-of-range asid"));
2125 			bit_clear(set->asid_set, asid);
2126 		}
2127 		mtx_unlock_spin(&set->asid_set_mutex);
2128 	}
2129 
2130 	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
2131 	vm_page_unwire_noq(m);
2132 	vm_page_free_zero(m);
2133 }
2134 
2135 static int
2136 kvm_size(SYSCTL_HANDLER_ARGS)
2137 {
2138 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
2139 
	return (sysctl_handle_long(oidp, &ksize, 0, req));
2141 }
2142 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
2143     0, 0, kvm_size, "LU",
2144     "Size of KVM");
2145 
2146 static int
2147 kvm_free(SYSCTL_HANDLER_ARGS)
2148 {
2149 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
2150 
	return (sysctl_handle_long(oidp, &kfree, 0, req));
2152 }
2153 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
2154     0, 0, kvm_free, "LU",
2155     "Amount of KVM free");
2156 
2157 /*
2158  * grow the number of kernel page table entries, if needed
2159  */
2160 void
2161 pmap_growkernel(vm_offset_t addr)
2162 {
2163 	vm_paddr_t paddr;
2164 	vm_page_t nkpg;
2165 	pd_entry_t *l0, *l1, *l2;
2166 
2167 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
2168 
2169 	addr = roundup2(addr, L2_SIZE);
2170 	if (addr - 1 >= vm_map_max(kernel_map))
2171 		addr = vm_map_max(kernel_map);
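	/*
	 * The kernel map is grown in L2_SIZE (2MB) steps.  Each step
	 * installs a new L3 page table page in the corresponding L2 entry,
	 * first allocating an L2 page table page if the covering L1 entry
	 * is still empty.
	 */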
2172 	while (kernel_vm_end < addr) {
2173 		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
2174 		KASSERT(pmap_load(l0) != 0,
2175 		    ("pmap_growkernel: No level 0 kernel entry"));
2176 
2177 		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
2178 		if (pmap_load(l1) == 0) {
			/* The L1 entry is empty; allocate an L2 table. */
2180 			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
2181 			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
2182 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2183 			if (nkpg == NULL)
2184 				panic("pmap_growkernel: no memory to grow kernel");
2185 			if ((nkpg->flags & PG_ZERO) == 0)
2186 				pmap_zero_page(nkpg);
2187 			/* See the dmb() in _pmap_alloc_l3(). */
2188 			dmb(ishst);
2189 			paddr = VM_PAGE_TO_PHYS(nkpg);
2190 			pmap_store(l1, paddr | L1_TABLE);
2191 			continue; /* try again */
2192 		}
2193 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
2194 		if (pmap_load(l2) != 0) {
2195 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
2196 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2197 				kernel_vm_end = vm_map_max(kernel_map);
2198 				break;
2199 			}
2200 			continue;
2201 		}
2202 
2203 		nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
2204 		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2205 		    VM_ALLOC_ZERO);
2206 		if (nkpg == NULL)
2207 			panic("pmap_growkernel: no memory to grow kernel");
2208 		if ((nkpg->flags & PG_ZERO) == 0)
2209 			pmap_zero_page(nkpg);
2210 		/* See the dmb() in _pmap_alloc_l3(). */
2211 		dmb(ishst);
2212 		paddr = VM_PAGE_TO_PHYS(nkpg);
2213 		pmap_store(l2, paddr | L2_TABLE);
2214 
2215 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
2216 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2217 			kernel_vm_end = vm_map_max(kernel_map);
2218 			break;
2219 		}
2220 	}
2221 }
2222 
2223 /***************************************************
 * Page management routines.
2225  ***************************************************/
2226 
2227 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
2228 CTASSERT(_NPCM == 3);
2229 CTASSERT(_NPCPV == 168);
2230 
2231 static __inline struct pv_chunk *
2232 pv_to_chunk(pv_entry_t pv)
2233 {
2234 
2235 	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
2236 }
2237 
2238 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
2239 
2240 #define	PC_FREE0	0xfffffffffffffffful
2241 #define	PC_FREE1	0xfffffffffffffffful
2242 #define	PC_FREE2	0x000000fffffffffful
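/*
 * Each pv_chunk holds _NPCPV (168) pv entries, tracked by the three 64-bit
 * words of pc_map; a set bit marks a free entry.  A fully free chunk has
 * all bits set in the first two words and only the low 168 - 128 = 40 bits
 * set in the third, which is what the PC_FREE* constants above encode.
 */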
2243 
2244 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
2245 
2246 #ifdef PV_STATS
2247 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2248 
2249 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2250 	"Current number of pv entry chunks");
2251 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2252 	"Current number of pv entry chunks allocated");
2253 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
2254 	"Current number of pv entry chunks frees");
2255 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
2256 	"Number of times tried to get a chunk page but failed.");
2257 
2258 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
2259 static int pv_entry_spare;
2260 
2261 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
2262 	"Current number of pv entry frees");
2263 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
2264 	"Current number of pv entry allocs");
2265 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2266 	"Current number of pv entries");
2267 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2268 	"Current number of spare pv entries");
2269 #endif
2270 
2271 /*
2272  * We are in a serious low memory condition.  Resort to
2273  * drastic measures to free some pages so we can allocate
2274  * another pv entry chunk.
2275  *
2276  * Returns NULL if PV entries were reclaimed from the specified pmap.
2277  *
2278  * We do not, however, unmap 2mpages because subsequent accesses will
2279  * allocate per-page pv entries until repromotion occurs, thereby
2280  * exacerbating the shortage of free pv entries.
2281  */
2282 static vm_page_t
2283 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
2284 {
2285 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
2286 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
2287 	struct md_page *pvh;
2288 	pd_entry_t *pde;
2289 	pmap_t next_pmap, pmap;
2290 	pt_entry_t *pte, tpte;
2291 	pv_entry_t pv;
2292 	vm_offset_t va;
2293 	vm_page_t m, m_pc;
2294 	struct spglist free;
2295 	uint64_t inuse;
2296 	int bit, field, freed, lvl;
2297 	static int active_reclaims = 0;
2298 
2299 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2300 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
2301 
2302 	pmap = NULL;
2303 	m_pc = NULL;
2304 	SLIST_INIT(&free);
2305 	bzero(&pc_marker_b, sizeof(pc_marker_b));
2306 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
2307 	pc_marker = (struct pv_chunk *)&pc_marker_b;
2308 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
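	/*
	 * "pc_marker" tracks our position in the LRU-ordered pv_chunks list
	 * across drops of pv_chunks_mutex, while "pc_marker_end" bounds the
	 * scan so that chunks requeued at the tail are not revisited.
	 */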
2309 
2310 	mtx_lock(&pv_chunks_mutex);
2311 	active_reclaims++;
2312 	TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
2313 	TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
2314 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
2315 	    SLIST_EMPTY(&free)) {
2316 		next_pmap = pc->pc_pmap;
2317 		if (next_pmap == NULL) {
2318 			/*
2319 			 * The next chunk is a marker.  However, it is
2320 			 * not our marker, so active_reclaims must be
2321 			 * > 1.  Consequently, the next_chunk code
2322 			 * will not rotate the pv_chunks list.
2323 			 */
2324 			goto next_chunk;
2325 		}
2326 		mtx_unlock(&pv_chunks_mutex);
2327 
2328 		/*
2329 		 * A pv_chunk can only be removed from the pc_lru list
2330 		 * when both pv_chunks_mutex is owned and the
2331 		 * corresponding pmap is locked.
2332 		 */
2333 		if (pmap != next_pmap) {
2334 			if (pmap != NULL && pmap != locked_pmap)
2335 				PMAP_UNLOCK(pmap);
2336 			pmap = next_pmap;
2337 			/* Avoid deadlock and lock recursion. */
2338 			if (pmap > locked_pmap) {
2339 				RELEASE_PV_LIST_LOCK(lockp);
2340 				PMAP_LOCK(pmap);
2341 				mtx_lock(&pv_chunks_mutex);
2342 				continue;
2343 			} else if (pmap != locked_pmap) {
2344 				if (PMAP_TRYLOCK(pmap)) {
2345 					mtx_lock(&pv_chunks_mutex);
2346 					continue;
2347 				} else {
2348 					pmap = NULL; /* pmap is not locked */
2349 					mtx_lock(&pv_chunks_mutex);
2350 					pc = TAILQ_NEXT(pc_marker, pc_lru);
2351 					if (pc == NULL ||
2352 					    pc->pc_pmap != next_pmap)
2353 						continue;
2354 					goto next_chunk;
2355 				}
2356 			}
2357 		}
2358 
2359 		/*
2360 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
2361 		 */
2362 		freed = 0;
2363 		for (field = 0; field < _NPCM; field++) {
2364 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2365 			    inuse != 0; inuse &= ~(1UL << bit)) {
2366 				bit = ffsl(inuse) - 1;
2367 				pv = &pc->pc_pventry[field * 64 + bit];
2368 				va = pv->pv_va;
2369 				pde = pmap_pde(pmap, va, &lvl);
2370 				if (lvl != 2)
2371 					continue;
2372 				pte = pmap_l2_to_l3(pde, va);
2373 				tpte = pmap_load(pte);
2374 				if ((tpte & ATTR_SW_WIRED) != 0)
2375 					continue;
2376 				tpte = pmap_load_clear(pte);
2377 				m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
2378 				if (pmap_pte_dirty(pmap, tpte))
2379 					vm_page_dirty(m);
2380 				if ((tpte & ATTR_AF) != 0) {
2381 					pmap_invalidate_page(pmap, va);
2382 					vm_page_aflag_set(m, PGA_REFERENCED);
2383 				}
2384 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2385 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2386 				m->md.pv_gen++;
2387 				if (TAILQ_EMPTY(&m->md.pv_list) &&
2388 				    (m->flags & PG_FICTITIOUS) == 0) {
2389 					pvh = page_to_pvh(m);
2390 					if (TAILQ_EMPTY(&pvh->pv_list)) {
2391 						vm_page_aflag_clear(m,
2392 						    PGA_WRITEABLE);
2393 					}
2394 				}
2395 				pc->pc_map[field] |= 1UL << bit;
2396 				pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
2397 				freed++;
2398 			}
2399 		}
2400 		if (freed == 0) {
2401 			mtx_lock(&pv_chunks_mutex);
2402 			goto next_chunk;
2403 		}
2404 		/* Every freed mapping is for a 4 KB page. */
2405 		pmap_resident_count_dec(pmap, freed);
2406 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
2407 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
2408 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
2409 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2410 		if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
2411 		    pc->pc_map[2] == PC_FREE2) {
2412 			PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2413 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2414 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2415 			/* Entire chunk is free; return it. */
2416 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2417 			dump_drop_page(m_pc->phys_addr);
2418 			mtx_lock(&pv_chunks_mutex);
2419 			TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2420 			break;
2421 		}
2422 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2423 		mtx_lock(&pv_chunks_mutex);
2424 		/* One freed pv entry in locked_pmap is sufficient. */
2425 		if (pmap == locked_pmap)
2426 			break;
2427 
2428 next_chunk:
2429 		TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2430 		TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
2431 		if (active_reclaims == 1 && pmap != NULL) {
2432 			/*
2433 			 * Rotate the pv chunks list so that we do not
2434 			 * scan the same pv chunks that could not be
2435 			 * freed (because they contained a wired
2436 			 * and/or superpage mapping) on every
2437 			 * invocation of reclaim_pv_chunk().
2438 			 */
2439 			while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
2440 				MPASS(pc->pc_pmap != NULL);
2441 				TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2442 				TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2443 			}
2444 		}
2445 	}
2446 	TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2447 	TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
2448 	active_reclaims--;
2449 	mtx_unlock(&pv_chunks_mutex);
2450 	if (pmap != NULL && pmap != locked_pmap)
2451 		PMAP_UNLOCK(pmap);
2452 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
2453 		m_pc = SLIST_FIRST(&free);
2454 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2455 		/* Recycle a freed page table page. */
2456 		m_pc->ref_count = 1;
2457 	}
2458 	vm_page_free_pages_toq(&free, true);
2459 	return (m_pc);
2460 }
2461 
2462 /*
2463  * free the pv_entry back to the free list
2464  */
2465 static void
2466 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2467 {
2468 	struct pv_chunk *pc;
2469 	int idx, field, bit;
2470 
2471 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2472 	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
2473 	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
2474 	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
2475 	pc = pv_to_chunk(pv);
2476 	idx = pv - &pc->pc_pventry[0];
2477 	field = idx / 64;
2478 	bit = idx % 64;
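	/*
	 * Mark the entry as free: "field" selects the pc_map word and "bit"
	 * the position within that word.
	 */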
2479 	pc->pc_map[field] |= 1ul << bit;
2480 	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
2481 	    pc->pc_map[2] != PC_FREE2) {
2482 		/* 98% of the time, pc is already at the head of the list. */
2483 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
2484 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2485 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2486 		}
2487 		return;
2488 	}
2489 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2490 	free_pv_chunk(pc);
2491 }
2492 
2493 static void
2494 free_pv_chunk(struct pv_chunk *pc)
2495 {
2496 	vm_page_t m;
2497 
2498 	mtx_lock(&pv_chunks_mutex);
	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2500 	mtx_unlock(&pv_chunks_mutex);
2501 	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2502 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2503 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2504 	/* entire chunk is free, return it */
2505 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2506 	dump_drop_page(m->phys_addr);
2507 	vm_page_unwire_noq(m);
2508 	vm_page_free(m);
2509 }
2510 
2511 /*
2512  * Returns a new PV entry, allocating a new PV chunk from the system when
2513  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
2514  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
2515  * returned.
2516  *
2517  * The given PV list lock may be released.
2518  */
2519 static pv_entry_t
2520 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
2521 {
2522 	int bit, field;
2523 	pv_entry_t pv;
2524 	struct pv_chunk *pc;
2525 	vm_page_t m;
2526 
2527 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2528 	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
2529 retry:
2530 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2531 	if (pc != NULL) {
2532 		for (field = 0; field < _NPCM; field++) {
2533 			if (pc->pc_map[field]) {
2534 				bit = ffsl(pc->pc_map[field]) - 1;
2535 				break;
2536 			}
2537 		}
2538 		if (field < _NPCM) {
2539 			pv = &pc->pc_pventry[field * 64 + bit];
2540 			pc->pc_map[field] &= ~(1ul << bit);
			/* If the chunk is now full, move it to the tail. */
2542 			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
2543 			    pc->pc_map[2] == 0) {
2544 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2545 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
2546 				    pc_list);
2547 			}
2548 			PV_STAT(atomic_add_long(&pv_entry_count, 1));
2549 			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
2550 			return (pv);
2551 		}
2552 	}
2553 	/* No free items, allocate another chunk */
2554 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2555 	    VM_ALLOC_WIRED);
2556 	if (m == NULL) {
2557 		if (lockp == NULL) {
2558 			PV_STAT(pc_chunk_tryfail++);
2559 			return (NULL);
2560 		}
2561 		m = reclaim_pv_chunk(pmap, lockp);
2562 		if (m == NULL)
2563 			goto retry;
2564 	}
2565 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2566 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2567 	dump_add_page(m->phys_addr);
2568 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2569 	pc->pc_pmap = pmap;
2570 	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
2571 	pc->pc_map[1] = PC_FREE1;
2572 	pc->pc_map[2] = PC_FREE2;
2573 	mtx_lock(&pv_chunks_mutex);
2574 	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2575 	mtx_unlock(&pv_chunks_mutex);
2576 	pv = &pc->pc_pventry[0];
2577 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2578 	PV_STAT(atomic_add_long(&pv_entry_count, 1));
2579 	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
2580 	return (pv);
2581 }
2582 
2583 /*
2584  * Ensure that the number of spare PV entries in the specified pmap meets or
2585  * exceeds the given count, "needed".
2586  *
2587  * The given PV list lock may be released.
2588  */
2589 static void
2590 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
2591 {
2592 	struct pch new_tail;
2593 	struct pv_chunk *pc;
2594 	vm_page_t m;
2595 	int avail, free;
2596 	bool reclaimed;
2597 
2598 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2599 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
2600 
2601 	/*
2602 	 * Newly allocated PV chunks must be stored in a private list until
2603 	 * the required number of PV chunks have been allocated.  Otherwise,
2604 	 * reclaim_pv_chunk() could recycle one of these chunks.  In
2605 	 * contrast, these chunks must be added to the pmap upon allocation.
2606 	 */
2607 	TAILQ_INIT(&new_tail);
2608 retry:
2609 	avail = 0;
2610 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
2611 		bit_count((bitstr_t *)pc->pc_map, 0,
2612 		    sizeof(pc->pc_map) * NBBY, &free);
2613 		if (free == 0)
2614 			break;
2615 		avail += free;
2616 		if (avail >= needed)
2617 			break;
2618 	}
2619 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
2620 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2621 		    VM_ALLOC_WIRED);
2622 		if (m == NULL) {
2623 			m = reclaim_pv_chunk(pmap, lockp);
2624 			if (m == NULL)
2625 				goto retry;
2626 			reclaimed = true;
2627 		}
2628 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2629 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2630 		dump_add_page(m->phys_addr);
2631 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2632 		pc->pc_pmap = pmap;
2633 		pc->pc_map[0] = PC_FREE0;
2634 		pc->pc_map[1] = PC_FREE1;
2635 		pc->pc_map[2] = PC_FREE2;
2636 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2637 		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2638 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2639 
2640 		/*
2641 		 * The reclaim might have freed a chunk from the current pmap.
2642 		 * If that chunk contained available entries, we need to
2643 		 * re-count the number of available entries.
2644 		 */
2645 		if (reclaimed)
2646 			goto retry;
2647 	}
2648 	if (!TAILQ_EMPTY(&new_tail)) {
2649 		mtx_lock(&pv_chunks_mutex);
2650 		TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2651 		mtx_unlock(&pv_chunks_mutex);
2652 	}
2653 }
2654 
2655 /*
2656  * First find and then remove the pv entry for the specified pmap and virtual
2657  * address from the specified pv list.  Returns the pv entry if found and NULL
2658  * otherwise.  This operation can be performed on pv lists for either 4KB or
2659  * 2MB page mappings.
2660  */
2661 static __inline pv_entry_t
2662 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2663 {
2664 	pv_entry_t pv;
2665 
2666 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2667 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2668 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2669 			pvh->pv_gen++;
2670 			break;
2671 		}
2672 	}
2673 	return (pv);
2674 }
2675 
2676 /*
2677  * After demotion from a 2MB page mapping to 512 4KB page mappings,
2678  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2679  * entries for each of the 4KB page mappings.
2680  */
2681 static void
2682 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2683     struct rwlock **lockp)
2684 {
2685 	struct md_page *pvh;
2686 	struct pv_chunk *pc;
2687 	pv_entry_t pv;
2688 	vm_offset_t va_last;
2689 	vm_page_t m;
2690 	int bit, field;
2691 
2692 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2693 	KASSERT((va & L2_OFFSET) == 0,
2694 	    ("pmap_pv_demote_l2: va is not 2mpage aligned"));
2695 	KASSERT((pa & L2_OFFSET) == 0,
2696 	    ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2697 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2698 
2699 	/*
2700 	 * Transfer the 2mpage's pv entry for this mapping to the first
2701 	 * page's pv list.  Once this transfer begins, the pv list lock
2702 	 * must not be released until the last pv entry is reinstantiated.
2703 	 */
2704 	pvh = pa_to_pvh(pa);
2705 	pv = pmap_pvh_remove(pvh, pmap, va);
2706 	KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2707 	m = PHYS_TO_VM_PAGE(pa);
2708 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2709 	m->md.pv_gen++;
2710 	/* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2711 	PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2712 	va_last = va + L2_SIZE - PAGE_SIZE;
2713 	for (;;) {
2714 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2715 		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2716 		    pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2717 		for (field = 0; field < _NPCM; field++) {
2718 			while (pc->pc_map[field]) {
2719 				bit = ffsl(pc->pc_map[field]) - 1;
2720 				pc->pc_map[field] &= ~(1ul << bit);
2721 				pv = &pc->pc_pventry[field * 64 + bit];
2722 				va += PAGE_SIZE;
2723 				pv->pv_va = va;
2724 				m++;
2725 				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2726 			    ("pmap_pv_demote_l2: page %p is not managed", m));
2727 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2728 				m->md.pv_gen++;
2729 				if (va == va_last)
2730 					goto out;
2731 			}
2732 		}
2733 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2734 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2735 	}
2736 out:
2737 	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2738 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2739 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2740 	}
2741 	PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2742 	PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2743 }
2744 
2745 /*
2746  * First find and then destroy the pv entry for the specified pmap and virtual
2747  * address.  This operation can be performed on pv lists for either 4KB or 2MB
2748  * page mappings.
2749  */
2750 static void
2751 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2752 {
2753 	pv_entry_t pv;
2754 
2755 	pv = pmap_pvh_remove(pvh, pmap, va);
2756 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2757 	free_pv_entry(pmap, pv);
2758 }
2759 
2760 /*
2761  * Conditionally create the PV entry for a 4KB page mapping if the required
2762  * memory can be allocated without resorting to reclamation.
2763  */
2764 static boolean_t
2765 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2766     struct rwlock **lockp)
2767 {
2768 	pv_entry_t pv;
2769 
2770 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2771 	/* Pass NULL instead of the lock pointer to disable reclamation. */
2772 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2773 		pv->pv_va = va;
2774 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2775 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2776 		m->md.pv_gen++;
2777 		return (TRUE);
2778 	} else
2779 		return (FALSE);
2780 }
2781 
2782 /*
2783  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
2784  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
2785  * false if the PV entry cannot be allocated without resorting to reclamation.
2786  */
2787 static bool
2788 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
2789     struct rwlock **lockp)
2790 {
2791 	struct md_page *pvh;
2792 	pv_entry_t pv;
2793 	vm_paddr_t pa;
2794 
2795 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2796 	/* Pass NULL instead of the lock pointer to disable reclamation. */
2797 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
2798 	    NULL : lockp)) == NULL)
2799 		return (false);
2800 	pv->pv_va = va;
2801 	pa = l2e & ~ATTR_MASK;
2802 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2803 	pvh = pa_to_pvh(pa);
2804 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2805 	pvh->pv_gen++;
2806 	return (true);
2807 }
2808 
2809 static void
2810 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
2811 {
2812 	pt_entry_t newl2, oldl2;
2813 	vm_page_t ml3;
2814 	vm_paddr_t ml3pa;
2815 
2816 	KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
2817 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
2818 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2819 
2820 	ml3 = pmap_remove_pt_page(pmap, va);
2821 	if (ml3 == NULL)
2822 		panic("pmap_remove_kernel_l2: Missing pt page");
2823 
2824 	ml3pa = VM_PAGE_TO_PHYS(ml3);
2825 	newl2 = ml3pa | L2_TABLE;
2826 
2827 	/*
2828 	 * If this page table page was unmapped by a promotion, then it
2829 	 * contains valid mappings.  Zero it to invalidate those mappings.
2830 	 */
2831 	if (ml3->valid != 0)
2832 		pagezero((void *)PHYS_TO_DMAP(ml3pa));
2833 
2834 	/*
2835 	 * Demote the mapping.  The caller must have already invalidated the
2836 	 * mapping (i.e., the "break" in break-before-make).
2837 	 */
2838 	oldl2 = pmap_load_store(l2, newl2);
2839 	KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
2840 	    __func__, l2, oldl2));
2841 }
2842 
2843 /*
 * pmap_remove_l2: Unmap a level 2 superpage and release its resources.
2845  */
2846 static int
2847 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2848     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
2849 {
2850 	struct md_page *pvh;
2851 	pt_entry_t old_l2;
2852 	vm_page_t m, ml3, mt;
2853 
2854 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2855 	KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
2856 	old_l2 = pmap_load_clear(l2);
2857 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2858 	    ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
2859 
2860 	/*
2861 	 * Since a promotion must break the 4KB page mappings before making
2862 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
2863 	 */
2864 	pmap_invalidate_page(pmap, sva);
2865 
2866 	if (old_l2 & ATTR_SW_WIRED)
2867 		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2868 	pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2869 	if (old_l2 & ATTR_SW_MANAGED) {
2870 		m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2871 		pvh = page_to_pvh(m);
2872 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
2873 		pmap_pvh_free(pvh, pmap, sva);
2874 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) {
2875 			if (pmap_pte_dirty(pmap, old_l2))
2876 				vm_page_dirty(mt);
2877 			if (old_l2 & ATTR_AF)
2878 				vm_page_aflag_set(mt, PGA_REFERENCED);
2879 			if (TAILQ_EMPTY(&mt->md.pv_list) &&
2880 			    TAILQ_EMPTY(&pvh->pv_list))
2881 				vm_page_aflag_clear(mt, PGA_WRITEABLE);
2882 		}
2883 	}
2884 	if (pmap == kernel_pmap) {
2885 		pmap_remove_kernel_l2(pmap, l2, sva);
2886 	} else {
2887 		ml3 = pmap_remove_pt_page(pmap, sva);
2888 		if (ml3 != NULL) {
2889 			KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
2890 			    ("pmap_remove_l2: l3 page not promoted"));
2891 			pmap_resident_count_dec(pmap, 1);
2892 			KASSERT(ml3->ref_count == NL3PG,
2893 			    ("pmap_remove_l2: l3 page ref count error"));
2894 			ml3->ref_count = 0;
2895 			pmap_add_delayed_free_list(ml3, free, FALSE);
2896 		}
2897 	}
2898 	return (pmap_unuse_pt(pmap, sva, l1e, free));
2899 }
2900 
2901 /*
 * pmap_remove_l3: Unmap a single 4KB page from a pmap.
2903  */
2904 static int
2905 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2906     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2907 {
2908 	struct md_page *pvh;
2909 	pt_entry_t old_l3;
2910 	vm_page_t m;
2911 
2912 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2913 	old_l3 = pmap_load_clear(l3);
2914 	pmap_invalidate_page(pmap, va);
2915 	if (old_l3 & ATTR_SW_WIRED)
2916 		pmap->pm_stats.wired_count -= 1;
2917 	pmap_resident_count_dec(pmap, 1);
2918 	if (old_l3 & ATTR_SW_MANAGED) {
2919 		m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2920 		if (pmap_pte_dirty(pmap, old_l3))
2921 			vm_page_dirty(m);
2922 		if (old_l3 & ATTR_AF)
2923 			vm_page_aflag_set(m, PGA_REFERENCED);
2924 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2925 		pmap_pvh_free(&m->md, pmap, va);
2926 		if (TAILQ_EMPTY(&m->md.pv_list) &&
2927 		    (m->flags & PG_FICTITIOUS) == 0) {
2928 			pvh = page_to_pvh(m);
2929 			if (TAILQ_EMPTY(&pvh->pv_list))
2930 				vm_page_aflag_clear(m, PGA_WRITEABLE);
2931 		}
2932 	}
2933 	return (pmap_unuse_pt(pmap, va, l2e, free));
2934 }
2935 
2936 /*
2937  * Remove the specified range of addresses from the L3 page table that is
2938  * identified by the given L2 entry.
2939  */
2940 static void
2941 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
2942     vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
2943 {
2944 	struct md_page *pvh;
2945 	struct rwlock *new_lock;
2946 	pt_entry_t *l3, old_l3;
2947 	vm_offset_t va;
2948 	vm_page_t l3pg, m;
2949 
2950 	KASSERT(ADDR_IS_CANONICAL(sva),
2951 	    ("%s: Start address not in canonical form: %lx", __func__, sva));
2952 	KASSERT(ADDR_IS_CANONICAL(eva) || eva == VM_MAX_USER_ADDRESS,
2953 	    ("%s: End address not in canonical form: %lx", __func__, eva));
2954 
2955 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2956 	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
2957 	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
2958 	l3pg = !ADDR_IS_KERNEL(sva) ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) : NULL;
2959 	va = eva;
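	/*
	 * "va" tracks the start of the run of removed mappings whose TLB
	 * entries have not yet been invalidated; va == eva means that no
	 * invalidation is pending.
	 */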
2960 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
2961 		if (!pmap_l3_valid(pmap_load(l3))) {
2962 			if (va != eva) {
2963 				pmap_invalidate_range(pmap, va, sva);
2964 				va = eva;
2965 			}
2966 			continue;
2967 		}
2968 		old_l3 = pmap_load_clear(l3);
2969 		if ((old_l3 & ATTR_SW_WIRED) != 0)
2970 			pmap->pm_stats.wired_count--;
2971 		pmap_resident_count_dec(pmap, 1);
2972 		if ((old_l3 & ATTR_SW_MANAGED) != 0) {
2973 			m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2974 			if (pmap_pte_dirty(pmap, old_l3))
2975 				vm_page_dirty(m);
2976 			if ((old_l3 & ATTR_AF) != 0)
2977 				vm_page_aflag_set(m, PGA_REFERENCED);
2978 			new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
2979 			if (new_lock != *lockp) {
2980 				if (*lockp != NULL) {
2981 					/*
2982 					 * Pending TLB invalidations must be
2983 					 * performed before the PV list lock is
2984 					 * released.  Otherwise, a concurrent
2985 					 * pmap_remove_all() on a physical page
2986 					 * could return while a stale TLB entry
2987 					 * still provides access to that page.
2988 					 */
2989 					if (va != eva) {
2990 						pmap_invalidate_range(pmap, va,
2991 						    sva);
2992 						va = eva;
2993 					}
2994 					rw_wunlock(*lockp);
2995 				}
2996 				*lockp = new_lock;
2997 				rw_wlock(*lockp);
2998 			}
2999 			pmap_pvh_free(&m->md, pmap, sva);
3000 			if (TAILQ_EMPTY(&m->md.pv_list) &&
3001 			    (m->flags & PG_FICTITIOUS) == 0) {
3002 				pvh = page_to_pvh(m);
3003 				if (TAILQ_EMPTY(&pvh->pv_list))
3004 					vm_page_aflag_clear(m, PGA_WRITEABLE);
3005 			}
3006 		}
3007 		if (va == eva)
3008 			va = sva;
3009 		if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) {
3010 			sva += L3_SIZE;
3011 			break;
3012 		}
3013 	}
3014 	if (va != eva)
3015 		pmap_invalidate_range(pmap, va, sva);
3016 }
3017 
3018 /*
3019  *	Remove the given range of addresses from the specified map.
3020  *
3021  *	It is assumed that the start and end are properly
3022  *	rounded to the page size.
3023  */
3024 void
3025 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3026 {
3027 	struct rwlock *lock;
3028 	vm_offset_t va_next;
3029 	pd_entry_t *l0, *l1, *l2;
3030 	pt_entry_t l3_paddr;
3031 	struct spglist free;
3032 
3033 	/*
3034 	 * Perform an unsynchronized read.  This is, however, safe.
3035 	 */
3036 	if (pmap->pm_stats.resident_count == 0)
3037 		return;
3038 
3039 	SLIST_INIT(&free);
3040 
3041 	PMAP_LOCK(pmap);
3042 
3043 	lock = NULL;
3044 	for (; sva < eva; sva = va_next) {
3045 		if (pmap->pm_stats.resident_count == 0)
3046 			break;
3047 
3048 		l0 = pmap_l0(pmap, sva);
3049 		if (pmap_load(l0) == 0) {
3050 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3051 			if (va_next < sva)
3052 				va_next = eva;
3053 			continue;
3054 		}
3055 
3056 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3057 		if (va_next < sva)
3058 			va_next = eva;
3059 		l1 = pmap_l0_to_l1(l0, sva);
3060 		if (pmap_load(l1) == 0)
3061 			continue;
3062 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
3063 			KASSERT(va_next <= eva,
3064 			    ("partial update of non-transparent 1G page "
3065 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
3066 			    pmap_load(l1), sva, eva, va_next));
3067 			MPASS(pmap != kernel_pmap);
3068 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
3069 			pmap_clear(l1);
3070 			pmap_invalidate_page(pmap, sva);
3071 			pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE);
3072 			pmap_unuse_pt(pmap, sva, pmap_load(l0), &free);
3073 			continue;
3074 		}
3075 
3076 		/*
3077 		 * Calculate index for next page table.
3078 		 */
3079 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3080 		if (va_next < sva)
3081 			va_next = eva;
3082 
3083 		l2 = pmap_l1_to_l2(l1, sva);
3084 		if (l2 == NULL)
3085 			continue;
3086 
3087 		l3_paddr = pmap_load(l2);
3088 
3089 		if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
3090 			if (sva + L2_SIZE == va_next && eva >= va_next) {
3091 				pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
3092 				    &free, &lock);
3093 				continue;
3094 			} else if (pmap_demote_l2_locked(pmap, l2, sva,
3095 			    &lock) == NULL)
3096 				continue;
3097 			l3_paddr = pmap_load(l2);
3098 		}
3099 
3100 		/*
3101 		 * Weed out invalid mappings.
3102 		 */
3103 		if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
3104 			continue;
3105 
3106 		/*
3107 		 * Limit our scan to either the end of the va represented
3108 		 * by the current page table page, or to the end of the
3109 		 * range being removed.
3110 		 */
3111 		if (va_next > eva)
3112 			va_next = eva;
3113 
3114 		pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
3115 		    &lock);
3116 	}
3117 	if (lock != NULL)
3118 		rw_wunlock(lock);
3119 	PMAP_UNLOCK(pmap);
3120 	vm_page_free_pages_toq(&free, true);
3121 }
3122 
3123 /*
3124  *	Routine:	pmap_remove_all
3125  *	Function:
3126  *		Removes this physical page from
3127  *		all physical maps in which it resides.
3128  *		Reflects back modify bits to the pager.
3129  *
3130  *	Notes:
3131  *		Original versions of this routine were very
3132  *		inefficient because they iteratively called
3133  *		pmap_remove (slow...)
3134  */
3135 
3136 void
3137 pmap_remove_all(vm_page_t m)
3138 {
3139 	struct md_page *pvh;
3140 	pv_entry_t pv;
3141 	pmap_t pmap;
3142 	struct rwlock *lock;
3143 	pd_entry_t *pde, tpde;
3144 	pt_entry_t *pte, tpte;
3145 	vm_offset_t va;
3146 	struct spglist free;
3147 	int lvl, pvh_gen, md_gen;
3148 
3149 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3150 	    ("pmap_remove_all: page %p is not managed", m));
3151 	SLIST_INIT(&free);
3152 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3153 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
3154 	rw_wlock(lock);
3155 retry:
3156 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
3157 		pmap = PV_PMAP(pv);
3158 		if (!PMAP_TRYLOCK(pmap)) {
3159 			pvh_gen = pvh->pv_gen;
3160 			rw_wunlock(lock);
3161 			PMAP_LOCK(pmap);
3162 			rw_wlock(lock);
3163 			if (pvh_gen != pvh->pv_gen) {
3164 				PMAP_UNLOCK(pmap);
3165 				goto retry;
3166 			}
3167 		}
3168 		va = pv->pv_va;
3169 		pte = pmap_pte(pmap, va, &lvl);
3170 		KASSERT(pte != NULL,
3171 		    ("pmap_remove_all: no page table entry found"));
3172 		KASSERT(lvl == 2,
3173 		    ("pmap_remove_all: invalid pte level %d", lvl));
3174 		pmap_demote_l2_locked(pmap, pte, va, &lock);
3175 		PMAP_UNLOCK(pmap);
3176 	}
3177 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3178 		pmap = PV_PMAP(pv);
3179 		PMAP_ASSERT_STAGE1(pmap);
3180 		if (!PMAP_TRYLOCK(pmap)) {
3181 			pvh_gen = pvh->pv_gen;
3182 			md_gen = m->md.pv_gen;
3183 			rw_wunlock(lock);
3184 			PMAP_LOCK(pmap);
3185 			rw_wlock(lock);
3186 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3187 				PMAP_UNLOCK(pmap);
3188 				goto retry;
3189 			}
3190 		}
3191 		pmap_resident_count_dec(pmap, 1);
3192 
3193 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
3194 		KASSERT(pde != NULL,
3195 		    ("pmap_remove_all: no page directory entry found"));
3196 		KASSERT(lvl == 2,
3197 		    ("pmap_remove_all: invalid pde level %d", lvl));
3198 		tpde = pmap_load(pde);
3199 
3200 		pte = pmap_l2_to_l3(pde, pv->pv_va);
3201 		tpte = pmap_load_clear(pte);
3202 		if (tpte & ATTR_SW_WIRED)
3203 			pmap->pm_stats.wired_count--;
3204 		if ((tpte & ATTR_AF) != 0) {
3205 			pmap_invalidate_page(pmap, pv->pv_va);
3206 			vm_page_aflag_set(m, PGA_REFERENCED);
3207 		}
3208 
3209 		/*
3210 		 * Update the vm_page_t clean and reference bits.
3211 		 */
3212 		if (pmap_pte_dirty(pmap, tpte))
3213 			vm_page_dirty(m);
3214 		pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
3215 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3216 		m->md.pv_gen++;
3217 		free_pv_entry(pmap, pv);
3218 		PMAP_UNLOCK(pmap);
3219 	}
3220 	vm_page_aflag_clear(m, PGA_WRITEABLE);
3221 	rw_wunlock(lock);
3222 	vm_page_free_pages_toq(&free, true);
3223 }
3224 
3225 /*
 * pmap_protect_l2: Apply the given protection to a 2MB page mapping.
3227  */
3228 static void
3229 pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
3230     pt_entry_t nbits)
3231 {
3232 	pd_entry_t old_l2;
3233 	vm_page_t m, mt;
3234 
3235 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3236 	PMAP_ASSERT_STAGE1(pmap);
3237 	KASSERT((sva & L2_OFFSET) == 0,
3238 	    ("pmap_protect_l2: sva is not 2mpage aligned"));
3239 	old_l2 = pmap_load(l2);
3240 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
3241 	    ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
3242 
3243 	/*
3244 	 * Return if the L2 entry already has the desired access restrictions
3245 	 * in place.
3246 	 */
3247 	if ((old_l2 & mask) == nbits)
3248 		return;
3249 
3250 	while (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
3251 		cpu_spinwait();
3252 
3253 	/*
3254 	 * When a dirty read/write superpage mapping is write protected,
3255 	 * update the dirty field of each of the superpage's constituent 4KB
3256 	 * pages.
3257 	 */
3258 	if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
3259 	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
3260 	    pmap_pte_dirty(pmap, old_l2)) {
3261 		m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
3262 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3263 			vm_page_dirty(mt);
3264 	}
3265 
3266 	/*
3267 	 * Since a promotion must break the 4KB page mappings before making
3268 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
3269 	 */
3270 	pmap_invalidate_page(pmap, sva);
3271 }
3272 
3273 /*
3274  *	Set the physical protection on the
3275  *	specified range of this map as requested.
3276  */
3277 void
3278 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3279 {
3280 	vm_offset_t va, va_next;
3281 	pd_entry_t *l0, *l1, *l2;
3282 	pt_entry_t *l3p, l3, mask, nbits;
3283 
3284 	PMAP_ASSERT_STAGE1(pmap);
3285 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
3286 	if (prot == VM_PROT_NONE) {
3287 		pmap_remove(pmap, sva, eva);
3288 		return;
3289 	}
3290 
3291 	mask = nbits = 0;
3292 	if ((prot & VM_PROT_WRITE) == 0) {
3293 		mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
3294 		nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
3295 	}
3296 	if ((prot & VM_PROT_EXECUTE) == 0) {
3297 		mask |= ATTR_S1_XN;
3298 		nbits |= ATTR_S1_XN;
3299 	}
3300 	if (mask == 0)
3301 		return;
3302 
3303 	PMAP_LOCK(pmap);
3304 	for (; sva < eva; sva = va_next) {
3305 		l0 = pmap_l0(pmap, sva);
3306 		if (pmap_load(l0) == 0) {
3307 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3308 			if (va_next < sva)
3309 				va_next = eva;
3310 			continue;
3311 		}
3312 
3313 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3314 		if (va_next < sva)
3315 			va_next = eva;
3316 		l1 = pmap_l0_to_l1(l0, sva);
3317 		if (pmap_load(l1) == 0)
3318 			continue;
3319 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
3320 			KASSERT(va_next <= eva,
3321 			    ("partial update of non-transparent 1G page "
3322 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
3323 			    pmap_load(l1), sva, eva, va_next));
3324 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
3325 			if ((pmap_load(l1) & mask) != nbits) {
3326 				pmap_store(l1, (pmap_load(l1) & ~mask) | nbits);
3327 				pmap_invalidate_page(pmap, sva);
3328 			}
3329 			continue;
3330 		}
3331 
3332 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3333 		if (va_next < sva)
3334 			va_next = eva;
3335 
3336 		l2 = pmap_l1_to_l2(l1, sva);
3337 		if (pmap_load(l2) == 0)
3338 			continue;
3339 
3340 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3341 			if (sva + L2_SIZE == va_next && eva >= va_next) {
3342 				pmap_protect_l2(pmap, l2, sva, mask, nbits);
3343 				continue;
3344 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
3345 				continue;
3346 		}
3347 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3348 		    ("pmap_protect: Invalid L2 entry after demotion"));
3349 
3350 		if (va_next > eva)
3351 			va_next = eva;
3352 
3353 		va = va_next;
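		/*
		 * As in pmap_remove_l3_range(), "va" tracks the start of the
		 * run of updated mappings that still needs a TLB
		 * invalidation; va == va_next means that none is pending.
		 */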
3354 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
3355 		    sva += L3_SIZE) {
3356 			l3 = pmap_load(l3p);
3357 
3358 			/*
3359 			 * Go to the next L3 entry if the current one is
3360 			 * invalid or already has the desired access
3361 			 * restrictions in place.  (The latter case occurs
3362 			 * frequently.  For example, in a "buildworld"
3363 			 * workload, almost 1 out of 4 L3 entries already
3364 			 * have the desired restrictions.)
3365 			 */
3366 			if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
3367 				if (va != va_next) {
3368 					pmap_invalidate_range(pmap, va, sva);
3369 					va = va_next;
3370 				}
3371 				continue;
3372 			}
3373 
3374 			while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) |
3375 			    nbits))
3376 				cpu_spinwait();
3377 
3378 			/*
3379 			 * When a dirty read/write mapping is write protected,
3380 			 * update the page's dirty field.
3381 			 */
3382 			if ((l3 & ATTR_SW_MANAGED) != 0 &&
3383 			    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
3384 			    pmap_pte_dirty(pmap, l3))
3385 				vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
3386 
3387 			if (va == va_next)
3388 				va = sva;
3389 		}
3390 		if (va != va_next)
3391 			pmap_invalidate_range(pmap, va, sva);
3392 	}
3393 	PMAP_UNLOCK(pmap);
3394 }
3395 
3396 /*
3397  * Inserts the specified page table page into the specified pmap's collection
3398  * of idle page table pages.  Each of a pmap's page table pages is responsible
3399  * for mapping a distinct range of virtual addresses.  The pmap's collection is
3400  * ordered by this virtual address range.
3401  *
3402  * If "promoted" is false, then the page table page "mpte" must be zero filled.
3403  */
3404 static __inline int
3405 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
3406 {
3407 
3408 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3409 	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
3410 	return (vm_radix_insert(&pmap->pm_root, mpte));
3411 }
3412 
3413 /*
3414  * Removes the page table page mapping the specified virtual address from the
3415  * specified pmap's collection of idle page table pages, and returns it.
3416  * Otherwise, returns NULL if there is no page table page corresponding to the
3417  * specified virtual address.
3418  */
3419 static __inline vm_page_t
3420 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
3421 {
3422 
3423 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3424 	return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
3425 }
3426 
3427 /*
3428  * Performs a break-before-make update of a pmap entry. This is needed when
3429  * either promoting or demoting pages to ensure the TLB doesn't get into an
3430  * inconsistent state.
3431  */
3432 static void
3433 pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
3434     vm_offset_t va, vm_size_t size)
3435 {
3436 	register_t intr;
3437 
3438 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3439 
3440 	/*
3441 	 * Ensure we don't get switched out with the page table in an
3442 	 * inconsistent state. We also need to ensure no interrupts fire
3443 	 * as they may make use of an address we are about to invalidate.
3444 	 */
3445 	intr = intr_disable();
3446 
3447 	/*
3448 	 * Clear the old mapping's valid bit, but leave the rest of the entry
3449 	 * unchanged, so that a lockless, concurrent pmap_kextract() can still
3450 	 * lookup the physical address.
3451 	 */
3452 	pmap_clear_bits(pte, ATTR_DESCR_VALID);
3453 	pmap_invalidate_range(pmap, va, va + size);
3454 
3455 	/* Create the new mapping */
3456 	pmap_store(pte, newpte);
3457 	dsb(ishst);
3458 
3459 	intr_restore(intr);
3460 }
3461 
3462 #if VM_NRESERVLEVEL > 0
3463 /*
3464  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
3465  * replace the many pv entries for the 4KB page mappings by a single pv entry
3466  * for the 2MB page mapping.
3467  */
3468 static void
3469 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3470     struct rwlock **lockp)
3471 {
3472 	struct md_page *pvh;
3473 	pv_entry_t pv;
3474 	vm_offset_t va_last;
3475 	vm_page_t m;
3476 
3477 	KASSERT((pa & L2_OFFSET) == 0,
3478 	    ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
3479 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3480 
3481 	/*
3482 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
3483 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
3484 	 * a transfer avoids the possibility that get_pv_entry() calls
3485 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
3486 	 * mappings that is being promoted.
3487 	 */
3488 	m = PHYS_TO_VM_PAGE(pa);
3489 	va = va & ~L2_OFFSET;
3490 	pv = pmap_pvh_remove(&m->md, pmap, va);
3491 	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
3492 	pvh = page_to_pvh(m);
3493 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3494 	pvh->pv_gen++;
	/* Free the remaining Ln_ENTRIES - 1 pv entries. */
3496 	va_last = va + L2_SIZE - PAGE_SIZE;
3497 	do {
3498 		m++;
3499 		va += PAGE_SIZE;
3500 		pmap_pvh_free(&m->md, pmap, va);
3501 	} while (va < va_last);
3502 }
3503 
3504 /*
3505  * Tries to promote the 512, contiguous 4KB page mappings that are within a
3506  * single level 2 table entry to a single 2MB page mapping.  For promotion
3507  * to occur, two conditions must be met: (1) the 4KB page mappings must map
3508  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
3509  * identical characteristics.
3510  */
3511 static void
3512 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
3513     struct rwlock **lockp)
3514 {
3515 	pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
3516 	vm_page_t mpte;
3517 	vm_offset_t sva;
3518 
3519 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3520 	PMAP_ASSERT_STAGE1(pmap);
3521 
3522 	sva = va & ~L2_OFFSET;
3523 	firstl3 = pmap_l2_to_l3(l2, sva);
3524 	newl2 = pmap_load(firstl3);
3525 
3526 	if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) {
3527 		atomic_add_long(&pmap_l2_p_failures, 1);
3528 		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3529 		    " in pmap %p", va, pmap);
3530 		return;
3531 	}
3532 
3533 setl2:
3534 	if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
3535 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
3536 		/*
3537 		 * When the mapping is clean, i.e., ATTR_S1_AP_RO is set,
3538 		 * ATTR_SW_DBM can be cleared without a TLB invalidation.
3539 		 */
3540 		if (!atomic_fcmpset_64(firstl3, &newl2, newl2 & ~ATTR_SW_DBM))
3541 			goto setl2;
3542 		newl2 &= ~ATTR_SW_DBM;
3543 	}
3544 
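	/*
	 * Scan the remaining L3 entries from last to first and compare each
	 * against "pa", which starts at the first entry's value plus
	 * L2_SIZE - PAGE_SIZE and decreases by PAGE_SIZE per entry.  Since
	 * an L3 entry combines the physical address with the attribute
	 * bits, a single comparison per entry checks both physical
	 * contiguity and identical attributes.
	 */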
3545 	pa = newl2 + L2_SIZE - PAGE_SIZE;
3546 	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
3547 		oldl3 = pmap_load(l3);
3548 setl3:
3549 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
3550 		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
3551 			/*
3552 			 * When the mapping is clean, i.e., ATTR_S1_AP_RO is
3553 			 * set, ATTR_SW_DBM can be cleared without a TLB
3554 			 * invalidation.
3555 			 */
3556 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
3557 			    ~ATTR_SW_DBM))
3558 				goto setl3;
3559 			oldl3 &= ~ATTR_SW_DBM;
3560 		}
3561 		if (oldl3 != pa) {
3562 			atomic_add_long(&pmap_l2_p_failures, 1);
3563 			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3564 			    " in pmap %p", va, pmap);
3565 			return;
3566 		}
3567 		pa -= PAGE_SIZE;
3568 	}
3569 
3570 	/*
3571 	 * Save the page table page in its current state until the L2
3572 	 * mapping the superpage is demoted by pmap_demote_l2() or
3573 	 * destroyed by pmap_remove_l3().
3574 	 */
3575 	mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3576 	KASSERT(mpte >= vm_page_array &&
3577 	    mpte < &vm_page_array[vm_page_array_size],
3578 	    ("pmap_promote_l2: page table page is out of range"));
3579 	KASSERT(mpte->pindex == pmap_l2_pindex(va),
3580 	    ("pmap_promote_l2: page table page's pindex is wrong"));
3581 	if (pmap_insert_pt_page(pmap, mpte, true)) {
3582 		atomic_add_long(&pmap_l2_p_failures, 1);
3583 		CTR2(KTR_PMAP,
3584 		    "pmap_promote_l2: failure for va %#lx in pmap %p", va,
3585 		    pmap);
3586 		return;
3587 	}
3588 
3589 	if ((newl2 & ATTR_SW_MANAGED) != 0)
3590 		pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
3591 
3592 	newl2 &= ~ATTR_DESCR_MASK;
3593 	newl2 |= L2_BLOCK;
3594 
3595 	pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
3596 
3597 	atomic_add_long(&pmap_l2_promotions, 1);
3598 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
3599 	    pmap);
3600 }
3601 #endif /* VM_NRESERVLEVEL > 0 */
3602 
3603 static int
3604 pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
3605     int psind)
3606 {
3607 	pd_entry_t *l0p, *l1p, *l2p, origpte;
3608 	vm_page_t mp;
3609 
3610 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3611 	KASSERT(psind > 0 && psind < MAXPAGESIZES,
3612 	    ("psind %d unexpected", psind));
3613 	KASSERT(((newpte & ~ATTR_MASK) & (pagesizes[psind] - 1)) == 0,
3614 	    ("unaligned phys address %#lx newpte %#lx psind %d",
3615 	    (newpte & ~ATTR_MASK), newpte, psind));
3616 
3617 restart:
3618 	if (psind == 2) {
3619 		l0p = pmap_l0(pmap, va);
3620 		if ((pmap_load(l0p) & ATTR_DESCR_VALID) == 0) {
3621 			mp = _pmap_alloc_l3(pmap, pmap_l0_pindex(va), NULL);
3622 			if (mp == NULL) {
3623 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
3624 					return (KERN_RESOURCE_SHORTAGE);
3625 				PMAP_UNLOCK(pmap);
3626 				vm_wait(NULL);
3627 				PMAP_LOCK(pmap);
3628 				goto restart;
3629 			}
3630 			l1p = pmap_l0_to_l1(l0p, va);
3631 			KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va));
3632 			origpte = pmap_load(l1p);
3633 		} else {
3634 			l1p = pmap_l0_to_l1(l0p, va);
3635 			KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va));
3636 			origpte = pmap_load(l1p);
3637 			if ((origpte & ATTR_DESCR_VALID) == 0) {
3638 				mp = PHYS_TO_VM_PAGE(pmap_load(l0p) &
3639 				    ~ATTR_MASK);
3640 				mp->ref_count++;
3641 			}
3642 		}
3643 		KASSERT((origpte & ATTR_DESCR_VALID) == 0 ||
3644 		    ((origpte & ATTR_DESCR_MASK) == L1_BLOCK &&
3645 		     (origpte & ~ATTR_MASK) == (newpte & ~ATTR_MASK)),
3646 		    ("va %#lx changing 1G phys page l1 %#lx newpte %#lx",
3647 		    va, origpte, newpte));
3648 		pmap_store(l1p, newpte);
3649 	} else /* (psind == 1) */ {
3650 		l2p = pmap_l2(pmap, va);
3651 		if (l2p == NULL) {
3652 			mp = _pmap_alloc_l3(pmap, pmap_l1_pindex(va), NULL);
3653 			if (mp == NULL) {
3654 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
3655 					return (KERN_RESOURCE_SHORTAGE);
3656 				PMAP_UNLOCK(pmap);
3657 				vm_wait(NULL);
3658 				PMAP_LOCK(pmap);
3659 				goto restart;
3660 			}
3661 			l2p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
3662 			l2p = &l2p[pmap_l2_index(va)];
3663 			origpte = pmap_load(l2p);
3664 		} else {
3665 			l1p = pmap_l1(pmap, va);
3666 			origpte = pmap_load(l2p);
3667 			if ((origpte & ATTR_DESCR_VALID) == 0) {
3668 				mp = PHYS_TO_VM_PAGE(pmap_load(l1p) &
3669 				    ~ATTR_MASK);
3670 				mp->ref_count++;
3671 			}
3672 		}
3673 		KASSERT((origpte & ATTR_DESCR_VALID) == 0 ||
3674 		    ((origpte & ATTR_DESCR_MASK) == L2_BLOCK &&
3675 		     (origpte & ~ATTR_MASK) == (newpte & ~ATTR_MASK)),
3676 		    ("va %#lx changing 2M phys page l2 %#lx newpte %#lx",
3677 		    va, origpte, newpte));
3678 		pmap_store(l2p, newpte);
3679 	}
3680 	dsb(ishst);
3681 
3682 	if ((origpte & ATTR_DESCR_VALID) == 0)
3683 		pmap_resident_count_inc(pmap, pagesizes[psind] / PAGE_SIZE);
3684 	if ((newpte & ATTR_SW_WIRED) != 0 && (origpte & ATTR_SW_WIRED) == 0)
3685 		pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE;
3686 	else if ((newpte & ATTR_SW_WIRED) == 0 &&
3687 	    (origpte & ATTR_SW_WIRED) != 0)
3688 		pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE;
3689 
3690 	return (KERN_SUCCESS);
3691 }
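
/*
 * Note that when a page table page must be allocated and the caller allows
 * sleeping, the pmap lock is dropped around vm_wait() and the lookup is
 * restarted from scratch, since the paging structures may have changed while
 * the lock was released.
 */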
3692 
3693 /*
3694  *	Insert the given physical page (p) at
3695  *	the specified virtual address (v) in the
3696  *	target physical map with the protection requested.
3697  *
3698  *	If specified, the page will be wired down, meaning
3699  *	that the related pte can not be reclaimed.
3700  *
3701  *	NB:  This is the only routine which MAY NOT lazy-evaluate
3702  *	or lose information.  That is, this routine must actually
3703  *	insert this page into the given map NOW.
3704  */
3705 int
3706 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3707     u_int flags, int8_t psind)
3708 {
3709 	struct rwlock *lock;
3710 	pd_entry_t *pde;
3711 	pt_entry_t new_l3, orig_l3;
3712 	pt_entry_t *l2, *l3;
3713 	pv_entry_t pv;
3714 	vm_paddr_t opa, pa;
3715 	vm_page_t mpte, om;
3716 	boolean_t nosleep;
3717 	int lvl, rv;
3718 
3719 	KASSERT(ADDR_IS_CANONICAL(va),
3720 	    ("%s: Address not in canonical form: %lx", __func__, va));
3721 
3722 	va = trunc_page(va);
3723 	if ((m->oflags & VPO_UNMANAGED) == 0)
3724 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
3725 	pa = VM_PAGE_TO_PHYS(m);
3726 	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | L3_PAGE);
3727 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
3728 	new_l3 |= pmap_pte_prot(pmap, prot);
3729 
3730 	if ((flags & PMAP_ENTER_WIRED) != 0)
3731 		new_l3 |= ATTR_SW_WIRED;
3732 	if (pmap->pm_stage == PM_STAGE1) {
3733 		if (!ADDR_IS_KERNEL(va))
3734 			new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
3735 		else
3736 			new_l3 |= ATTR_S1_UXN;
3737 		if (pmap != kernel_pmap)
3738 			new_l3 |= ATTR_S1_nG;
3739 	} else {
3740 		/*
3741 		 * Clear the access flag on executable mappings, this will be
3742 		 * set later when the page is accessed. The fault handler is
3743 		 * required to invalidate the I-cache.
3744 		 *
3745 		 * TODO: Switch to the valid flag to allow hardware management
3746 		 * of the access flag. Much of the pmap code assumes the
3747 		 * valid flag is set and fails to destroy the old page tables
3748 		 * correctly if it is clear.
3749 		 */
3750 		if (prot & VM_PROT_EXECUTE)
3751 			new_l3 &= ~ATTR_AF;
3752 	}
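
	/*
	 * For a managed mapping, ATTR_SW_DBM records that the mapping is
	 * allowed to become writeable.  When the access that triggered this
	 * call does not itself include VM_PROT_WRITE, the entry starts out
	 * without write permission, so the mapping is not considered dirty
	 * until it is actually written.
	 */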
3753 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3754 		new_l3 |= ATTR_SW_MANAGED;
3755 		if ((prot & VM_PROT_WRITE) != 0) {
3756 			new_l3 |= ATTR_SW_DBM;
3757 			if ((flags & VM_PROT_WRITE) == 0) {
3758 				if (pmap->pm_stage == PM_STAGE1)
3759 					new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
3760 				else
3761 					new_l3 &=
3762 					    ~ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
3763 			}
3764 		}
3765 	}
3766 
3767 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
3768 
3769 	lock = NULL;
3770 	PMAP_LOCK(pmap);
3771 	if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
3772 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
3773 		    ("managed largepage va %#lx flags %#x", va, flags));
3774 		new_l3 &= ~L3_PAGE;
3775 		if (psind == 2)
3776 			new_l3 |= L1_BLOCK;
3777 		else /* (psind == 1) */
3778 			new_l3 |= L2_BLOCK;
3779 		rv = pmap_enter_largepage(pmap, va, new_l3, flags, psind);
3780 		goto out;
3781 	}
3782 	if (psind == 1) {
3783 		/* Assert the required virtual and physical alignment. */
3784 		KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
3785 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
3786 		rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
3787 		    flags, m, &lock);
3788 		goto out;
3789 	}
3790 	mpte = NULL;
3791 
3792 	/*
3793 	 * In the case that a page table page is not
3794 	 * resident, we are creating it here.
3795 	 */
3796 retry:
3797 	pde = pmap_pde(pmap, va, &lvl);
3798 	if (pde != NULL && lvl == 2) {
3799 		l3 = pmap_l2_to_l3(pde, va);
3800 		if (!ADDR_IS_KERNEL(va) && mpte == NULL) {
3801 			mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3802 			mpte->ref_count++;
3803 		}
3804 		goto havel3;
3805 	} else if (pde != NULL && lvl == 1) {
3806 		l2 = pmap_l1_to_l2(pde, va);
3807 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
3808 		    (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
3809 			l3 = &l3[pmap_l3_index(va)];
3810 			if (!ADDR_IS_KERNEL(va)) {
3811 				mpte = PHYS_TO_VM_PAGE(
3812 				    pmap_load(l2) & ~ATTR_MASK);
3813 				mpte->ref_count++;
3814 			}
3815 			goto havel3;
3816 		}
3817 		/* We need to allocate an L3 table. */
3818 	}
3819 	if (!ADDR_IS_KERNEL(va)) {
3820 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
3821 
3822 		/*
3823 		 * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
3824 		 * to handle the possibility that a superpage mapping for "va"
3825 		 * was created while we slept.
3826 		 */
3827 		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
3828 		    nosleep ? NULL : &lock);
3829 		if (mpte == NULL && nosleep) {
3830 			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
3831 			rv = KERN_RESOURCE_SHORTAGE;
3832 			goto out;
3833 		}
3834 		goto retry;
3835 	} else
3836 		panic("pmap_enter: missing L3 table for kernel va %#lx", va);
3837 
3838 havel3:
3839 	orig_l3 = pmap_load(l3);
3840 	opa = orig_l3 & ~ATTR_MASK;
3841 	pv = NULL;
3842 
3843 	/*
3844 	 * Is the specified virtual address already mapped?
3845 	 */
3846 	if (pmap_l3_valid(orig_l3)) {
3847 		/*
3848 		 * Only allow adding new entries on stage 2 tables for now.
3849 		 * This simplifies cache invalidation as we may need to call
3850 		 * into EL2 to perform such actions.
3851 		 */
3852 		PMAP_ASSERT_STAGE1(pmap);
3853 		/*
3854 		 * Wiring change, just update stats. We don't worry about
3855 		 * wiring PT pages as they remain resident as long as there
3856 		 * are valid mappings in them. Hence, if a user page is wired,
3857 		 * the PT page will be also.
3858 		 */
3859 		if ((flags & PMAP_ENTER_WIRED) != 0 &&
3860 		    (orig_l3 & ATTR_SW_WIRED) == 0)
3861 			pmap->pm_stats.wired_count++;
3862 		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
3863 		    (orig_l3 & ATTR_SW_WIRED) != 0)
3864 			pmap->pm_stats.wired_count--;
3865 
3866 		/*
3867 		 * Remove the extra PT page reference.
3868 		 */
3869 		if (mpte != NULL) {
3870 			mpte->ref_count--;
3871 			KASSERT(mpte->ref_count > 0,
3872 			    ("pmap_enter: missing reference to page table page,"
3873 			     " va: 0x%lx", va));
3874 		}
3875 
3876 		/*
3877 		 * Has the physical page changed?
3878 		 */
3879 		if (opa == pa) {
3880 			/*
3881 			 * No, might be a protection or wiring change.
3882 			 */
3883 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3884 			    (new_l3 & ATTR_SW_DBM) != 0)
3885 				vm_page_aflag_set(m, PGA_WRITEABLE);
3886 			goto validate;
3887 		}
3888 
3889 		/*
3890 		 * The physical page has changed.  Temporarily invalidate
3891 		 * the mapping.
3892 		 */
3893 		orig_l3 = pmap_load_clear(l3);
3894 		KASSERT((orig_l3 & ~ATTR_MASK) == opa,
3895 		    ("pmap_enter: unexpected pa update for %#lx", va));
3896 		if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
3897 			om = PHYS_TO_VM_PAGE(opa);
3898 
3899 			/*
3900 			 * The pmap lock is sufficient to synchronize with
3901 			 * concurrent calls to pmap_page_test_mappings() and
3902 			 * pmap_ts_referenced().
3903 			 */
3904 			if (pmap_pte_dirty(pmap, orig_l3))
3905 				vm_page_dirty(om);
3906 			if ((orig_l3 & ATTR_AF) != 0) {
3907 				pmap_invalidate_page(pmap, va);
3908 				vm_page_aflag_set(om, PGA_REFERENCED);
3909 			}
3910 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3911 			pv = pmap_pvh_remove(&om->md, pmap, va);
3912 			if ((m->oflags & VPO_UNMANAGED) != 0)
3913 				free_pv_entry(pmap, pv);
3914 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3915 			    TAILQ_EMPTY(&om->md.pv_list) &&
3916 			    ((om->flags & PG_FICTITIOUS) != 0 ||
3917 			    TAILQ_EMPTY(&page_to_pvh(om)->pv_list)))
3918 				vm_page_aflag_clear(om, PGA_WRITEABLE);
3919 		} else {
3920 			KASSERT((orig_l3 & ATTR_AF) != 0,
3921 			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
3922 			pmap_invalidate_page(pmap, va);
3923 		}
3924 		orig_l3 = 0;
3925 	} else {
3926 		/*
3927 		 * Increment the counters.
3928 		 */
3929 		if ((new_l3 & ATTR_SW_WIRED) != 0)
3930 			pmap->pm_stats.wired_count++;
3931 		pmap_resident_count_inc(pmap, 1);
3932 	}
3933 	/*
3934 	 * Enter on the PV list if part of our managed memory.
3935 	 */
3936 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3937 		if (pv == NULL) {
3938 			pv = get_pv_entry(pmap, &lock);
3939 			pv->pv_va = va;
3940 		}
3941 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3942 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3943 		m->md.pv_gen++;
3944 		if ((new_l3 & ATTR_SW_DBM) != 0)
3945 			vm_page_aflag_set(m, PGA_WRITEABLE);
3946 	}
3947 
3948 validate:
3949 	if (pmap->pm_stage == PM_STAGE1) {
3950 		/*
3951 		 * Sync the icache if the mapping has execute permission and
3952 		 * the VM_MEMATTR_WRITE_BACK attribute.  Do it now, before the
3953 		 * mapping is stored and made valid for the hardware table
3954 		 * walk; if done later, others could access the page before
3955 		 * the caches are synced.  Don't do it for kernel memory,
3956 		 * which is mapped executable even if it won't hold code; the
3957 		 * only icache sync needed there is after a kernel module is
3958 		 * loaded and its relocations are processed, and that is done
3959 		 * in elf_cpu_load_file().
3960 		 */
3961 		if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3962 		    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
3963 		    (opa != pa || (orig_l3 & ATTR_S1_XN))) {
3964 			PMAP_ASSERT_STAGE1(pmap);
3965 			cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3966 		}
3967 	} else {
3968 		cpu_dcache_wb_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3969 	}
3970 
3971 	/*
3972 	 * Update the L3 entry
3973 	 */
3974 	if (pmap_l3_valid(orig_l3)) {
3975 		PMAP_ASSERT_STAGE1(pmap);
3976 		KASSERT(opa == pa, ("pmap_enter: invalid update"));
3977 		if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
3978 			/* same PA, different attributes */
3979 			orig_l3 = pmap_load_store(l3, new_l3);
3980 			pmap_invalidate_page(pmap, va);
3981 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3982 			    pmap_pte_dirty(pmap, orig_l3))
3983 				vm_page_dirty(m);
3984 		} else {
3985 			/*
3986 			 * orig_l3 == new_l3
3987 			 * This can happen if multiple threads simultaneously
3988 			 * access a not yet mapped page.  This is bad for
3989 			 * performance since it can cause a full
3990 			 * demotion-NOP-promotion cycle.
3991 			 * Other possible reasons are:
3992 			 * - the VM and pmap memory layouts have diverged
3993 			 * - a TLB flush is missing somewhere and the CPU
3994 			 *   doesn't see the actual mapping.
3995 			 */
3996 			CTR4(KTR_PMAP, "%s: already mapped page - "
3997 			    "pmap %p va %#lx pte %#lx",
3998 			    __func__, pmap, va, new_l3);
3999 		}
4000 	} else {
4001 		/* New mapping */
4002 		pmap_store(l3, new_l3);
4003 		dsb(ishst);
4004 	}
4005 
4006 #if VM_NRESERVLEVEL > 0
4007 	/*
4008 	 * Try to promote from level 3 pages to a level 2 superpage. This
4009 	 * currently only works on stage 1 pmaps as pmap_promote_l2 looks at
4010 	 * stage 1 specific fields and performs a break-before-make sequence
4011 	 * that is incorrect for a stage 2 pmap.
4012 	 */
4013 	if ((mpte == NULL || mpte->ref_count == NL3PG) &&
4014 	    pmap_ps_enabled(pmap) && pmap->pm_stage == PM_STAGE1 &&
4015 	    (m->flags & PG_FICTITIOUS) == 0 &&
4016 	    vm_reserv_level_iffullpop(m) == 0) {
4017 		pmap_promote_l2(pmap, pde, va, &lock);
4018 	}
4019 #endif
4020 
4021 	rv = KERN_SUCCESS;
4022 out:
4023 	if (lock != NULL)
4024 		rw_wunlock(lock);
4025 	PMAP_UNLOCK(pmap);
4026 	return (rv);
4027 }
4028 
4029 /*
4030  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
4031  * if successful.  Returns false if (1) a page table page cannot be allocated
4032  * without sleeping, (2) a mapping already exists at the specified virtual
4033  * address, or (3) a PV entry cannot be allocated without reclaiming another
4034  * PV entry.
4035  */
4036 static bool
4037 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4038     struct rwlock **lockp)
4039 {
4040 	pd_entry_t new_l2;
4041 
4042 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4043 	PMAP_ASSERT_STAGE1(pmap);
4044 	KASSERT(ADDR_IS_CANONICAL(va),
4045 	    ("%s: Address not in canonical form: %lx", __func__, va));
4046 
4047 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
4048 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
4049 	    L2_BLOCK);
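
	/*
	 * As with the copied superpage mappings in pmap_copy(), a managed
	 * mapping is entered without the access flag set because it is not
	 * known whether the mapping will actually be used.
	 */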
4050 	if ((m->oflags & VPO_UNMANAGED) == 0) {
4051 		new_l2 |= ATTR_SW_MANAGED;
4052 		new_l2 &= ~ATTR_AF;
4053 	}
4054 	if ((prot & VM_PROT_EXECUTE) == 0 ||
4055 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
4056 		new_l2 |= ATTR_S1_XN;
4057 	if (!ADDR_IS_KERNEL(va))
4058 		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
4059 	else
4060 		new_l2 |= ATTR_S1_UXN;
4061 	if (pmap != kernel_pmap)
4062 		new_l2 |= ATTR_S1_nG;
4063 	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
4064 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m, lockp) ==
4065 	    KERN_SUCCESS);
4066 }
4067 
4068 /*
4069  * Returns true if every page table entry in the specified page table is
4070  * zero.
4071  */
4072 static bool
4073 pmap_every_pte_zero(vm_paddr_t pa)
4074 {
4075 	pt_entry_t *pt_end, *pte;
4076 
4077 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
4078 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
4079 	for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) {
4080 		if (*pte != 0)
4081 			return (false);
4082 	}
4083 	return (true);
4084 }
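
/*
 * pmap_every_pte_zero() is used when PMAP_ENTER_NOREPLACE is specified for a
 * kernel virtual address in pmap_enter_l2(): an existing L2_TABLE entry whose
 * page table page is entirely empty may still be replaced, since doing so
 * does not destroy any valid 4KB mappings.
 */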
4085 
4086 /*
4087  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
4088  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
4089  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
4090  * a mapping already exists at the specified virtual address.  Returns
4091  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
4092  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
4093  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
4094  */
4095 static int
4096 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
4097     vm_page_t m, struct rwlock **lockp)
4098 {
4099 	struct spglist free;
4100 	pd_entry_t *l2, old_l2;
4101 	vm_page_t l2pg, mt;
4102 
4103 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4104 	KASSERT(ADDR_IS_CANONICAL(va),
4105 	    ("%s: Address not in canonical form: %lx", __func__, va));
4106 
4107 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
4108 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
4109 		CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
4110 		    va, pmap);
4111 		return (KERN_RESOURCE_SHORTAGE);
4112 	}
4113 
4114 	/*
4115 	 * If there are existing mappings, either abort or remove them.
4116 	 */
4117 	if ((old_l2 = pmap_load(l2)) != 0) {
4118 		KASSERT(l2pg == NULL || l2pg->ref_count > 1,
4119 		    ("pmap_enter_l2: l2pg's ref count is too low"));
4120 		if ((flags & PMAP_ENTER_NOREPLACE) != 0 &&
4121 		    (!ADDR_IS_KERNEL(va) ||
4122 		    (old_l2 & ATTR_DESCR_MASK) == L2_BLOCK ||
4123 		    !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
4124 			if (l2pg != NULL)
4125 				l2pg->ref_count--;
4126 			CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx"
4127 			    " in pmap %p", va, pmap);
4128 			return (KERN_FAILURE);
4129 		}
4130 		SLIST_INIT(&free);
4131 		if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
4132 			(void)pmap_remove_l2(pmap, l2, va,
4133 			    pmap_load(pmap_l1(pmap, va)), &free, lockp);
4134 		else
4135 			pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
4136 			    &free, lockp);
4137 		if (!ADDR_IS_KERNEL(va)) {
4138 			vm_page_free_pages_toq(&free, true);
4139 			KASSERT(pmap_load(l2) == 0,
4140 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
4141 		} else {
4142 			KASSERT(SLIST_EMPTY(&free),
4143 			    ("pmap_enter_l2: freed kernel page table page"));
4144 
4145 			/*
4146 			 * Both pmap_remove_l2() and pmap_remove_l3_range()
4147 			 * will leave the kernel page table page zero filled.
4148 			 * Nonetheless, the TLB could have an intermediate
4149 			 * entry for the kernel page table page.
4150 			 */
4151 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
4152 			if (pmap_insert_pt_page(pmap, mt, false))
4153 				panic("pmap_enter_l2: trie insert failed");
4154 			pmap_clear(l2);
4155 			pmap_invalidate_page(pmap, va);
4156 		}
4157 	}
4158 
4159 	if ((new_l2 & ATTR_SW_MANAGED) != 0) {
4160 		/*
4161 		 * Abort this mapping if its PV entry could not be created.
4162 		 */
4163 		if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
4164 			if (l2pg != NULL)
4165 				pmap_abort_ptp(pmap, va, l2pg);
4166 			CTR2(KTR_PMAP,
4167 			    "pmap_enter_l2: failure for va %#lx in pmap %p",
4168 			    va, pmap);
4169 			return (KERN_RESOURCE_SHORTAGE);
4170 		}
4171 		if ((new_l2 & ATTR_SW_DBM) != 0)
4172 			for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4173 				vm_page_aflag_set(mt, PGA_WRITEABLE);
4174 	}
4175 
4176 	/*
4177 	 * Increment counters.
4178 	 */
4179 	if ((new_l2 & ATTR_SW_WIRED) != 0)
4180 		pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
4181 	pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
4182 
4183 	/*
4184 	 * Conditionally sync the icache.  See pmap_enter() for details.
4185 	 */
4186 	if ((new_l2 & ATTR_S1_XN) == 0 && ((new_l2 & ~ATTR_MASK) !=
4187 	    (old_l2 & ~ATTR_MASK) || (old_l2 & ATTR_S1_XN) != 0) &&
4188 	    pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) {
4189 		cpu_icache_sync_range(PHYS_TO_DMAP(new_l2 & ~ATTR_MASK),
4190 		    L2_SIZE);
4191 	}
4192 
4193 	/*
4194 	 * Map the superpage.
4195 	 */
4196 	pmap_store(l2, new_l2);
4197 	dsb(ishst);
4198 
4199 	atomic_add_long(&pmap_l2_mappings, 1);
4200 	CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
4201 	    va, pmap);
4202 
4203 	return (KERN_SUCCESS);
4204 }
4205 
4206 /*
4207  * Maps a sequence of resident pages belonging to the same object.
4208  * The sequence begins with the given page m_start.  This page is
4209  * mapped at the given virtual address start.  Each subsequent page is
4210  * mapped at a virtual address that is offset from start by the same
4211  * amount as the page is offset from m_start within the object.  The
4212  * last page in the sequence is the page with the largest offset from
4213  * m_start that can be mapped at a virtual address less than the given
4214  * virtual address end.  Not every virtual page between start and end
4215  * is mapped; only those for which a resident page exists with the
4216  * corresponding offset from m_start are mapped.
4217  */
4218 void
4219 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4220     vm_page_t m_start, vm_prot_t prot)
4221 {
4222 	struct rwlock *lock;
4223 	vm_offset_t va;
4224 	vm_page_t m, mpte;
4225 	vm_pindex_t diff, psize;
4226 
4227 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
4228 
4229 	psize = atop(end - start);
4230 	mpte = NULL;
4231 	m = m_start;
4232 	lock = NULL;
4233 	PMAP_LOCK(pmap);
4234 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4235 		va = start + ptoa(diff);
4236 		if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
4237 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
4238 		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
4239 			m = &m[L2_SIZE / PAGE_SIZE - 1];
4240 		else
4241 			mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
4242 			    &lock);
4243 		m = TAILQ_NEXT(m, listq);
4244 	}
4245 	if (lock != NULL)
4246 		rw_wunlock(lock);
4247 	PMAP_UNLOCK(pmap);
4248 }
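
/*
 * After a successful 2MB mapping above, "m" is advanced to the last 4KB page
 * of the superpage so that the TAILQ_NEXT() step resumes with the first page
 * following it.  Otherwise, the pages are entered individually, with "mpte"
 * carrying the current page table page from one pmap_enter_quick_locked()
 * call to the next to avoid repeated lookups.
 */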
4249 
4250 /*
4251  * this code makes some *MAJOR* assumptions:
4252  * 1. Current pmap & pmap exists.
4253  * 2. Not wired.
4254  * 3. Read access.
4255  * 4. No page table pages.
4256  * but is *MUCH* faster than pmap_enter...
4257  */
4258 
4259 void
4260 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4261 {
4262 	struct rwlock *lock;
4263 
4264 	lock = NULL;
4265 	PMAP_LOCK(pmap);
4266 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
4267 	if (lock != NULL)
4268 		rw_wunlock(lock);
4269 	PMAP_UNLOCK(pmap);
4270 }
4271 
4272 static vm_page_t
4273 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4274     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
4275 {
4276 	pd_entry_t *pde;
4277 	pt_entry_t *l2, *l3, l3_val;
4278 	vm_paddr_t pa;
4279 	int lvl;
4280 
4281 	KASSERT(!VA_IS_CLEANMAP(va) ||
4282 	    (m->oflags & VPO_UNMANAGED) != 0,
4283 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
4284 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4285 	PMAP_ASSERT_STAGE1(pmap);
4286 	KASSERT(ADDR_IS_CANONICAL(va),
4287 	    ("%s: Address not in canonical form: %lx", __func__, va));
4288 
4289 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
4290 	/*
4291 	 * In the case that a page table page is not
4292 	 * resident, we are creating it here.
4293 	 */
4294 	if (!ADDR_IS_KERNEL(va)) {
4295 		vm_pindex_t l2pindex;
4296 
4297 		/*
4298 		 * Calculate pagetable page index
4299 		 */
4300 		l2pindex = pmap_l2_pindex(va);
4301 		if (mpte && (mpte->pindex == l2pindex)) {
4302 			mpte->ref_count++;
4303 		} else {
4304 			/*
4305 			 * Get the l2 entry
4306 			 */
4307 			pde = pmap_pde(pmap, va, &lvl);
4308 
4309 			/*
4310 			 * If the page table page is mapped, we just increment
4311 			 * the hold count, and activate it.  Otherwise, we
4312 			 * attempt to allocate a page table page.  If this
4313 			 * attempt fails, we don't retry.  Instead, we give up.
4314 			 */
4315 			if (lvl == 1) {
4316 				l2 = pmap_l1_to_l2(pde, va);
4317 				if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
4318 				    L2_BLOCK)
4319 					return (NULL);
4320 			}
4321 			if (lvl == 2 && pmap_load(pde) != 0) {
4322 				mpte =
4323 				    PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
4324 				mpte->ref_count++;
4325 			} else {
4326 				/*
4327 				 * Pass NULL instead of the PV list lock
4328 				 * pointer, because we don't intend to sleep.
4329 				 */
4330 				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
4331 				if (mpte == NULL)
4332 					return (mpte);
4333 			}
4334 		}
4335 		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
4336 		l3 = &l3[pmap_l3_index(va)];
4337 	} else {
4338 		mpte = NULL;
4339 		pde = pmap_pde(kernel_pmap, va, &lvl);
4340 		KASSERT(pde != NULL,
4341 		    ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
4342 		     va));
4343 		KASSERT(lvl == 2,
4344 		    ("pmap_enter_quick_locked: Invalid level %d", lvl));
4345 		l3 = pmap_l2_to_l3(pde, va);
4346 	}
4347 
4348 	/*
4349 	 * Abort if a mapping already exists.
4350 	 */
4351 	if (pmap_load(l3) != 0) {
4352 		if (mpte != NULL)
4353 			mpte->ref_count--;
4354 		return (NULL);
4355 	}
4356 
4357 	/*
4358 	 * Enter on the PV list if part of our managed memory.
4359 	 */
4360 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
4361 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
4362 		if (mpte != NULL)
4363 			pmap_abort_ptp(pmap, va, mpte);
4364 		return (NULL);
4365 	}
4366 
4367 	/*
4368 	 * Increment counters
4369 	 */
4370 	pmap_resident_count_inc(pmap, 1);
4371 
4372 	pa = VM_PAGE_TO_PHYS(m);
4373 	l3_val = pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
4374 	    ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
4375 	if ((prot & VM_PROT_EXECUTE) == 0 ||
4376 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
4377 		l3_val |= ATTR_S1_XN;
4378 	if (!ADDR_IS_KERNEL(va))
4379 		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
4380 	else
4381 		l3_val |= ATTR_S1_UXN;
4382 	if (pmap != kernel_pmap)
4383 		l3_val |= ATTR_S1_nG;
4384 
4385 	/*
4386 	 * Now validate mapping with RO protection
4387 	 */
4388 	if ((m->oflags & VPO_UNMANAGED) == 0) {
4389 		l3_val |= ATTR_SW_MANAGED;
4390 		l3_val &= ~ATTR_AF;
4391 	}
4392 
4393 	/* Sync icache before the mapping is stored to PTE */
4394 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
4395 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
4396 		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
4397 
4398 	pmap_store(l3, l3_val);
4399 	dsb(ishst);
4400 
4401 	return (mpte);
4402 }
4403 
4404 /*
4405  * This routine would map large physical mmap regions into the
4406  * processor address space.  On arm64 it is a no-op that only
4407  * asserts that the backing object is a device or SG object.
4408  */
4409 void
4410 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4411     vm_pindex_t pindex, vm_size_t size)
4412 {
4413 
4414 	VM_OBJECT_ASSERT_WLOCKED(object);
4415 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4416 	    ("pmap_object_init_pt: non-device object"));
4417 }
4418 
4419 /*
4420  *	Clear the wired attribute from the mappings for the specified range of
4421  *	addresses in the given pmap.  Every valid mapping within that range
4422  *	must have the wired attribute set.  In contrast, invalid mappings
4423  *	cannot have the wired attribute set, so they are ignored.
4424  *
4425  *	The wired attribute of the page table entry is not a hardware feature,
4426  *	so there is no need to invalidate any TLB entries.
4427  */
4428 void
4429 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4430 {
4431 	vm_offset_t va_next;
4432 	pd_entry_t *l0, *l1, *l2;
4433 	pt_entry_t *l3;
4434 
4435 	PMAP_LOCK(pmap);
4436 	for (; sva < eva; sva = va_next) {
4437 		l0 = pmap_l0(pmap, sva);
4438 		if (pmap_load(l0) == 0) {
4439 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4440 			if (va_next < sva)
4441 				va_next = eva;
4442 			continue;
4443 		}
4444 
4445 		l1 = pmap_l0_to_l1(l0, sva);
4446 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4447 		if (va_next < sva)
4448 			va_next = eva;
4449 		if (pmap_load(l1) == 0)
4450 			continue;
4451 
4452 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
4453 			KASSERT(va_next <= eva,
4454 			    ("partial update of non-transparent 1G page "
4455 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
4456 			    pmap_load(l1), sva, eva, va_next));
4457 			MPASS(pmap != kernel_pmap);
4458 			MPASS((pmap_load(l1) & (ATTR_SW_MANAGED |
4459 			    ATTR_SW_WIRED)) == ATTR_SW_WIRED);
4460 			pmap_clear_bits(l1, ATTR_SW_WIRED);
4461 			pmap->pm_stats.wired_count -= L1_SIZE / PAGE_SIZE;
4462 			continue;
4463 		}
4464 
4465 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4466 		if (va_next < sva)
4467 			va_next = eva;
4468 
4469 		l2 = pmap_l1_to_l2(l1, sva);
4470 		if (pmap_load(l2) == 0)
4471 			continue;
4472 
4473 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
4474 			if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
4475 				panic("pmap_unwire: l2 %#jx is missing "
4476 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
4477 
4478 			/*
4479 			 * Are we unwiring the entire large page?  If not,
4480 			 * demote the mapping and fall through.
4481 			 */
4482 			if (sva + L2_SIZE == va_next && eva >= va_next) {
4483 				pmap_clear_bits(l2, ATTR_SW_WIRED);
4484 				pmap->pm_stats.wired_count -= L2_SIZE /
4485 				    PAGE_SIZE;
4486 				continue;
4487 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
4488 				panic("pmap_unwire: demotion failed");
4489 		}
4490 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
4491 		    ("pmap_unwire: Invalid l2 entry after demotion"));
4492 
4493 		if (va_next > eva)
4494 			va_next = eva;
4495 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
4496 		    sva += L3_SIZE) {
4497 			if (pmap_load(l3) == 0)
4498 				continue;
4499 			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
4500 				panic("pmap_unwire: l3 %#jx is missing "
4501 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
4502 
4503 			/*
4504 			 * ATTR_SW_WIRED must be cleared atomically.  Although
4505 			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
4506 			 * the System MMU may write to the entry concurrently.
4507 			 */
4508 			pmap_clear_bits(l3, ATTR_SW_WIRED);
4509 			pmap->pm_stats.wired_count--;
4510 		}
4511 	}
4512 	PMAP_UNLOCK(pmap);
4513 }
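
/*
 * Note that unwiring only part of a 2MB mapping demotes the mapping first, so
 * that the wired attribute is cleared from just the 4KB entries within the
 * requested range.  The non-transparent 1GB mappings created by
 * pmap_enter_largepage() must instead be unwired in their entirety.
 */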
4514 
4515 /*
4516  *	Copy the range specified by src_addr/len
4517  *	from the source map to the range dst_addr/len
4518  *	in the destination map.
4519  *
4520  *	This routine is only advisory and need not do anything.
4521  *
4522  *	Because the executable mappings created by this routine are copied,
4523  *	it should not have to flush the instruction cache.
4524  */
4525 void
4526 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
4527     vm_offset_t src_addr)
4528 {
4529 	struct rwlock *lock;
4530 	pd_entry_t *l0, *l1, *l2, srcptepaddr;
4531 	pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
4532 	vm_offset_t addr, end_addr, va_next;
4533 	vm_page_t dst_m, dstmpte, srcmpte;
4534 
4535 	PMAP_ASSERT_STAGE1(dst_pmap);
4536 	PMAP_ASSERT_STAGE1(src_pmap);
4537 
4538 	if (dst_addr != src_addr)
4539 		return;
4540 	end_addr = src_addr + len;
4541 	lock = NULL;
4542 	if (dst_pmap < src_pmap) {
4543 		PMAP_LOCK(dst_pmap);
4544 		PMAP_LOCK(src_pmap);
4545 	} else {
4546 		PMAP_LOCK(src_pmap);
4547 		PMAP_LOCK(dst_pmap);
4548 	}
4549 	for (addr = src_addr; addr < end_addr; addr = va_next) {
4550 		l0 = pmap_l0(src_pmap, addr);
4551 		if (pmap_load(l0) == 0) {
4552 			va_next = (addr + L0_SIZE) & ~L0_OFFSET;
4553 			if (va_next < addr)
4554 				va_next = end_addr;
4555 			continue;
4556 		}
4557 
4558 		va_next = (addr + L1_SIZE) & ~L1_OFFSET;
4559 		if (va_next < addr)
4560 			va_next = end_addr;
4561 		l1 = pmap_l0_to_l1(l0, addr);
4562 		if (pmap_load(l1) == 0)
4563 			continue;
4564 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
4565 			KASSERT(va_next <= end_addr,
4566 			    ("partial update of non-transparent 1G page "
4567 			    "l1 %#lx addr %#lx end_addr %#lx va_next %#lx",
4568 			    pmap_load(l1), addr, end_addr, va_next));
4569 			srcptepaddr = pmap_load(l1);
4570 			l1 = pmap_l1(dst_pmap, addr);
4571 			if (l1 == NULL) {
4572 				if (_pmap_alloc_l3(dst_pmap,
4573 				    pmap_l0_pindex(addr), NULL) == NULL)
4574 					break;
4575 				l1 = pmap_l1(dst_pmap, addr);
4576 			} else {
4577 				l0 = pmap_l0(dst_pmap, addr);
4578 				dst_m = PHYS_TO_VM_PAGE(pmap_load(l0) &
4579 				    ~ATTR_MASK);
4580 				dst_m->ref_count++;
4581 			}
4582 			KASSERT(pmap_load(l1) == 0,
4583 			    ("1G mapping present in dst pmap "
4584 			    "l1 %#lx addr %#lx end_addr %#lx va_next %#lx",
4585 			    pmap_load(l1), addr, end_addr, va_next));
4586 			pmap_store(l1, srcptepaddr & ~ATTR_SW_WIRED);
4587 			pmap_resident_count_inc(dst_pmap, L1_SIZE / PAGE_SIZE);
4588 			continue;
4589 		}
4590 
4591 		va_next = (addr + L2_SIZE) & ~L2_OFFSET;
4592 		if (va_next < addr)
4593 			va_next = end_addr;
4594 		l2 = pmap_l1_to_l2(l1, addr);
4595 		srcptepaddr = pmap_load(l2);
4596 		if (srcptepaddr == 0)
4597 			continue;
4598 		if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
4599 			/*
4600 			 * We can only virtual copy whole superpages.
4601 			 */
4602 			if ((addr & L2_OFFSET) != 0 ||
4603 			    addr + L2_SIZE > end_addr)
4604 				continue;
4605 			l2 = pmap_alloc_l2(dst_pmap, addr, &dst_m, NULL);
4606 			if (l2 == NULL)
4607 				break;
4608 			if (pmap_load(l2) == 0 &&
4609 			    ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
4610 			    pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
4611 			    PMAP_ENTER_NORECLAIM, &lock))) {
4612 				/*
4613 				 * We leave the dirty bit unchanged because
4614 				 * managed read/write superpage mappings are
4615 				 * required to be dirty.  However, managed
4616 				 * superpage mappings are not required to
4617 				 * have their accessed bit set, so we clear
4618 				 * it because we don't know if this mapping
4619 				 * will be used.
4620 				 */
4621 				srcptepaddr &= ~ATTR_SW_WIRED;
4622 				if ((srcptepaddr & ATTR_SW_MANAGED) != 0)
4623 					srcptepaddr &= ~ATTR_AF;
4624 				pmap_store(l2, srcptepaddr);
4625 				pmap_resident_count_inc(dst_pmap, L2_SIZE /
4626 				    PAGE_SIZE);
4627 				atomic_add_long(&pmap_l2_mappings, 1);
4628 			} else
4629 				pmap_abort_ptp(dst_pmap, addr, dst_m);
4630 			continue;
4631 		}
4632 		KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
4633 		    ("pmap_copy: invalid L2 entry"));
4634 		srcptepaddr &= ~ATTR_MASK;
4635 		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
4636 		KASSERT(srcmpte->ref_count > 0,
4637 		    ("pmap_copy: source page table page is unused"));
4638 		if (va_next > end_addr)
4639 			va_next = end_addr;
4640 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
4641 		src_pte = &src_pte[pmap_l3_index(addr)];
4642 		dstmpte = NULL;
4643 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
4644 			ptetemp = pmap_load(src_pte);
4645 
4646 			/*
4647 			 * We only virtual copy managed pages.
4648 			 */
4649 			if ((ptetemp & ATTR_SW_MANAGED) == 0)
4650 				continue;
4651 
4652 			if (dstmpte != NULL) {
4653 				KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
4654 				    ("dstmpte pindex/addr mismatch"));
4655 				dstmpte->ref_count++;
4656 			} else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
4657 			    NULL)) == NULL)
4658 				goto out;
4659 			dst_pte = (pt_entry_t *)
4660 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
4661 			dst_pte = &dst_pte[pmap_l3_index(addr)];
4662 			if (pmap_load(dst_pte) == 0 &&
4663 			    pmap_try_insert_pv_entry(dst_pmap, addr,
4664 			    PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
4665 				/*
4666 				 * Clear the wired and accessed bits, and mark
4667 				 * the entry clean via the AP read-only bit.
4668 				 */
4669 				mask = ATTR_AF | ATTR_SW_WIRED;
4670 				nbits = 0;
4671 				if ((ptetemp & ATTR_SW_DBM) != 0)
4672 					nbits |= ATTR_S1_AP_RW_BIT;
4673 				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
4674 				pmap_resident_count_inc(dst_pmap, 1);
4675 			} else {
4676 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
4677 				goto out;
4678 			}
4679 			/* Have we copied all of the valid mappings? */
4680 			if (dstmpte->ref_count >= srcmpte->ref_count)
4681 				break;
4682 		}
4683 	}
4684 out:
4685 	/*
4686 	 * XXX This barrier may not be needed because the destination pmap is
4687 	 * not active.
4688 	 */
4689 	dsb(ishst);
4690 
4691 	if (lock != NULL)
4692 		rw_wunlock(lock);
4693 	PMAP_UNLOCK(src_pmap);
4694 	PMAP_UNLOCK(dst_pmap);
4695 }
4696 
4697 /*
4698  *	pmap_zero_page zeros the specified hardware page using its
4699  *	direct map (DMAP) address.
4700  */
4701 void
4702 pmap_zero_page(vm_page_t m)
4703 {
4704 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4705 
4706 	pagezero((void *)va);
4707 }
4708 
4709 /*
4710  *	pmap_zero_page_area zeros the specified area of a hardware
4711  *	page using its direct map (DMAP) address.
4712  *
4713  *	off and size may not cover an area beyond a single hardware page.
4714  */
4715 void
4716 pmap_zero_page_area(vm_page_t m, int off, int size)
4717 {
4718 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4719 
4720 	if (off == 0 && size == PAGE_SIZE)
4721 		pagezero((void *)va);
4722 	else
4723 		bzero((char *)va + off, size);
4724 }
4725 
4726 /*
4727  *	pmap_copy_page copies the specified (machine independent)
4728  *	page by using the direct map (DMAP) addresses of the source
4729  *	and destination pages and copying one machine dependent
4730  *	page at a time.
4731  */
4732 void
4733 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
4734 {
4735 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
4736 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
4737 
4738 	pagecopy((void *)src, (void *)dst);
4739 }
4740 
4741 int unmapped_buf_allowed = 1;
4742 
4743 void
4744 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4745     vm_offset_t b_offset, int xfersize)
4746 {
4747 	void *a_cp, *b_cp;
4748 	vm_page_t m_a, m_b;
4749 	vm_paddr_t p_a, p_b;
4750 	vm_offset_t a_pg_offset, b_pg_offset;
4751 	int cnt;
4752 
4753 	while (xfersize > 0) {
4754 		a_pg_offset = a_offset & PAGE_MASK;
4755 		m_a = ma[a_offset >> PAGE_SHIFT];
4756 		p_a = m_a->phys_addr;
4757 		b_pg_offset = b_offset & PAGE_MASK;
4758 		m_b = mb[b_offset >> PAGE_SHIFT];
4759 		p_b = m_b->phys_addr;
4760 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4761 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4762 		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
4763 			panic("!DMAP a %lx", p_a);
4764 		} else {
4765 			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
4766 		}
4767 		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
4768 			panic("!DMAP b %lx", p_b);
4769 		} else {
4770 			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
4771 		}
4772 		bcopy(a_cp, b_cp, cnt);
4773 		a_offset += cnt;
4774 		b_offset += cnt;
4775 		xfersize -= cnt;
4776 	}
4777 }
4778 
4779 vm_offset_t
4780 pmap_quick_enter_page(vm_page_t m)
4781 {
4782 
4783 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
4784 }
4785 
4786 void
4787 pmap_quick_remove_page(vm_offset_t addr)
4788 {
4789 }
4790 
4791 /*
4792  * Returns true if the pmap's pv is one of the first
4793  * 16 pvs linked to from this page.  This count may
4794  * be changed upwards or downwards in the future; it
4795  * is only necessary that true be returned for a small
4796  * subset of pmaps for proper page aging.
4797  */
4798 boolean_t
4799 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4800 {
4801 	struct md_page *pvh;
4802 	struct rwlock *lock;
4803 	pv_entry_t pv;
4804 	int loops = 0;
4805 	boolean_t rv;
4806 
4807 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4808 	    ("pmap_page_exists_quick: page %p is not managed", m));
4809 	rv = FALSE;
4810 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4811 	rw_rlock(lock);
4812 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4813 		if (PV_PMAP(pv) == pmap) {
4814 			rv = TRUE;
4815 			break;
4816 		}
4817 		loops++;
4818 		if (loops >= 16)
4819 			break;
4820 	}
4821 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4822 		pvh = page_to_pvh(m);
4823 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4824 			if (PV_PMAP(pv) == pmap) {
4825 				rv = TRUE;
4826 				break;
4827 			}
4828 			loops++;
4829 			if (loops >= 16)
4830 				break;
4831 		}
4832 	}
4833 	rw_runlock(lock);
4834 	return (rv);
4835 }
4836 
4837 /*
4838  *	pmap_page_wired_mappings:
4839  *
4840  *	Return the number of managed mappings to the given physical page
4841  *	that are wired.
4842  */
4843 int
4844 pmap_page_wired_mappings(vm_page_t m)
4845 {
4846 	struct rwlock *lock;
4847 	struct md_page *pvh;
4848 	pmap_t pmap;
4849 	pt_entry_t *pte;
4850 	pv_entry_t pv;
4851 	int count, lvl, md_gen, pvh_gen;
4852 
4853 	if ((m->oflags & VPO_UNMANAGED) != 0)
4854 		return (0);
4855 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4856 	rw_rlock(lock);
4857 restart:
4858 	count = 0;
4859 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4860 		pmap = PV_PMAP(pv);
4861 		if (!PMAP_TRYLOCK(pmap)) {
4862 			md_gen = m->md.pv_gen;
4863 			rw_runlock(lock);
4864 			PMAP_LOCK(pmap);
4865 			rw_rlock(lock);
4866 			if (md_gen != m->md.pv_gen) {
4867 				PMAP_UNLOCK(pmap);
4868 				goto restart;
4869 			}
4870 		}
4871 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4872 		if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4873 			count++;
4874 		PMAP_UNLOCK(pmap);
4875 	}
4876 	if ((m->flags & PG_FICTITIOUS) == 0) {
4877 		pvh = page_to_pvh(m);
4878 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4879 			pmap = PV_PMAP(pv);
4880 			if (!PMAP_TRYLOCK(pmap)) {
4881 				md_gen = m->md.pv_gen;
4882 				pvh_gen = pvh->pv_gen;
4883 				rw_runlock(lock);
4884 				PMAP_LOCK(pmap);
4885 				rw_rlock(lock);
4886 				if (md_gen != m->md.pv_gen ||
4887 				    pvh_gen != pvh->pv_gen) {
4888 					PMAP_UNLOCK(pmap);
4889 					goto restart;
4890 				}
4891 			}
4892 			pte = pmap_pte(pmap, pv->pv_va, &lvl);
4893 			if (pte != NULL &&
4894 			    (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4895 				count++;
4896 			PMAP_UNLOCK(pmap);
4897 		}
4898 	}
4899 	rw_runlock(lock);
4900 	return (count);
4901 }
4902 
4903 /*
4904  * Returns true if the given page is mapped individually or as part of
4905  * a 2mpage.  Otherwise, returns false.
4906  */
4907 bool
4908 pmap_page_is_mapped(vm_page_t m)
4909 {
4910 	struct rwlock *lock;
4911 	bool rv;
4912 
4913 	if ((m->oflags & VPO_UNMANAGED) != 0)
4914 		return (false);
4915 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4916 	rw_rlock(lock);
4917 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
4918 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4919 	    !TAILQ_EMPTY(&page_to_pvh(m)->pv_list));
4920 	rw_runlock(lock);
4921 	return (rv);
4922 }
4923 
4924 /*
4925  * Destroy all managed, non-wired mappings in the given user-space
4926  * pmap.  This pmap cannot be active on any processor besides the
4927  * caller.
4928  *
4929  * This function cannot be applied to the kernel pmap.  Moreover, it
4930  * is not intended for general use.  It is only to be used during
4931  * process termination.  Consequently, it can be implemented in ways
4932  * that make it faster than pmap_remove().  First, it can more quickly
4933  * destroy mappings by iterating over the pmap's collection of PV
4934  * entries, rather than searching the page table.  Second, it doesn't
4935  * have to test and clear the page table entries atomically, because
4936  * no processor is currently accessing the user address space.  In
4937  * particular, a page table entry's dirty bit won't change state once
4938  * this function starts.
4939  */
4940 void
4941 pmap_remove_pages(pmap_t pmap)
4942 {
4943 	pd_entry_t *pde;
4944 	pt_entry_t *pte, tpte;
4945 	struct spglist free;
4946 	vm_page_t m, ml3, mt;
4947 	pv_entry_t pv;
4948 	struct md_page *pvh;
4949 	struct pv_chunk *pc, *npc;
4950 	struct rwlock *lock;
4951 	int64_t bit;
4952 	uint64_t inuse, bitmask;
4953 	int allfree, field, freed, idx, lvl;
4954 	vm_paddr_t pa;
4955 
4956 	lock = NULL;
4957 
4958 	SLIST_INIT(&free);
4959 	PMAP_LOCK(pmap);
4960 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4961 		allfree = 1;
4962 		freed = 0;
4963 		for (field = 0; field < _NPCM; field++) {
4964 			inuse = ~pc->pc_map[field] & pc_freemask[field];
4965 			while (inuse != 0) {
4966 				bit = ffsl(inuse) - 1;
4967 				bitmask = 1UL << bit;
4968 				idx = field * 64 + bit;
4969 				pv = &pc->pc_pventry[idx];
4970 				inuse &= ~bitmask;
4971 
4972 				pde = pmap_pde(pmap, pv->pv_va, &lvl);
4973 				KASSERT(pde != NULL,
4974 				    ("Attempting to remove an unmapped page"));
4975 
4976 				switch(lvl) {
4977 				switch (lvl) {
4978 					pte = pmap_l1_to_l2(pde, pv->pv_va);
4979 					tpte = pmap_load(pte);
4980 					KASSERT((tpte & ATTR_DESCR_MASK) ==
4981 					    L2_BLOCK,
4982 					    ("Attempting to remove an invalid "
4983 					    "block: %lx", tpte));
4984 					break;
4985 				case 2:
4986 					pte = pmap_l2_to_l3(pde, pv->pv_va);
4987 					tpte = pmap_load(pte);
4988 					KASSERT((tpte & ATTR_DESCR_MASK) ==
4989 					    L3_PAGE,
4990 					    ("Attempting to remove an invalid "
4991 					     "page: %lx", tpte));
4992 					break;
4993 				default:
4994 					panic(
4995 					    "Invalid page directory level: %d",
4996 					    lvl);
4997 				}
4998 
4999 /*
5000  * We cannot remove wired pages from a process' mapping at this time
5001  */
5002 				if (tpte & ATTR_SW_WIRED) {
5003 					allfree = 0;
5004 					continue;
5005 				}
5006 
5007 				/* Mark free */
5008 				pc->pc_map[field] |= bitmask;
5009 
5010 				/*
5011 				 * Because this pmap is not active on other
5012 				 * processors, the dirty bit cannot have
5013 				 * changed state since we last loaded pte.
5014 				 */
5015 				pmap_clear(pte);
5016 
5017 				pa = tpte & ~ATTR_MASK;
5018 
5019 				m = PHYS_TO_VM_PAGE(pa);
5020 				KASSERT(m->phys_addr == pa,
5021 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5022 				    m, (uintmax_t)m->phys_addr,
5023 				    (uintmax_t)tpte));
5024 
5025 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5026 				    m < &vm_page_array[vm_page_array_size],
5027 				    ("pmap_remove_pages: bad pte %#jx",
5028 				    (uintmax_t)tpte));
5029 
5030 				/*
5031 				 * Update the vm_page_t clean/reference bits.
5032 				 */
5033 				if (pmap_pte_dirty(pmap, tpte)) {
5034 					switch (lvl) {
5035 					case 1:
5036 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
5037 							vm_page_dirty(mt);
5038 						break;
5039 					case 2:
5040 						vm_page_dirty(m);
5041 						break;
5042 					}
5043 				}
5044 
5045 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5046 
5047 				switch (lvl) {
5048 				case 1:
5049 					pmap_resident_count_dec(pmap,
5050 					    L2_SIZE / PAGE_SIZE);
5051 					pvh = page_to_pvh(m);
5052 					TAILQ_REMOVE(&pvh->pv_list, pv,pv_next);
5053 					pvh->pv_gen++;
5054 					if (TAILQ_EMPTY(&pvh->pv_list)) {
5055 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
5056 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
5057 							    TAILQ_EMPTY(&mt->md.pv_list))
5058 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
5059 					}
5060 					ml3 = pmap_remove_pt_page(pmap,
5061 					    pv->pv_va);
5062 					if (ml3 != NULL) {
5063 						KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
5064 						    ("pmap_remove_pages: l3 page not promoted"));
5065 						pmap_resident_count_dec(pmap,1);
5066 						KASSERT(ml3->ref_count == NL3PG,
5067 						    ("pmap_remove_pages: l3 page ref count error"));
5068 						ml3->ref_count = 0;
5069 						pmap_add_delayed_free_list(ml3,
5070 						    &free, FALSE);
5071 					}
5072 					break;
5073 				case 2:
5074 					pmap_resident_count_dec(pmap, 1);
5075 					TAILQ_REMOVE(&m->md.pv_list, pv,
5076 					    pv_next);
5077 					m->md.pv_gen++;
5078 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
5079 					    TAILQ_EMPTY(&m->md.pv_list) &&
5080 					    (m->flags & PG_FICTITIOUS) == 0) {
5081 						pvh = page_to_pvh(m);
5082 						if (TAILQ_EMPTY(&pvh->pv_list))
5083 							vm_page_aflag_clear(m,
5084 							    PGA_WRITEABLE);
5085 					}
5086 					break;
5087 				}
5088 				pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
5089 				    &free);
5090 				freed++;
5091 			}
5092 		}
5093 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5094 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5095 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5096 		if (allfree) {
5097 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5098 			free_pv_chunk(pc);
5099 		}
5100 	}
5101 	if (lock != NULL)
5102 		rw_wunlock(lock);
5103 	pmap_invalidate_all(pmap);
5104 	PMAP_UNLOCK(pmap);
5105 	vm_page_free_pages_toq(&free, true);
5106 }
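
/*
 * Because the pmap is not active on any other processor, the entries above
 * are cleared without per-entry TLB shootdowns; the single
 * pmap_invalidate_all() call at the end removes any stale TLB entries.
 */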
5107 
5108 /*
5109  * This is used to check if a page has been accessed or modified.
5110  */
5111 static boolean_t
5112 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
5113 {
5114 	struct rwlock *lock;
5115 	pv_entry_t pv;
5116 	struct md_page *pvh;
5117 	pt_entry_t *pte, mask, value;
5118 	pmap_t pmap;
5119 	int lvl, md_gen, pvh_gen;
5120 	boolean_t rv;
5121 
5122 	rv = FALSE;
5123 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5124 	rw_rlock(lock);
5125 restart:
5126 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5127 		pmap = PV_PMAP(pv);
5128 		PMAP_ASSERT_STAGE1(pmap);
5129 		if (!PMAP_TRYLOCK(pmap)) {
5130 			md_gen = m->md.pv_gen;
5131 			rw_runlock(lock);
5132 			PMAP_LOCK(pmap);
5133 			rw_rlock(lock);
5134 			if (md_gen != m->md.pv_gen) {
5135 				PMAP_UNLOCK(pmap);
5136 				goto restart;
5137 			}
5138 		}
5139 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
5140 		KASSERT(lvl == 3,
5141 		    ("pmap_page_test_mappings: Invalid level %d", lvl));
5142 		mask = 0;
5143 		value = 0;
5144 		if (modified) {
5145 			mask |= ATTR_S1_AP_RW_BIT;
5146 			value |= ATTR_S1_AP(ATTR_S1_AP_RW);
5147 		}
5148 		if (accessed) {
5149 			mask |= ATTR_AF | ATTR_DESCR_MASK;
5150 			value |= ATTR_AF | L3_PAGE;
5151 		}
5152 		rv = (pmap_load(pte) & mask) == value;
5153 		PMAP_UNLOCK(pmap);
5154 		if (rv)
5155 			goto out;
5156 	}
5157 	if ((m->flags & PG_FICTITIOUS) == 0) {
5158 		pvh = page_to_pvh(m);
5159 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5160 			pmap = PV_PMAP(pv);
5161 			PMAP_ASSERT_STAGE1(pmap);
5162 			if (!PMAP_TRYLOCK(pmap)) {
5163 				md_gen = m->md.pv_gen;
5164 				pvh_gen = pvh->pv_gen;
5165 				rw_runlock(lock);
5166 				PMAP_LOCK(pmap);
5167 				rw_rlock(lock);
5168 				if (md_gen != m->md.pv_gen ||
5169 				    pvh_gen != pvh->pv_gen) {
5170 					PMAP_UNLOCK(pmap);
5171 					goto restart;
5172 				}
5173 			}
5174 			pte = pmap_pte(pmap, pv->pv_va, &lvl);
5175 			KASSERT(lvl == 2,
5176 			    ("pmap_page_test_mappings: Invalid level %d", lvl));
5177 			mask = 0;
5178 			value = 0;
5179 			if (modified) {
5180 				mask |= ATTR_S1_AP_RW_BIT;
5181 				value |= ATTR_S1_AP(ATTR_S1_AP_RW);
5182 			}
5183 			if (accessed) {
5184 				mask |= ATTR_AF | ATTR_DESCR_MASK;
5185 				value |= ATTR_AF | L2_BLOCK;
5186 			}
5187 			rv = (pmap_load(pte) & mask) == value;
5188 			PMAP_UNLOCK(pmap);
5189 			if (rv)
5190 				goto out;
5191 		}
5192 	}
5193 out:
5194 	rw_runlock(lock);
5195 	return (rv);
5196 }
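
/*
 * The mask/value pairs above fold both tests into a single comparison: a
 * stage 1 mapping is treated as modified when its AP bits permit writes, and
 * as referenced when ATTR_AF is set in a valid L3 page or L2 block
 * descriptor.
 */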
5197 
5198 /*
5199  *	pmap_is_modified:
5200  *
5201  *	Return whether or not the specified physical page was modified
5202  *	in any physical maps.
5203  */
5204 boolean_t
5205 pmap_is_modified(vm_page_t m)
5206 {
5207 
5208 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5209 	    ("pmap_is_modified: page %p is not managed", m));
5210 
5211 	/*
5212 	 * If the page is not busied then this check is racy.
5213 	 */
5214 	if (!pmap_page_is_write_mapped(m))
5215 		return (FALSE);
5216 	return (pmap_page_test_mappings(m, FALSE, TRUE));
5217 }
5218 
5219 /*
5220  *	pmap_is_prefaultable:
5221  *
5222  *	Return whether or not the specified virtual address is eligible
5223  *	for prefault.
5224  */
5225 boolean_t
5226 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
5227 {
5228 	pt_entry_t *pte;
5229 	boolean_t rv;
5230 	int lvl;
5231 
5232 	rv = FALSE;
5233 	PMAP_LOCK(pmap);
5234 	pte = pmap_pte(pmap, addr, &lvl);
5235 	if (pte != NULL && pmap_load(pte) != 0) {
5236 		rv = TRUE;
5237 	}
5238 	PMAP_UNLOCK(pmap);
5239 	return (rv);
5240 }
5241 
5242 /*
5243  *	pmap_is_referenced:
5244  *
5245  *	Return whether or not the specified physical page was referenced
5246  *	in any physical maps.
5247  */
5248 boolean_t
5249 pmap_is_referenced(vm_page_t m)
5250 {
5251 
5252 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5253 	    ("pmap_is_referenced: page %p is not managed", m));
5254 	return (pmap_page_test_mappings(m, TRUE, FALSE));
5255 }
5256 
5257 /*
5258  * Clear the write and modified bits in each of the given page's mappings.
5259  */
5260 void
5261 pmap_remove_write(vm_page_t m)
5262 {
5263 	struct md_page *pvh;
5264 	pmap_t pmap;
5265 	struct rwlock *lock;
5266 	pv_entry_t next_pv, pv;
5267 	pt_entry_t oldpte, *pte;
5268 	vm_offset_t va;
5269 	int lvl, md_gen, pvh_gen;
5270 
5271 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5272 	    ("pmap_remove_write: page %p is not managed", m));
5273 	vm_page_assert_busied(m);
5274 
5275 	if (!pmap_page_is_write_mapped(m))
5276 		return;
5277 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5278 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
5279 	rw_wlock(lock);
5280 retry:
5281 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5282 		pmap = PV_PMAP(pv);
5283 		PMAP_ASSERT_STAGE1(pmap);
5284 		if (!PMAP_TRYLOCK(pmap)) {
5285 			pvh_gen = pvh->pv_gen;
5286 			rw_wunlock(lock);
5287 			PMAP_LOCK(pmap);
5288 			rw_wlock(lock);
5289 			if (pvh_gen != pvh->pv_gen) {
5290 				PMAP_UNLOCK(pmap);
5291 				goto retry;
5292 			}
5293 		}
5294 		va = pv->pv_va;
5295 		pte = pmap_pte(pmap, va, &lvl);
5296 		if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
5297 			(void)pmap_demote_l2_locked(pmap, pte, va, &lock);
5298 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5299 		    ("inconsistent pv lock %p %p for page %p",
5300 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5301 		PMAP_UNLOCK(pmap);
5302 	}
5303 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5304 		pmap = PV_PMAP(pv);
5305 		PMAP_ASSERT_STAGE1(pmap);
5306 		if (!PMAP_TRYLOCK(pmap)) {
5307 			pvh_gen = pvh->pv_gen;
5308 			md_gen = m->md.pv_gen;
5309 			rw_wunlock(lock);
5310 			PMAP_LOCK(pmap);
5311 			rw_wlock(lock);
5312 			if (pvh_gen != pvh->pv_gen ||
5313 			    md_gen != m->md.pv_gen) {
5314 				PMAP_UNLOCK(pmap);
5315 				goto retry;
5316 			}
5317 		}
5318 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
5319 		oldpte = pmap_load(pte);
5320 		if ((oldpte & ATTR_SW_DBM) != 0) {
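			/*
			 * Atomically make the mapping read-only and clear its
			 * software DBM bit.  If the mapping was writable
			 * beforehand, the page may have been dirtied.
			 */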
5321 			while (!atomic_fcmpset_64(pte, &oldpte,
5322 			    (oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM))
5323 				cpu_spinwait();
5324 			if ((oldpte & ATTR_S1_AP_RW_BIT) ==
5325 			    ATTR_S1_AP(ATTR_S1_AP_RW))
5326 				vm_page_dirty(m);
5327 			pmap_invalidate_page(pmap, pv->pv_va);
5328 		}
5329 		PMAP_UNLOCK(pmap);
5330 	}
5331 	rw_wunlock(lock);
5332 	vm_page_aflag_clear(m, PGA_WRITEABLE);
5333 }
5334 
5335 /*
5336  *	pmap_ts_referenced:
5337  *
5338  *	Return a count of reference bits for a page, clearing those bits.
5339  *	It is not necessary for every reference bit to be cleared, but it
5340  *	is necessary that 0 only be returned when there are truly no
5341  *	reference bits set.
5342  *
5343  *	As an optimization, update the page's dirty field if a modified bit is
5344  *	found while counting reference bits.  This opportunistic update can be
5345  *	performed at low cost and can eliminate the need for some future calls
5346  *	to pmap_is_modified().  However, since this function stops after
5347  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
5348  *	dirty pages.  Those dirty pages will only be detected by a future call
5349  *	to pmap_is_modified().
5350  */
5351 int
5352 pmap_ts_referenced(vm_page_t m)
5353 {
5354 	struct md_page *pvh;
5355 	pv_entry_t pv, pvf;
5356 	pmap_t pmap;
5357 	struct rwlock *lock;
5358 	pd_entry_t *pde, tpde;
5359 	pt_entry_t *pte, tpte;
5360 	vm_offset_t va;
5361 	vm_paddr_t pa;
5362 	int cleared, lvl, md_gen, not_cleared, pvh_gen;
5363 	struct spglist free;
5364 
5365 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5366 	    ("pmap_ts_referenced: page %p is not managed", m));
5367 	SLIST_INIT(&free);
5368 	cleared = 0;
5369 	pa = VM_PAGE_TO_PHYS(m);
5370 	lock = PHYS_TO_PV_LIST_LOCK(pa);
5371 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
5372 	rw_wlock(lock);
5373 retry:
5374 	not_cleared = 0;
5375 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
5376 		goto small_mappings;
5377 	pv = pvf;
5378 	do {
5379 		if (pvf == NULL)
5380 			pvf = pv;
5381 		pmap = PV_PMAP(pv);
5382 		if (!PMAP_TRYLOCK(pmap)) {
5383 			pvh_gen = pvh->pv_gen;
5384 			rw_wunlock(lock);
5385 			PMAP_LOCK(pmap);
5386 			rw_wlock(lock);
5387 			if (pvh_gen != pvh->pv_gen) {
5388 				PMAP_UNLOCK(pmap);
5389 				goto retry;
5390 			}
5391 		}
5392 		va = pv->pv_va;
5393 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
5394 		KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
5395 		KASSERT(lvl == 1,
5396 		    ("pmap_ts_referenced: invalid pde level %d", lvl));
5397 		tpde = pmap_load(pde);
5398 		KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
5399 		    ("pmap_ts_referenced: found an invalid l1 table"));
5400 		pte = pmap_l1_to_l2(pde, pv->pv_va);
5401 		tpte = pmap_load(pte);
5402 		if (pmap_pte_dirty(pmap, tpte)) {
5403 			/*
5404 			 * Although "tpte" is mapping a 2MB page, because
5405 			 * this function is called at a 4KB page granularity,
5406 			 * we only update the 4KB page under test.
5407 			 */
5408 			vm_page_dirty(m);
5409 		}
5410 
5411 		if ((tpte & ATTR_AF) != 0) {
5412 			/*
5413 			 * Since this reference bit is shared by 512 4KB pages,
5414 			 * it should not be cleared every time it is tested.
5415 			 * Apply a simple "hash" function on the physical page
5416 			 * number, the virtual superpage number, and the pmap
5417 			 * address to select one 4KB page out of the 512 on
5418 			 * which testing the reference bit will result in
5419 			 * clearing that reference bit.  This function is
5420 			 * designed to avoid the selection of the same 4KB page
5421 			 * for every 2MB page mapping.
5422 			 *
5423 			 * On demotion, a mapping that hasn't been referenced
5424 			 * is simply destroyed.  To avoid the possibility of a
5425 			 * subsequent page fault on a demoted wired mapping,
5426 			 * always leave its reference bit set.  Moreover,
5427 			 * since the superpage is wired, the current state of
5428 			 * its reference bit won't affect page replacement.
5429 			 */
5430 			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
5431 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
5432 			    (tpte & ATTR_SW_WIRED) == 0) {
5433 				pmap_clear_bits(pte, ATTR_AF);
5434 				pmap_invalidate_page(pmap, pv->pv_va);
5435 				cleared++;
5436 			} else
5437 				not_cleared++;
5438 		}
5439 		PMAP_UNLOCK(pmap);
5440 		/* Rotate the PV list if it has more than one entry. */
5441 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
5442 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5443 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5444 			pvh->pv_gen++;
5445 		}
5446 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
5447 			goto out;
5448 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
5449 small_mappings:
5450 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
5451 		goto out;
5452 	pv = pvf;
5453 	do {
5454 		if (pvf == NULL)
5455 			pvf = pv;
5456 		pmap = PV_PMAP(pv);
5457 		if (!PMAP_TRYLOCK(pmap)) {
5458 			pvh_gen = pvh->pv_gen;
5459 			md_gen = m->md.pv_gen;
5460 			rw_wunlock(lock);
5461 			PMAP_LOCK(pmap);
5462 			rw_wlock(lock);
5463 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5464 				PMAP_UNLOCK(pmap);
5465 				goto retry;
5466 			}
5467 		}
5468 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
5469 		KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
5470 		KASSERT(lvl == 2,
5471 		    ("pmap_ts_referenced: invalid pde level %d", lvl));
5472 		tpde = pmap_load(pde);
5473 		KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
5474 		    ("pmap_ts_referenced: found an invalid l2 table"));
5475 		pte = pmap_l2_to_l3(pde, pv->pv_va);
5476 		tpte = pmap_load(pte);
5477 		if (pmap_pte_dirty(pmap, tpte))
5478 			vm_page_dirty(m);
5479 		if ((tpte & ATTR_AF) != 0) {
5480 			if ((tpte & ATTR_SW_WIRED) == 0) {
5481 				pmap_clear_bits(pte, ATTR_AF);
5482 				pmap_invalidate_page(pmap, pv->pv_va);
5483 				cleared++;
5484 			} else
5485 				not_cleared++;
5486 		}
5487 		PMAP_UNLOCK(pmap);
5488 		/* Rotate the PV list if it has more than one entry. */
5489 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
5490 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5491 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5492 			m->md.pv_gen++;
5493 		}
5494 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
5495 	    not_cleared < PMAP_TS_REFERENCED_MAX);
5496 out:
5497 	rw_wunlock(lock);
5498 	vm_page_free_pages_toq(&free, true);
5499 	return (cleared + not_cleared);
5500 }
5501 
5502 /*
5503  *	Apply the given advice to the specified range of addresses within the
5504  *	given pmap.  Depending on the advice, clear the referenced and/or
5505  *	modified flags in each mapping and set the mapped page's dirty field.
5506  */
5507 void
5508 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
5509 {
5510 	struct rwlock *lock;
5511 	vm_offset_t va, va_next;
5512 	vm_page_t m;
5513 	pd_entry_t *l0, *l1, *l2, oldl2;
5514 	pt_entry_t *l3, oldl3;
5515 
5516 	PMAP_ASSERT_STAGE1(pmap);
5517 
5518 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
5519 		return;
5520 
5521 	PMAP_LOCK(pmap);
5522 	for (; sva < eva; sva = va_next) {
5523 		l0 = pmap_l0(pmap, sva);
5524 		if (pmap_load(l0) == 0) {
5525 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
5526 			if (va_next < sva)
5527 				va_next = eva;
5528 			continue;
5529 		}
5530 
5531 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
5532 		if (va_next < sva)
5533 			va_next = eva;
5534 		l1 = pmap_l0_to_l1(l0, sva);
5535 		if (pmap_load(l1) == 0)
5536 			continue;
5537 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
5538 			KASSERT(va_next <= eva,
5539 			    ("partial update of non-transparent 1G page "
5540 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
5541 			    pmap_load(l1), sva, eva, va_next));
5542 			continue;
5543 		}
5544 
5545 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
5546 		if (va_next < sva)
5547 			va_next = eva;
5548 		l2 = pmap_l1_to_l2(l1, sva);
5549 		oldl2 = pmap_load(l2);
5550 		if (oldl2 == 0)
5551 			continue;
5552 		if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) {
5553 			if ((oldl2 & ATTR_SW_MANAGED) == 0)
5554 				continue;
5555 			lock = NULL;
5556 			if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) {
5557 				if (lock != NULL)
5558 					rw_wunlock(lock);
5559 
5560 				/*
5561 				 * The 2MB page mapping was destroyed.
5562 				 */
5563 				continue;
5564 			}
5565 
5566 			/*
5567 			 * Unless the page mappings are wired, remove the
5568 			 * mapping to a single page so that a subsequent
5569 			 * access may repromote.  Choosing the last page
5570 			 * within the address range [sva, min(va_next, eva))
5571 			 * generally results in more repromotions.  Since the
5572 			 * underlying page table page is fully populated, this
5573 			 * removal never frees a page table page.
5574 			 */
5575 			if ((oldl2 & ATTR_SW_WIRED) == 0) {
5576 				va = eva;
5577 				if (va > va_next)
5578 					va = va_next;
5579 				va -= PAGE_SIZE;
5580 				KASSERT(va >= sva,
5581 				    ("pmap_advise: no address gap"));
5582 				l3 = pmap_l2_to_l3(l2, va);
5583 				KASSERT(pmap_load(l3) != 0,
5584 				    ("pmap_advise: invalid PTE"));
5585 				pmap_remove_l3(pmap, l3, va, pmap_load(l2),
5586 				    NULL, &lock);
5587 			}
5588 			if (lock != NULL)
5589 				rw_wunlock(lock);
5590 		}
5591 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
5592 		    ("pmap_advise: invalid L2 entry after demotion"));
5593 		if (va_next > eva)
5594 			va_next = eva;
5595 		va = va_next;
5596 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
5597 		    sva += L3_SIZE) {
5598 			oldl3 = pmap_load(l3);
5599 			if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
5600 			    (ATTR_SW_MANAGED | L3_PAGE))
5601 				goto maybe_invlrng;
5602 			else if (pmap_pte_dirty(pmap, oldl3)) {
5603 				if (advice == MADV_DONTNEED) {
5604 					/*
5605 					 * Future calls to pmap_is_modified()
5606 					 * can be avoided by making the page
5607 					 * dirty now.
5608 					 */
5609 					m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK);
5610 					vm_page_dirty(m);
5611 				}
5612 				while (!atomic_fcmpset_long(l3, &oldl3,
5613 				    (oldl3 & ~ATTR_AF) |
5614 				    ATTR_S1_AP(ATTR_S1_AP_RO)))
5615 					cpu_spinwait();
5616 			} else if ((oldl3 & ATTR_AF) != 0)
5617 				pmap_clear_bits(l3, ATTR_AF);
5618 			else
5619 				goto maybe_invlrng;
5620 			if (va == va_next)
5621 				va = sva;
5622 			continue;
5623 maybe_invlrng:
5624 			if (va != va_next) {
5625 				pmap_invalidate_range(pmap, va, sva);
5626 				va = va_next;
5627 			}
5628 		}
5629 		if (va != va_next)
5630 			pmap_invalidate_range(pmap, va, sva);
5631 	}
5632 	PMAP_UNLOCK(pmap);
5633 }
5634 
5635 /*
5636  *	Clear the modify bits on the specified physical page.
5637  */
5638 void
5639 pmap_clear_modify(vm_page_t m)
5640 {
5641 	struct md_page *pvh;
5642 	struct rwlock *lock;
5643 	pmap_t pmap;
5644 	pv_entry_t next_pv, pv;
5645 	pd_entry_t *l2, oldl2;
5646 	pt_entry_t *l3, oldl3;
5647 	vm_offset_t va;
5648 	int md_gen, pvh_gen;
5649 
5650 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5651 	    ("pmap_clear_modify: page %p is not managed", m));
5652 	vm_page_assert_busied(m);
5653 
5654 	if (!pmap_page_is_write_mapped(m))
5655 		return;
5656 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
5657 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5658 	rw_wlock(lock);
5659 restart:
5660 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5661 		pmap = PV_PMAP(pv);
5662 		PMAP_ASSERT_STAGE1(pmap);
5663 		if (!PMAP_TRYLOCK(pmap)) {
5664 			pvh_gen = pvh->pv_gen;
5665 			rw_wunlock(lock);
5666 			PMAP_LOCK(pmap);
5667 			rw_wlock(lock);
5668 			if (pvh_gen != pvh->pv_gen) {
5669 				PMAP_UNLOCK(pmap);
5670 				goto restart;
5671 			}
5672 		}
5673 		va = pv->pv_va;
5674 		l2 = pmap_l2(pmap, va);
5675 		oldl2 = pmap_load(l2);
5676 		/* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
5677 		if ((oldl2 & ATTR_SW_DBM) != 0 &&
5678 		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
5679 		    (oldl2 & ATTR_SW_WIRED) == 0) {
5680 			/*
5681 			 * Write protect the mapping to a single page so that
5682 			 * a subsequent write access may repromote.
5683 			 */
5684 			va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK);
5685 			l3 = pmap_l2_to_l3(l2, va);
5686 			oldl3 = pmap_load(l3);
5687 			while (!atomic_fcmpset_long(l3, &oldl3,
5688 			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
5689 				cpu_spinwait();
5690 			vm_page_dirty(m);
5691 			pmap_invalidate_page(pmap, va);
5692 		}
5693 		PMAP_UNLOCK(pmap);
5694 	}
5695 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5696 		pmap = PV_PMAP(pv);
5697 		PMAP_ASSERT_STAGE1(pmap);
5698 		if (!PMAP_TRYLOCK(pmap)) {
5699 			md_gen = m->md.pv_gen;
5700 			pvh_gen = pvh->pv_gen;
5701 			rw_wunlock(lock);
5702 			PMAP_LOCK(pmap);
5703 			rw_wlock(lock);
5704 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5705 				PMAP_UNLOCK(pmap);
5706 				goto restart;
5707 			}
5708 		}
5709 		l2 = pmap_l2(pmap, pv->pv_va);
5710 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
5711 		oldl3 = pmap_load(l3);
5712 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){
5713 			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
5714 			pmap_invalidate_page(pmap, pv->pv_va);
5715 		}
5716 		PMAP_UNLOCK(pmap);
5717 	}
5718 	rw_wunlock(lock);
5719 }
5720 
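/*
 * Map a range of physical addresses (e.g. firmware tables) into the kernel.
 * Before the VM system is initialized, this carves 2MiB L2 block mappings out
 * of a reserved preinit VA region; afterwards it allocates KVA with
 * kva_alloc() and maps 4KiB pages.  The mapping is released with
 * pmap_unmapbios().  A hypothetical caller:
 *
 *	void *p = pmap_mapbios(pa, len);
 *	...access the region through "p"...
 *	pmap_unmapbios((vm_offset_t)p, len);
 */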
5721 void *
5722 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
5723 {
5724 	struct pmap_preinit_mapping *ppim;
5725 	vm_offset_t va, offset;
5726 	pd_entry_t *pde;
5727 	pt_entry_t *l2;
5728 	int i, lvl, l2_blocks, free_l2_count, start_idx;
5729 
5730 	if (!vm_initialized) {
5731 		/*
5732 		 * No L3 page tables exist yet, so map entire L2 blocks, where
5733 		 * the start VA is: preinit_map_va + start_idx * L2_SIZE.
5734 		 * There may be duplicate mappings (multiple VAs -> same PA), but
5735 		 * the arm64 D-cache is always PIPT, so that is acceptable.
5736 		 */
5737 		if (size == 0)
5738 			return (NULL);
5739 
5740 		/* Calculate how many L2 blocks are needed for the mapping */
5741 		l2_blocks = (roundup2(pa + size, L2_SIZE) -
5742 		    rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
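		/*
		 * For example, pa = 1MiB and size = 3MiB span two 2MiB
		 * blocks: (roundup2(4MiB, L2_SIZE) -
		 * rounddown2(1MiB, L2_SIZE)) >> L2_SHIFT = 4MiB >> 21 = 2.
		 */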
5743 
5744 		offset = pa & L2_OFFSET;
5745 
5746 		if (preinit_map_va == 0)
5747 			return (NULL);
5748 
5749 		/* Map 2MiB L2 blocks from reserved VA space */
5750 
5751 		free_l2_count = 0;
5752 		start_idx = -1;
5753 		/* Find enough free contiguous VA space */
5754 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5755 			ppim = pmap_preinit_mapping + i;
5756 			if (free_l2_count > 0 && ppim->pa != 0) {
5757 				/* Not enough space here */
5758 				free_l2_count = 0;
5759 				start_idx = -1;
5760 				continue;
5761 			}
5762 
5763 			if (ppim->pa == 0) {
5764 				/* Free L2 block */
5765 				if (start_idx == -1)
5766 					start_idx = i;
5767 				free_l2_count++;
5768 				if (free_l2_count == l2_blocks)
5769 					break;
5770 			}
5771 		}
5772 		if (free_l2_count != l2_blocks)
5773 			panic("%s: too many preinit mappings", __func__);
5774 
5775 		va = preinit_map_va + (start_idx * L2_SIZE);
5776 		for (i = start_idx; i < start_idx + l2_blocks; i++) {
5777 			/* Mark entries as allocated */
5778 			ppim = pmap_preinit_mapping + i;
5779 			ppim->pa = pa;
5780 			ppim->va = va + offset;
5781 			ppim->size = size;
5782 		}
5783 
5784 		/* Map L2 blocks */
5785 		pa = rounddown2(pa, L2_SIZE);
5786 		for (i = 0; i < l2_blocks; i++) {
5787 			pde = pmap_pde(kernel_pmap, va, &lvl);
5788 			KASSERT(pde != NULL,
5789 			    ("pmap_mapbios: Invalid page entry, va: 0x%lx",
5790 			    va));
5791 			KASSERT(lvl == 1,
5792 			    ("pmap_mapbios: Invalid level %d", lvl));
5793 
5794 			/* Insert L2_BLOCK */
5795 			l2 = pmap_l1_to_l2(pde, va);
5796 			pmap_load_store(l2,
5797 			    pa | ATTR_DEFAULT | ATTR_S1_XN |
5798 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
5799 
5800 			va += L2_SIZE;
5801 			pa += L2_SIZE;
5802 		}
5803 		pmap_invalidate_all(kernel_pmap);
5804 
5805 		va = preinit_map_va + (start_idx * L2_SIZE);
5806 
5807 	} else {
5808 		/* kva_alloc may be used to map the pages */
5809 		offset = pa & PAGE_MASK;
5810 		size = round_page(offset + size);
5811 
5812 		va = kva_alloc(size);
5813 		if (va == 0)
5814 			panic("%s: Couldn't allocate KVA", __func__);
5815 
5816 		pde = pmap_pde(kernel_pmap, va, &lvl);
5817 		KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
5818 
5819 		/* L3 table is linked */
5820 		va = trunc_page(va);
5821 		pa = trunc_page(pa);
5822 		pmap_kenter(va, size, pa, memory_mapping_mode(pa));
5823 	}
5824 
5825 	return ((void *)(va + offset));
5826 }
5827 
5828 void
5829 pmap_unmapbios(vm_offset_t va, vm_size_t size)
5830 {
5831 	struct pmap_preinit_mapping *ppim;
5832 	vm_offset_t offset, tmpsize, va_trunc;
5833 	pd_entry_t *pde;
5834 	pt_entry_t *l2;
5835 	int i, lvl, l2_blocks, block;
5836 	bool preinit_map;
5837 
5838 	l2_blocks =
5839 	   (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
5840 	KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
5841 
5842 	/* Remove preinit mapping */
5843 	preinit_map = false;
5844 	block = 0;
5845 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5846 		ppim = pmap_preinit_mapping + i;
5847 		if (ppim->va == va) {
5848 			KASSERT(ppim->size == size,
5849 			    ("pmap_unmapbios: size mismatch"));
5850 			ppim->va = 0;
5851 			ppim->pa = 0;
5852 			ppim->size = 0;
5853 			preinit_map = true;
5854 			offset = block * L2_SIZE;
5855 			va_trunc = rounddown2(va, L2_SIZE) + offset;
5856 
5857 			/* Remove L2_BLOCK */
5858 			pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
5859 			KASSERT(pde != NULL,
5860 			    ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
5861 			    va_trunc));
5862 			l2 = pmap_l1_to_l2(pde, va_trunc);
5863 			pmap_clear(l2);
5864 
5865 			if (block == (l2_blocks - 1))
5866 				break;
5867 			block++;
5868 		}
5869 	}
5870 	if (preinit_map) {
5871 		pmap_invalidate_all(kernel_pmap);
5872 		return;
5873 	}
5874 
5875 	/* Unmap the pages reserved with kva_alloc. */
5876 	if (vm_initialized) {
5877 		offset = va & PAGE_MASK;
5878 		size = round_page(offset + size);
5879 		va = trunc_page(va);
5880 
5881 		pde = pmap_pde(kernel_pmap, va, &lvl);
5882 		KASSERT(pde != NULL,
5883 		    ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va));
5884 		KASSERT(lvl == 2, ("pmap_unmapbios: Invalid level %d", lvl));
5885 
5886 		/* Unmap and invalidate the pages */
5887 		for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
5888 			pmap_kremove(va + tmpsize);
5889 
5890 		kva_free(va, size);
5891 	}
5892 }
5893 
5894 /*
5895  * Sets the memory attribute for the specified page.
5896  */
5897 void
5898 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5899 {
5900 
5901 	m->md.pv_memattr = ma;
5902 
5903 	/*
5904 	 * If "m" is a normal page, update its direct mapping.  This update
5905 	 * can be relied upon to perform any cache operations that are
5906 	 * required for data coherence.
5907 	 */
5908 	if ((m->flags & PG_FICTITIOUS) == 0 &&
5909 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
5910 	    m->md.pv_memattr) != 0)
5911 		panic("memory attribute change on the direct map failed");
5912 }
5913 
5914 /*
5915  * Changes the specified virtual address range's memory type to that given by
5916  * the parameter "mode".  The specified virtual address range must be
5917  * completely contained within either the direct map or the kernel map.  If
5918  * the virtual address range is contained within the kernel map, then the
5919  * memory type for each of the corresponding ranges of the direct map is also
5920  * changed.  (The corresponding ranges of the direct map are those ranges that
5921  * map the same physical pages as the specified virtual address range.)  These
5922  * changes to the direct map are necessary because the architecture treats
5923  * accesses through aliases of the same physical page that have mismatched
5924  * memory attributes as unpredictable.
5925  *
5926  * Returns zero if the change completed successfully, and either EINVAL or
5927  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
5928  * of the virtual address range was not mapped, and ENOMEM is returned if
5929  * there was insufficient memory available to complete the change.  In the
5930  * latter case, the memory type may have been changed on some part of the
5931  * virtual address range or the direct map.
5932  */
5933 int
5934 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
5935 {
5936 	int error;
5937 
5938 	PMAP_LOCK(kernel_pmap);
5939 	error = pmap_change_attr_locked(va, size, mode);
5940 	PMAP_UNLOCK(kernel_pmap);
5941 	return (error);
5942 }
5943 
5944 static int
5945 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
5946 {
5947 	vm_offset_t base, offset, tmpva;
5948 	pt_entry_t l3, *pte, *newpte;
5949 	int lvl;
5950 
5951 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
5952 	base = trunc_page(va);
5953 	offset = va & PAGE_MASK;
5954 	size = round_page(offset + size);
5955 
5956 	if (!VIRT_IN_DMAP(base) &&
5957 	    !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
5958 		return (EINVAL);
5959 
5960 	for (tmpva = base; tmpva < base + size; ) {
5961 		pte = pmap_pte(kernel_pmap, tmpva, &lvl);
5962 		if (pte == NULL)
5963 			return (EINVAL);
5964 
5965 		if ((pmap_load(pte) & ATTR_S1_IDX_MASK) == ATTR_S1_IDX(mode)) {
5966 			/*
5967 			 * We already have the correct attribute,
5968 			 * ignore this entry.
5969 			 */
5970 			switch (lvl) {
5971 			default:
5972 				panic("Invalid DMAP table level: %d\n", lvl);
5973 			case 1:
5974 				tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
5975 				break;
5976 			case 2:
5977 				tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
5978 				break;
5979 			case 3:
5980 				tmpva += PAGE_SIZE;
5981 				break;
5982 			}
5983 		} else {
5984 			/*
5985 			 * Split the entry to a level 3 table, then
5986 			 * set the new attribute.
5987 			 */
5988 			switch (lvl) {
5989 			default:
5990 				panic("Invalid DMAP table level: %d\n", lvl);
5991 			case 1:
5992 				newpte = pmap_demote_l1(kernel_pmap, pte,
5993 				    tmpva & ~L1_OFFSET);
5994 				if (newpte == NULL)
5995 					return (EINVAL);
5996 				pte = pmap_l1_to_l2(pte, tmpva);
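				/* FALLTHROUGH */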
5997 			case 2:
5998 				newpte = pmap_demote_l2(kernel_pmap, pte,
5999 				    tmpva);
6000 				if (newpte == NULL)
6001 					return (EINVAL);
6002 				pte = pmap_l2_to_l3(pte, tmpva);
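				/* FALLTHROUGH */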
6003 			case 3:
6004 				/* Update the entry */
6005 				l3 = pmap_load(pte);
6006 				l3 &= ~ATTR_S1_IDX_MASK;
6007 				l3 |= ATTR_S1_IDX(mode);
6008 				if (mode == VM_MEMATTR_DEVICE)
6009 					l3 |= ATTR_S1_XN;
6010 
6011 				pmap_update_entry(kernel_pmap, pte, l3, tmpva,
6012 				    PAGE_SIZE);
6013 
6014 				/*
6015 				 * If moving to a non-cacheable entry, flush
6016 				 * the cache.
6017 				 */
6018 				if (mode == VM_MEMATTR_UNCACHEABLE)
6019 					cpu_dcache_wbinv_range(tmpva, L3_SIZE);
6020 
6021 				break;
6022 			}
6023 			tmpva += PAGE_SIZE;
6024 		}
6025 	}
6026 
6027 	return (0);
6028 }
6029 
6030 /*
6031  * Create an L2 table to map all addresses within an L1 mapping.
6032  */
6033 static pt_entry_t *
6034 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
6035 {
6036 	pt_entry_t *l2, newl2, oldl1;
6037 	vm_offset_t tmpl1;
6038 	vm_paddr_t l2phys, phys;
6039 	vm_page_t ml2;
6040 	int i;
6041 
6042 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6043 	oldl1 = pmap_load(l1);
6044 	KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
6045 	    ("pmap_demote_l1: Demoting a non-block entry"));
6046 	KASSERT((va & L1_OFFSET) == 0,
6047 	    ("pmap_demote_l1: Invalid virtual address %#lx", va));
6048 	KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
6049 	    ("pmap_demote_l1: Level 1 table shouldn't be managed"));
6050 
6051 	tmpl1 = 0;
6052 	if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
6053 		tmpl1 = kva_alloc(PAGE_SIZE);
6054 		if (tmpl1 == 0)
6055 			return (NULL);
6056 	}
6057 
6058 	if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
6059 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
6060 		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
6061 		    " in pmap %p", va, pmap);
6062 		l2 = NULL;
6063 		goto fail;
6064 	}
6065 
6066 	l2phys = VM_PAGE_TO_PHYS(ml2);
6067 	l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
6068 
6069 	/* The physical address that the old L1 entry maps */
6070 	phys = oldl1 & ~ATTR_MASK;
6071 	/* The attributes from the old L1 entry, to be copied to the L2 entries */
6072 	newl2 = oldl1 & ATTR_MASK;
6073 
6074 	/* Create the new entries */
6075 	for (i = 0; i < Ln_ENTRIES; i++) {
6076 		l2[i] = newl2 | phys;
6077 		phys += L2_SIZE;
6078 	}
6079 	KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
6080 	    ("Invalid l2 page (%lx != %lx)", l2[0],
6081 	    (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
6082 
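	/*
	 * Map the temporary page so we don't lose access to the l1 table.
	 */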
6083 	if (tmpl1 != 0) {
6084 		pmap_kenter(tmpl1, PAGE_SIZE,
6085 		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
6086 		    VM_MEMATTR_WRITE_BACK);
6087 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
6088 	}
6089 
6090 	pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
6091 
6092 fail:
6093 	if (tmpl1 != 0) {
6094 		pmap_kremove(tmpl1);
6095 		kva_free(tmpl1, PAGE_SIZE);
6096 	}
6097 
6098 	return (l2);
6099 }
6100 
6101 static void
6102 pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
6103 {
6104 	pt_entry_t *l3;
6105 
6106 	for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
6107 		*l3 = newl3;
6108 		newl3 += L3_SIZE;
6109 	}
6110 }
6111 
6112 static void
6113 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
6114     struct rwlock **lockp)
6115 {
6116 	struct spglist free;
6117 
6118 	SLIST_INIT(&free);
6119 	(void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
6120 	    lockp);
6121 	vm_page_free_pages_toq(&free, true);
6122 }
6123 
6124 /*
6125  * Create an L3 table to map all addresses within an L2 mapping.
6126  */
6127 static pt_entry_t *
6128 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
6129     struct rwlock **lockp)
6130 {
6131 	pt_entry_t *l3, newl3, oldl2;
6132 	vm_offset_t tmpl2;
6133 	vm_paddr_t l3phys;
6134 	vm_page_t ml3;
6135 
6136 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6137 	PMAP_ASSERT_STAGE1(pmap);
6138 	KASSERT(ADDR_IS_CANONICAL(va),
6139 	    ("%s: Address not in canonical form: %lx", __func__, va));
6140 
6141 	l3 = NULL;
6142 	oldl2 = pmap_load(l2);
6143 	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
6144 	    ("pmap_demote_l2: Demoting a non-block entry"));
6145 	va &= ~L2_OFFSET;
6146 
6147 	tmpl2 = 0;
6148 	if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
6149 		tmpl2 = kva_alloc(PAGE_SIZE);
6150 		if (tmpl2 == 0)
6151 			return (NULL);
6152 	}
6153 
6154 	/*
6155 	 * Invalidate the 2MB page mapping and return "failure" if the
6156 	 * mapping was never accessed.
6157 	 */
6158 	if ((oldl2 & ATTR_AF) == 0) {
6159 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
6160 		    ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
6161 		pmap_demote_l2_abort(pmap, va, l2, lockp);
6162 		CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
6163 		    va, pmap);
6164 		goto fail;
6165 	}
6166 
6167 	if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
6168 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
6169 		    ("pmap_demote_l2: page table page for a wired mapping"
6170 		    " is missing"));
6171 
6172 		/*
6173 		 * If the page table page is missing and the mapping
6174 		 * is for a kernel address, the mapping must belong to
6175 		 * the direct map.  Page table pages are preallocated
6176 		 * for every other part of the kernel address space,
6177 		 * so the direct map region is the only part of the
6178 		 * kernel address space that must be handled here.
6179 		 */
6180 		KASSERT(!ADDR_IS_KERNEL(va) || VIRT_IN_DMAP(va),
6181 		    ("pmap_demote_l2: No saved mpte for va %#lx", va));
6182 
6183 		/*
6184 		 * If the 2MB page mapping belongs to the direct map
6185 		 * region of the kernel's address space, then the page
6186 		 * allocation request specifies the highest possible
6187 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
6188 		 * priority is normal.
6189 		 */
6190 		ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
6191 		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
6192 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
6193 
6194 		/*
6195 		 * If the allocation of the new page table page fails,
6196 		 * invalidate the 2MB page mapping and return "failure".
6197 		 */
6198 		if (ml3 == NULL) {
6199 			pmap_demote_l2_abort(pmap, va, l2, lockp);
6200 			CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
6201 			    " in pmap %p", va, pmap);
6202 			goto fail;
6203 		}
6204 
6205 		if (!ADDR_IS_KERNEL(va)) {
6206 			ml3->ref_count = NL3PG;
6207 			pmap_resident_count_inc(pmap, 1);
6208 		}
6209 	}
6210 	l3phys = VM_PAGE_TO_PHYS(ml3);
6211 	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
6212 	newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
6213 	KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
6214 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
6215 	    ("pmap_demote_l2: L2 entry is writeable but not dirty"));
6216 
6217 	/*
6218 	 * If the page table page is not leftover from an earlier promotion,
6219 	 * or the mapping attributes have changed, (re)initialize the L3 table.
6220 	 *
6221 	 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
6222 	 * performs a dsb().  That dsb() ensures that the stores for filling
6223 	 * "l3" are visible before "l3" is added to the page table.
6224 	 */
6225 	if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
6226 		pmap_fill_l3(l3, newl3);
6227 
6228 	/*
6229 	 * Map the temporary page so we don't lose access to the l2 table.
6230 	 */
6231 	if (tmpl2 != 0) {
6232 		pmap_kenter(tmpl2, PAGE_SIZE,
6233 		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
6234 		    VM_MEMATTR_WRITE_BACK);
6235 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
6236 	}
6237 
6238 	/*
6239 	 * The spare PV entries must be reserved prior to demoting the
6240 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
6241 	 * of the L2 and the PV lists will be inconsistent, which can result
6242 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
6243 	 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
6244 	 * PV entry for the 2MB page mapping that is being demoted.
6245 	 */
6246 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
6247 		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
6248 
6249 	/*
6250 	 * Pass PAGE_SIZE so that a single TLB invalidation is performed on
6251 	 * the 2MB page mapping.
6252 	 */
6253 	pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
6254 
6255 	/*
6256 	 * Demote the PV entry.
6257 	 */
6258 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
6259 		pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
6260 
6261 	atomic_add_long(&pmap_l2_demotions, 1);
6262 	CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
6263 	    " in pmap %p %lx", va, pmap, l3[0]);
6264 
6265 fail:
6266 	if (tmpl2 != 0) {
6267 		pmap_kremove(tmpl2);
6268 		kva_free(tmpl2, PAGE_SIZE);
6269 	}
6270 
6271 	return (l3);
6273 }
6274 
6275 static pt_entry_t *
6276 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
6277 {
6278 	struct rwlock *lock;
6279 	pt_entry_t *l3;
6280 
6281 	lock = NULL;
6282 	l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
6283 	if (lock != NULL)
6284 		rw_wunlock(lock);
6285 	return (l3);
6286 }
6287 
6288 /*
6289  * Perform the pmap work for mincore(2).  If the page is not both referenced and
6290  * modified by this pmap, returns its physical address so that the caller can
6291  * find other mappings.
6292  */
6293 int
6294 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
6295 {
6296 	pt_entry_t *pte, tpte;
6297 	vm_paddr_t mask, pa;
6298 	int lvl, val;
6299 	bool managed;
6300 
6301 	PMAP_ASSERT_STAGE1(pmap);
6302 	PMAP_LOCK(pmap);
6303 	pte = pmap_pte(pmap, addr, &lvl);
6304 	if (pte != NULL) {
6305 		tpte = pmap_load(pte);
6306 
6307 		switch (lvl) {
6308 		case 3:
6309 			mask = L3_OFFSET;
6310 			break;
6311 		case 2:
6312 			mask = L2_OFFSET;
6313 			break;
6314 		case 1:
6315 			mask = L1_OFFSET;
6316 			break;
6317 		default:
6318 			panic("pmap_mincore: invalid level %d", lvl);
6319 		}
6320 
6321 		managed = (tpte & ATTR_SW_MANAGED) != 0;
6322 		val = MINCORE_INCORE;
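		/* Level 1 and level 2 block mappings are reported as superpages. */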
6323 		if (lvl != 3)
6324 			val |= MINCORE_PSIND(3 - lvl);
6325 		if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
6326 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
6327 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
6328 		if ((tpte & ATTR_AF) == ATTR_AF)
6329 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
6330 
6331 		pa = (tpte & ~ATTR_MASK) | (addr & mask);
6332 	} else {
6333 		managed = false;
6334 		val = 0;
6335 	}
6336 
6337 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
6338 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
6339 		*pap = pa;
6340 	}
6341 	PMAP_UNLOCK(pmap);
6342 	return (val);
6343 }
6344 
6345 /*
6346  * Garbage collect every ASID that is neither active on a processor nor
6347  * reserved.
6348  */
6349 static void
6350 pmap_reset_asid_set(pmap_t pmap)
6351 {
6352 	pmap_t curpmap;
6353 	int asid, cpuid, epoch;
6354 	struct asid_set *set;
6355 	enum pmap_stage stage;
6356 
6357 	set = pmap->pm_asid_set;
6358 	stage = pmap->pm_stage;
6359 
6361 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
6362 	mtx_assert(&set->asid_set_mutex, MA_OWNED);
6363 
6364 	/*
6365 	 * Ensure that the store to asid_epoch is globally visible before the
6366 	 * loads from pc_curpmap are performed.
6367 	 */
6368 	epoch = set->asid_epoch + 1;
6369 	if (epoch == INT_MAX)
6370 		epoch = 0;
6371 	set->asid_epoch = epoch;
6372 	dsb(ishst);
6373 	if (stage == PM_STAGE1) {
6374 		__asm __volatile("tlbi vmalle1is");
6375 	} else {
6376 		KASSERT(pmap_clean_stage2_tlbi != NULL,
6377 		    ("%s: Unset stage 2 tlb invalidation callback\n",
6378 		    __func__));
6379 		pmap_clean_stage2_tlbi();
6380 	}
6381 	dsb(ish);
6382 	bit_nclear(set->asid_set, ASID_FIRST_AVAILABLE,
6383 	    set->asid_set_size - 1);
6384 	CPU_FOREACH(cpuid) {
6385 		if (cpuid == curcpu)
6386 			continue;
6387 		if (stage == PM_STAGE1) {
6388 			curpmap = pcpu_find(cpuid)->pc_curpmap;
6389 			PMAP_ASSERT_STAGE1(pmap);
6390 		} else {
6391 			curpmap = pcpu_find(cpuid)->pc_curvmpmap;
6392 			if (curpmap == NULL)
6393 				continue;
6394 			PMAP_ASSERT_STAGE2(pmap);
6395 		}
6396 		KASSERT(curpmap->pm_asid_set == set, ("Incorrect set"));
6397 		asid = COOKIE_TO_ASID(curpmap->pm_cookie);
6398 		if (asid == -1)
6399 			continue;
6400 		bit_set(set->asid_set, asid);
6401 		curpmap->pm_cookie = COOKIE_FROM(asid, epoch);
6402 	}
6403 }
6404 
6405 /*
6406  * Allocate a new ASID for the specified pmap.
6407  */
6408 static void
6409 pmap_alloc_asid(pmap_t pmap)
6410 {
6411 	struct asid_set *set;
6412 	int new_asid;
6413 
6414 	set = pmap->pm_asid_set;
6415 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
6416 
6417 	mtx_lock_spin(&set->asid_set_mutex);
6418 
6419 	/*
6420 	 * While this processor was waiting to acquire the asid set mutex,
6421 	 * pmap_reset_asid_set() running on another processor might have
6422 	 * updated this pmap's cookie to the current epoch.  In which case, we
6423 	 * don't need to allocate a new ASID.
6424 	 */
6425 	if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch)
6426 		goto out;
6427 
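	/*
	 * Search for a free ASID from asid_next to the end of the set.  If
	 * that fails, wrap around and search from the first available ASID.
	 * If both passes fail, reset the ASID set and search once more.
	 */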
6428 	bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size,
6429 	    &new_asid);
6430 	if (new_asid == -1) {
6431 		bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
6432 		    set->asid_next, &new_asid);
6433 		if (new_asid == -1) {
6434 			pmap_reset_asid_set(pmap);
6435 			bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
6436 			    set->asid_set_size, &new_asid);
6437 			KASSERT(new_asid != -1, ("ASID allocation failure"));
6438 		}
6439 	}
6440 	bit_set(set->asid_set, new_asid);
6441 	set->asid_next = new_asid + 1;
6442 	pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch);
6443 out:
6444 	mtx_unlock_spin(&set->asid_set_mutex);
6445 }
6446 
6447 /*
6448  * Compute the value that should be stored in ttbr0 to activate the specified
6449  * pmap.  This value may change from time to time.
6450  */
6451 uint64_t
6452 pmap_to_ttbr0(pmap_t pmap)
6453 {
6454 
6455 	return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) |
6456 	    pmap->pm_ttbr);
6457 }
6458 
6459 static bool
6460 pmap_activate_int(pmap_t pmap)
6461 {
6462 	struct asid_set *set;
6463 	int epoch;
6464 
6465 	KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
6466 	KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
6467 
6468 	if ((pmap->pm_stage == PM_STAGE1 && pmap == PCPU_GET(curpmap)) ||
6469 	    (pmap->pm_stage == PM_STAGE2 && pmap == PCPU_GET(curvmpmap))) {
6470 		/*
6471 		 * Handle the possibility that the old thread was preempted
6472 		 * after an "ic" or "tlbi" instruction but before it performed
6473 		 * a "dsb" instruction.  If the old thread migrates to a new
6474 		 * processor, its completion of a "dsb" instruction on that
6475 		 * new processor does not guarantee that the "ic" or "tlbi"
6476 		 * instructions performed on the old processor have completed.
6477 		 */
6478 		dsb(ish);
6479 		return (false);
6480 	}
6481 
6482 	set = pmap->pm_asid_set;
6483 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
6484 
6485 	/*
6486 	 * Ensure that the store to curpmap is globally visible before the
6487 	 * load from asid_epoch is performed.
6488 	 */
6489 	if (pmap->pm_stage == PM_STAGE1)
6490 		PCPU_SET(curpmap, pmap);
6491 	else
6492 		PCPU_SET(curvmpmap, pmap);
6493 	dsb(ish);
6494 	epoch = COOKIE_TO_EPOCH(pmap->pm_cookie);
6495 	if (epoch >= 0 && epoch != set->asid_epoch)
6496 		pmap_alloc_asid(pmap);
6497 
6498 	if (pmap->pm_stage == PM_STAGE1) {
6499 		set_ttbr0(pmap_to_ttbr0(pmap));
6500 		if (PCPU_GET(bcast_tlbi_workaround) != 0)
6501 			invalidate_local_icache();
6502 	}
6503 	return (true);
6504 }
6505 
6506 void
6507 pmap_activate_vm(pmap_t pmap)
6508 {
6509 
6510 	PMAP_ASSERT_STAGE2(pmap);
6511 
6512 	(void)pmap_activate_int(pmap);
6513 }
6514 
6515 void
6516 pmap_activate(struct thread *td)
6517 {
6518 	pmap_t	pmap;
6519 
6520 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
6521 	PMAP_ASSERT_STAGE1(pmap);
6522 	critical_enter();
6523 	(void)pmap_activate_int(pmap);
6524 	critical_exit();
6525 }
6526 
6527 /*
6528  * To eliminate the unused parameter "old", we would have to add an instruction
6529  * to cpu_switch().
6530  */
6531 struct pcb *
6532 pmap_switch(struct thread *old __unused, struct thread *new)
6533 {
6534 	pcpu_bp_harden bp_harden;
6535 	struct pcb *pcb;
6536 
6537 	/* Store the new curthread */
6538 	PCPU_SET(curthread, new);
6539 
6540 	/* And the new pcb */
6541 	pcb = new->td_pcb;
6542 	PCPU_SET(curpcb, pcb);
6543 
6544 	/*
6545 	 * TODO: We may need to flush the cache here if switching
6546 	 * to a user process.
6547 	 */
6548 
6549 	if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
6550 		/*
6551 		 * Stop userspace from training the branch predictor against
6552 		 * other processes. This will call into a CPU specific
6553 		 * function that clears the branch predictor state.
6554 		 */
6555 		bp_harden = PCPU_GET(bp_harden);
6556 		if (bp_harden != NULL)
6557 			bp_harden();
6558 	}
6559 
6560 	return (pcb);
6561 }
6562 
6563 void
6564 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
6565 {
6566 
6567 	PMAP_ASSERT_STAGE1(pmap);
6568 	KASSERT(ADDR_IS_CANONICAL(va),
6569 	    ("%s: Address not in canonical form: %lx", __func__, va));
6570 
6571 	if (ADDR_IS_KERNEL(va)) {
6572 		cpu_icache_sync_range(va, sz);
6573 	} else {
6574 		u_int len, offset;
6575 		vm_paddr_t pa;
6576 
6577 		/* Find the length of data in this page to flush */
6578 		offset = va & PAGE_MASK;
6579 		len = imin(PAGE_SIZE - offset, sz);
6580 
6581 		while (sz != 0) {
6582 			/* Extract the physical address & find it in the DMAP */
6583 			pa = pmap_extract(pmap, va);
6584 			if (pa != 0)
6585 				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
6586 
6587 			/* Move to the next page */
6588 			sz -= len;
6589 			va += len;
6590 			/* Set the length for the next iteration */
6591 			len = imin(PAGE_SIZE, sz);
6592 		}
6593 	}
6594 }
6595 
6596 static int
6597 pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far)
6598 {
6599 	pd_entry_t *pdep;
6600 	pt_entry_t *ptep, pte;
6601 	int rv, lvl, dfsc;
6602 
6603 	PMAP_ASSERT_STAGE2(pmap);
6604 	rv = KERN_FAILURE;
6605 
6606 	/* Data and insn aborts use same encoding for FSC field. */
6607 	dfsc = esr & ISS_DATA_DFSC_MASK;
6608 	switch (dfsc) {
6609 	case ISS_DATA_DFSC_TF_L0:
6610 	case ISS_DATA_DFSC_TF_L1:
6611 	case ISS_DATA_DFSC_TF_L2:
6612 	case ISS_DATA_DFSC_TF_L3:
6613 		PMAP_LOCK(pmap);
6614 		pdep = pmap_pde(pmap, far, &lvl);
6615 		if (pdep == NULL || lvl != (dfsc - ISS_DATA_DFSC_TF_L1)) {
6616 			PMAP_UNLOCK(pmap);
6617 			break;
6618 		}
6619 
6620 		switch (lvl) {
6621 		case 0:
6622 			ptep = pmap_l0_to_l1(pdep, far);
6623 			break;
6624 		case 1:
6625 			ptep = pmap_l1_to_l2(pdep, far);
6626 			break;
6627 		case 2:
6628 			ptep = pmap_l2_to_l3(pdep, far);
6629 			break;
6630 		default:
6631 			panic("%s: Invalid pde level %d", __func__, lvl);
6632 		}
6633 		goto fault_exec;
6634 
6635 	case ISS_DATA_DFSC_AFF_L1:
6636 	case ISS_DATA_DFSC_AFF_L2:
6637 	case ISS_DATA_DFSC_AFF_L3:
6638 		PMAP_LOCK(pmap);
6639 		ptep = pmap_pte(pmap, far, &lvl);
6640 fault_exec:
6641 		if (ptep != NULL && (pte = pmap_load(ptep)) != 0) {
6642 			if (icache_vmid) {
6643 				pmap_invalidate_vpipt_icache();
6644 			} else {
6645 				/*
6646 				 * If accessing an executable page, invalidate
6647 				 * the I-cache so it will be valid when we
6648 				 * continue execution in the guest. The D-cache
6649 				 * is assumed to already be clean to the Point
6650 				 * of Coherency.
6651 				 */
6652 				if ((pte & ATTR_S2_XN_MASK) !=
6653 				    ATTR_S2_XN(ATTR_S2_XN_NONE)) {
6654 					invalidate_icache();
6655 				}
6656 			}
6657 			pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID);
6658 			rv = KERN_SUCCESS;
6659 		}
6660 		PMAP_UNLOCK(pmap);
6661 		break;
6662 	}
6663 
6664 	return (rv);
6665 }
6666 
6667 int
6668 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
6669 {
6670 	pt_entry_t pte, *ptep;
6671 	register_t intr;
6672 	uint64_t ec, par;
6673 	int lvl, rv;
6674 
6675 	rv = KERN_FAILURE;
6676 
6677 	ec = ESR_ELx_EXCEPTION(esr);
6678 	switch (ec) {
6679 	case EXCP_INSN_ABORT_L:
6680 	case EXCP_INSN_ABORT:
6681 	case EXCP_DATA_ABORT_L:
6682 	case EXCP_DATA_ABORT:
6683 		break;
6684 	default:
6685 		return (rv);
6686 	}
6687 
6688 	if (pmap->pm_stage == PM_STAGE2)
6689 		return (pmap_stage2_fault(pmap, esr, far));
6690 
6691 	/* Data and insn aborts use same encoding for FSC field. */
6692 	switch (esr & ISS_DATA_DFSC_MASK) {
6693 	case ISS_DATA_DFSC_AFF_L1:
6694 	case ISS_DATA_DFSC_AFF_L2:
6695 	case ISS_DATA_DFSC_AFF_L3:
6696 		PMAP_LOCK(pmap);
6697 		ptep = pmap_pte(pmap, far, &lvl);
6698 		if (ptep != NULL) {
6699 			pmap_set_bits(ptep, ATTR_AF);
6700 			rv = KERN_SUCCESS;
6701 			/*
6702 			 * XXXMJ as an optimization we could mark the entry
6703 			 * dirty if this is a write fault.
6704 			 */
6705 		}
6706 		PMAP_UNLOCK(pmap);
6707 		break;
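	/*
	 * A write to a clean mapping that is tracked by the software DBM bit
	 * raises a permission fault; make the mapping writable so that the
	 * store can proceed, thereby recording the mapping as dirty.
	 */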
6708 	case ISS_DATA_DFSC_PF_L1:
6709 	case ISS_DATA_DFSC_PF_L2:
6710 	case ISS_DATA_DFSC_PF_L3:
6711 		if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
6712 		    (esr & ISS_DATA_WnR) == 0)
6713 			return (rv);
6714 		PMAP_LOCK(pmap);
6715 		ptep = pmap_pte(pmap, far, &lvl);
6716 		if (ptep != NULL &&
6717 		    ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
6718 			if ((pte & ATTR_S1_AP_RW_BIT) ==
6719 			    ATTR_S1_AP(ATTR_S1_AP_RO)) {
6720 				pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
6721 				pmap_invalidate_page(pmap, far);
6722 			}
6723 			rv = KERN_SUCCESS;
6724 		}
6725 		PMAP_UNLOCK(pmap);
6726 		break;
6727 	case ISS_DATA_DFSC_TF_L0:
6728 	case ISS_DATA_DFSC_TF_L1:
6729 	case ISS_DATA_DFSC_TF_L2:
6730 	case ISS_DATA_DFSC_TF_L3:
6731 		/*
6732 		 * Retry the translation.  A break-before-make sequence can
6733 		 * produce a transient fault.
6734 		 */
6735 		if (pmap == kernel_pmap) {
6736 			/*
6737 			 * The translation fault may have occurred within a
6738 			 * critical section.  Therefore, we must check the
6739 			 * address without acquiring the kernel pmap's lock.
6740 			 */
6741 			if (pmap_klookup(far, NULL))
6742 				rv = KERN_SUCCESS;
6743 		} else {
6744 			PMAP_LOCK(pmap);
6745 			/* Ask the MMU to check the address. */
6746 			intr = intr_disable();
6747 			par = arm64_address_translate_s1e0r(far);
6748 			intr_restore(intr);
6749 			PMAP_UNLOCK(pmap);
6750 
6751 			/*
6752 			 * If the translation was successful, then we can
6753 			 * return success to the trap handler.
6754 			 */
6755 			if (PAR_SUCCESS(par))
6756 				rv = KERN_SUCCESS;
6757 		}
6758 		break;
6759 	}
6760 
6761 	return (rv);
6762 }
6763 
6764 /*
6765  *	Increase the starting virtual address of the given mapping if a
6766  *	different alignment might result in more superpage mappings.
6767  */
6768 void
6769 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
6770     vm_offset_t *addr, vm_size_t size)
6771 {
6772 	vm_offset_t superpage_offset;
6773 
6774 	if (size < L2_SIZE)
6775 		return;
6776 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
6777 		offset += ptoa(object->pg_color);
6778 	superpage_offset = offset & L2_OFFSET;
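	/*
	 * If realignment can produce at least one full 2MB mapping, advance
	 * *addr to the next address whose offset within a 2MB superpage
	 * equals superpage_offset, e.g. an offset 1MB into a superpage moves
	 * *addr to the next VA whose low L2_OFFSET bits equal 1MB.
	 */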
6779 	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
6780 	    (*addr & L2_OFFSET) == superpage_offset)
6781 		return;
6782 	if ((*addr & L2_OFFSET) < superpage_offset)
6783 		*addr = (*addr & ~L2_OFFSET) + superpage_offset;
6784 	else
6785 		*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
6786 }
6787 
6788 /**
6789  * Get the kernel virtual address of a set of physical pages. If there are
6790  * physical addresses not covered by the DMAP perform a transient mapping
6791  * that will be removed when calling pmap_unmap_io_transient.
6792  *
6793  * \param page        The pages the caller wishes to obtain the virtual
6794  *                    address on the kernel memory map.
6795  * \param vaddr       On return contains the kernel virtual memory address
6796  *                    of the pages passed in the page parameter.
6797  * \param count       Number of pages passed in.
6798  * \param can_fault   TRUE if the thread using the mapped pages can take
6799  *                    page faults, FALSE otherwise.
6800  *
6801  * \returns TRUE if the caller must call pmap_unmap_io_transient when
6802  *          finished or FALSE otherwise.
6803  *
6804  */
6805 boolean_t
6806 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6807     boolean_t can_fault)
6808 {
6809 	vm_paddr_t paddr;
6810 	boolean_t needs_mapping;
6811 	int error, i;
6812 
6813 	/*
6814 	 * Allocate any KVA space that we need, this is done in a separate
6815 	 * loop to prevent calling vmem_alloc while pinned.
6816 	 */
6817 	needs_mapping = FALSE;
6818 	for (i = 0; i < count; i++) {
6819 		paddr = VM_PAGE_TO_PHYS(page[i]);
6820 		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
6821 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
6822 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
6823 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
6824 			needs_mapping = TRUE;
6825 		} else {
6826 			vaddr[i] = PHYS_TO_DMAP(paddr);
6827 		}
6828 	}
6829 
6830 	/* Exit early if everything is covered by the DMAP */
6831 	if (!needs_mapping)
6832 		return (FALSE);
6833 
6834 	if (!can_fault)
6835 		sched_pin();
6836 	for (i = 0; i < count; i++) {
6837 		paddr = VM_PAGE_TO_PHYS(page[i]);
6838 		if (!PHYS_IN_DMAP(paddr)) {
6839 			panic(
6840 			   "pmap_map_io_transient: TODO: Map out of DMAP data");
6841 		}
6842 	}
6843 
6844 	return (needs_mapping);
6845 }
6846 
6847 void
6848 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6849     boolean_t can_fault)
6850 {
6851 	vm_paddr_t paddr;
6852 	int i;
6853 
6854 	if (!can_fault)
6855 		sched_unpin();
6856 	for (i = 0; i < count; i++) {
6857 		paddr = VM_PAGE_TO_PHYS(page[i]);
6858 		if (!PHYS_IN_DMAP(paddr)) {
6859 			panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
6860 		}
6861 	}
6862 }
6863 
6864 boolean_t
6865 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
6866 {
6867 
6868 	return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
6869 }
6870 
6871 /*
6872  * Track a range of the kernel's virtual address space that is contiguous
6873  * in various mapping attributes.
6874  */
6875 struct pmap_kernel_map_range {
6876 	vm_offset_t sva;
6877 	pt_entry_t attrs;
6878 	int l3pages;
6879 	int l3contig;
6880 	int l2blocks;
6881 	int l1blocks;
6882 };
6883 
6884 static void
6885 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
6886     vm_offset_t eva)
6887 {
6888 	const char *mode;
6889 	int index;
6890 
6891 	if (eva <= range->sva)
6892 		return;
6893 
6894 	index = range->attrs & ATTR_S1_IDX_MASK;
6895 	switch (index) {
6896 	case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
6897 		mode = "DEV";
6898 		break;
6899 	case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
6900 		mode = "UC";
6901 		break;
6902 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
6903 		mode = "WB";
6904 		break;
6905 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
6906 		mode = "WT";
6907 		break;
6908 	default:
6909 		printf(
6910 		    "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n",
6911 		    __func__, index, range->sva, eva);
6912 		mode = "??";
6913 		break;
6914 	}
6915 
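	/*
	 * Columns: VA range, permissions (read/write/execute, user or
	 * supervisor), memory type, and the counts of L1 blocks, L2 blocks,
	 * contiguous L3 runs, and L3 pages in the range.
	 */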
6916 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c %3s %d %d %d %d\n",
6917 	    range->sva, eva,
6918 	    (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
6919 	    (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
6920 	    (range->attrs & ATTR_S1_AP_USER) != 0 ? 'u' : 's',
6921 	    mode, range->l1blocks, range->l2blocks, range->l3contig,
6922 	    range->l3pages);
6923 
6924 	/* Reset to sentinel value. */
6925 	range->sva = 0xfffffffffffffffful;
6926 }
6927 
6928 /*
6929  * Determine whether the attributes specified by a page table entry match those
6930  * being tracked by the current range.
6931  */
6932 static bool
6933 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
6934 {
6935 
6936 	return (range->attrs == attrs);
6937 }
6938 
6939 static void
6940 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
6941     pt_entry_t attrs)
6942 {
6943 
6944 	memset(range, 0, sizeof(*range));
6945 	range->sva = va;
6946 	range->attrs = attrs;
6947 }
6948 
6949 /*
6950  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
6951  * those of the current run, dump the address range and its attributes, and
6952  * begin a new run.
6953  */
6954 static void
6955 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
6956     vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e,
6957     pt_entry_t l3e)
6958 {
6959 	pt_entry_t attrs;
6960 
6961 	attrs = l0e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6962 	attrs |= l1e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6963 	if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK)
6964 		attrs |= l1e & ATTR_S1_IDX_MASK;
6965 	attrs |= l2e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6966 	if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK)
6967 		attrs |= l2e & ATTR_S1_IDX_MASK;
6968 	attrs |= l3e & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK);
6969 
6970 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
6971 		sysctl_kmaps_dump(sb, range, va);
6972 		sysctl_kmaps_reinit(range, va, attrs);
6973 	}
6974 }
6975 
6976 static int
6977 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
6978 {
6979 	struct pmap_kernel_map_range range;
6980 	struct sbuf sbuf, *sb;
6981 	pd_entry_t l0e, *l1, l1e, *l2, l2e;
6982 	pt_entry_t *l3, l3e;
6983 	vm_offset_t sva;
6984 	vm_paddr_t pa;
6985 	int error, i, j, k, l;
6986 
6987 	error = sysctl_wire_old_buffer(req, 0);
6988 	if (error != 0)
6989 		return (error);
6990 	sb = &sbuf;
6991 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
6992 
6993 	/* Sentinel value. */
6994 	range.sva = 0xfffffffffffffffful;
6995 
6996 	/*
6997 	 * Iterate over the kernel page tables without holding the kernel pmap
6998 	 * lock.  Kernel page table pages are never freed, so at worst we will
6999 	 * observe inconsistencies in the output.
7000 	 */
7001 	for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES;
7002 	    i++) {
7003 		if (i == pmap_l0_index(DMAP_MIN_ADDRESS))
7004 			sbuf_printf(sb, "\nDirect map:\n");
7005 		else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
7006 			sbuf_printf(sb, "\nKernel map:\n");
7007 
7008 		l0e = kernel_pmap->pm_l0[i];
7009 		if ((l0e & ATTR_DESCR_VALID) == 0) {
7010 			sysctl_kmaps_dump(sb, &range, sva);
7011 			sva += L0_SIZE;
7012 			continue;
7013 		}
7014 		pa = l0e & ~ATTR_MASK;
7015 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa);
7016 
7017 		for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
7018 			l1e = l1[j];
7019 			if ((l1e & ATTR_DESCR_VALID) == 0) {
7020 				sysctl_kmaps_dump(sb, &range, sva);
7021 				sva += L1_SIZE;
7022 				continue;
7023 			}
7024 			if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
7025 				sysctl_kmaps_check(sb, &range, sva, l0e, l1e,
7026 				    0, 0);
7027 				range.l1blocks++;
7028 				sva += L1_SIZE;
7029 				continue;
7030 			}
7031 			pa = l1e & ~ATTR_MASK;
7032 			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);
7033 
7034 			for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
7035 				l2e = l2[k];
7036 				if ((l2e & ATTR_DESCR_VALID) == 0) {
7037 					sysctl_kmaps_dump(sb, &range, sva);
7038 					sva += L2_SIZE;
7039 					continue;
7040 				}
7041 				if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
7042 					sysctl_kmaps_check(sb, &range, sva,
7043 					    l0e, l1e, l2e, 0);
7044 					range.l2blocks++;
7045 					sva += L2_SIZE;
7046 					continue;
7047 				}
7048 				pa = l2e & ~ATTR_MASK;
7049 				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
7050 
7051 				for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
7052 				    l++, sva += L3_SIZE) {
7053 					l3e = l3[l];
7054 					if ((l3e & ATTR_DESCR_VALID) == 0) {
7055 						sysctl_kmaps_dump(sb, &range,
7056 						    sva);
7057 						continue;
7058 					}
7059 					sysctl_kmaps_check(sb, &range, sva,
7060 					    l0e, l1e, l2e, l3e);
7061 					if ((l3e & ATTR_CONTIGUOUS) != 0)
7062 						range.l3contig += l % 16 == 0 ?
7063 						    1 : 0;
7064 					else
7065 						range.l3pages++;
7066 				}
7067 			}
7068 		}
7069 	}
7070 
7071 	error = sbuf_finish(sb);
7072 	sbuf_delete(sb);
7073 	return (error);
7074 }
7075 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
7076     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
7077     NULL, 0, sysctl_kmaps, "A",
7078     "Dump kernel address layout");
7079