xref: /freebsd/sys/arm64/arm64/pmap.c (revision 6419bb52)
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2003 Peter Wemm
9  * All rights reserved.
10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11  * All rights reserved.
12  * Copyright (c) 2014 Andrew Turner
13  * All rights reserved.
14  * Copyright (c) 2014-2016 The FreeBSD Foundation
15  * All rights reserved.
16  *
17  * This code is derived from software contributed to Berkeley by
18  * the Systems Programming Group of the University of Utah Computer
19  * Science Department and William Jolitz of UUNET Technologies Inc.
20  *
21  * This software was developed by Andrew Turner under sponsorship from
22  * the FreeBSD Foundation.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  * 3. All advertising materials mentioning features or use of this software
33  *    must display the following acknowledgement:
34  *	This product includes software developed by the University of
35  *	California, Berkeley and its contributors.
36  * 4. Neither the name of the University nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  *
52  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
53  */
54 /*-
55  * Copyright (c) 2003 Networks Associates Technology, Inc.
56  * All rights reserved.
57  *
58  * This software was developed for the FreeBSD Project by Jake Burkholder,
59  * Safeport Network Services, and Network Associates Laboratories, the
60  * Security Research Division of Network Associates, Inc. under
61  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
62  * CHATS research program.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83  * SUCH DAMAGE.
84  */
85 
86 #include <sys/cdefs.h>
87 __FBSDID("$FreeBSD$");
88 
89 /*
90  *	Manages physical address maps.
91  *
92  *	Since the information managed by this module is
93  *	also stored by the logical address mapping module,
94  *	this module may throw away valid virtual-to-physical
95  *	mappings at almost any time.  However, invalidations
96  *	of virtual-to-physical mappings must be done as
97  *	requested.
98  *
99  *	In order to cope with hardware architectures which
100  *	make virtual-to-physical map invalidates expensive,
101  *	this module may delay invalidation or protection-reduction
102  *	operations until such time as they are actually
103  *	necessary.  This module is given full information as
104  *	to which processors are currently using which maps,
105  *	and to when physical maps must be made correct.
106  */
107 
108 #include "opt_vm.h"
109 
110 #include <sys/param.h>
111 #include <sys/bitstring.h>
112 #include <sys/bus.h>
113 #include <sys/systm.h>
114 #include <sys/kernel.h>
115 #include <sys/ktr.h>
116 #include <sys/limits.h>
117 #include <sys/lock.h>
118 #include <sys/malloc.h>
119 #include <sys/mman.h>
120 #include <sys/msgbuf.h>
121 #include <sys/mutex.h>
122 #include <sys/physmem.h>
123 #include <sys/proc.h>
124 #include <sys/rwlock.h>
125 #include <sys/sbuf.h>
126 #include <sys/sx.h>
127 #include <sys/vmem.h>
128 #include <sys/vmmeter.h>
129 #include <sys/sched.h>
130 #include <sys/sysctl.h>
131 #include <sys/_unrhdr.h>
132 #include <sys/smp.h>
133 
134 #include <vm/vm.h>
135 #include <vm/vm_param.h>
136 #include <vm/vm_kern.h>
137 #include <vm/vm_page.h>
138 #include <vm/vm_map.h>
139 #include <vm/vm_object.h>
140 #include <vm/vm_extern.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_pager.h>
143 #include <vm/vm_phys.h>
144 #include <vm/vm_radix.h>
145 #include <vm/vm_reserv.h>
146 #include <vm/uma.h>
147 
148 #include <machine/machdep.h>
149 #include <machine/md_var.h>
150 #include <machine/pcb.h>
151 
152 #define	PMAP_ASSERT_STAGE1(pmap)	MPASS((pmap)->pm_stage == PM_STAGE1)
153 #define	PMAP_ASSERT_STAGE2(pmap)	MPASS((pmap)->pm_stage == PM_STAGE2)
154 
155 #define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
156 #define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
157 #define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
158 #define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))
159 
160 #define	NUL0E		L0_ENTRIES
161 #define	NUL1E		(NUL0E * NL1PG)
162 #define	NUL2E		(NUL1E * NL2PG)
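/*
 * Page-table pages are named by a single pindex namespace, consumed by
 * _pmap_alloc_l3() and _pmap_unwire_l3(): indices [0, NUL2E) refer to L3
 * table pages, [NUL2E, NUL2E + NUL1E) to L2 table pages, and
 * [NUL2E + NUL1E, NUL2E + NUL1E + NUL0E) to L1 table pages.  With 4KB
 * pages each level holds 512 entries, so NUL1E is 512 * 512 and NUL2E is
 * 512 * 512 * 512.
 */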
163 
164 #if !defined(DIAGNOSTIC)
165 #ifdef __GNUC_GNU_INLINE__
166 #define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
167 #else
168 #define PMAP_INLINE	extern inline
169 #endif
170 #else
171 #define PMAP_INLINE
172 #endif
173 
174 #ifdef PV_STATS
175 #define PV_STAT(x)	do { x ; } while (0)
176 #else
177 #define PV_STAT(x)	do { } while (0)
178 #endif
179 
180 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
181 #define	pa_to_pvh(pa)		(&pv_table[pmap_l2_pindex(pa)])
182 
183 #define	NPV_LIST_LOCKS	MAXCPU
184 
185 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
186 			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
187 
188 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
189 	struct rwlock **_lockp = (lockp);		\
190 	struct rwlock *_new_lock;			\
191 							\
192 	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
193 	if (_new_lock != *_lockp) {			\
194 		if (*_lockp != NULL)			\
195 			rw_wunlock(*_lockp);		\
196 		*_lockp = _new_lock;			\
197 		rw_wlock(*_lockp);			\
198 	}						\
199 } while (0)
200 
201 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
202 			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
203 
204 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
205 	struct rwlock **_lockp = (lockp);		\
206 							\
207 	if (*_lockp != NULL) {				\
208 		rw_wunlock(*_lockp);			\
209 		*_lockp = NULL;				\
210 	}						\
211 } while (0)
212 
213 #define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
214 			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
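/*
 * PV list locks are assigned by hashing the physical address, so unrelated
 * pages may share a lock.  The CHANGE_PV_LIST_LOCK_TO_* macros drop the
 * currently held lock and acquire the new one only when the target page
 * hashes to a different lock.
 */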
215 
216 /*
217  * The presence of this flag indicates that the mapping is writeable.
218  * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
219  * it is dirty.  This flag may only be set on managed mappings.
220  *
221  * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
222  * as a software managed bit.
223  */
224 #define	ATTR_SW_DBM	ATTR_DBM
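/*
 * Together with ATTR_S1_AP_RO this encodes three states for a managed
 * mapping (see pmap_pte_dirty()): RW with ATTR_SW_DBM set is writeable
 * and dirty, RO with ATTR_SW_DBM set is writeable but still clean, and
 * RO without ATTR_SW_DBM is read-only.
 */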
225 
226 struct pmap kernel_pmap_store;
227 
228 /* Used for mapping ACPI memory before VM is initialized */
229 #define	PMAP_PREINIT_MAPPING_COUNT	32
230 #define	PMAP_PREINIT_MAPPING_SIZE	(PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
231 static vm_offset_t preinit_map_va;	/* Start VA of pre-init mapping space */
232 static int vm_initialized = 0;		/* No need to use pre-init maps when set */
233 
234 /*
235  * Reserve a few L2 blocks starting from 'preinit_map_va' pointer.
236  * Always map entire L2 block for simplicity.
237  * VA of L2 block = preinit_map_va + i * L2_SIZE
238  */
239 static struct pmap_preinit_mapping {
240 	vm_paddr_t	pa;
241 	vm_offset_t	va;
242 	vm_size_t	size;
243 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
244 
245 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
246 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
247 vm_offset_t kernel_vm_end = 0;
248 
249 /*
250  * Data for the pv entry allocation mechanism.
251  */
252 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
253 static struct mtx pv_chunks_mutex;
254 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
255 static struct md_page *pv_table;
256 static struct md_page pv_dummy;
257 
258 vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
259 vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
260 vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */
261 
262 /* This code assumes all L1 DMAP entries will be used */
263 CTASSERT((DMAP_MIN_ADDRESS  & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
264 CTASSERT((DMAP_MAX_ADDRESS  & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
265 
266 #define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
267 extern pt_entry_t pagetable_dmap[];
268 
269 #define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
270 static vm_paddr_t physmap[PHYSMAP_SIZE];
271 static u_int physmap_idx;
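/*
 * physmap[] is filled by physmem_avail() with (start, end) physical
 * address pairs; physmap_idx counts those regions, so region i occupies
 * physmap[2 * i] and physmap[2 * i + 1].
 */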
272 
273 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
274     "VM/pmap parameters");
275 
276 /*
277  * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
278  * it has currently allocated to a pmap, a cursor ("asid_next") to
279  * optimize its search for a free ASID in the bit vector, and an epoch number
280  * ("asid_epoch") to indicate when it has reclaimed all previously allocated
281  * ASIDs that are not currently active on a processor.
282  *
283  * The current epoch number is always in the range [0, INT_MAX).  Negative
284  * numbers and INT_MAX are reserved for special cases that are described
285  * below.
286  */
287 struct asid_set {
288 	int asid_bits;
289 	bitstr_t *asid_set;
290 	int asid_set_size;
291 	int asid_next;
292 	int asid_epoch;
293 	struct mtx asid_set_mutex;
294 };
295 
296 static struct asid_set asids;
297 static struct asid_set vmids;
298 
299 static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
300     "ASID allocator");
301 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asids.asid_bits, 0,
302     "The number of bits in an ASID");
303 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asids.asid_next, 0,
304     "The last allocated ASID plus one");
305 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asids.asid_epoch, 0,
306     "The current epoch number");
307 
308 static SYSCTL_NODE(_vm_pmap, OID_AUTO, vmid, CTLFLAG_RD, 0, "VMID allocator");
309 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, bits, CTLFLAG_RD, &vmids.asid_bits, 0,
310     "The number of bits in a VMID");
311 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, next, CTLFLAG_RD, &vmids.asid_next, 0,
312     "The last allocated VMID plus one");
313 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0,
314     "The current epoch number");
315 
316 void (*pmap_clean_stage2_tlbi)(void);
317 void (*pmap_invalidate_vpipt_icache)(void);
318 
319 /*
320  * A pmap's cookie encodes an ASID and epoch number.  Cookies for reserved
321  * ASIDs have a negative epoch number, specifically, INT_MIN.  Cookies for
322  * dynamically allocated ASIDs have a non-negative epoch number.
323  *
324  * An invalid ASID is represented by -1.
325  *
326  * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN),
327  * which indicates that an ASID should never be allocated to the pmap, and
328  * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be
329  * allocated when the pmap is next activated.
330  */
331 #define	COOKIE_FROM(asid, epoch)	((long)((u_int)(asid) |	\
332 					    ((u_long)(epoch) << 32)))
333 #define	COOKIE_TO_ASID(cookie)		((int)(cookie))
334 #define	COOKIE_TO_EPOCH(cookie)		((int)((u_long)(cookie) >> 32))
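/*
 * For example, COOKIE_FROM(3, 1) is 0x0000000100000003: the ASID occupies
 * the low 32 bits and the epoch the high 32 bits, so the COOKIE_TO_*()
 * macros simply split the value apart again.
 */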
335 
336 static int superpages_enabled = 1;
337 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
338     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
339     "Are large page mappings enabled?");
340 
341 /*
342  * Internal flags for pmap_enter()'s helper functions.
343  */
344 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
345 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
346 
347 static void	free_pv_chunk(struct pv_chunk *pc);
348 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
349 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
350 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
351 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
352 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
353 		    vm_offset_t va);
354 
355 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
356 static bool pmap_activate_int(pmap_t pmap);
357 static void pmap_alloc_asid(pmap_t pmap);
358 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
359 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
360 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
361     vm_offset_t va, struct rwlock **lockp);
362 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
363 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
364     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
365 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
366     u_int flags, vm_page_t m, struct rwlock **lockp);
367 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
368     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
369 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
370     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
371 static void pmap_reset_asid_set(pmap_t pmap);
372 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
373     vm_page_t m, struct rwlock **lockp);
374 
375 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
376 		struct rwlock **lockp);
377 
378 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
379     struct spglist *free);
380 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
381 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
382 
383 /*
384  * These load the old table data and store the new value.
385  * They need to be atomic as the System MMU may write to the table at
386  * the same time as the CPU.
387  */
388 #define	pmap_clear(table)		atomic_store_64(table, 0)
389 #define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
390 #define	pmap_load(table)		(*table)
391 #define	pmap_load_clear(table)		atomic_swap_64(table, 0)
392 #define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
393 #define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
394 #define	pmap_store(table, entry)	atomic_store_64(table, entry)
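/*
 * An aligned 64-bit load or store is single-copy atomic on arm64, so
 * pmap_load() can be a plain dereference; the updating accessors use the
 * atomic(9) 64-bit primitives so that read-modify-write updates are also
 * atomic with respect to a concurrent hardware table walk.
 */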
395 
396 /********************/
397 /* Inline functions */
398 /********************/
399 
400 static __inline void
401 pagecopy(void *s, void *d)
402 {
403 
404 	memcpy(d, s, PAGE_SIZE);
405 }
406 
407 static __inline pd_entry_t *
408 pmap_l0(pmap_t pmap, vm_offset_t va)
409 {
410 
411 	return (&pmap->pm_l0[pmap_l0_index(va)]);
412 }
413 
414 static __inline pd_entry_t *
415 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
416 {
417 	pd_entry_t *l1;
418 
419 	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
420 	return (&l1[pmap_l1_index(va)]);
421 }
422 
423 static __inline pd_entry_t *
424 pmap_l1(pmap_t pmap, vm_offset_t va)
425 {
426 	pd_entry_t *l0;
427 
428 	l0 = pmap_l0(pmap, va);
429 	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
430 		return (NULL);
431 
432 	return (pmap_l0_to_l1(l0, va));
433 }
434 
435 static __inline pd_entry_t *
436 pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
437 {
438 	pd_entry_t *l2;
439 
440 	l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
441 	return (&l2[pmap_l2_index(va)]);
442 }
443 
444 static __inline pd_entry_t *
445 pmap_l2(pmap_t pmap, vm_offset_t va)
446 {
447 	pd_entry_t *l1;
448 
449 	l1 = pmap_l1(pmap, va);
450 	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
451 		return (NULL);
452 
453 	return (pmap_l1_to_l2(l1, va));
454 }
455 
456 static __inline pt_entry_t *
457 pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
458 {
459 	pt_entry_t *l3;
460 
461 	l3 = (pt_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
462 	return (&l3[pmap_l3_index(va)]);
463 }
464 
465 /*
466  * Returns the lowest valid pde for a given virtual address.
467  * The next level may or may not point to a valid page or block.
468  */
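/*
 * On success "*level" is set to the level of the returned entry (0, 1, or
 * 2).  If even the L0 entry is invalid, NULL is returned and "*level" is
 * set to -1.
 */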
469 static __inline pd_entry_t *
470 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
471 {
472 	pd_entry_t *l0, *l1, *l2, desc;
473 
474 	l0 = pmap_l0(pmap, va);
475 	desc = pmap_load(l0) & ATTR_DESCR_MASK;
476 	if (desc != L0_TABLE) {
477 		*level = -1;
478 		return (NULL);
479 	}
480 
481 	l1 = pmap_l0_to_l1(l0, va);
482 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
483 	if (desc != L1_TABLE) {
484 		*level = 0;
485 		return (l0);
486 	}
487 
488 	l2 = pmap_l1_to_l2(l1, va);
489 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
490 	if (desc != L2_TABLE) {
491 		*level = 1;
492 		return (l1);
493 	}
494 
495 	*level = 2;
496 	return (l2);
497 }
498 
499 /*
500  * Returns the lowest valid pte block or table entry for a given virtual
501  * address. If there are no valid entries return NULL and set the level to
502  * the first invalid level.
503  */
504 static __inline pt_entry_t *
505 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
506 {
507 	pd_entry_t *l1, *l2, desc;
508 	pt_entry_t *l3;
509 
510 	l1 = pmap_l1(pmap, va);
511 	if (l1 == NULL) {
512 		*level = 0;
513 		return (NULL);
514 	}
515 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
516 	if (desc == L1_BLOCK) {
517 		*level = 1;
518 		return (l1);
519 	}
520 
521 	if (desc != L1_TABLE) {
522 		*level = 1;
523 		return (NULL);
524 	}
525 
526 	l2 = pmap_l1_to_l2(l1, va);
527 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
528 	if (desc == L2_BLOCK) {
529 		*level = 2;
530 		return (l2);
531 	}
532 
533 	if (desc != L2_TABLE) {
534 		*level = 2;
535 		return (NULL);
536 	}
537 
538 	*level = 3;
539 	l3 = pmap_l2_to_l3(l2, va);
540 	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
541 		return (NULL);
542 
543 	return (l3);
544 }
545 
546 bool
547 pmap_ps_enabled(pmap_t pmap __unused)
548 {
549 
550 	return (superpages_enabled != 0);
551 }
552 
553 bool
554 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
555     pd_entry_t **l2, pt_entry_t **l3)
556 {
557 	pd_entry_t *l0p, *l1p, *l2p;
558 
559 	if (pmap->pm_l0 == NULL)
560 		return (false);
561 
562 	l0p = pmap_l0(pmap, va);
563 	*l0 = l0p;
564 
565 	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
566 		return (false);
567 
568 	l1p = pmap_l0_to_l1(l0p, va);
569 	*l1 = l1p;
570 
571 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
572 		*l2 = NULL;
573 		*l3 = NULL;
574 		return (true);
575 	}
576 
577 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
578 		return (false);
579 
580 	l2p = pmap_l1_to_l2(l1p, va);
581 	*l2 = l2p;
582 
583 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
584 		*l3 = NULL;
585 		return (true);
586 	}
587 
588 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
589 		return (false);
590 
591 	*l3 = pmap_l2_to_l3(l2p, va);
592 
593 	return (true);
594 }
595 
596 static __inline int
597 pmap_l3_valid(pt_entry_t l3)
598 {
599 
600 	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
601 }
602 
603 
604 CTASSERT(L1_BLOCK == L2_BLOCK);
605 
606 static pt_entry_t
607 pmap_pte_memattr(pmap_t pmap, vm_memattr_t memattr)
608 {
609 	pt_entry_t val;
610 
611 	if (pmap->pm_stage == PM_STAGE1) {
612 		val = ATTR_S1_IDX(memattr);
613 		if (memattr == VM_MEMATTR_DEVICE)
614 			val |= ATTR_S1_XN;
615 		return (val);
616 	}
617 
618 	val = 0;
619 
620 	switch (memattr) {
621 	case VM_MEMATTR_DEVICE:
622 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_DEVICE_nGnRnE) |
623 		    ATTR_S2_XN(ATTR_S2_XN_ALL));
624 	case VM_MEMATTR_UNCACHEABLE:
625 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_NC));
626 	case VM_MEMATTR_WRITE_BACK:
627 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WB));
628 	case VM_MEMATTR_WRITE_THROUGH:
629 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WT));
630 	default:
631 		panic("%s: invalid memory attribute %x", __func__, memattr);
632 	}
633 }
634 
635 static pt_entry_t
636 pmap_pte_prot(pmap_t pmap, vm_prot_t prot)
637 {
638 	pt_entry_t val;
639 
640 	val = 0;
641 	if (pmap->pm_stage == PM_STAGE1) {
642 		if ((prot & VM_PROT_EXECUTE) == 0)
643 			val |= ATTR_S1_XN;
644 		if ((prot & VM_PROT_WRITE) == 0)
645 			val |= ATTR_S1_AP(ATTR_S1_AP_RO);
646 	} else {
647 		if ((prot & VM_PROT_WRITE) != 0)
648 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
649 		if ((prot & VM_PROT_READ) != 0)
650 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
651 		if ((prot & VM_PROT_EXECUTE) == 0)
652 			val |= ATTR_S2_XN(ATTR_S2_XN_ALL);
653 	}
654 
655 	return (val);
656 }
657 
658 /*
659  * Checks if the PTE is dirty.
660  */
661 static inline int
662 pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
663 {
664 
665 	PMAP_ASSERT_STAGE1(pmap);
666 	KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
667 	KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
668 	    ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
669 
670 	return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
671 	    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
672 }
673 
674 static __inline void
675 pmap_resident_count_inc(pmap_t pmap, int count)
676 {
677 
678 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
679 	pmap->pm_stats.resident_count += count;
680 }
681 
682 static __inline void
683 pmap_resident_count_dec(pmap_t pmap, int count)
684 {
685 
686 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
687 	KASSERT(pmap->pm_stats.resident_count >= count,
688 	    ("pmap %p resident count underflow %ld %d", pmap,
689 	    pmap->pm_stats.resident_count, count));
690 	pmap->pm_stats.resident_count -= count;
691 }
692 
693 static pt_entry_t *
694 pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
695     u_int *l2_slot)
696 {
697 	pt_entry_t *l2;
698 	pd_entry_t *l1;
699 
700 	l1 = (pd_entry_t *)l1pt;
701 	*l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
702 
703 	/* Check that locore used an L1 table mapping */
704 	KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
705 	   ("Invalid bootstrap L1 table"));
706 	/* Find the address of the L2 table */
707 	l2 = (pt_entry_t *)init_pt_va;
708 	*l2_slot = pmap_l2_index(va);
709 
710 	return (l2);
711 }
712 
713 static vm_paddr_t
714 pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
715 {
716 	u_int l1_slot, l2_slot;
717 	pt_entry_t *l2;
718 
719 	l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
720 
721 	return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
722 }
723 
724 static vm_offset_t
725 pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
726     vm_offset_t freemempos)
727 {
728 	pt_entry_t *l2;
729 	vm_offset_t va;
730 	vm_paddr_t l2_pa, pa;
731 	u_int l1_slot, l2_slot, prev_l1_slot;
732 	int i;
733 
734 	dmap_phys_base = min_pa & ~L1_OFFSET;
735 	dmap_phys_max = 0;
736 	dmap_max_addr = 0;
737 	l2 = NULL;
738 	prev_l1_slot = -1;
739 
740 #define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
741 	memset(pagetable_dmap, 0, PAGE_SIZE * DMAP_TABLES);
742 
743 	for (i = 0; i < (physmap_idx * 2); i += 2) {
744 		pa = physmap[i] & ~L2_OFFSET;
745 		va = pa - dmap_phys_base + DMAP_MIN_ADDRESS;
746 
747 		/* Create L2 mappings at the start of the region */
748 		if ((pa & L1_OFFSET) != 0) {
749 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
750 			if (l1_slot != prev_l1_slot) {
751 				prev_l1_slot = l1_slot;
752 				l2 = (pt_entry_t *)freemempos;
753 				l2_pa = pmap_early_vtophys(kern_l1,
754 				    (vm_offset_t)l2);
755 				freemempos += PAGE_SIZE;
756 
757 				pmap_store(&pagetable_dmap[l1_slot],
758 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
759 
760 				memset(l2, 0, PAGE_SIZE);
761 			}
762 			KASSERT(l2 != NULL,
763 			    ("pmap_bootstrap_dmap: NULL l2 map"));
764 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
765 			    pa += L2_SIZE, va += L2_SIZE) {
766 				/*
767 				 * We are on a boundary, stop to
768 				 * create a level 1 block
769 				 */
770 				if ((pa & L1_OFFSET) == 0)
771 					break;
772 
773 				l2_slot = pmap_l2_index(va);
774 				KASSERT(l2_slot != 0, ("..."));
775 				pmap_store(&l2[l2_slot],
776 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
777 				    ATTR_S1_XN |
778 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
779 				    L2_BLOCK);
780 			}
781 			KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
782 			    ("..."));
783 		}
784 
785 		for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
786 		    (physmap[i + 1] - pa) >= L1_SIZE;
787 		    pa += L1_SIZE, va += L1_SIZE) {
788 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
789 			pmap_store(&pagetable_dmap[l1_slot],
790 			    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_S1_XN |
791 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
792 		}
793 
794 		/* Create L2 mappings at the end of the region */
795 		if (pa < physmap[i + 1]) {
796 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
797 			if (l1_slot != prev_l1_slot) {
798 				prev_l1_slot = l1_slot;
799 				l2 = (pt_entry_t *)freemempos;
800 				l2_pa = pmap_early_vtophys(kern_l1,
801 				    (vm_offset_t)l2);
802 				freemempos += PAGE_SIZE;
803 
804 				pmap_store(&pagetable_dmap[l1_slot],
805 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
806 
807 				memset(l2, 0, PAGE_SIZE);
808 			}
809 			KASSERT(l2 != NULL,
810 			    ("pmap_bootstrap_dmap: NULL l2 map"));
811 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
812 			    pa += L2_SIZE, va += L2_SIZE) {
813 				l2_slot = pmap_l2_index(va);
814 				pmap_store(&l2[l2_slot],
815 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
816 				    ATTR_S1_XN |
817 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
818 				    L2_BLOCK);
819 			}
820 		}
821 
822 		if (pa > dmap_phys_max) {
823 			dmap_phys_max = pa;
824 			dmap_max_addr = va;
825 		}
826 	}
827 
828 	cpu_tlb_flushID();
829 
830 	return (freemempos);
831 }
832 
833 static vm_offset_t
834 pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
835 {
836 	vm_offset_t l2pt;
837 	vm_paddr_t pa;
838 	pd_entry_t *l1;
839 	u_int l1_slot;
840 
841 	KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
842 
843 	l1 = (pd_entry_t *)l1pt;
844 	l1_slot = pmap_l1_index(va);
845 	l2pt = l2_start;
846 
847 	for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
848 		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
849 
850 		pa = pmap_early_vtophys(l1pt, l2pt);
851 		pmap_store(&l1[l1_slot],
852 		    (pa & ~Ln_TABLE_MASK) | L1_TABLE);
853 		l2pt += PAGE_SIZE;
854 	}
855 
856 	/* Clean the L2 page table */
857 	memset((void *)l2_start, 0, l2pt - l2_start);
858 
859 	return (l2pt);
860 }
861 
862 static vm_offset_t
863 pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
864 {
865 	vm_offset_t l3pt;
866 	vm_paddr_t pa;
867 	pd_entry_t *l2;
868 	u_int l2_slot;
869 
870 	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
871 
872 	l2 = pmap_l2(kernel_pmap, va);
873 	l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
874 	l2_slot = pmap_l2_index(va);
875 	l3pt = l3_start;
876 
877 	for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
878 		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
879 
880 		pa = pmap_early_vtophys(l1pt, l3pt);
881 		pmap_store(&l2[l2_slot],
882 		    (pa & ~Ln_TABLE_MASK) | ATTR_S1_UXN | L2_TABLE);
883 		l3pt += PAGE_SIZE;
884 	}
885 
886 	/* Clean the L3 page table */
887 	memset((void *)l3_start, 0, l3pt - l3_start);
888 
889 	return (l3pt);
890 }
891 
892 /*
893  *	Bootstrap the system enough to run with virtual memory.
894  */
895 void
896 pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
897     vm_size_t kernlen)
898 {
899 	vm_offset_t freemempos;
900 	vm_offset_t dpcpu, msgbufpv;
901 	vm_paddr_t start_pa, pa, min_pa;
902 	uint64_t kern_delta;
903 	int i;
904 
905 	/* Verify that the ASID is set through TTBR0. */
906 	KASSERT((READ_SPECIALREG(tcr_el1) & TCR_A1) == 0,
907 	    ("pmap_bootstrap: TCR_EL1.A1 != 0"));
908 
909 	kern_delta = KERNBASE - kernstart;
910 
911 	printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
912 	printf("%lx\n", l1pt);
913 	printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
914 
915 	/* Set this early so we can use the pagetable walking functions */
916 	kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
917 	PMAP_LOCK_INIT(kernel_pmap);
918 	kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
919 	kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
920 	kernel_pmap->pm_stage = PM_STAGE1;
921 	kernel_pmap->pm_asid_set = &asids;
922 
923 	/* Assume the address we were loaded to is a valid physical address */
924 	min_pa = KERNBASE - kern_delta;
925 
926 	physmap_idx = physmem_avail(physmap, nitems(physmap));
927 	physmap_idx /= 2;
928 
929 	/*
930 	 * Find the minimum physical address. physmap is sorted,
931 	 * but may contain empty ranges.
932 	 */
933 	for (i = 0; i < physmap_idx * 2; i += 2) {
934 		if (physmap[i] == physmap[i + 1])
935 			continue;
936 		if (physmap[i] <= min_pa)
937 			min_pa = physmap[i];
938 	}
939 
940 	freemempos = KERNBASE + kernlen;
941 	freemempos = roundup2(freemempos, PAGE_SIZE);
942 
943 	/* Create a direct map region early so we can use it for pa -> va */
944 	freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
945 
946 	start_pa = pa = KERNBASE - kern_delta;
947 
948 	/*
949 	 * Create the l2 tables up to VM_MAX_KERNEL_ADDRESS.  We assume that the
950 	 * loader allocated the first and only l2 page table page used to map
951 	 * the kernel, preloaded files and module metadata.
952 	 */
953 	freemempos = pmap_bootstrap_l2(l1pt, KERNBASE + L1_SIZE, freemempos);
954 	/* And the l3 tables for the early devmap */
955 	freemempos = pmap_bootstrap_l3(l1pt,
956 	    VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
957 
958 	cpu_tlb_flushID();
959 
960 #define alloc_pages(var, np)						\
961 	(var) = freemempos;						\
962 	freemempos += (np * PAGE_SIZE);					\
963 	memset((char *)(var), 0, ((np) * PAGE_SIZE));
964 
965 	/* Allocate dynamic per-cpu area. */
966 	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
967 	dpcpu_init((void *)dpcpu, 0);
968 
969 	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
970 	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
971 	msgbufp = (void *)msgbufpv;
972 
973 	/* Reserve some VA space for early BIOS/ACPI mapping */
974 	preinit_map_va = roundup2(freemempos, L2_SIZE);
975 
976 	virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
977 	virtual_avail = roundup2(virtual_avail, L1_SIZE);
978 	virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
979 	kernel_vm_end = virtual_avail;
980 
981 	pa = pmap_early_vtophys(l1pt, freemempos);
982 
983 	physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
984 
985 	cpu_tlb_flushID();
986 }
987 
988 /*
989  *	Initialize a vm_page's machine-dependent fields.
990  */
991 void
992 pmap_page_init(vm_page_t m)
993 {
994 
995 	TAILQ_INIT(&m->md.pv_list);
996 	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
997 }
998 
999 static void
1000 pmap_init_asids(struct asid_set *set, int bits)
1001 {
1002 	int i;
1003 
1004 	set->asid_bits = bits;
1005 
1006 	/*
1007 	 * We may be too early in the overall initialization process to use
1008 	 * bit_alloc().
1009 	 */
1010 	set->asid_set_size = 1 << set->asid_bits;
1011 	set->asid_set = (bitstr_t *)kmem_malloc(bitstr_size(set->asid_set_size),
1012 	    M_WAITOK | M_ZERO);
1013 	for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
1014 		bit_set(set->asid_set, i);
1015 	set->asid_next = ASID_FIRST_AVAILABLE;
1016 	mtx_init(&set->asid_set_mutex, "asid set", NULL, MTX_SPIN);
1017 }
1018 
1019 /*
1020  *	Initialize the pmap module.
1021  *	Called by vm_init, to initialize any structures that the pmap
1022  *	system needs to map virtual memory.
1023  */
1024 void
1025 pmap_init(void)
1026 {
1027 	vm_size_t s;
1028 	uint64_t mmfr1;
1029 	int i, pv_npg, vmid_bits;
1030 
1031 	/*
1032 	 * Are large page mappings enabled?
1033 	 */
1034 	TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
1035 	if (superpages_enabled) {
1036 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1037 		    ("pmap_init: can't assign to pagesizes[1]"));
1038 		pagesizes[1] = L2_SIZE;
1039 	}
1040 
1041 	/*
1042 	 * Initialize the ASID allocator.
1043 	 */
1044 	pmap_init_asids(&asids,
1045 	    (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8);
1046 
1047 	if (has_hyp()) {
1048 		mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1049 		vmid_bits = 8;
1050 
1051 		if (ID_AA64MMFR1_VMIDBits_VAL(mmfr1) ==
1052 		    ID_AA64MMFR1_VMIDBits_16)
1053 			vmid_bits = 16;
1054 		pmap_init_asids(&vmids, vmid_bits);
1055 	}
1056 
1057 	/*
1058 	 * Initialize the pv chunk list mutex.
1059 	 */
1060 	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
1061 
1062 	/*
1063 	 * Initialize the pool of pv list locks.
1064 	 */
1065 	for (i = 0; i < NPV_LIST_LOCKS; i++)
1066 		rw_init(&pv_list_locks[i], "pmap pv list");
1067 
1068 	/*
1069 	 * Calculate the size of the pv head table for superpages.
1070 	 */
1071 	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
1072 
1073 	/*
1074 	 * Allocate memory for the pv head table for superpages.
1075 	 */
1076 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1077 	s = round_page(s);
1078 	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
1079 	for (i = 0; i < pv_npg; i++)
1080 		TAILQ_INIT(&pv_table[i].pv_list);
1081 	TAILQ_INIT(&pv_dummy.pv_list);
1082 
1083 	vm_initialized = 1;
1084 }
1085 
1086 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1087     "2MB page mapping counters");
1088 
1089 static u_long pmap_l2_demotions;
1090 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
1091     &pmap_l2_demotions, 0, "2MB page demotions");
1092 
1093 static u_long pmap_l2_mappings;
1094 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
1095     &pmap_l2_mappings, 0, "2MB page mappings");
1096 
1097 static u_long pmap_l2_p_failures;
1098 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
1099     &pmap_l2_p_failures, 0, "2MB page promotion failures");
1100 
1101 static u_long pmap_l2_promotions;
1102 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
1103     &pmap_l2_promotions, 0, "2MB page promotions");
1104 
1105 /*
1106  * Invalidate a single TLB entry.
1107  */
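/*
 * "tlbi vaae1is" invalidates the given virtual address for all ASIDs and
 * is used for the kernel pmap's global mappings; "tlbi vae1is" is further
 * qualified by the pmap's ASID.  The inner-shareable ("is") forms are
 * broadcast to all CPUs, so no IPI-based TLB shootdown is required.
 */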
1108 static __inline void
1109 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1110 {
1111 	uint64_t r;
1112 
1113 	PMAP_ASSERT_STAGE1(pmap);
1114 
1115 	dsb(ishst);
1116 	if (pmap == kernel_pmap) {
1117 		r = atop(va);
1118 		__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1119 	} else {
1120 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
1121 		__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1122 	}
1123 	dsb(ish);
1124 	isb();
1125 }
1126 
1127 static __inline void
1128 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1129 {
1130 	uint64_t end, r, start;
1131 
1132 	PMAP_ASSERT_STAGE1(pmap);
1133 
1134 	dsb(ishst);
1135 	if (pmap == kernel_pmap) {
1136 		start = atop(sva);
1137 		end = atop(eva);
1138 		for (r = start; r < end; r++)
1139 			__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1140 	} else {
1141 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1142 		start |= atop(sva);
1143 		end |= atop(eva);
1144 		for (r = start; r < end; r++)
1145 			__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1146 	}
1147 	dsb(ish);
1148 	isb();
1149 }
1150 
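/*
 * Invalidate all of the TLB entries belonging to the given pmap: every
 * stage 1 entry ("tlbi vmalle1is") for the kernel pmap, or every entry
 * tagged with the pmap's ASID ("tlbi aside1is") otherwise.
 */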
1151 static __inline void
1152 pmap_invalidate_all(pmap_t pmap)
1153 {
1154 	uint64_t r;
1155 
1156 	PMAP_ASSERT_STAGE1(pmap);
1157 
1158 	dsb(ishst);
1159 	if (pmap == kernel_pmap) {
1160 		__asm __volatile("tlbi vmalle1is");
1161 	} else {
1162 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1163 		__asm __volatile("tlbi aside1is, %0" : : "r" (r));
1164 	}
1165 	dsb(ish);
1166 	isb();
1167 }
1168 
1169 /*
1170  *	Routine:	pmap_extract
1171  *	Function:
1172  *		Extract the physical page address associated
1173  *		with the given map/virtual_address pair.
1174  */
1175 vm_paddr_t
1176 pmap_extract(pmap_t pmap, vm_offset_t va)
1177 {
1178 	pt_entry_t *pte, tpte;
1179 	vm_paddr_t pa;
1180 	int lvl;
1181 
1182 	pa = 0;
1183 	PMAP_LOCK(pmap);
1184 	/*
1185 	 * Find the block or page map for this virtual address. pmap_pte
1186 	 * will return either a valid block/page entry, or NULL.
1187 	 */
1188 	pte = pmap_pte(pmap, va, &lvl);
1189 	if (pte != NULL) {
1190 		tpte = pmap_load(pte);
1191 		pa = tpte & ~ATTR_MASK;
1192 		switch(lvl) {
1193 		case 1:
1194 			KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1195 			    ("pmap_extract: Invalid L1 pte found: %lx",
1196 			    tpte & ATTR_DESCR_MASK));
1197 			pa |= (va & L1_OFFSET);
1198 			break;
1199 		case 2:
1200 			KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1201 			    ("pmap_extract: Invalid L2 pte found: %lx",
1202 			    tpte & ATTR_DESCR_MASK));
1203 			pa |= (va & L2_OFFSET);
1204 			break;
1205 		case 3:
1206 			KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1207 			    ("pmap_extract: Invalid L3 pte found: %lx",
1208 			    tpte & ATTR_DESCR_MASK));
1209 			pa |= (va & L3_OFFSET);
1210 			break;
1211 		}
1212 	}
1213 	PMAP_UNLOCK(pmap);
1214 	return (pa);
1215 }
1216 
1217 /*
1218  *	Routine:	pmap_extract_and_hold
1219  *	Function:
1220  *		Atomically extract and hold the physical page
1221  *		with the given pmap and virtual address pair
1222  *		if that mapping permits the given protection.
1223  */
1224 vm_page_t
1225 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1226 {
1227 	pt_entry_t *pte, tpte;
1228 	vm_offset_t off;
1229 	vm_page_t m;
1230 	int lvl;
1231 
1232 	PMAP_ASSERT_STAGE1(pmap);
1233 
1234 	m = NULL;
1235 	PMAP_LOCK(pmap);
1236 	pte = pmap_pte(pmap, va, &lvl);
1237 	if (pte != NULL) {
1238 		tpte = pmap_load(pte);
1239 
1240 		KASSERT(lvl > 0 && lvl <= 3,
1241 		    ("pmap_extract_and_hold: Invalid level %d", lvl));
1242 		CTASSERT(L1_BLOCK == L2_BLOCK);
1243 		KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1244 		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1245 		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1246 		     tpte & ATTR_DESCR_MASK));
1247 		if (((tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)) ||
1248 		    ((prot & VM_PROT_WRITE) == 0)) {
1249 			switch(lvl) {
1250 			case 1:
1251 				off = va & L1_OFFSET;
1252 				break;
1253 			case 2:
1254 				off = va & L2_OFFSET;
1255 				break;
1256 			case 3:
1257 			default:
1258 				off = 0;
1259 			}
1260 			m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
1261 			if (!vm_page_wire_mapped(m))
1262 				m = NULL;
1263 		}
1264 	}
1265 	PMAP_UNLOCK(pmap);
1266 	return (m);
1267 }
1268 
1269 vm_paddr_t
1270 pmap_kextract(vm_offset_t va)
1271 {
1272 	pt_entry_t *pte, tpte;
1273 
1274 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
1275 		return (DMAP_TO_PHYS(va));
1276 	pte = pmap_l1(kernel_pmap, va);
1277 	if (pte == NULL)
1278 		return (0);
1279 
1280 	/*
1281 	 * A concurrent pmap_update_entry() will clear the entry's valid bit
1282 	 * but leave the rest of the entry unchanged.  Therefore, we treat a
1283 	 * non-zero entry as being valid, and we ignore the valid bit when
1284 	 * determining whether the entry maps a block, page, or table.
1285 	 */
1286 	tpte = pmap_load(pte);
1287 	if (tpte == 0)
1288 		return (0);
1289 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
1290 		return ((tpte & ~ATTR_MASK) | (va & L1_OFFSET));
1291 	pte = pmap_l1_to_l2(&tpte, va);
1292 	tpte = pmap_load(pte);
1293 	if (tpte == 0)
1294 		return (0);
1295 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
1296 		return ((tpte & ~ATTR_MASK) | (va & L2_OFFSET));
1297 	pte = pmap_l2_to_l3(&tpte, va);
1298 	tpte = pmap_load(pte);
1299 	if (tpte == 0)
1300 		return (0);
1301 	return ((tpte & ~ATTR_MASK) | (va & L3_OFFSET));
1302 }
1303 
1304 /***************************************************
1305  * Low level mapping routines.....
1306  ***************************************************/
1307 
1308 void
1309 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
1310 {
1311 	pd_entry_t *pde;
1312 	pt_entry_t *pte, attr;
1313 	vm_offset_t va;
1314 	int lvl;
1315 
1316 	KASSERT((pa & L3_OFFSET) == 0,
1317 	   ("pmap_kenter: Invalid physical address"));
1318 	KASSERT((sva & L3_OFFSET) == 0,
1319 	   ("pmap_kenter: Invalid virtual address"));
1320 	KASSERT((size & PAGE_MASK) == 0,
1321 	    ("pmap_kenter: Mapping is not page-sized"));
1322 
1323 	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
1324 	    ATTR_S1_IDX(mode) | L3_PAGE;
1325 	va = sva;
1326 	while (size != 0) {
1327 		pde = pmap_pde(kernel_pmap, va, &lvl);
1328 		KASSERT(pde != NULL,
1329 		    ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
1330 		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
1331 
1332 		pte = pmap_l2_to_l3(pde, va);
1333 		pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
1334 
1335 		va += PAGE_SIZE;
1336 		pa += PAGE_SIZE;
1337 		size -= PAGE_SIZE;
1338 	}
1339 	pmap_invalidate_range(kernel_pmap, sva, va);
1340 }
1341 
1342 void
1343 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
1344 {
1345 
1346 	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
1347 }
1348 
1349 /*
1350  * Remove a page from the kernel pagetables.
1351  */
1352 PMAP_INLINE void
1353 pmap_kremove(vm_offset_t va)
1354 {
1355 	pt_entry_t *pte;
1356 	int lvl;
1357 
1358 	pte = pmap_pte(kernel_pmap, va, &lvl);
1359 	KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
1360 	KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
1361 
1362 	pmap_clear(pte);
1363 	pmap_invalidate_page(kernel_pmap, va);
1364 }
1365 
1366 void
1367 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
1368 {
1369 	pt_entry_t *pte;
1370 	vm_offset_t va;
1371 	int lvl;
1372 
1373 	KASSERT((sva & L3_OFFSET) == 0,
1374 	   ("pmap_kremove_device: Invalid virtual address"));
1375 	KASSERT((size & PAGE_MASK) == 0,
1376 	    ("pmap_kremove_device: Mapping is not page-sized"));
1377 
1378 	va = sva;
1379 	while (size != 0) {
1380 		pte = pmap_pte(kernel_pmap, va, &lvl);
1381 		KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
1382 		KASSERT(lvl == 3,
1383 		    ("Invalid device pagetable level: %d != 3", lvl));
1384 		pmap_clear(pte);
1385 
1386 		va += PAGE_SIZE;
1387 		size -= PAGE_SIZE;
1388 	}
1389 	pmap_invalidate_range(kernel_pmap, sva, va);
1390 }
1391 
1392 /*
1393  *	Used to map a range of physical addresses into kernel
1394  *	virtual address space.
1395  *
1396  *	The value passed in '*virt' is a suggested virtual address for
1397  *	the mapping. Architectures which can support a direct-mapped
1398  *	physical to virtual region can return the appropriate address
1399  *	within that region, leaving '*virt' unchanged. Other
1400  *	architectures should map the pages starting at '*virt' and
1401  *	update '*virt' with the first usable address after the mapped
1402  *	region.
1403  */
1404 vm_offset_t
1405 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1406 {
1407 	return (PHYS_TO_DMAP(start));
1408 }
1409 
1410 
1411 /*
1412  * Add a list of wired pages to the KVA.  This routine is
1413  * only used for temporary kernel mappings that do not
1414  * need to have page modification or references recorded.
1415  * Note that old mappings are simply written over.  The
1416  * page *must* be wired.
1417  * Note: SMP coherent.  On arm64 this uses a ranged
1418  * broadcast TLB invalidation rather than a shootdown IPI.
1419  */
1420 void
1421 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1422 {
1423 	pd_entry_t *pde;
1424 	pt_entry_t *pte, pa;
1425 	vm_offset_t va;
1426 	vm_page_t m;
1427 	int i, lvl;
1428 
1429 	va = sva;
1430 	for (i = 0; i < count; i++) {
1431 		pde = pmap_pde(kernel_pmap, va, &lvl);
1432 		KASSERT(pde != NULL,
1433 		    ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
1434 		KASSERT(lvl == 2,
1435 		    ("pmap_qenter: Invalid level %d", lvl));
1436 
1437 		m = ma[i];
1438 		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
1439 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
1440 		    ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
1441 		pte = pmap_l2_to_l3(pde, va);
1442 		pmap_load_store(pte, pa);
1443 
1444 		va += L3_SIZE;
1445 	}
1446 	pmap_invalidate_range(kernel_pmap, sva, va);
1447 }
1448 
1449 /*
1450  * This routine tears out page mappings from the
1451  * kernel -- it is meant only for temporary mappings.
1452  */
1453 void
1454 pmap_qremove(vm_offset_t sva, int count)
1455 {
1456 	pt_entry_t *pte;
1457 	vm_offset_t va;
1458 	int lvl;
1459 
1460 	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
1461 
1462 	va = sva;
1463 	while (count-- > 0) {
1464 		pte = pmap_pte(kernel_pmap, va, &lvl);
1465 		KASSERT(lvl == 3,
1466 		    ("Invalid device pagetable level: %d != 3", lvl));
1467 		if (pte != NULL) {
1468 			pmap_clear(pte);
1469 		}
1470 
1471 		va += PAGE_SIZE;
1472 	}
1473 	pmap_invalidate_range(kernel_pmap, sva, va);
1474 }
1475 
1476 /***************************************************
1477  * Page table page management routines.....
1478  ***************************************************/
1479 /*
1480  * Schedule the specified unused page table page to be freed.  Specifically,
1481  * add the page to the specified list of pages that will be released to the
1482  * physical memory manager after the TLB has been updated.
1483  */
1484 static __inline void
1485 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1486     boolean_t set_PG_ZERO)
1487 {
1488 
1489 	if (set_PG_ZERO)
1490 		m->flags |= PG_ZERO;
1491 	else
1492 		m->flags &= ~PG_ZERO;
1493 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1494 }
1495 
1496 /*
1497  * Decrements a page table page's reference count, which is used to record the
1498  * number of valid page table entries within the page.  If the reference count
1499  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1500  * page table page was unmapped and FALSE otherwise.
1501  */
1502 static inline boolean_t
1503 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1504 {
1505 
1506 	--m->ref_count;
1507 	if (m->ref_count == 0) {
1508 		_pmap_unwire_l3(pmap, va, m, free);
1509 		return (TRUE);
1510 	} else
1511 		return (FALSE);
1512 }
1513 
1514 static void
1515 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1516 {
1517 
1518 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1519 	/*
1520 	 * unmap the page table page
1521 	 */
1522 	if (m->pindex >= (NUL2E + NUL1E)) {
1523 		/* l1 page */
1524 		pd_entry_t *l0;
1525 
1526 		l0 = pmap_l0(pmap, va);
1527 		pmap_clear(l0);
1528 	} else if (m->pindex >= NUL2E) {
1529 		/* l2 page */
1530 		pd_entry_t *l1;
1531 
1532 		l1 = pmap_l1(pmap, va);
1533 		pmap_clear(l1);
1534 	} else {
1535 		/* l3 page */
1536 		pd_entry_t *l2;
1537 
1538 		l2 = pmap_l2(pmap, va);
1539 		pmap_clear(l2);
1540 	}
1541 	pmap_resident_count_dec(pmap, 1);
1542 	if (m->pindex < NUL2E) {
1543 		/* We just released an l3, unhold the matching l2 */
1544 		pd_entry_t *l1, tl1;
1545 		vm_page_t l2pg;
1546 
1547 		l1 = pmap_l1(pmap, va);
1548 		tl1 = pmap_load(l1);
1549 		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1550 		pmap_unwire_l3(pmap, va, l2pg, free);
1551 	} else if (m->pindex < (NUL2E + NUL1E)) {
1552 		/* We just released an l2, unhold the matching l1 */
1553 		pd_entry_t *l0, tl0;
1554 		vm_page_t l1pg;
1555 
1556 		l0 = pmap_l0(pmap, va);
1557 		tl0 = pmap_load(l0);
1558 		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1559 		pmap_unwire_l3(pmap, va, l1pg, free);
1560 	}
1561 	pmap_invalidate_page(pmap, va);
1562 
1563 	/*
1564 	 * Put page on a list so that it is released after
1565 	 * *ALL* TLB shootdown is done
1566 	 */
1567 	pmap_add_delayed_free_list(m, free, TRUE);
1568 }
1569 
1570 /*
1571  * After removing a page table entry, this routine is used to
1572  * conditionally free the page, and manage the reference count.
1573  */
1574 static int
1575 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1576     struct spglist *free)
1577 {
1578 	vm_page_t mpte;
1579 
1580 	if (va >= VM_MAXUSER_ADDRESS)
1581 		return (0);
1582 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1583 	mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
1584 	return (pmap_unwire_l3(pmap, va, mpte, free));
1585 }
1586 
1587 /*
1588  * Release a page table page reference after a failed attempt to create a
1589  * mapping.
1590  */
1591 static void
1592 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
1593 {
1594 	struct spglist free;
1595 
1596 	SLIST_INIT(&free);
1597 	if (pmap_unwire_l3(pmap, va, mpte, &free)) {
1598 		/*
1599 		 * Although "va" was never mapped, the TLB could nonetheless
1600 		 * have intermediate entries that refer to the freed page
1601 		 * table pages.  Invalidate those entries.
1602 		 *
1603 		 * XXX redundant invalidation (See _pmap_unwire_l3().)
1604 		 */
1605 		pmap_invalidate_page(pmap, va);
1606 		vm_page_free_pages_toq(&free, true);
1607 	}
1608 }
1609 
1610 void
1611 pmap_pinit0(pmap_t pmap)
1612 {
1613 
1614 	PMAP_LOCK_INIT(pmap);
1615 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1616 	pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
1617 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1618 	pmap->pm_root.rt_root = 0;
1619 	pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
1620 	pmap->pm_stage = PM_STAGE1;
1621 	pmap->pm_asid_set = &asids;
1622 
1623 	PCPU_SET(curpmap, pmap);
1624 }
1625 
1626 int
1627 pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage)
1628 {
1629 	vm_page_t l0pt;
1630 
1631 	/*
1632 	 * allocate the l0 page
1633 	 */
1634 	while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
1635 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1636 		vm_wait(NULL);
1637 
1638 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(l0pt);
1639 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1640 
1641 	if ((l0pt->flags & PG_ZERO) == 0)
1642 		pagezero(pmap->pm_l0);
1643 
1644 	pmap->pm_root.rt_root = 0;
1645 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1646 	pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
1647 
1648 	pmap->pm_stage = stage;
1649 	switch (stage) {
1650 	case PM_STAGE1:
1651 		pmap->pm_asid_set = &asids;
1652 		break;
1653 	case PM_STAGE2:
1654 		pmap->pm_asid_set = &vmids;
1655 		break;
1656 	default:
1657 		panic("%s: Invalid pmap type %d", __func__, stage);
1658 		break;
1659 	}
1660 
1661 	/* XXX Temporarily disable deferred ASID allocation. */
1662 	pmap_alloc_asid(pmap);
1663 
1664 	return (1);
1665 }
1666 
1667 int
1668 pmap_pinit(pmap_t pmap)
1669 {
1670 
1671 	return (pmap_pinit_stage(pmap, PM_STAGE1));
1672 }
1673 
1674 /*
1675  * This routine is called if the desired page table page does not exist.
1676  *
1677  * If page table page allocation fails, this routine may sleep before
1678  * returning NULL.  It sleeps only if a lock pointer was given.
1679  *
1680  * Note: If a page allocation fails at page table level two or three,
1681  * one or two pages may be held during the wait, only to be released
1682  * afterwards.  This conservative approach is easily argued to avoid
1683  * race conditions.
1684  */
1685 static vm_page_t
1686 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1687 {
1688 	vm_page_t m, l1pg, l2pg;
1689 
1690 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1691 
1692 	/*
1693 	 * Allocate a page table page.
1694 	 */
1695 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1696 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1697 		if (lockp != NULL) {
1698 			RELEASE_PV_LIST_LOCK(lockp);
1699 			PMAP_UNLOCK(pmap);
1700 			vm_wait(NULL);
1701 			PMAP_LOCK(pmap);
1702 		}
1703 
1704 		/*
1705 		 * Indicate the need to retry.  While waiting, the page table
1706 		 * page may have been allocated.
1707 		 */
1708 		return (NULL);
1709 	}
1710 	if ((m->flags & PG_ZERO) == 0)
1711 		pmap_zero_page(m);
1712 
1713 	/*
1714 	 * Because of AArch64's weak memory consistency model, we must have a
1715 	 * barrier here to ensure that the stores for zeroing "m", whether by
1716 	 * pmap_zero_page() or an earlier function, are visible before adding
1717 	 * "m" to the page table.  Otherwise, a page table walk by another
1718 	 * processor's MMU could see the mapping to "m" and a stale, non-zero
1719 	 * PTE within "m".
1720 	 */
1721 	dmb(ishst);
1722 
1723 	/*
1724 	 * Map the pagetable page into the process address space, if
1725 	 * it isn't already there.
1726 	 */
1727 
1728 	if (ptepindex >= (NUL2E + NUL1E)) {
1729 		pd_entry_t *l0;
1730 		vm_pindex_t l0index;
1731 
1732 		l0index = ptepindex - (NUL2E + NUL1E);
1733 		l0 = &pmap->pm_l0[l0index];
1734 		pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1735 	} else if (ptepindex >= NUL2E) {
1736 		vm_pindex_t l0index, l1index;
1737 		pd_entry_t *l0, *l1;
1738 		pd_entry_t tl0;
1739 
1740 		l1index = ptepindex - NUL2E;
1741 		l0index = l1index >> L0_ENTRIES_SHIFT;
1742 
1743 		l0 = &pmap->pm_l0[l0index];
1744 		tl0 = pmap_load(l0);
1745 		if (tl0 == 0) {
1746 			/* recurse for allocating page dir */
1747 			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1748 			    lockp) == NULL) {
1749 				vm_page_unwire_noq(m);
1750 				vm_page_free_zero(m);
1751 				return (NULL);
1752 			}
1753 		} else {
1754 			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1755 			l1pg->ref_count++;
1756 		}
1757 
1758 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1759 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
1760 		pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1761 	} else {
1762 		vm_pindex_t l0index, l1index;
1763 		pd_entry_t *l0, *l1, *l2;
1764 		pd_entry_t tl0, tl1;
1765 
1766 		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1767 		l0index = l1index >> L0_ENTRIES_SHIFT;
1768 
1769 		l0 = &pmap->pm_l0[l0index];
1770 		tl0 = pmap_load(l0);
1771 		if (tl0 == 0) {
1772 			/* recurse for allocating page dir */
1773 			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1774 			    lockp) == NULL) {
1775 				vm_page_unwire_noq(m);
1776 				vm_page_free_zero(m);
1777 				return (NULL);
1778 			}
1779 			tl0 = pmap_load(l0);
1780 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1781 			l1 = &l1[l1index & Ln_ADDR_MASK];
1782 		} else {
1783 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1784 			l1 = &l1[l1index & Ln_ADDR_MASK];
1785 			tl1 = pmap_load(l1);
1786 			if (tl1 == 0) {
1787 				/* recurse for allocating page dir */
1788 				if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1789 				    lockp) == NULL) {
1790 					vm_page_unwire_noq(m);
1791 					vm_page_free_zero(m);
1792 					return (NULL);
1793 				}
1794 			} else {
1795 				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1796 				l2pg->ref_count++;
1797 			}
1798 		}
1799 
1800 		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1801 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
1802 		pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1803 	}
1804 
1805 	pmap_resident_count_inc(pmap, 1);
1806 
1807 	return (m);
1808 }
1809 
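/*
 * Return a pointer to the L2 entry for the given virtual address, allocating
 * any missing intermediate page table pages for a user address.  For a user
 * va, *l2pgp is set to the page holding the L2 entry, with a reference held;
 * for a kernel va, it is set to NULL.  Returns NULL only when "lockp" is
 * NULL and the allocation fails; with a lock pointer the allocation is
 * retried.
 */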
1810 static pd_entry_t *
1811 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
1812     struct rwlock **lockp)
1813 {
1814 	pd_entry_t *l1, *l2;
1815 	vm_page_t l2pg;
1816 	vm_pindex_t l2pindex;
1817 
1818 retry:
1819 	l1 = pmap_l1(pmap, va);
1820 	if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
1821 		l2 = pmap_l1_to_l2(l1, va);
1822 		if (va < VM_MAXUSER_ADDRESS) {
1823 			/* Add a reference to the L2 page. */
1824 			l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
1825 			l2pg->ref_count++;
1826 		} else
1827 			l2pg = NULL;
1828 	} else if (va < VM_MAXUSER_ADDRESS) {
		/* Allocate an L2 page table page. */
1830 		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
1831 		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
1832 		if (l2pg == NULL) {
1833 			if (lockp != NULL)
1834 				goto retry;
1835 			else
1836 				return (NULL);
1837 		}
1838 		l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
1839 		l2 = &l2[pmap_l2_index(va)];
1840 	} else
1841 		panic("pmap_alloc_l2: missing page table page for va %#lx",
1842 		    va);
1843 	*l2pgp = l2pg;
1844 	return (l2);
1845 }
1846 
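/*
 * Return the L3 page table page that will map the given virtual address,
 * allocating it (and any missing intermediate levels) as needed, with a
 * reference held on the returned page.
 */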
1847 static vm_page_t
1848 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1849 {
1850 	vm_pindex_t ptepindex;
1851 	pd_entry_t *pde, tpde;
1852 #ifdef INVARIANTS
1853 	pt_entry_t *pte;
1854 #endif
1855 	vm_page_t m;
1856 	int lvl;
1857 
	/*
	 * Calculate the page table page index.
	 */
1861 	ptepindex = pmap_l2_pindex(va);
1862 retry:
1863 	/*
1864 	 * Get the page directory entry
1865 	 */
1866 	pde = pmap_pde(pmap, va, &lvl);
1867 
	/*
	 * If the page table page is mapped, we just increment the reference
	 * count and return it.  A level 2 pde points to a level 3 table.
	 */
1873 	switch (lvl) {
1874 	case -1:
1875 		break;
1876 	case 0:
1877 #ifdef INVARIANTS
1878 		pte = pmap_l0_to_l1(pde, va);
1879 		KASSERT(pmap_load(pte) == 0,
1880 		    ("pmap_alloc_l3: TODO: l0 superpages"));
1881 #endif
1882 		break;
1883 	case 1:
1884 #ifdef INVARIANTS
1885 		pte = pmap_l1_to_l2(pde, va);
1886 		KASSERT(pmap_load(pte) == 0,
1887 		    ("pmap_alloc_l3: TODO: l1 superpages"));
1888 #endif
1889 		break;
1890 	case 2:
1891 		tpde = pmap_load(pde);
1892 		if (tpde != 0) {
1893 			m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1894 			m->ref_count++;
1895 			return (m);
1896 		}
1897 		break;
1898 	default:
1899 		panic("pmap_alloc_l3: Invalid level %d", lvl);
1900 	}
1901 
	/*
	 * The page table page isn't mapped, or has been deallocated;
	 * allocate it now.
	 */
1905 	m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1906 	if (m == NULL && lockp != NULL)
1907 		goto retry;
1908 
1909 	return (m);
1910 }
1911 
1912 /***************************************************
1913  * Pmap allocation/deallocation routines.
1914  ***************************************************/
1915 
1916 /*
1917  * Release any resources held by the given physical map.
1918  * Called when a pmap initialized by pmap_pinit is being released.
1919  * Should only be called if the map contains no valid mappings.
1920  */
1921 void
1922 pmap_release(pmap_t pmap)
1923 {
1924 	struct asid_set *set;
1925 	vm_page_t m;
1926 	int asid;
1927 
1928 	KASSERT(pmap->pm_stats.resident_count == 0,
1929 	    ("pmap_release: pmap resident count %ld != 0",
1930 	    pmap->pm_stats.resident_count));
1931 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
1932 	    ("pmap_release: pmap has reserved page table page(s)"));
1933 	PMAP_ASSERT_STAGE1(pmap);
1934 
1935 	set = pmap->pm_asid_set;
1936 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
1937 
1938 	mtx_lock_spin(&set->asid_set_mutex);
1939 	if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) {
1940 		asid = COOKIE_TO_ASID(pmap->pm_cookie);
1941 		KASSERT(asid >= ASID_FIRST_AVAILABLE &&
1942 		    asid < set->asid_set_size,
1943 		    ("pmap_release: pmap cookie has out-of-range asid"));
1944 		bit_clear(set->asid_set, asid);
1945 	}
1946 	mtx_unlock_spin(&set->asid_set_mutex);
1947 
1948 	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
1949 	vm_page_unwire_noq(m);
1950 	vm_page_free_zero(m);
1951 }
1952 
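/*
 * Report the size of the kernel virtual address space, and how much of it
 * remains beyond kernel_vm_end, via the vm.kvm_size and vm.kvm_free sysctls.
 */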
1953 static int
1954 kvm_size(SYSCTL_HANDLER_ARGS)
1955 {
1956 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1957 
1958 	return sysctl_handle_long(oidp, &ksize, 0, req);
1959 }
1960 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
1961     0, 0, kvm_size, "LU",
1962     "Size of KVM");
1963 
1964 static int
1965 kvm_free(SYSCTL_HANDLER_ARGS)
1966 {
1967 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1968 
1969 	return sysctl_handle_long(oidp, &kfree, 0, req);
1970 }
1971 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
1972     0, 0, kvm_free, "LU",
1973     "Amount of KVM free");
1974 
/*
 * Grow the number of kernel page table entries, if needed.
 */
1978 void
1979 pmap_growkernel(vm_offset_t addr)
1980 {
1981 	vm_paddr_t paddr;
1982 	vm_page_t nkpg;
1983 	pd_entry_t *l0, *l1, *l2;
1984 
1985 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1986 
1987 	addr = roundup2(addr, L2_SIZE);
1988 	if (addr - 1 >= vm_map_max(kernel_map))
1989 		addr = vm_map_max(kernel_map);
1990 	while (kernel_vm_end < addr) {
1991 		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
1992 		KASSERT(pmap_load(l0) != 0,
1993 		    ("pmap_growkernel: No level 0 kernel entry"));
1994 
1995 		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
1996 		if (pmap_load(l1) == 0) {
			/* We need a new L1 entry. */
1998 			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
1999 			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
2000 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2001 			if (nkpg == NULL)
2002 				panic("pmap_growkernel: no memory to grow kernel");
2003 			if ((nkpg->flags & PG_ZERO) == 0)
2004 				pmap_zero_page(nkpg);
2005 			/* See the dmb() in _pmap_alloc_l3(). */
2006 			dmb(ishst);
2007 			paddr = VM_PAGE_TO_PHYS(nkpg);
2008 			pmap_store(l1, paddr | L1_TABLE);
2009 			continue; /* try again */
2010 		}
2011 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
2012 		if (pmap_load(l2) != 0) {
2013 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
2014 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2015 				kernel_vm_end = vm_map_max(kernel_map);
2016 				break;
2017 			}
2018 			continue;
2019 		}
2020 
2021 		nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
2022 		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2023 		    VM_ALLOC_ZERO);
2024 		if (nkpg == NULL)
2025 			panic("pmap_growkernel: no memory to grow kernel");
2026 		if ((nkpg->flags & PG_ZERO) == 0)
2027 			pmap_zero_page(nkpg);
2028 		/* See the dmb() in _pmap_alloc_l3(). */
2029 		dmb(ishst);
2030 		paddr = VM_PAGE_TO_PHYS(nkpg);
2031 		pmap_store(l2, paddr | L2_TABLE);
2032 
2033 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
2034 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2035 			kernel_vm_end = vm_map_max(kernel_map);
2036 			break;
2037 		}
2038 	}
2039 }
2040 
2042 /***************************************************
2043  * page management routines.
2044  ***************************************************/
2045 
2046 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
2047 CTASSERT(_NPCM == 3);
2048 CTASSERT(_NPCPV == 168);
2049 
2050 static __inline struct pv_chunk *
2051 pv_to_chunk(pv_entry_t pv)
2052 {
2053 
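	/*
	 * A chunk occupies exactly one page (see the CTASSERTs above), so
	 * clearing the page-offset bits of any pv entry's address recovers
	 * the address of its chunk header.
	 */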
2054 	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
2055 }
2056 
2057 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
2058 
2059 #define	PC_FREE0	0xfffffffffffffffful
2060 #define	PC_FREE1	0xfffffffffffffffful
2061 #define	PC_FREE2	0x000000fffffffffful
2062 
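/*
 * Together the free masks cover _NPCPV == 168 pv entries: fields 0 and 1
 * track 64 entries each, and field 2 tracks the remaining 40 (hence only
 * its low 40 bits are set).  A set bit denotes a free entry.
 */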
2063 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
2064 
2065 #if 0
2066 #ifdef PV_STATS
2067 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2068 
2069 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2070 	"Current number of pv entry chunks");
2071 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2072 	"Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
	"Total number of pv entry chunks freed");
2075 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
2076 	"Number of times tried to get a chunk page but failed.");
2077 
2078 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
2079 static int pv_entry_spare;
2080 
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
	"Total number of pv entries freed");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
	"Total number of pv entries allocated");
2085 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2086 	"Current number of pv entries");
2087 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2088 	"Current number of spare pv entries");
2089 #endif
2090 #endif /* 0 */
2091 
/*
 * We are in a serious low memory condition.  Resort to
 * drastic measures to free some pages so we can allocate
 * another pv entry chunk.  We do not, however, unmap 2MB page
 * mappings, because subsequent accesses would allocate per-page
 * pv entries until repromotion occurs, thereby exacerbating the
 * shortage of free pv entries.
 *
 * Returns NULL if PV entries were reclaimed from the specified pmap.
 */
2103 static vm_page_t
2104 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
2105 {
2106 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
2107 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
2108 	struct md_page *pvh;
2109 	pd_entry_t *pde;
2110 	pmap_t next_pmap, pmap;
2111 	pt_entry_t *pte, tpte;
2112 	pv_entry_t pv;
2113 	vm_offset_t va;
2114 	vm_page_t m, m_pc;
2115 	struct spglist free;
2116 	uint64_t inuse;
2117 	int bit, field, freed, lvl;
2118 	static int active_reclaims = 0;
2119 
2120 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2121 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
2122 
2123 	pmap = NULL;
2124 	m_pc = NULL;
2125 	SLIST_INIT(&free);
2126 	bzero(&pc_marker_b, sizeof(pc_marker_b));
2127 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
2128 	pc_marker = (struct pv_chunk *)&pc_marker_b;
2129 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
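	/*
	 * The markers are special chunks with a NULL pc_pmap; the scan
	 * below relies on that to distinguish them from real chunks.
	 */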
2130 
2131 	mtx_lock(&pv_chunks_mutex);
2132 	active_reclaims++;
2133 	TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
2134 	TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
2135 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
2136 	    SLIST_EMPTY(&free)) {
2137 		next_pmap = pc->pc_pmap;
2138 		if (next_pmap == NULL) {
2139 			/*
2140 			 * The next chunk is a marker.  However, it is
2141 			 * not our marker, so active_reclaims must be
2142 			 * > 1.  Consequently, the next_chunk code
2143 			 * will not rotate the pv_chunks list.
2144 			 */
2145 			goto next_chunk;
2146 		}
2147 		mtx_unlock(&pv_chunks_mutex);
2148 
2149 		/*
2150 		 * A pv_chunk can only be removed from the pc_lru list
2151 		 * when both pv_chunks_mutex is owned and the
2152 		 * corresponding pmap is locked.
2153 		 */
2154 		if (pmap != next_pmap) {
2155 			if (pmap != NULL && pmap != locked_pmap)
2156 				PMAP_UNLOCK(pmap);
2157 			pmap = next_pmap;
2158 			/* Avoid deadlock and lock recursion. */
2159 			if (pmap > locked_pmap) {
2160 				RELEASE_PV_LIST_LOCK(lockp);
2161 				PMAP_LOCK(pmap);
2162 				mtx_lock(&pv_chunks_mutex);
2163 				continue;
2164 			} else if (pmap != locked_pmap) {
2165 				if (PMAP_TRYLOCK(pmap)) {
2166 					mtx_lock(&pv_chunks_mutex);
2167 					continue;
2168 				} else {
2169 					pmap = NULL; /* pmap is not locked */
2170 					mtx_lock(&pv_chunks_mutex);
2171 					pc = TAILQ_NEXT(pc_marker, pc_lru);
2172 					if (pc == NULL ||
2173 					    pc->pc_pmap != next_pmap)
2174 						continue;
2175 					goto next_chunk;
2176 				}
2177 			}
2178 		}
2179 
2180 		/*
2181 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
2182 		 */
2183 		freed = 0;
2184 		for (field = 0; field < _NPCM; field++) {
2185 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2186 			    inuse != 0; inuse &= ~(1UL << bit)) {
2187 				bit = ffsl(inuse) - 1;
2188 				pv = &pc->pc_pventry[field * 64 + bit];
2189 				va = pv->pv_va;
2190 				pde = pmap_pde(pmap, va, &lvl);
2191 				if (lvl != 2)
2192 					continue;
2193 				pte = pmap_l2_to_l3(pde, va);
2194 				tpte = pmap_load(pte);
2195 				if ((tpte & ATTR_SW_WIRED) != 0)
2196 					continue;
2197 				tpte = pmap_load_clear(pte);
2198 				m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
2199 				if (pmap_pte_dirty(pmap, tpte))
2200 					vm_page_dirty(m);
2201 				if ((tpte & ATTR_AF) != 0) {
2202 					pmap_invalidate_page(pmap, va);
2203 					vm_page_aflag_set(m, PGA_REFERENCED);
2204 				}
2205 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2206 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2207 				m->md.pv_gen++;
2208 				if (TAILQ_EMPTY(&m->md.pv_list) &&
2209 				    (m->flags & PG_FICTITIOUS) == 0) {
2210 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2211 					if (TAILQ_EMPTY(&pvh->pv_list)) {
2212 						vm_page_aflag_clear(m,
2213 						    PGA_WRITEABLE);
2214 					}
2215 				}
2216 				pc->pc_map[field] |= 1UL << bit;
2217 				pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
2218 				freed++;
2219 			}
2220 		}
2221 		if (freed == 0) {
2222 			mtx_lock(&pv_chunks_mutex);
2223 			goto next_chunk;
2224 		}
2225 		/* Every freed mapping is for a 4 KB page. */
2226 		pmap_resident_count_dec(pmap, freed);
2227 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
2228 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
2229 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
2230 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2231 		if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
2232 		    pc->pc_map[2] == PC_FREE2) {
2233 			PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2234 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2235 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2236 			/* Entire chunk is free; return it. */
2237 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2238 			dump_drop_page(m_pc->phys_addr);
2239 			mtx_lock(&pv_chunks_mutex);
2240 			TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2241 			break;
2242 		}
2243 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2244 		mtx_lock(&pv_chunks_mutex);
2245 		/* One freed pv entry in locked_pmap is sufficient. */
2246 		if (pmap == locked_pmap)
2247 			break;
2248 
2249 next_chunk:
2250 		TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2251 		TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
2252 		if (active_reclaims == 1 && pmap != NULL) {
2253 			/*
2254 			 * Rotate the pv chunks list so that we do not
2255 			 * scan the same pv chunks that could not be
2256 			 * freed (because they contained a wired
2257 			 * and/or superpage mapping) on every
2258 			 * invocation of reclaim_pv_chunk().
2259 			 */
2260 			while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
2261 				MPASS(pc->pc_pmap != NULL);
2262 				TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2263 				TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2264 			}
2265 		}
2266 	}
2267 	TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2268 	TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
2269 	active_reclaims--;
2270 	mtx_unlock(&pv_chunks_mutex);
2271 	if (pmap != NULL && pmap != locked_pmap)
2272 		PMAP_UNLOCK(pmap);
2273 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
2274 		m_pc = SLIST_FIRST(&free);
2275 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2276 		/* Recycle a freed page table page. */
2277 		m_pc->ref_count = 1;
2278 	}
2279 	vm_page_free_pages_toq(&free, true);
2280 	return (m_pc);
2281 }
2282 
/*
 * Free the pv entry back to its chunk's free bitmap, releasing the whole
 * chunk if it becomes entirely free.
 */
2286 static void
2287 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2288 {
2289 	struct pv_chunk *pc;
2290 	int idx, field, bit;
2291 
2292 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2293 	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
2294 	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
2295 	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
2296 	pc = pv_to_chunk(pv);
2297 	idx = pv - &pc->pc_pventry[0];
2298 	field = idx / 64;
2299 	bit = idx % 64;
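	/*
	 * For example, the pv entry at index 100 within its chunk maps to
	 * field 1, bit 36, since 100 / 64 == 1 and 100 % 64 == 36.
	 */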
2300 	pc->pc_map[field] |= 1ul << bit;
2301 	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
2302 	    pc->pc_map[2] != PC_FREE2) {
2303 		/* 98% of the time, pc is already at the head of the list. */
2304 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
2305 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2306 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2307 		}
2308 		return;
2309 	}
2310 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2311 	free_pv_chunk(pc);
2312 }
2313 
2314 static void
2315 free_pv_chunk(struct pv_chunk *pc)
2316 {
2317 	vm_page_t m;
2318 
2319 	mtx_lock(&pv_chunks_mutex);
	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2321 	mtx_unlock(&pv_chunks_mutex);
2322 	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2323 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2324 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
	/* Entire chunk is free; return it. */
2326 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2327 	dump_drop_page(m->phys_addr);
2328 	vm_page_unwire_noq(m);
2329 	vm_page_free(m);
2330 }
2331 
2332 /*
2333  * Returns a new PV entry, allocating a new PV chunk from the system when
2334  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
2335  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
2336  * returned.
2337  *
2338  * The given PV list lock may be released.
2339  */
2340 static pv_entry_t
2341 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
2342 {
2343 	int bit, field;
2344 	pv_entry_t pv;
2345 	struct pv_chunk *pc;
2346 	vm_page_t m;
2347 
2348 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2349 	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
2350 retry:
2351 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2352 	if (pc != NULL) {
2353 		for (field = 0; field < _NPCM; field++) {
2354 			if (pc->pc_map[field]) {
2355 				bit = ffsl(pc->pc_map[field]) - 1;
2356 				break;
2357 			}
2358 		}
2359 		if (field < _NPCM) {
2360 			pv = &pc->pc_pventry[field * 64 + bit];
2361 			pc->pc_map[field] &= ~(1ul << bit);
2362 			/* If this was the last item, move it to tail */
2363 			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
2364 			    pc->pc_map[2] == 0) {
2365 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2366 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
2367 				    pc_list);
2368 			}
2369 			PV_STAT(atomic_add_long(&pv_entry_count, 1));
2370 			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
2371 			return (pv);
2372 		}
2373 	}
2374 	/* No free items, allocate another chunk */
2375 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2376 	    VM_ALLOC_WIRED);
2377 	if (m == NULL) {
2378 		if (lockp == NULL) {
2379 			PV_STAT(pc_chunk_tryfail++);
2380 			return (NULL);
2381 		}
2382 		m = reclaim_pv_chunk(pmap, lockp);
2383 		if (m == NULL)
2384 			goto retry;
2385 	}
2386 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2387 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2388 	dump_add_page(m->phys_addr);
2389 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2390 	pc->pc_pmap = pmap;
2391 	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
2392 	pc->pc_map[1] = PC_FREE1;
2393 	pc->pc_map[2] = PC_FREE2;
2394 	mtx_lock(&pv_chunks_mutex);
2395 	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2396 	mtx_unlock(&pv_chunks_mutex);
2397 	pv = &pc->pc_pventry[0];
2398 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2399 	PV_STAT(atomic_add_long(&pv_entry_count, 1));
2400 	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
2401 	return (pv);
2402 }
2403 
2404 /*
2405  * Ensure that the number of spare PV entries in the specified pmap meets or
2406  * exceeds the given count, "needed".
2407  *
2408  * The given PV list lock may be released.
2409  */
2410 static void
2411 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
2412 {
2413 	struct pch new_tail;
2414 	struct pv_chunk *pc;
2415 	vm_page_t m;
2416 	int avail, free;
2417 	bool reclaimed;
2418 
2419 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2420 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
2421 
	/*
	 * Newly allocated PV chunks must be kept on a private list until
	 * the required number of PV chunks have been allocated.  Otherwise,
	 * reclaim_pv_chunk() could recycle one of these chunks.  Each chunk
	 * must, however, be linked into the pmap immediately; only its
	 * insertion into the global pv_chunks list is deferred.
	 */
2428 	TAILQ_INIT(&new_tail);
2429 retry:
2430 	avail = 0;
2431 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
2432 		bit_count((bitstr_t *)pc->pc_map, 0,
2433 		    sizeof(pc->pc_map) * NBBY, &free);
2434 		if (free == 0)
2435 			break;
2436 		avail += free;
2437 		if (avail >= needed)
2438 			break;
2439 	}
2440 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
2441 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2442 		    VM_ALLOC_WIRED);
2443 		if (m == NULL) {
2444 			m = reclaim_pv_chunk(pmap, lockp);
2445 			if (m == NULL)
2446 				goto retry;
2447 			reclaimed = true;
2448 		}
2449 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2450 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2451 		dump_add_page(m->phys_addr);
2452 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2453 		pc->pc_pmap = pmap;
2454 		pc->pc_map[0] = PC_FREE0;
2455 		pc->pc_map[1] = PC_FREE1;
2456 		pc->pc_map[2] = PC_FREE2;
2457 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2458 		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2459 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2460 
2461 		/*
2462 		 * The reclaim might have freed a chunk from the current pmap.
2463 		 * If that chunk contained available entries, we need to
2464 		 * re-count the number of available entries.
2465 		 */
2466 		if (reclaimed)
2467 			goto retry;
2468 	}
2469 	if (!TAILQ_EMPTY(&new_tail)) {
2470 		mtx_lock(&pv_chunks_mutex);
2471 		TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2472 		mtx_unlock(&pv_chunks_mutex);
2473 	}
2474 }
2475 
2476 /*
2477  * First find and then remove the pv entry for the specified pmap and virtual
2478  * address from the specified pv list.  Returns the pv entry if found and NULL
2479  * otherwise.  This operation can be performed on pv lists for either 4KB or
2480  * 2MB page mappings.
2481  */
2482 static __inline pv_entry_t
2483 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2484 {
2485 	pv_entry_t pv;
2486 
2487 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2488 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2489 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2490 			pvh->pv_gen++;
2491 			break;
2492 		}
2493 	}
2494 	return (pv);
2495 }
2496 
2497 /*
2498  * After demotion from a 2MB page mapping to 512 4KB page mappings,
2499  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2500  * entries for each of the 4KB page mappings.
2501  */
2502 static void
2503 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2504     struct rwlock **lockp)
2505 {
2506 	struct md_page *pvh;
2507 	struct pv_chunk *pc;
2508 	pv_entry_t pv;
2509 	vm_offset_t va_last;
2510 	vm_page_t m;
2511 	int bit, field;
2512 
2513 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2514 	KASSERT((va & L2_OFFSET) == 0,
2515 	    ("pmap_pv_demote_l2: va is not 2mpage aligned"));
2516 	KASSERT((pa & L2_OFFSET) == 0,
2517 	    ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2518 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2519 
2520 	/*
2521 	 * Transfer the 2mpage's pv entry for this mapping to the first
2522 	 * page's pv list.  Once this transfer begins, the pv list lock
2523 	 * must not be released until the last pv entry is reinstantiated.
2524 	 */
2525 	pvh = pa_to_pvh(pa);
2526 	pv = pmap_pvh_remove(pvh, pmap, va);
2527 	KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2528 	m = PHYS_TO_VM_PAGE(pa);
2529 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2530 	m->md.pv_gen++;
2531 	/* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2532 	PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2533 	va_last = va + L2_SIZE - PAGE_SIZE;
2534 	for (;;) {
2535 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2536 		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2537 		    pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2538 		for (field = 0; field < _NPCM; field++) {
2539 			while (pc->pc_map[field]) {
2540 				bit = ffsl(pc->pc_map[field]) - 1;
2541 				pc->pc_map[field] &= ~(1ul << bit);
2542 				pv = &pc->pc_pventry[field * 64 + bit];
2543 				va += PAGE_SIZE;
2544 				pv->pv_va = va;
2545 				m++;
				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
				    ("pmap_pv_demote_l2: page %p is not managed",
				    m));
2548 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2549 				m->md.pv_gen++;
2550 				if (va == va_last)
2551 					goto out;
2552 			}
2553 		}
2554 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2555 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2556 	}
2557 out:
2558 	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2559 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2560 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2561 	}
2562 	PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2563 	PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2564 }
2565 
2566 /*
2567  * First find and then destroy the pv entry for the specified pmap and virtual
2568  * address.  This operation can be performed on pv lists for either 4KB or 2MB
2569  * page mappings.
2570  */
2571 static void
2572 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2573 {
2574 	pv_entry_t pv;
2575 
2576 	pv = pmap_pvh_remove(pvh, pmap, va);
2577 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2578 	free_pv_entry(pmap, pv);
2579 }
2580 
2581 /*
2582  * Conditionally create the PV entry for a 4KB page mapping if the required
2583  * memory can be allocated without resorting to reclamation.
2584  */
2585 static boolean_t
2586 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2587     struct rwlock **lockp)
2588 {
2589 	pv_entry_t pv;
2590 
2591 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2592 	/* Pass NULL instead of the lock pointer to disable reclamation. */
2593 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2594 		pv->pv_va = va;
2595 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2596 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2597 		m->md.pv_gen++;
2598 		return (TRUE);
2599 	} else
2600 		return (FALSE);
2601 }
2602 
2603 /*
2604  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
2605  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
2606  * false if the PV entry cannot be allocated without resorting to reclamation.
2607  */
2608 static bool
2609 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
2610     struct rwlock **lockp)
2611 {
2612 	struct md_page *pvh;
2613 	pv_entry_t pv;
2614 	vm_paddr_t pa;
2615 
2616 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2617 	/* Pass NULL instead of the lock pointer to disable reclamation. */
2618 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
2619 	    NULL : lockp)) == NULL)
2620 		return (false);
2621 	pv->pv_va = va;
2622 	pa = l2e & ~ATTR_MASK;
2623 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2624 	pvh = pa_to_pvh(pa);
2625 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2626 	pvh->pv_gen++;
2627 	return (true);
2628 }
2629 
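/*
 * Reinstall the saved L3 page table page in place of a removed kernel L2
 * block mapping.  Kernel page table pages are never freed, and a subsequent
 * pmap_enter() on the range expects the L3 table to exist.  The caller has
 * already performed the "break" half of break-before-make; the store below
 * is the "make".
 */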
2630 static void
2631 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
2632 {
2633 	pt_entry_t newl2, oldl2;
2634 	vm_page_t ml3;
2635 	vm_paddr_t ml3pa;
2636 
2637 	KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
2638 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
2639 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2640 
2641 	ml3 = pmap_remove_pt_page(pmap, va);
2642 	if (ml3 == NULL)
2643 		panic("pmap_remove_kernel_l2: Missing pt page");
2644 
2645 	ml3pa = VM_PAGE_TO_PHYS(ml3);
2646 	newl2 = ml3pa | L2_TABLE;
2647 
2648 	/*
2649 	 * If this page table page was unmapped by a promotion, then it
2650 	 * contains valid mappings.  Zero it to invalidate those mappings.
2651 	 */
2652 	if (ml3->valid != 0)
2653 		pagezero((void *)PHYS_TO_DMAP(ml3pa));
2654 
2655 	/*
2656 	 * Demote the mapping.  The caller must have already invalidated the
2657 	 * mapping (i.e., the "break" in break-before-make).
2658 	 */
2659 	oldl2 = pmap_load_store(l2, newl2);
2660 	KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
2661 	    __func__, l2, oldl2));
2662 }
2663 
/*
 * pmap_remove_l2: Unmap a level 2 (2MB) block mapping.
 */
2667 static int
2668 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2669     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
2670 {
2671 	struct md_page *pvh;
2672 	pt_entry_t old_l2;
2673 	vm_offset_t eva, va;
2674 	vm_page_t m, ml3;
2675 
2676 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2677 	KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
2678 	old_l2 = pmap_load_clear(l2);
2679 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2680 	    ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
2681 
2682 	/*
2683 	 * Since a promotion must break the 4KB page mappings before making
2684 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
2685 	 */
2686 	pmap_invalidate_page(pmap, sva);
2687 
2688 	if (old_l2 & ATTR_SW_WIRED)
2689 		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2690 	pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2691 	if (old_l2 & ATTR_SW_MANAGED) {
2692 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
2693 		pvh = pa_to_pvh(old_l2 & ~ATTR_MASK);
2694 		pmap_pvh_free(pvh, pmap, sva);
2695 		eva = sva + L2_SIZE;
2696 		for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2697 		    va < eva; va += PAGE_SIZE, m++) {
2698 			if (pmap_pte_dirty(pmap, old_l2))
2699 				vm_page_dirty(m);
2700 			if (old_l2 & ATTR_AF)
2701 				vm_page_aflag_set(m, PGA_REFERENCED);
2702 			if (TAILQ_EMPTY(&m->md.pv_list) &&
2703 			    TAILQ_EMPTY(&pvh->pv_list))
2704 				vm_page_aflag_clear(m, PGA_WRITEABLE);
2705 		}
2706 	}
2707 	if (pmap == kernel_pmap) {
2708 		pmap_remove_kernel_l2(pmap, l2, sva);
2709 	} else {
2710 		ml3 = pmap_remove_pt_page(pmap, sva);
2711 		if (ml3 != NULL) {
2712 			KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
2713 			    ("pmap_remove_l2: l3 page not promoted"));
2714 			pmap_resident_count_dec(pmap, 1);
2715 			KASSERT(ml3->ref_count == NL3PG,
2716 			    ("pmap_remove_l2: l3 page ref count error"));
2717 			ml3->ref_count = 0;
2718 			pmap_add_delayed_free_list(ml3, free, FALSE);
2719 		}
2720 	}
2721 	return (pmap_unuse_pt(pmap, sva, l1e, free));
2722 }
2723 
/*
 * pmap_remove_l3: Unmap a single 4KB page from the given pmap.
 */
2727 static int
2728 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2729     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2730 {
2731 	struct md_page *pvh;
2732 	pt_entry_t old_l3;
2733 	vm_page_t m;
2734 
2735 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2736 	old_l3 = pmap_load_clear(l3);
2737 	pmap_invalidate_page(pmap, va);
2738 	if (old_l3 & ATTR_SW_WIRED)
2739 		pmap->pm_stats.wired_count -= 1;
2740 	pmap_resident_count_dec(pmap, 1);
2741 	if (old_l3 & ATTR_SW_MANAGED) {
2742 		m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2743 		if (pmap_pte_dirty(pmap, old_l3))
2744 			vm_page_dirty(m);
2745 		if (old_l3 & ATTR_AF)
2746 			vm_page_aflag_set(m, PGA_REFERENCED);
2747 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2748 		pmap_pvh_free(&m->md, pmap, va);
2749 		if (TAILQ_EMPTY(&m->md.pv_list) &&
2750 		    (m->flags & PG_FICTITIOUS) == 0) {
2751 			pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2752 			if (TAILQ_EMPTY(&pvh->pv_list))
2753 				vm_page_aflag_clear(m, PGA_WRITEABLE);
2754 		}
2755 	}
2756 	return (pmap_unuse_pt(pmap, va, l2e, free));
2757 }
2758 
2759 /*
2760  * Remove the specified range of addresses from the L3 page table that is
2761  * identified by the given L2 entry.
2762  */
2763 static void
2764 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
2765     vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
2766 {
2767 	struct md_page *pvh;
2768 	struct rwlock *new_lock;
2769 	pt_entry_t *l3, old_l3;
2770 	vm_offset_t va;
2771 	vm_page_t l3pg, m;
2772 
2773 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2774 	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
2775 	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
2776 	l3pg = sva < VM_MAXUSER_ADDRESS ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) :
2777 	    NULL;
2778 	va = eva;
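	/*
	 * "va" tracks the start of any batch of pending TLB invalidations:
	 * va == eva means that none are pending; otherwise the range
	 * [va, sva) has been unmapped but not yet invalidated.
	 */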
2779 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
2780 		if (!pmap_l3_valid(pmap_load(l3))) {
2781 			if (va != eva) {
2782 				pmap_invalidate_range(pmap, va, sva);
2783 				va = eva;
2784 			}
2785 			continue;
2786 		}
2787 		old_l3 = pmap_load_clear(l3);
2788 		if ((old_l3 & ATTR_SW_WIRED) != 0)
2789 			pmap->pm_stats.wired_count--;
2790 		pmap_resident_count_dec(pmap, 1);
2791 		if ((old_l3 & ATTR_SW_MANAGED) != 0) {
2792 			m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2793 			if (pmap_pte_dirty(pmap, old_l3))
2794 				vm_page_dirty(m);
2795 			if ((old_l3 & ATTR_AF) != 0)
2796 				vm_page_aflag_set(m, PGA_REFERENCED);
2797 			new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
2798 			if (new_lock != *lockp) {
2799 				if (*lockp != NULL) {
2800 					/*
2801 					 * Pending TLB invalidations must be
2802 					 * performed before the PV list lock is
2803 					 * released.  Otherwise, a concurrent
2804 					 * pmap_remove_all() on a physical page
2805 					 * could return while a stale TLB entry
2806 					 * still provides access to that page.
2807 					 */
2808 					if (va != eva) {
2809 						pmap_invalidate_range(pmap, va,
2810 						    sva);
2811 						va = eva;
2812 					}
2813 					rw_wunlock(*lockp);
2814 				}
2815 				*lockp = new_lock;
2816 				rw_wlock(*lockp);
2817 			}
2818 			pmap_pvh_free(&m->md, pmap, sva);
2819 			if (TAILQ_EMPTY(&m->md.pv_list) &&
2820 			    (m->flags & PG_FICTITIOUS) == 0) {
2821 				pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2822 				if (TAILQ_EMPTY(&pvh->pv_list))
2823 					vm_page_aflag_clear(m, PGA_WRITEABLE);
2824 			}
2825 		}
2826 		if (va == eva)
2827 			va = sva;
2828 		if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) {
2829 			sva += L3_SIZE;
2830 			break;
2831 		}
2832 	}
2833 	if (va != eva)
2834 		pmap_invalidate_range(pmap, va, sva);
2835 }
2836 
2837 /*
2838  *	Remove the given range of addresses from the specified map.
2839  *
2840  *	It is assumed that the start and end are properly
2841  *	rounded to the page size.
2842  */
2843 void
2844 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2845 {
2846 	struct rwlock *lock;
2847 	vm_offset_t va_next;
2848 	pd_entry_t *l0, *l1, *l2;
2849 	pt_entry_t l3_paddr;
2850 	struct spglist free;
2851 
	/*
	 * Perform an unsynchronized read.  This is, however, safe: the
	 * count is only used to skip work on an empty pmap.
	 */
2855 	if (pmap->pm_stats.resident_count == 0)
2856 		return;
2857 
2858 	SLIST_INIT(&free);
2859 
2860 	PMAP_LOCK(pmap);
2861 
2862 	lock = NULL;
2863 	for (; sva < eva; sva = va_next) {
2864 
2865 		if (pmap->pm_stats.resident_count == 0)
2866 			break;
2867 
2868 		l0 = pmap_l0(pmap, sva);
2869 		if (pmap_load(l0) == 0) {
2870 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2871 			if (va_next < sva)
2872 				va_next = eva;
2873 			continue;
2874 		}
2875 
2876 		l1 = pmap_l0_to_l1(l0, sva);
2877 		if (pmap_load(l1) == 0) {
2878 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2879 			if (va_next < sva)
2880 				va_next = eva;
2881 			continue;
2882 		}
2883 
		/*
		 * Calculate the address of the next L2 table boundary.
		 */
2887 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2888 		if (va_next < sva)
2889 			va_next = eva;
2890 
2891 		l2 = pmap_l1_to_l2(l1, sva);
2892 		if (l2 == NULL)
2893 			continue;
2894 
2895 		l3_paddr = pmap_load(l2);
2896 
2897 		if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
2898 			if (sva + L2_SIZE == va_next && eva >= va_next) {
2899 				pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
2900 				    &free, &lock);
2901 				continue;
2902 			} else if (pmap_demote_l2_locked(pmap, l2, sva,
2903 			    &lock) == NULL)
2904 				continue;
2905 			l3_paddr = pmap_load(l2);
2906 		}
2907 
2908 		/*
2909 		 * Weed out invalid mappings.
2910 		 */
2911 		if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2912 			continue;
2913 
2914 		/*
2915 		 * Limit our scan to either the end of the va represented
2916 		 * by the current page table page, or to the end of the
2917 		 * range being removed.
2918 		 */
2919 		if (va_next > eva)
2920 			va_next = eva;
2921 
2922 		pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
2923 		    &lock);
2924 	}
2925 	if (lock != NULL)
2926 		rw_wunlock(lock);
2927 	PMAP_UNLOCK(pmap);
2928 	vm_page_free_pages_toq(&free, true);
2929 }
2930 
2931 /*
2932  *	Routine:	pmap_remove_all
2933  *	Function:
2934  *		Removes this physical page from
2935  *		all physical maps in which it resides.
2936  *		Reflects back modify bits to the pager.
2937  *
2938  *	Notes:
2939  *		Original versions of this routine were very
2940  *		inefficient because they iteratively called
2941  *		pmap_remove (slow...)
2942  */
2943 
2944 void
2945 pmap_remove_all(vm_page_t m)
2946 {
2947 	struct md_page *pvh;
2948 	pv_entry_t pv;
2949 	pmap_t pmap;
2950 	struct rwlock *lock;
2951 	pd_entry_t *pde, tpde;
2952 	pt_entry_t *pte, tpte;
2953 	vm_offset_t va;
2954 	struct spglist free;
2955 	int lvl, pvh_gen, md_gen;
2956 
2957 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2958 	    ("pmap_remove_all: page %p is not managed", m));
2959 	SLIST_INIT(&free);
2960 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2961 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2962 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
2963 retry:
2964 	rw_wlock(lock);
2965 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2966 		pmap = PV_PMAP(pv);
2967 		if (!PMAP_TRYLOCK(pmap)) {
2968 			pvh_gen = pvh->pv_gen;
2969 			rw_wunlock(lock);
2970 			PMAP_LOCK(pmap);
2971 			rw_wlock(lock);
2972 			if (pvh_gen != pvh->pv_gen) {
2973 				rw_wunlock(lock);
2974 				PMAP_UNLOCK(pmap);
2975 				goto retry;
2976 			}
2977 		}
2978 		va = pv->pv_va;
2979 		pte = pmap_pte(pmap, va, &lvl);
2980 		KASSERT(pte != NULL,
2981 		    ("pmap_remove_all: no page table entry found"));
2982 		KASSERT(lvl == 2,
2983 		    ("pmap_remove_all: invalid pte level %d", lvl));
2984 
2985 		pmap_demote_l2_locked(pmap, pte, va, &lock);
2986 		PMAP_UNLOCK(pmap);
2987 	}
2988 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2989 		pmap = PV_PMAP(pv);
2990 		PMAP_ASSERT_STAGE1(pmap);
2991 		if (!PMAP_TRYLOCK(pmap)) {
2992 			pvh_gen = pvh->pv_gen;
2993 			md_gen = m->md.pv_gen;
2994 			rw_wunlock(lock);
2995 			PMAP_LOCK(pmap);
2996 			rw_wlock(lock);
2997 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2998 				rw_wunlock(lock);
2999 				PMAP_UNLOCK(pmap);
3000 				goto retry;
3001 			}
3002 		}
3003 		pmap_resident_count_dec(pmap, 1);
3004 
3005 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
3006 		KASSERT(pde != NULL,
3007 		    ("pmap_remove_all: no page directory entry found"));
3008 		KASSERT(lvl == 2,
3009 		    ("pmap_remove_all: invalid pde level %d", lvl));
3010 		tpde = pmap_load(pde);
3011 
3012 		pte = pmap_l2_to_l3(pde, pv->pv_va);
3013 		tpte = pmap_load_clear(pte);
3014 		if (tpte & ATTR_SW_WIRED)
3015 			pmap->pm_stats.wired_count--;
3016 		if ((tpte & ATTR_AF) != 0) {
3017 			pmap_invalidate_page(pmap, pv->pv_va);
3018 			vm_page_aflag_set(m, PGA_REFERENCED);
3019 		}
3020 
3021 		/*
3022 		 * Update the vm_page_t clean and reference bits.
3023 		 */
3024 		if (pmap_pte_dirty(pmap, tpte))
3025 			vm_page_dirty(m);
3026 		pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
3027 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3028 		m->md.pv_gen++;
3029 		free_pv_entry(pmap, pv);
3030 		PMAP_UNLOCK(pmap);
3031 	}
3032 	vm_page_aflag_clear(m, PGA_WRITEABLE);
3033 	rw_wunlock(lock);
3034 	vm_page_free_pages_toq(&free, true);
3035 }
3036 
/*
 * pmap_protect_l2: Restrict the permissions of a 2MB block mapping.
 */
3040 static void
3041 pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
3042     pt_entry_t nbits)
3043 {
3044 	pd_entry_t old_l2;
3045 	vm_page_t m, mt;
3046 
3047 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3048 	PMAP_ASSERT_STAGE1(pmap);
3049 	KASSERT((sva & L2_OFFSET) == 0,
3050 	    ("pmap_protect_l2: sva is not 2mpage aligned"));
3051 	old_l2 = pmap_load(l2);
3052 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
3053 	    ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
3054 
3055 	/*
3056 	 * Return if the L2 entry already has the desired access restrictions
3057 	 * in place.
3058 	 */
3059 retry:
3060 	if ((old_l2 & mask) == nbits)
3061 		return;
3062 
3063 	/*
3064 	 * When a dirty read/write superpage mapping is write protected,
3065 	 * update the dirty field of each of the superpage's constituent 4KB
3066 	 * pages.
3067 	 */
3068 	if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
3069 	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
3070 	    pmap_pte_dirty(pmap, old_l2)) {
3071 		m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
3072 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3073 			vm_page_dirty(mt);
3074 	}
3075 
3076 	if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
3077 		goto retry;
3078 
3079 	/*
3080 	 * Since a promotion must break the 4KB page mappings before making
3081 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
3082 	 */
3083 	pmap_invalidate_page(pmap, sva);
3084 }
3085 
3086 /*
3087  *	Set the physical protection on the
3088  *	specified range of this map as requested.
3089  */
3090 void
3091 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3092 {
3093 	vm_offset_t va, va_next;
3094 	pd_entry_t *l0, *l1, *l2;
3095 	pt_entry_t *l3p, l3, mask, nbits;
3096 
3097 	PMAP_ASSERT_STAGE1(pmap);
3098 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
3099 	if (prot == VM_PROT_NONE) {
3100 		pmap_remove(pmap, sva, eva);
3101 		return;
3102 	}
3103 
3104 	mask = nbits = 0;
3105 	if ((prot & VM_PROT_WRITE) == 0) {
3106 		mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
3107 		nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
3108 	}
3109 	if ((prot & VM_PROT_EXECUTE) == 0) {
3110 		mask |= ATTR_S1_XN;
3111 		nbits |= ATTR_S1_XN;
3112 	}
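	/*
	 * Each mapping below is updated as (pte & ~mask) | nbits: "mask"
	 * selects the attribute bits that may change and "nbits" supplies
	 * their new values.
	 */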
3113 	if (mask == 0)
3114 		return;
3115 
3116 	PMAP_LOCK(pmap);
3117 	for (; sva < eva; sva = va_next) {
3118 
3119 		l0 = pmap_l0(pmap, sva);
3120 		if (pmap_load(l0) == 0) {
3121 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3122 			if (va_next < sva)
3123 				va_next = eva;
3124 			continue;
3125 		}
3126 
3127 		l1 = pmap_l0_to_l1(l0, sva);
3128 		if (pmap_load(l1) == 0) {
3129 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3130 			if (va_next < sva)
3131 				va_next = eva;
3132 			continue;
3133 		}
3134 
3135 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3136 		if (va_next < sva)
3137 			va_next = eva;
3138 
3139 		l2 = pmap_l1_to_l2(l1, sva);
3140 		if (pmap_load(l2) == 0)
3141 			continue;
3142 
3143 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3144 			if (sva + L2_SIZE == va_next && eva >= va_next) {
3145 				pmap_protect_l2(pmap, l2, sva, mask, nbits);
3146 				continue;
3147 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
3148 				continue;
3149 		}
3150 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3151 		    ("pmap_protect: Invalid L2 entry after demotion"));
3152 
3153 		if (va_next > eva)
3154 			va_next = eva;
3155 
3156 		va = va_next;
3157 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
3158 		    sva += L3_SIZE) {
3159 			l3 = pmap_load(l3p);
3160 retry:
3161 			/*
3162 			 * Go to the next L3 entry if the current one is
3163 			 * invalid or already has the desired access
3164 			 * restrictions in place.  (The latter case occurs
3165 			 * frequently.  For example, in a "buildworld"
			 * workload, almost 1 out of 4 L3 entries already
			 * has the desired restrictions.)
3168 			 */
3169 			if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
3170 				if (va != va_next) {
3171 					pmap_invalidate_range(pmap, va, sva);
3172 					va = va_next;
3173 				}
3174 				continue;
3175 			}
3176 
3177 			/*
3178 			 * When a dirty read/write mapping is write protected,
3179 			 * update the page's dirty field.
3180 			 */
3181 			if ((l3 & ATTR_SW_MANAGED) != 0 &&
3182 			    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
3183 			    pmap_pte_dirty(pmap, l3))
3184 				vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
3185 
3186 			if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
3187 				goto retry;
3188 			if (va == va_next)
3189 				va = sva;
3190 		}
3191 		if (va != va_next)
3192 			pmap_invalidate_range(pmap, va, sva);
3193 	}
3194 	PMAP_UNLOCK(pmap);
3195 }
3196 
3197 /*
3198  * Inserts the specified page table page into the specified pmap's collection
3199  * of idle page table pages.  Each of a pmap's page table pages is responsible
3200  * for mapping a distinct range of virtual addresses.  The pmap's collection is
3201  * ordered by this virtual address range.
3202  *
3203  * If "promoted" is false, then the page table page "mpte" must be zero filled.
3204  */
3205 static __inline int
3206 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
3207 {
3208 
3209 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3210 	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
3211 	return (vm_radix_insert(&pmap->pm_root, mpte));
3212 }
3213 
3214 /*
3215  * Removes the page table page mapping the specified virtual address from the
3216  * specified pmap's collection of idle page table pages, and returns it.
3217  * Otherwise, returns NULL if there is no page table page corresponding to the
3218  * specified virtual address.
3219  */
3220 static __inline vm_page_t
3221 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
3222 {
3223 
3224 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3225 	return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
3226 }
3227 
3228 /*
3229  * Performs a break-before-make update of a pmap entry. This is needed when
3230  * either promoting or demoting pages to ensure the TLB doesn't get into an
3231  * inconsistent state.
3232  */
3233 static void
3234 pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
3235     vm_offset_t va, vm_size_t size)
3236 {
3237 	register_t intr;
3238 
3239 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3240 
3241 	/*
3242 	 * Ensure we don't get switched out with the page table in an
3243 	 * inconsistent state. We also need to ensure no interrupts fire
3244 	 * as they may make use of an address we are about to invalidate.
3245 	 */
3246 	intr = intr_disable();
3247 
3248 	/*
3249 	 * Clear the old mapping's valid bit, but leave the rest of the entry
3250 	 * unchanged, so that a lockless, concurrent pmap_kextract() can still
3251 	 * lookup the physical address.
3252 	 */
3253 	pmap_clear_bits(pte, ATTR_DESCR_VALID);
3254 	pmap_invalidate_range(pmap, va, va + size);
3255 
3256 	/* Create the new mapping */
3257 	pmap_store(pte, newpte);
3258 	dsb(ishst);
3259 
3260 	intr_restore(intr);
3261 }
3262 
3263 #if VM_NRESERVLEVEL > 0
3264 /*
3265  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
3266  * replace the many pv entries for the 4KB page mappings by a single pv entry
3267  * for the 2MB page mapping.
3268  */
3269 static void
3270 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3271     struct rwlock **lockp)
3272 {
3273 	struct md_page *pvh;
3274 	pv_entry_t pv;
3275 	vm_offset_t va_last;
3276 	vm_page_t m;
3277 
3278 	KASSERT((pa & L2_OFFSET) == 0,
3279 	    ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
3280 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3281 
3282 	/*
3283 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
3284 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
3285 	 * a transfer avoids the possibility that get_pv_entry() calls
3286 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
3287 	 * mappings that is being promoted.
3288 	 */
3289 	m = PHYS_TO_VM_PAGE(pa);
3290 	va = va & ~L2_OFFSET;
3291 	pv = pmap_pvh_remove(&m->md, pmap, va);
3292 	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
3293 	pvh = pa_to_pvh(pa);
3294 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3295 	pvh->pv_gen++;
3296 	/* Free the remaining NPTEPG - 1 pv entries. */
3297 	va_last = va + L2_SIZE - PAGE_SIZE;
3298 	do {
3299 		m++;
3300 		va += PAGE_SIZE;
3301 		pmap_pvh_free(&m->md, pmap, va);
3302 	} while (va < va_last);
3303 }
3304 
3305 /*
3306  * Tries to promote the 512, contiguous 4KB page mappings that are within a
3307  * single level 2 table entry to a single 2MB page mapping.  For promotion
3308  * to occur, two conditions must be met: (1) the 4KB page mappings must map
3309  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
3310  * identical characteristics.
3311  */
3312 static void
3313 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
3314     struct rwlock **lockp)
3315 {
3316 	pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
3317 	vm_page_t mpte;
3318 	vm_offset_t sva;
3319 
3320 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3321 	PMAP_ASSERT_STAGE1(pmap);
3322 
3323 	sva = va & ~L2_OFFSET;
3324 	firstl3 = pmap_l2_to_l3(l2, sva);
3325 	newl2 = pmap_load(firstl3);
3326 
3327 setl2:
3328 	if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) {
3329 		atomic_add_long(&pmap_l2_p_failures, 1);
3330 		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3331 		    " in pmap %p", va, pmap);
3332 		return;
3333 	}
3334 
3335 	if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
3336 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
3337 		if (!atomic_fcmpset_64(l2, &newl2, newl2 & ~ATTR_SW_DBM))
3338 			goto setl2;
3339 		newl2 &= ~ATTR_SW_DBM;
3340 	}
3341 
3342 	pa = newl2 + L2_SIZE - PAGE_SIZE;
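	/*
	 * Note that "pa" carries the expected attribute bits as well as the
	 * physical address, so the single comparison below checks both
	 * physical contiguity and identical attributes at once.
	 */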
3343 	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
3344 		oldl3 = pmap_load(l3);
3345 setl3:
3346 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
3347 		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
3348 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
3349 			    ~ATTR_SW_DBM))
3350 				goto setl3;
3351 			oldl3 &= ~ATTR_SW_DBM;
3352 		}
3353 		if (oldl3 != pa) {
3354 			atomic_add_long(&pmap_l2_p_failures, 1);
3355 			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3356 			    " in pmap %p", va, pmap);
3357 			return;
3358 		}
3359 		pa -= PAGE_SIZE;
3360 	}
3361 
3362 	/*
3363 	 * Save the page table page in its current state until the L2
3364 	 * mapping the superpage is demoted by pmap_demote_l2() or
3365 	 * destroyed by pmap_remove_l3().
3366 	 */
3367 	mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3368 	KASSERT(mpte >= vm_page_array &&
3369 	    mpte < &vm_page_array[vm_page_array_size],
3370 	    ("pmap_promote_l2: page table page is out of range"));
3371 	KASSERT(mpte->pindex == pmap_l2_pindex(va),
3372 	    ("pmap_promote_l2: page table page's pindex is wrong"));
3373 	if (pmap_insert_pt_page(pmap, mpte, true)) {
3374 		atomic_add_long(&pmap_l2_p_failures, 1);
3375 		CTR2(KTR_PMAP,
3376 		    "pmap_promote_l2: failure for va %#lx in pmap %p", va,
3377 		    pmap);
3378 		return;
3379 	}
3380 
3381 	if ((newl2 & ATTR_SW_MANAGED) != 0)
3382 		pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
3383 
3384 	newl2 &= ~ATTR_DESCR_MASK;
3385 	newl2 |= L2_BLOCK;
3386 
3387 	pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
3388 
3389 	atomic_add_long(&pmap_l2_promotions, 1);
3390 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
3391 		    pmap);
3392 }
3393 #endif /* VM_NRESERVLEVEL > 0 */
3394 
3395 /*
3396  *	Insert the given physical page (p) at
3397  *	the specified virtual address (v) in the
3398  *	target physical map with the protection requested.
3399  *
3400  *	If specified, the page will be wired down, meaning
3401  *	that the related pte can not be reclaimed.
3402  *
3403  *	NB:  This is the only routine which MAY NOT lazy-evaluate
3404  *	or lose information.  That is, this routine must actually
3405  *	insert this page into the given map NOW.
3406  */
3407 int
3408 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3409     u_int flags, int8_t psind)
3410 {
3411 	struct rwlock *lock;
3412 	pd_entry_t *pde;
3413 	pt_entry_t new_l3, orig_l3;
3414 	pt_entry_t *l2, *l3;
3415 	pv_entry_t pv;
3416 	vm_paddr_t opa, pa;
3417 	vm_page_t mpte, om;
3418 	boolean_t nosleep;
3419 	int lvl, rv;
3420 
3421 	va = trunc_page(va);
3422 	if ((m->oflags & VPO_UNMANAGED) == 0)
3423 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
3424 	pa = VM_PAGE_TO_PHYS(m);
3425 	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | L3_PAGE);
3426 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
3427 	new_l3 |= pmap_pte_prot(pmap, prot);
3428 
3429 	if ((flags & PMAP_ENTER_WIRED) != 0)
3430 		new_l3 |= ATTR_SW_WIRED;
3431 	if (pmap->pm_stage == PM_STAGE1) {
3432 		if (va < VM_MAXUSER_ADDRESS)
3433 			new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
3434 		else
3435 			new_l3 |= ATTR_S1_UXN;
3436 		if (pmap != kernel_pmap)
3437 			new_l3 |= ATTR_S1_nG;
3438 	} else {
3439 		/*
		 * Clear the access flag on executable mappings; it will be
		 * set later when the page is accessed.  The fault handler is
		 * required to invalidate the I-cache.
3443 		 *
3444 		 * TODO: Switch to the valid flag to allow hardware management
3445 		 * of the access flag. Much of the pmap code assumes the
3446 		 * valid flag is set and fails to destroy the old page tables
3447 		 * correctly if it is clear.
3448 		 */
3449 		if (prot & VM_PROT_EXECUTE)
3450 			new_l3 &= ~ATTR_AF;
3451 	}
3452 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3453 		new_l3 |= ATTR_SW_MANAGED;
3454 		if ((prot & VM_PROT_WRITE) != 0) {
3455 			new_l3 |= ATTR_SW_DBM;
3456 			if ((flags & VM_PROT_WRITE) == 0) {
3457 				PMAP_ASSERT_STAGE1(pmap);
3458 				new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
3459 			}
3460 		}
3461 	}
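	/*
	 * A note on the software dirty-bit scheme above: a writeable managed
	 * mapping is tagged with ATTR_SW_DBM, but is entered read-only unless
	 * the access that triggered this call was itself a write.  The first
	 * write through the read-only mapping then faults, allowing the page
	 * to be marked dirty before write access is granted.
	 */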
3462 
3463 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
3464 
3465 	lock = NULL;
3466 	PMAP_LOCK(pmap);
3467 	if (psind == 1) {
3468 		/* Assert the required virtual and physical alignment. */
3469 		KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
3470 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
3471 		rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
3472 		    flags, m, &lock);
3473 		goto out;
3474 	}
3475 	mpte = NULL;
3476 
3477 	/*
3478 	 * In the case that a page table page is not
3479 	 * resident, we are creating it here.
3480 	 */
3481 retry:
3482 	pde = pmap_pde(pmap, va, &lvl);
3483 	if (pde != NULL && lvl == 2) {
3484 		l3 = pmap_l2_to_l3(pde, va);
3485 		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
3486 			mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3487 			mpte->ref_count++;
3488 		}
3489 		goto havel3;
3490 	} else if (pde != NULL && lvl == 1) {
3491 		l2 = pmap_l1_to_l2(pde, va);
3492 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
3493 		    (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
3494 			l3 = &l3[pmap_l3_index(va)];
3495 			if (va < VM_MAXUSER_ADDRESS) {
3496 				mpte = PHYS_TO_VM_PAGE(
3497 				    pmap_load(l2) & ~ATTR_MASK);
3498 				mpte->ref_count++;
3499 			}
3500 			goto havel3;
3501 		}
3502 		/* We need to allocate an L3 table. */
3503 	}
3504 	if (va < VM_MAXUSER_ADDRESS) {
3505 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
3506 
3507 		/*
3508 		 * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
3509 		 * to handle the possibility that a superpage mapping for "va"
3510 		 * was created while we slept.
3511 		 */
3512 		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
3513 		    nosleep ? NULL : &lock);
3514 		if (mpte == NULL && nosleep) {
3515 			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
3516 			rv = KERN_RESOURCE_SHORTAGE;
3517 			goto out;
3518 		}
3519 		goto retry;
3520 	} else
3521 		panic("pmap_enter: missing L3 table for kernel va %#lx", va);
3522 
3523 havel3:
3524 	orig_l3 = pmap_load(l3);
3525 	opa = orig_l3 & ~ATTR_MASK;
3526 	pv = NULL;
3527 
3528 	/*
3529 	 * Is the specified virtual address already mapped?
3530 	 */
3531 	if (pmap_l3_valid(orig_l3)) {
3532 		/*
3533 		 * Stage 2 pmaps currently only support adding new entries,
3534 		 * so a valid existing mapping implies stage 1.  This
3535 		 * simplifies cache invalidation, which may require a call into EL2.
3536 		 */
3537 		PMAP_ASSERT_STAGE1(pmap);
3538 		/*
3539 		 * Wiring change, just update stats. We don't worry about
3540 		 * wiring PT pages as they remain resident as long as there
3541 		 * are valid mappings in them. Hence, if a user page is wired,
3542 		 * the PT page will be also.
3543 		 */
3544 		if ((flags & PMAP_ENTER_WIRED) != 0 &&
3545 		    (orig_l3 & ATTR_SW_WIRED) == 0)
3546 			pmap->pm_stats.wired_count++;
3547 		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
3548 		    (orig_l3 & ATTR_SW_WIRED) != 0)
3549 			pmap->pm_stats.wired_count--;
3550 
3551 		/*
3552 		 * Remove the extra PT page reference.
3553 		 */
3554 		if (mpte != NULL) {
3555 			mpte->ref_count--;
3556 			KASSERT(mpte->ref_count > 0,
3557 			    ("pmap_enter: missing reference to page table page,"
3558 			     " va: 0x%lx", va));
3559 		}
3560 
3561 		/*
3562 		 * Has the physical page changed?
3563 		 */
3564 		if (opa == pa) {
3565 			/*
3566 			 * No, might be a protection or wiring change.
3567 			 */
3568 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3569 			    (new_l3 & ATTR_SW_DBM) != 0)
3570 				vm_page_aflag_set(m, PGA_WRITEABLE);
3571 			goto validate;
3572 		}
3573 
3574 		/*
3575 		 * The physical page has changed.  Temporarily invalidate
3576 		 * the mapping.
3577 		 */
3578 		orig_l3 = pmap_load_clear(l3);
3579 		KASSERT((orig_l3 & ~ATTR_MASK) == opa,
3580 		    ("pmap_enter: unexpected pa update for %#lx", va));
3581 		if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
3582 			om = PHYS_TO_VM_PAGE(opa);
3583 
3584 			/*
3585 			 * The pmap lock is sufficient to synchronize with
3586 			 * concurrent calls to pmap_page_test_mappings() and
3587 			 * pmap_ts_referenced().
3588 			 */
3589 			if (pmap_pte_dirty(pmap, orig_l3))
3590 				vm_page_dirty(om);
3591 			if ((orig_l3 & ATTR_AF) != 0) {
3592 				pmap_invalidate_page(pmap, va);
3593 				vm_page_aflag_set(om, PGA_REFERENCED);
3594 			}
3595 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3596 			pv = pmap_pvh_remove(&om->md, pmap, va);
3597 			if ((m->oflags & VPO_UNMANAGED) != 0)
3598 				free_pv_entry(pmap, pv);
3599 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3600 			    TAILQ_EMPTY(&om->md.pv_list) &&
3601 			    ((om->flags & PG_FICTITIOUS) != 0 ||
3602 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3603 				vm_page_aflag_clear(om, PGA_WRITEABLE);
3604 		} else {
3605 			KASSERT((orig_l3 & ATTR_AF) != 0,
3606 			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
3607 			pmap_invalidate_page(pmap, va);
3608 		}
3609 		orig_l3 = 0;
3610 	} else {
3611 		/*
3612 		 * Increment the counters.
3613 		 */
3614 		if ((new_l3 & ATTR_SW_WIRED) != 0)
3615 			pmap->pm_stats.wired_count++;
3616 		pmap_resident_count_inc(pmap, 1);
3617 	}
3618 	/*
3619 	 * Enter on the PV list if part of our managed memory.
3620 	 */
3621 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3622 		if (pv == NULL) {
3623 			pv = get_pv_entry(pmap, &lock);
3624 			pv->pv_va = va;
3625 		}
3626 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3627 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3628 		m->md.pv_gen++;
3629 		if ((new_l3 & ATTR_SW_DBM) != 0)
3630 			vm_page_aflag_set(m, PGA_WRITEABLE);
3631 	}
3632 
3633 validate:
3634 	if (pmap->pm_stage == PM_STAGE1) {
3635 		/*
3636 		 * Sync the icache if the mapping has exec permission and the
3637 		 * VM_MEMATTR_WRITE_BACK attribute.  Do it now, before the
3638 		 * mapping is stored and made valid for the hardware table
3639 		 * walk; if done later, other CPUs could access the page
3640 		 * before the caches are properly synced.  Don't do it for
3641 		 * kernel memory, which is mapped executable even when it
3642 		 * won't hold code: the only icache sync the kernel needs is
3643 		 * after a module is loaded and its relocations are
3644 		 * processed, and that is done in elf_cpu_load_file().
3645 		 */
3646 		if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3647 		    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
3648 		    (opa != pa || (orig_l3 & ATTR_S1_XN))) {
3649 			PMAP_ASSERT_STAGE1(pmap);
3650 			cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3651 		}
3652 	} else {
3653 		cpu_dcache_wb_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3654 	}
3655 
3656 	/*
3657 	 * Update the L3 entry
3658 	 */
3659 	if (pmap_l3_valid(orig_l3)) {
3660 		PMAP_ASSERT_STAGE1(pmap);
3661 		KASSERT(opa == pa, ("pmap_enter: invalid update"));
3662 		if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
3663 			/* same PA, different attributes */
3664 			orig_l3 = pmap_load_store(l3, new_l3);
3665 			pmap_invalidate_page(pmap, va);
3666 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3667 			    pmap_pte_dirty(pmap, orig_l3))
3668 				vm_page_dirty(m);
3669 		} else {
3670 			/*
3671 			 * orig_l3 == new_l3
3672 			 * This can happen if multiple threads simultaneously
3673 			 * access a page that is not yet mapped.  This is bad
3674 			 * for performance, since it can cause a full
3675 			 * demotion-NOP-promotion cycle.
3676 			 * Other possible causes are:
3677 			 * - the VM and pmap memory layouts have diverged
3678 			 * - a TLB flush is missing somewhere, so the CPU
3679 			 *   does not see the actual mapping.
3680 			 */
3681 			CTR4(KTR_PMAP, "%s: already mapped page - "
3682 			    "pmap %p va %#lx pte %#lx",
3683 			    __func__, pmap, va, new_l3);
3684 		}
3685 	} else {
3686 		/* New mapping */
3687 		pmap_store(l3, new_l3);
3688 		dsb(ishst);
3689 	}
3690 
3691 #if VM_NRESERVLEVEL > 0
3692 	/*
3693 	 * Try to promote from level 3 pages to a level 2 superpage. This
3694 	 * currently only works on stage 1 pmaps as pmap_promote_l2 looks at
3695 	 * stage 1 specific fields and performs a break-before-make sequence
3696 	 * that is incorrect a stage 2 pmap.
3697 	 * that is incorrect for a stage 2 pmap.
3698 	if ((mpte == NULL || mpte->ref_count == NL3PG) &&
3699 	    pmap_ps_enabled(pmap) && pmap->pm_stage == PM_STAGE1 &&
3700 	    (m->flags & PG_FICTITIOUS) == 0 &&
3701 	    vm_reserv_level_iffullpop(m) == 0) {
3702 		pmap_promote_l2(pmap, pde, va, &lock);
3703 	}
3704 #endif
3705 
3706 	rv = KERN_SUCCESS;
3707 out:
3708 	if (lock != NULL)
3709 		rw_wunlock(lock);
3710 	PMAP_UNLOCK(pmap);
3711 	return (rv);
3712 }
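
/*
 * A typical invocation, sketched for illustration (the exact call made by
 * the fault handler differs; the names here are illustrative only):
 *
 *	rv = pmap_enter(map->pmap, vaddr, m, prot,
 *	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), psind);
 *
 * Note that the access type travels in the low bits of "flags", which is
 * why the code above tests (flags & VM_PROT_WRITE): a read fault on a
 * writeable, managed page is entered read-only with ATTR_SW_DBM set.
 */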
3713 
3714 /*
3715  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
3716  * if successful.  Returns false if (1) a page table page cannot be allocated
3717  * without sleeping, (2) a mapping already exists at the specified virtual
3718  * address, or (3) a PV entry cannot be allocated without reclaiming another
3719  * PV entry.
3720  */
3721 static bool
3722 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3723     struct rwlock **lockp)
3724 {
3725 	pd_entry_t new_l2;
3726 
3727 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3728 	PMAP_ASSERT_STAGE1(pmap);
3729 
3730 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
3731 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
3732 	    L2_BLOCK);
3733 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3734 		new_l2 |= ATTR_SW_MANAGED;
3735 		new_l2 &= ~ATTR_AF;
3736 	}
3737 	if ((prot & VM_PROT_EXECUTE) == 0 ||
3738 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
3739 		new_l2 |= ATTR_S1_XN;
3740 	if (va < VM_MAXUSER_ADDRESS)
3741 		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
3742 	else
3743 		new_l2 |= ATTR_S1_UXN;
3744 	if (pmap != kernel_pmap)
3745 		new_l2 |= ATTR_S1_nG;
3746 	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
3747 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3748 	    KERN_SUCCESS);
3749 }
3750 
3751 /*
3752  * Returns true if every page table entry in the specified page table is
3753  * zero.
3754  */
3755 static bool
3756 pmap_every_pte_zero(vm_paddr_t pa)
3757 {
3758 	pt_entry_t *pt_end, *pte;
3759 
3760 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
3761 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
3762 	for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) {
3763 		if (*pte != 0)
3764 			return (false);
3765 	}
3766 	return (true);
3767 }
3768 
3769 /*
3770  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
3771  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3772  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3773  * a mapping already exists at the specified virtual address.  Returns
3774  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3775  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
3776  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3777  *
3778  * The parameter "m" is only used when creating a managed, writeable mapping.
3779  */
3780 static int
3781 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
3782     vm_page_t m, struct rwlock **lockp)
3783 {
3784 	struct spglist free;
3785 	pd_entry_t *l2, old_l2;
3786 	vm_page_t l2pg, mt;
3787 
3788 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3789 
3790 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
3791 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
3792 		CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
3793 		    va, pmap);
3794 		return (KERN_RESOURCE_SHORTAGE);
3795 	}
3796 
3797 	/*
3798 	 * If there are existing mappings, either abort or remove them.
3799 	 */
3800 	if ((old_l2 = pmap_load(l2)) != 0) {
3801 		KASSERT(l2pg == NULL || l2pg->ref_count > 1,
3802 		    ("pmap_enter_l2: l2pg's ref count is too low"));
3803 		if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
3804 		    VM_MAXUSER_ADDRESS || (old_l2 & ATTR_DESCR_MASK) ==
3805 		    L2_BLOCK || !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
3806 			if (l2pg != NULL)
3807 				l2pg->ref_count--;
3808 			CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx"
3809 			    " in pmap %p", va, pmap);
3810 			return (KERN_FAILURE);
3811 		}
3812 		SLIST_INIT(&free);
3813 		if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
3814 			(void)pmap_remove_l2(pmap, l2, va,
3815 			    pmap_load(pmap_l1(pmap, va)), &free, lockp);
3816 		else
3817 			pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
3818 			    &free, lockp);
3819 		if (va < VM_MAXUSER_ADDRESS) {
3820 			vm_page_free_pages_toq(&free, true);
3821 			KASSERT(pmap_load(l2) == 0,
3822 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
3823 		} else {
3824 			KASSERT(SLIST_EMPTY(&free),
3825 			    ("pmap_enter_l2: freed kernel page table page"));
3826 
3827 			/*
3828 			 * Both pmap_remove_l2() and pmap_remove_l3_range()
3829 			 * will leave the kernel page table page zero filled.
3830 			 * Nonetheless, the TLB could have an intermediate
3831 			 * entry for the kernel page table page.
3832 			 */
3833 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3834 			if (pmap_insert_pt_page(pmap, mt, false))
3835 				panic("pmap_enter_l2: trie insert failed");
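			/*
			 * Break-before-make: clear the old entry and
			 * invalidate the TLB before the replacement block
			 * entry is stored below.
			 */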
3836 			pmap_clear(l2);
3837 			pmap_invalidate_page(pmap, va);
3838 		}
3839 	}
3840 
3841 	if ((new_l2 & ATTR_SW_MANAGED) != 0) {
3842 		/*
3843 		 * Abort this mapping if its PV entry could not be created.
3844 		 */
3845 		if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
3846 			if (l2pg != NULL)
3847 				pmap_abort_ptp(pmap, va, l2pg);
3848 			CTR2(KTR_PMAP,
3849 			    "pmap_enter_l2: failure for va %#lx in pmap %p",
3850 			    va, pmap);
3851 			return (KERN_RESOURCE_SHORTAGE);
3852 		}
3853 		if ((new_l2 & ATTR_SW_DBM) != 0)
3854 			for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3855 				vm_page_aflag_set(mt, PGA_WRITEABLE);
3856 	}
3857 
3858 	/*
3859 	 * Increment counters.
3860 	 */
3861 	if ((new_l2 & ATTR_SW_WIRED) != 0)
3862 		pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
3863 	pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
3864 
3865 	/*
3866 	 * Map the superpage.
3867 	 */
3868 	pmap_store(l2, new_l2);
3869 	dsb(ishst);
3870 
3871 	atomic_add_long(&pmap_l2_mappings, 1);
3872 	CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
3873 	    va, pmap);
3874 
3875 	return (KERN_SUCCESS);
3876 }
3877 
3878 /*
3879  * Maps a sequence of resident pages belonging to the same object.
3880  * The sequence begins with the given page m_start.  This page is
3881  * mapped at the given virtual address start.  Each subsequent page is
3882  * mapped at a virtual address that is offset from start by the same
3883  * amount as the page is offset from m_start within the object.  The
3884  * last page in the sequence is the page with the largest offset from
3885  * m_start that can be mapped at a virtual address less than the given
3886  * virtual address end.  Not every virtual page between start and end
3887  * is mapped; only those for which a resident page exists with the
3888  * corresponding offset from m_start are mapped.
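 *
 * For example, with 4KB base pages and 2MB L2 blocks, a suitably aligned,
 * fully populated run is entered 512 pages at a time through
 * pmap_enter_2mpage(), and the loop advances by L2_SIZE / PAGE_SIZE - 1
 * extra pages after each successful superpage mapping.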
3889  */
3890 void
3891 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3892     vm_page_t m_start, vm_prot_t prot)
3893 {
3894 	struct rwlock *lock;
3895 	vm_offset_t va;
3896 	vm_page_t m, mpte;
3897 	vm_pindex_t diff, psize;
3898 
3899 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
3900 
3901 	psize = atop(end - start);
3902 	mpte = NULL;
3903 	m = m_start;
3904 	lock = NULL;
3905 	PMAP_LOCK(pmap);
3906 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3907 		va = start + ptoa(diff);
3908 		if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
3909 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
3910 		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
3911 			m = &m[L2_SIZE / PAGE_SIZE - 1];
3912 		else
3913 			mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
3914 			    &lock);
3915 		m = TAILQ_NEXT(m, listq);
3916 	}
3917 	if (lock != NULL)
3918 		rw_wunlock(lock);
3919 	PMAP_UNLOCK(pmap);
3920 }
3921 
3922 /*
3923  * this code makes some *MAJOR* assumptions:
3924  * This code makes some *MAJOR* assumptions:
3925  * 1. The current pmap and the given pmap exist.
3926  * 2. The mapping is not wired.
3927  * 3. Only read access is needed.
3928  * 4. No page table pages are required.
3929  * It is *MUCH* faster than pmap_enter...
3930 
3931 void
3932 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3933 {
3934 	struct rwlock *lock;
3935 
3936 	lock = NULL;
3937 	PMAP_LOCK(pmap);
3938 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3939 	if (lock != NULL)
3940 		rw_wunlock(lock);
3941 	PMAP_UNLOCK(pmap);
3942 }
3943 
3944 static vm_page_t
3945 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3946     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3947 {
3948 	pd_entry_t *pde;
3949 	pt_entry_t *l2, *l3, l3_val;
3950 	vm_paddr_t pa;
3951 	int lvl;
3952 
3953 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3954 	    (m->oflags & VPO_UNMANAGED) != 0,
3955 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3956 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3957 	PMAP_ASSERT_STAGE1(pmap);
3958 
3959 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3960 	/*
3961 	 * If a page table page is not resident,
3962 	 * we create it here.
3963 	 */
3964 	if (va < VM_MAXUSER_ADDRESS) {
3965 		vm_pindex_t l2pindex;
3966 
3967 		/*
3968 		 * Calculate pagetable page index
3969 		 * Calculate the page table page index.
3970 		l2pindex = pmap_l2_pindex(va);
3971 		if (mpte && (mpte->pindex == l2pindex)) {
3972 			mpte->ref_count++;
3973 		} else {
3974 			/*
3975 			 * Get the l2 entry
3976 			 */
3977 			pde = pmap_pde(pmap, va, &lvl);
3978 
3979 			/*
3980 			 * If the page table page is mapped, we just increment
3981 			 * the hold count, and activate it.  Otherwise, we
3982 			 * attempt to allocate a page table page.  If this
3983 			 * attempt fails, we don't retry.  Instead, we give up.
3984 			 */
3985 			if (lvl == 1) {
3986 				l2 = pmap_l1_to_l2(pde, va);
3987 				if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
3988 				    L2_BLOCK)
3989 					return (NULL);
3990 			}
3991 			if (lvl == 2 && pmap_load(pde) != 0) {
3992 				mpte =
3993 				    PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3994 				mpte->ref_count++;
3995 			} else {
3996 				/*
3997 				 * Pass NULL instead of the PV list lock
3998 				 * pointer, because we don't intend to sleep.
3999 				 */
4000 				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
4001 				if (mpte == NULL)
4002 					return (mpte);
4003 			}
4004 		}
4005 		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
4006 		l3 = &l3[pmap_l3_index(va)];
4007 	} else {
4008 		mpte = NULL;
4009 		pde = pmap_pde(kernel_pmap, va, &lvl);
4010 		KASSERT(pde != NULL,
4011 		    ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
4012 		     va));
4013 		KASSERT(lvl == 2,
4014 		    ("pmap_enter_quick_locked: Invalid level %d", lvl));
4015 		l3 = pmap_l2_to_l3(pde, va);
4016 	}
4017 
4018 	/*
4019 	 * Abort if a mapping already exists.
4020 	 */
4021 	if (pmap_load(l3) != 0) {
4022 		if (mpte != NULL)
4023 			mpte->ref_count--;
4024 		return (NULL);
4025 	}
4026 
4027 	/*
4028 	 * Enter on the PV list if part of our managed memory.
4029 	 */
4030 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
4031 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
4032 		if (mpte != NULL)
4033 			pmap_abort_ptp(pmap, va, mpte);
4034 		return (NULL);
4035 	}
4036 
4037 	/*
4038 	 * Increment counters
4039 	 */
4040 	pmap_resident_count_inc(pmap, 1);
4041 
4042 	pa = VM_PAGE_TO_PHYS(m);
4043 	l3_val = pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
4044 	    ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
4045 	if ((prot & VM_PROT_EXECUTE) == 0 ||
4046 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
4047 		l3_val |= ATTR_S1_XN;
4048 	if (va < VM_MAXUSER_ADDRESS)
4049 		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
4050 	else
4051 		l3_val |= ATTR_S1_UXN;
4052 	if (pmap != kernel_pmap)
4053 		l3_val |= ATTR_S1_nG;
4054 
4055 	/*
4056 	 * Now validate mapping with RO protection
4057 	 */
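	/*
	 * Managed pages are entered without ATTR_AF so that this
	 * speculative mapping is not counted as a reference; the access
	 * flag is only set once the page is actually used.
	 */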
4058 	if ((m->oflags & VPO_UNMANAGED) == 0) {
4059 		l3_val |= ATTR_SW_MANAGED;
4060 		l3_val &= ~ATTR_AF;
4061 	}
4062 
4063 	/* Sync icache before the mapping is stored to PTE */
4064 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
4065 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
4066 		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
4067 
4068 	pmap_store(l3, l3_val);
4069 	dsb(ishst);
4070 
4071 	return (mpte);
4072 }
4073 
4074 /*
4075  * On some architectures this maps large physical mmap regions into the
4076  * processor address space as superpages.  This implementation currently
4077  * only asserts that the object is device- or SG-backed.
4078  */
4079 void
4080 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4081     vm_pindex_t pindex, vm_size_t size)
4082 {
4083 
4084 	VM_OBJECT_ASSERT_WLOCKED(object);
4085 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4086 	    ("pmap_object_init_pt: non-device object"));
4087 }
4088 
4089 /*
4090  *	Clear the wired attribute from the mappings for the specified range of
4091  *	addresses in the given pmap.  Every valid mapping within that range
4092  *	must have the wired attribute set.  In contrast, invalid mappings
4093  *	cannot have the wired attribute set, so they are ignored.
4094  *
4095  *	The wired attribute of the page table entry is not a hardware feature,
4096  *	so there is no need to invalidate any TLB entries.
4097  */
4098 void
4099 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4100 {
4101 	vm_offset_t va_next;
4102 	pd_entry_t *l0, *l1, *l2;
4103 	pt_entry_t *l3;
4104 
4105 	PMAP_LOCK(pmap);
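	/*
	 * Walk the range one page table span at a time.  Each va_next
	 * computation rounds sva up to the next L0/L1/L2 boundary; the
	 * "va_next < sva" tests catch wraparound at the top of the address
	 * space, where rounding up would otherwise yield 0 and restart the
	 * walk from the bottom.
	 */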
4106 	for (; sva < eva; sva = va_next) {
4107 		l0 = pmap_l0(pmap, sva);
4108 		if (pmap_load(l0) == 0) {
4109 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4110 			if (va_next < sva)
4111 				va_next = eva;
4112 			continue;
4113 		}
4114 
4115 		l1 = pmap_l0_to_l1(l0, sva);
4116 		if (pmap_load(l1) == 0) {
4117 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4118 			if (va_next < sva)
4119 				va_next = eva;
4120 			continue;
4121 		}
4122 
4123 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4124 		if (va_next < sva)
4125 			va_next = eva;
4126 
4127 		l2 = pmap_l1_to_l2(l1, sva);
4128 		if (pmap_load(l2) == 0)
4129 			continue;
4130 
4131 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
4132 			if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
4133 				panic("pmap_unwire: l2 %#jx is missing "
4134 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
4135 
4136 			/*
4137 			 * Are we unwiring the entire large page?  If not,
4138 			 * demote the mapping and fall through.
4139 			 */
4140 			if (sva + L2_SIZE == va_next && eva >= va_next) {
4141 				pmap_clear_bits(l2, ATTR_SW_WIRED);
4142 				pmap->pm_stats.wired_count -= L2_SIZE /
4143 				    PAGE_SIZE;
4144 				continue;
4145 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
4146 				panic("pmap_unwire: demotion failed");
4147 		}
4148 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
4149 		    ("pmap_unwire: Invalid l2 entry after demotion"));
4150 
4151 		if (va_next > eva)
4152 			va_next = eva;
4153 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
4154 		    sva += L3_SIZE) {
4155 			if (pmap_load(l3) == 0)
4156 				continue;
4157 			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
4158 				panic("pmap_unwire: l3 %#jx is missing "
4159 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
4160 
4161 			/*
4162 			 * ATTR_SW_WIRED must be cleared atomically.  Although
4163 			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
4164 			 * the System MMU may write to the entry concurrently.
4165 			 */
4166 			pmap_clear_bits(l3, ATTR_SW_WIRED);
4167 			pmap->pm_stats.wired_count--;
4168 		}
4169 	}
4170 	PMAP_UNLOCK(pmap);
4171 }
4172 
4173 /*
4174  *	Copy the range specified by src_addr/len
4175  *	from the source map to the range dst_addr/len
4176  *	in the destination map.
4177  *
4178  *	This routine is only advisory and need not do anything.
4179  *
4180  *	Because the executable mappings created by this routine are copied,
4181  *	it should not have to flush the instruction cache.
4182  */
4183 void
4184 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
4185     vm_offset_t src_addr)
4186 {
4187 	struct rwlock *lock;
4188 	pd_entry_t *l0, *l1, *l2, srcptepaddr;
4189 	pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
4190 	vm_offset_t addr, end_addr, va_next;
4191 	vm_page_t dst_l2pg, dstmpte, srcmpte;
4192 
4193 	PMAP_ASSERT_STAGE1(dst_pmap);
4194 	PMAP_ASSERT_STAGE1(src_pmap);
4195 
4196 	if (dst_addr != src_addr)
4197 		return;
4198 	end_addr = src_addr + len;
4199 	lock = NULL;
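	/*
	 * Lock the two pmaps in a consistent (address) order so that two
	 * concurrent copies involving the same pmaps cannot deadlock.
	 */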
4200 	if (dst_pmap < src_pmap) {
4201 		PMAP_LOCK(dst_pmap);
4202 		PMAP_LOCK(src_pmap);
4203 	} else {
4204 		PMAP_LOCK(src_pmap);
4205 		PMAP_LOCK(dst_pmap);
4206 	}
4207 	for (addr = src_addr; addr < end_addr; addr = va_next) {
4208 		l0 = pmap_l0(src_pmap, addr);
4209 		if (pmap_load(l0) == 0) {
4210 			va_next = (addr + L0_SIZE) & ~L0_OFFSET;
4211 			if (va_next < addr)
4212 				va_next = end_addr;
4213 			continue;
4214 		}
4215 		l1 = pmap_l0_to_l1(l0, addr);
4216 		if (pmap_load(l1) == 0) {
4217 			va_next = (addr + L1_SIZE) & ~L1_OFFSET;
4218 			if (va_next < addr)
4219 				va_next = end_addr;
4220 			continue;
4221 		}
4222 		va_next = (addr + L2_SIZE) & ~L2_OFFSET;
4223 		if (va_next < addr)
4224 			va_next = end_addr;
4225 		l2 = pmap_l1_to_l2(l1, addr);
4226 		srcptepaddr = pmap_load(l2);
4227 		if (srcptepaddr == 0)
4228 			continue;
4229 		if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
4230 			if ((addr & L2_OFFSET) != 0 ||
4231 			    addr + L2_SIZE > end_addr)
4232 				continue;
4233 			l2 = pmap_alloc_l2(dst_pmap, addr, &dst_l2pg, NULL);
4234 			if (l2 == NULL)
4235 				break;
4236 			if (pmap_load(l2) == 0 &&
4237 			    ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
4238 			    pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
4239 			    PMAP_ENTER_NORECLAIM, &lock))) {
4240 				mask = ATTR_AF | ATTR_SW_WIRED;
4241 				nbits = 0;
4242 				if ((srcptepaddr & ATTR_SW_DBM) != 0)
4243 					nbits |= ATTR_S1_AP_RW_BIT;
4244 				pmap_store(l2, (srcptepaddr & ~mask) | nbits);
4245 				pmap_resident_count_inc(dst_pmap, L2_SIZE /
4246 				    PAGE_SIZE);
4247 				atomic_add_long(&pmap_l2_mappings, 1);
4248 			} else
4249 				pmap_abort_ptp(dst_pmap, addr, dst_l2pg);
4250 			continue;
4251 		}
4252 		KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
4253 		    ("pmap_copy: invalid L2 entry"));
4254 		srcptepaddr &= ~ATTR_MASK;
4255 		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
4256 		KASSERT(srcmpte->ref_count > 0,
4257 		    ("pmap_copy: source page table page is unused"));
4258 		if (va_next > end_addr)
4259 			va_next = end_addr;
4260 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
4261 		src_pte = &src_pte[pmap_l3_index(addr)];
4262 		dstmpte = NULL;
4263 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
4264 			ptetemp = pmap_load(src_pte);
4265 
4266 			/*
4267 			 * We only virtual copy managed pages.
4268 			 */
4269 			if ((ptetemp & ATTR_SW_MANAGED) == 0)
4270 				continue;
4271 
4272 			if (dstmpte != NULL) {
4273 				KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
4274 				    ("dstmpte pindex/addr mismatch"));
4275 				dstmpte->ref_count++;
4276 			} else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
4277 			    NULL)) == NULL)
4278 				goto out;
4279 			dst_pte = (pt_entry_t *)
4280 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
4281 			dst_pte = &dst_pte[pmap_l3_index(addr)];
4282 			if (pmap_load(dst_pte) == 0 &&
4283 			    pmap_try_insert_pv_entry(dst_pmap, addr,
4284 			    PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
4285 				/*
4286 				 * Clear the wired, modified, and accessed
4287 				 * (referenced) bits during the copy.
4288 				 */
4289 				mask = ATTR_AF | ATTR_SW_WIRED;
4290 				nbits = 0;
4291 				if ((ptetemp & ATTR_SW_DBM) != 0)
4292 					nbits |= ATTR_S1_AP_RW_BIT;
4293 				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
4294 				pmap_resident_count_inc(dst_pmap, 1);
4295 			} else {
4296 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
4297 				goto out;
4298 			}
4299 			/* Have we copied all of the valid mappings? */
4300 			if (dstmpte->ref_count >= srcmpte->ref_count)
4301 				break;
4302 		}
4303 	}
4304 out:
4305 	/*
4306 	 * XXX This barrier may not be needed because the destination pmap is
4307 	 * not active.
4308 	 */
4309 	dsb(ishst);
4310 
4311 	if (lock != NULL)
4312 		rw_wunlock(lock);
4313 	PMAP_UNLOCK(src_pmap);
4314 	PMAP_UNLOCK(dst_pmap);
4315 }
4316 
4317 /*
4318  *	pmap_zero_page zeros the specified hardware page by mapping
4319  *	the page into KVM and using bzero to clear its contents.
4320  */
4321 void
4322 pmap_zero_page(vm_page_t m)
4323 {
4324 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4325 
4326 	pagezero((void *)va);
4327 }
4328 
4329 /*
4330  *	pmap_zero_page_area zeros the specified hardware page by mapping
4331  *	the page into KVM and using bzero to clear its contents.
4332  *
4333  *	off and size may not cover an area beyond a single hardware page.
4334  */
4335 void
4336 pmap_zero_page_area(vm_page_t m, int off, int size)
4337 {
4338 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4339 
4340 	if (off == 0 && size == PAGE_SIZE)
4341 		pagezero((void *)va);
4342 	else
4343 		bzero((char *)va + off, size);
4344 }
4345 
4346 /*
4347  *	pmap_copy_page copies the specified (machine independent)
4348  *	page by mapping the page into virtual memory and using
4349  *	bcopy to copy the page, one machine dependent page at a
4350  *	time.
4351  */
4352 void
4353 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
4354 {
4355 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
4356 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
4357 
4358 	pagecopy((void *)src, (void *)dst);
4359 }
4360 
4361 int unmapped_buf_allowed = 1;
4362 
4363 void
4364 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4365     vm_offset_t b_offset, int xfersize)
4366 {
4367 	void *a_cp, *b_cp;
4368 	vm_page_t m_a, m_b;
4369 	vm_paddr_t p_a, p_b;
4370 	vm_offset_t a_pg_offset, b_pg_offset;
4371 	int cnt;
4372 
4373 	while (xfersize > 0) {
4374 		a_pg_offset = a_offset & PAGE_MASK;
4375 		m_a = ma[a_offset >> PAGE_SHIFT];
4376 		p_a = m_a->phys_addr;
4377 		b_pg_offset = b_offset & PAGE_MASK;
4378 		m_b = mb[b_offset >> PAGE_SHIFT];
4379 		p_b = m_b->phys_addr;
4380 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4381 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4382 		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
4383 			panic("!DMAP a %lx", p_a);
4384 		} else {
4385 			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
4386 		}
4387 		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
4388 			panic("!DMAP b %lx", p_b);
4389 		} else {
4390 			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
4391 		}
4392 		bcopy(a_cp, b_cp, cnt);
4393 		a_offset += cnt;
4394 		b_offset += cnt;
4395 		xfersize -= cnt;
4396 	}
4397 }
4398 
4399 vm_offset_t
4400 pmap_quick_enter_page(vm_page_t m)
4401 {
4402 
4403 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
4404 }
4405 
4406 void
4407 pmap_quick_remove_page(vm_offset_t addr)
4408 {
4409 }
4410 
4411 /*
4412  * Returns true if the pmap's pv is one of the first
4413  * 16 pvs linked to from this page.  This count may
4414  * be changed upwards or downwards in the future; it
4415  * is only necessary that true be returned for a small
4416  * subset of pmaps for proper page aging.
4417  */
4418 boolean_t
4419 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4420 {
4421 	struct md_page *pvh;
4422 	struct rwlock *lock;
4423 	pv_entry_t pv;
4424 	int loops = 0;
4425 	boolean_t rv;
4426 
4427 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4428 	    ("pmap_page_exists_quick: page %p is not managed", m));
4429 	rv = FALSE;
4430 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4431 	rw_rlock(lock);
4432 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4433 		if (PV_PMAP(pv) == pmap) {
4434 			rv = TRUE;
4435 			break;
4436 		}
4437 		loops++;
4438 		if (loops >= 16)
4439 			break;
4440 	}
4441 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4442 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4443 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4444 			if (PV_PMAP(pv) == pmap) {
4445 				rv = TRUE;
4446 				break;
4447 			}
4448 			loops++;
4449 			if (loops >= 16)
4450 				break;
4451 		}
4452 	}
4453 	rw_runlock(lock);
4454 	return (rv);
4455 }
4456 
4457 /*
4458  *	pmap_page_wired_mappings:
4459  *
4460  *	Return the number of managed mappings to the given physical page
4461  *	that are wired.
4462  */
4463 int
4464 pmap_page_wired_mappings(vm_page_t m)
4465 {
4466 	struct rwlock *lock;
4467 	struct md_page *pvh;
4468 	pmap_t pmap;
4469 	pt_entry_t *pte;
4470 	pv_entry_t pv;
4471 	int count, lvl, md_gen, pvh_gen;
4472 
4473 	if ((m->oflags & VPO_UNMANAGED) != 0)
4474 		return (0);
4475 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4476 	rw_rlock(lock);
4477 restart:
4478 	count = 0;
4479 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4480 		pmap = PV_PMAP(pv);
4481 		if (!PMAP_TRYLOCK(pmap)) {
4482 			md_gen = m->md.pv_gen;
4483 			rw_runlock(lock);
4484 			PMAP_LOCK(pmap);
4485 			rw_rlock(lock);
4486 			if (md_gen != m->md.pv_gen) {
4487 				PMAP_UNLOCK(pmap);
4488 				goto restart;
4489 			}
4490 		}
4491 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4492 		if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4493 			count++;
4494 		PMAP_UNLOCK(pmap);
4495 	}
4496 	if ((m->flags & PG_FICTITIOUS) == 0) {
4497 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4498 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4499 			pmap = PV_PMAP(pv);
4500 			if (!PMAP_TRYLOCK(pmap)) {
4501 				md_gen = m->md.pv_gen;
4502 				pvh_gen = pvh->pv_gen;
4503 				rw_runlock(lock);
4504 				PMAP_LOCK(pmap);
4505 				rw_rlock(lock);
4506 				if (md_gen != m->md.pv_gen ||
4507 				    pvh_gen != pvh->pv_gen) {
4508 					PMAP_UNLOCK(pmap);
4509 					goto restart;
4510 				}
4511 			}
4512 			pte = pmap_pte(pmap, pv->pv_va, &lvl);
4513 			if (pte != NULL &&
4514 			    (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4515 				count++;
4516 			PMAP_UNLOCK(pmap);
4517 		}
4518 	}
4519 	rw_runlock(lock);
4520 	return (count);
4521 }
4522 
4523 /*
4524  * Returns true if the given page is mapped individually or as part of
4525  * a 2mpage.  Otherwise, returns false.
4526  */
4527 bool
4528 pmap_page_is_mapped(vm_page_t m)
4529 {
4530 	struct rwlock *lock;
4531 	bool rv;
4532 
4533 	if ((m->oflags & VPO_UNMANAGED) != 0)
4534 		return (false);
4535 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4536 	rw_rlock(lock);
4537 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
4538 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4539 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
4540 	rw_runlock(lock);
4541 	return (rv);
4542 }
4543 
4544 /*
4545  * Destroy all managed, non-wired mappings in the given user-space
4546  * pmap.  This pmap cannot be active on any processor besides the
4547  * caller.
4548  *
4549  * This function cannot be applied to the kernel pmap.  Moreover, it
4550  * is not intended for general use.  It is only to be used during
4551  * process termination.  Consequently, it can be implemented in ways
4552  * that make it faster than pmap_remove().  First, it can more quickly
4553  * destroy mappings by iterating over the pmap's collection of PV
4554  * entries, rather than searching the page table.  Second, it doesn't
4555  * have to test and clear the page table entries atomically, because
4556  * no processor is currently accessing the user address space.  In
4557  * particular, a page table entry's dirty bit won't change state once
4558  * this function starts.
4559  */
4560 void
4561 pmap_remove_pages(pmap_t pmap)
4562 {
4563 	pd_entry_t *pde;
4564 	pt_entry_t *pte, tpte;
4565 	struct spglist free;
4566 	vm_page_t m, ml3, mt;
4567 	pv_entry_t pv;
4568 	struct md_page *pvh;
4569 	struct pv_chunk *pc, *npc;
4570 	struct rwlock *lock;
4571 	int64_t bit;
4572 	uint64_t inuse, bitmask;
4573 	int allfree, field, freed, idx, lvl;
4574 	vm_paddr_t pa;
4575 
4576 	KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
4577 
4578 	lock = NULL;
4579 
4580 	SLIST_INIT(&free);
4581 	PMAP_LOCK(pmap);
4582 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4583 		allfree = 1;
4584 		freed = 0;
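		/*
		 * Each pv_chunk tracks its free slots in the pc_map
		 * bitmaps, so "~pc_map[field] & pc_freemask[field]" yields
		 * the in-use entries, and ffsl() visits each one without a
		 * linear scan of the chunk.
		 */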
4585 		for (field = 0; field < _NPCM; field++) {
4586 			inuse = ~pc->pc_map[field] & pc_freemask[field];
4587 			while (inuse != 0) {
4588 				bit = ffsl(inuse) - 1;
4589 				bitmask = 1UL << bit;
4590 				idx = field * 64 + bit;
4591 				pv = &pc->pc_pventry[idx];
4592 				inuse &= ~bitmask;
4593 
4594 				pde = pmap_pde(pmap, pv->pv_va, &lvl);
4595 				KASSERT(pde != NULL,
4596 				    ("Attempting to remove an unmapped page"));
4597 
4598 				switch (lvl) {
4599 				case 1:
4600 					pte = pmap_l1_to_l2(pde, pv->pv_va);
4601 					tpte = pmap_load(pte);
4602 					KASSERT((tpte & ATTR_DESCR_MASK) ==
4603 					    L2_BLOCK,
4604 					    ("Attempting to remove an invalid "
4605 					    "block: %lx", tpte));
4606 					break;
4607 				case 2:
4608 					pte = pmap_l2_to_l3(pde, pv->pv_va);
4609 					tpte = pmap_load(pte);
4610 					KASSERT((tpte & ATTR_DESCR_MASK) ==
4611 					    L3_PAGE,
4612 					    ("Attempting to remove an invalid "
4613 					     "page: %lx", tpte));
4614 					break;
4615 				default:
4616 					panic(
4617 					    "Invalid page directory level: %d",
4618 					    lvl);
4619 				}
4620 
4621 /*
4622  * We cannot remove wired pages from a process' mapping at this time
4623  */
4624 				if (tpte & ATTR_SW_WIRED) {
4625 					allfree = 0;
4626 					continue;
4627 				}
4628 
4629 				pa = tpte & ~ATTR_MASK;
4630 
4631 				m = PHYS_TO_VM_PAGE(pa);
4632 				KASSERT(m->phys_addr == pa,
4633 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4634 				    m, (uintmax_t)m->phys_addr,
4635 				    (uintmax_t)tpte));
4636 
4637 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4638 				    m < &vm_page_array[vm_page_array_size],
4639 				    ("pmap_remove_pages: bad pte %#jx",
4640 				    (uintmax_t)tpte));
4641 
4642 				/*
4643 				 * Because this pmap is not active on other
4644 				 * processors, the dirty bit cannot have
4645 				 * changed state since we last loaded pte.
4646 				 */
4647 				pmap_clear(pte);
4648 
4649 				/*
4650 				 * Update the vm_page_t clean/reference bits.
4651 				 */
4652 				if (pmap_pte_dirty(pmap, tpte)) {
4653 					switch (lvl) {
4654 					case 1:
4655 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4656 							vm_page_dirty(mt);
4657 						break;
4658 					case 2:
4659 						vm_page_dirty(m);
4660 						break;
4661 					}
4662 				}
4663 
4664 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
4665 
4666 				/* Mark free */
4667 				pc->pc_map[field] |= bitmask;
4668 				switch (lvl) {
4669 				case 1:
4670 					pmap_resident_count_dec(pmap,
4671 					    L2_SIZE / PAGE_SIZE);
4672 					pvh = pa_to_pvh(tpte & ~ATTR_MASK);
4673 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4674 					pvh->pv_gen++;
4675 					if (TAILQ_EMPTY(&pvh->pv_list)) {
4676 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4677 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
4678 							    TAILQ_EMPTY(&mt->md.pv_list))
4679 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
4680 					}
4681 					ml3 = pmap_remove_pt_page(pmap,
4682 					    pv->pv_va);
4683 					if (ml3 != NULL) {
4684 						KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
4685 						    ("pmap_remove_pages: l3 page not promoted"));
4686 						pmap_resident_count_dec(pmap, 1);
4687 						KASSERT(ml3->ref_count == NL3PG,
4688 						    ("pmap_remove_pages: l3 page ref count error"));
4689 						ml3->ref_count = 0;
4690 						pmap_add_delayed_free_list(ml3,
4691 						    &free, FALSE);
4692 					}
4693 					break;
4694 				case 2:
4695 					pmap_resident_count_dec(pmap, 1);
4696 					TAILQ_REMOVE(&m->md.pv_list, pv,
4697 					    pv_next);
4698 					m->md.pv_gen++;
4699 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
4700 					    TAILQ_EMPTY(&m->md.pv_list) &&
4701 					    (m->flags & PG_FICTITIOUS) == 0) {
4702 						pvh = pa_to_pvh(
4703 						    VM_PAGE_TO_PHYS(m));
4704 						if (TAILQ_EMPTY(&pvh->pv_list))
4705 							vm_page_aflag_clear(m,
4706 							    PGA_WRITEABLE);
4707 					}
4708 					break;
4709 				}
4710 				pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
4711 				    &free);
4712 				freed++;
4713 			}
4714 		}
4715 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
4716 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
4717 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
4718 		if (allfree) {
4719 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4720 			free_pv_chunk(pc);
4721 		}
4722 	}
4723 	if (lock != NULL)
4724 		rw_wunlock(lock);
4725 	pmap_invalidate_all(pmap);
4726 	PMAP_UNLOCK(pmap);
4727 	vm_page_free_pages_toq(&free, true);
4728 }
4729 
4730 /*
4731  * This is used to check if a page has been accessed or modified.
4732  */
4733 static boolean_t
4734 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
4735 {
4736 	struct rwlock *lock;
4737 	pv_entry_t pv;
4738 	struct md_page *pvh;
4739 	pt_entry_t *pte, mask, value;
4740 	pmap_t pmap;
4741 	int lvl, md_gen, pvh_gen;
4742 	boolean_t rv;
4743 
4744 	rv = FALSE;
4745 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4746 	rw_rlock(lock);
4747 restart:
4748 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4749 		pmap = PV_PMAP(pv);
4750 		PMAP_ASSERT_STAGE1(pmap);
4751 		if (!PMAP_TRYLOCK(pmap)) {
4752 			md_gen = m->md.pv_gen;
4753 			rw_runlock(lock);
4754 			PMAP_LOCK(pmap);
4755 			rw_rlock(lock);
4756 			if (md_gen != m->md.pv_gen) {
4757 				PMAP_UNLOCK(pmap);
4758 				goto restart;
4759 			}
4760 		}
4761 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4762 		KASSERT(lvl == 3,
4763 		    ("pmap_page_test_mappings: Invalid level %d", lvl));
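		/*
		 * Test the referenced and/or modified state with a single
		 * load: a modified mapping has AP bits equal to
		 * ATTR_S1_AP(ATTR_S1_AP_RW), and a referenced mapping is a
		 * valid L3 page with ATTR_AF set.
		 */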
4764 		mask = 0;
4765 		value = 0;
4766 		if (modified) {
4767 			mask |= ATTR_S1_AP_RW_BIT;
4768 			value |= ATTR_S1_AP(ATTR_S1_AP_RW);
4769 		}
4770 		if (accessed) {
4771 			mask |= ATTR_AF | ATTR_DESCR_MASK;
4772 			value |= ATTR_AF | L3_PAGE;
4773 		}
4774 		rv = (pmap_load(pte) & mask) == value;
4775 		PMAP_UNLOCK(pmap);
4776 		if (rv)
4777 			goto out;
4778 	}
4779 	if ((m->flags & PG_FICTITIOUS) == 0) {
4780 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4781 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4782 			pmap = PV_PMAP(pv);
4783 			PMAP_ASSERT_STAGE1(pmap);
4784 			if (!PMAP_TRYLOCK(pmap)) {
4785 				md_gen = m->md.pv_gen;
4786 				pvh_gen = pvh->pv_gen;
4787 				rw_runlock(lock);
4788 				PMAP_LOCK(pmap);
4789 				rw_rlock(lock);
4790 				if (md_gen != m->md.pv_gen ||
4791 				    pvh_gen != pvh->pv_gen) {
4792 					PMAP_UNLOCK(pmap);
4793 					goto restart;
4794 				}
4795 			}
4796 			pte = pmap_pte(pmap, pv->pv_va, &lvl);
4797 			KASSERT(lvl == 2,
4798 			    ("pmap_page_test_mappings: Invalid level %d", lvl));
4799 			mask = 0;
4800 			value = 0;
4801 			if (modified) {
4802 				mask |= ATTR_S1_AP_RW_BIT;
4803 				value |= ATTR_S1_AP(ATTR_S1_AP_RW);
4804 			}
4805 			if (accessed) {
4806 				mask |= ATTR_AF | ATTR_DESCR_MASK;
4807 				value |= ATTR_AF | L2_BLOCK;
4808 			}
4809 			rv = (pmap_load(pte) & mask) == value;
4810 			PMAP_UNLOCK(pmap);
4811 			if (rv)
4812 				goto out;
4813 		}
4814 	}
4815 out:
4816 	rw_runlock(lock);
4817 	return (rv);
4818 }
4819 
4820 /*
4821  *	pmap_is_modified:
4822  *
4823  *	Return whether or not the specified physical page was modified
4824  *	in any physical maps.
4825  */
4826 boolean_t
4827 pmap_is_modified(vm_page_t m)
4828 {
4829 
4830 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4831 	    ("pmap_is_modified: page %p is not managed", m));
4832 
4833 	/*
4834 	 * If the page is not busied then this check is racy.
4835 	 */
4836 	if (!pmap_page_is_write_mapped(m))
4837 		return (FALSE);
4838 	return (pmap_page_test_mappings(m, FALSE, TRUE));
4839 }
4840 
4841 /*
4842  *	pmap_is_prefaultable:
4843  *
4844  *	Return whether or not the specified virtual address is eligible
4845  *	for prefault.
4846  */
4847 boolean_t
4848 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4849 {
4850 	pt_entry_t *pte;
4851 	boolean_t rv;
4852 	int lvl;
4853 
4854 	rv = FALSE;
4855 	PMAP_LOCK(pmap);
4856 	pte = pmap_pte(pmap, addr, &lvl);
4857 	if (pte != NULL && pmap_load(pte) != 0) {
4858 		rv = TRUE;
4859 	}
4860 	PMAP_UNLOCK(pmap);
4861 	return (rv);
4862 }
4863 
4864 /*
4865  *	pmap_is_referenced:
4866  *
4867  *	Return whether or not the specified physical page was referenced
4868  *	in any physical maps.
4869  */
4870 boolean_t
4871 pmap_is_referenced(vm_page_t m)
4872 {
4873 
4874 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4875 	    ("pmap_is_referenced: page %p is not managed", m));
4876 	return (pmap_page_test_mappings(m, TRUE, FALSE));
4877 }
4878 
4879 /*
4880  * Clear the write and modified bits in each of the given page's mappings.
4881  */
4882 void
4883 pmap_remove_write(vm_page_t m)
4884 {
4885 	struct md_page *pvh;
4886 	pmap_t pmap;
4887 	struct rwlock *lock;
4888 	pv_entry_t next_pv, pv;
4889 	pt_entry_t oldpte, *pte;
4890 	vm_offset_t va;
4891 	int lvl, md_gen, pvh_gen;
4892 
4893 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4894 	    ("pmap_remove_write: page %p is not managed", m));
4895 	vm_page_assert_busied(m);
4896 
4897 	if (!pmap_page_is_write_mapped(m))
4898 		return;
4899 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4900 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4901 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
4902 retry_pv_loop:
4903 	rw_wlock(lock);
4904 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4905 		pmap = PV_PMAP(pv);
4906 		PMAP_ASSERT_STAGE1(pmap);
4907 		if (!PMAP_TRYLOCK(pmap)) {
4908 			pvh_gen = pvh->pv_gen;
4909 			rw_wunlock(lock);
4910 			PMAP_LOCK(pmap);
4911 			rw_wlock(lock);
4912 			if (pvh_gen != pvh->pv_gen) {
4913 				PMAP_UNLOCK(pmap);
4914 				rw_wunlock(lock);
4915 				goto retry_pv_loop;
4916 			}
4917 		}
4918 		va = pv->pv_va;
4919 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4920 		if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
4921 			(void)pmap_demote_l2_locked(pmap, pte, va, &lock);
4922 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4923 		    ("inconsistent pv lock %p %p for page %p",
4924 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4925 		PMAP_UNLOCK(pmap);
4926 	}
4927 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4928 		pmap = PV_PMAP(pv);
4929 		PMAP_ASSERT_STAGE1(pmap);
4930 		if (!PMAP_TRYLOCK(pmap)) {
4931 			pvh_gen = pvh->pv_gen;
4932 			md_gen = m->md.pv_gen;
4933 			rw_wunlock(lock);
4934 			PMAP_LOCK(pmap);
4935 			rw_wlock(lock);
4936 			if (pvh_gen != pvh->pv_gen ||
4937 			    md_gen != m->md.pv_gen) {
4938 				PMAP_UNLOCK(pmap);
4939 				rw_wunlock(lock);
4940 				goto retry_pv_loop;
4941 			}
4942 		}
4943 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4944 		oldpte = pmap_load(pte);
4945 retry:
4946 		if ((oldpte & ATTR_SW_DBM) != 0) {
4947 			if (!atomic_fcmpset_long(pte, &oldpte,
4948 			    (oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM))
4949 				goto retry;
4950 			if ((oldpte & ATTR_S1_AP_RW_BIT) ==
4951 			    ATTR_S1_AP(ATTR_S1_AP_RW))
4952 				vm_page_dirty(m);
4953 			pmap_invalidate_page(pmap, pv->pv_va);
4954 		}
4955 		PMAP_UNLOCK(pmap);
4956 	}
4957 	rw_wunlock(lock);
4958 	vm_page_aflag_clear(m, PGA_WRITEABLE);
4959 }
4960 
4961 /*
4962  *	pmap_ts_referenced:
4963  *
4964  *	Return a count of reference bits for a page, clearing those bits.
4965  *	It is not necessary for every reference bit to be cleared, but it
4966  *	is necessary that 0 only be returned when there are truly no
4967  *	reference bits set.
4968  *
4969  *	As an optimization, update the page's dirty field if a modified bit is
4970  *	found while counting reference bits.  This opportunistic update can be
4971  *	performed at low cost and can eliminate the need for some future calls
4972  *	to pmap_is_modified().  However, since this function stops after
4973  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
4974  *	dirty pages.  Those dirty pages will only be detected by a future call
4975  *	to pmap_is_modified().
4976  */
4977 int
4978 pmap_ts_referenced(vm_page_t m)
4979 {
4980 	struct md_page *pvh;
4981 	pv_entry_t pv, pvf;
4982 	pmap_t pmap;
4983 	struct rwlock *lock;
4984 	pd_entry_t *pde, tpde;
4985 	pt_entry_t *pte, tpte;
4986 	vm_offset_t va;
4987 	vm_paddr_t pa;
4988 	int cleared, lvl, md_gen, not_cleared, pvh_gen;
4989 	struct spglist free;
4990 
4991 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4992 	    ("pmap_ts_referenced: page %p is not managed", m));
4993 	SLIST_INIT(&free);
4994 	cleared = 0;
4995 	pa = VM_PAGE_TO_PHYS(m);
4996 	lock = PHYS_TO_PV_LIST_LOCK(pa);
4997 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
4998 	rw_wlock(lock);
4999 retry:
5000 	not_cleared = 0;
5001 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
5002 		goto small_mappings;
5003 	pv = pvf;
5004 	do {
5005 		if (pvf == NULL)
5006 			pvf = pv;
5007 		pmap = PV_PMAP(pv);
5008 		if (!PMAP_TRYLOCK(pmap)) {
5009 			pvh_gen = pvh->pv_gen;
5010 			rw_wunlock(lock);
5011 			PMAP_LOCK(pmap);
5012 			rw_wlock(lock);
5013 			if (pvh_gen != pvh->pv_gen) {
5014 				PMAP_UNLOCK(pmap);
5015 				goto retry;
5016 			}
5017 		}
5018 		va = pv->pv_va;
5019 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
5020 		KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
5021 		KASSERT(lvl == 1,
5022 		    ("pmap_ts_referenced: invalid pde level %d", lvl));
5023 		tpde = pmap_load(pde);
5024 		KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
5025 		    ("pmap_ts_referenced: found an invalid l1 table"));
5026 		pte = pmap_l1_to_l2(pde, pv->pv_va);
5027 		tpte = pmap_load(pte);
5028 		if (pmap_pte_dirty(pmap, tpte)) {
5029 			/*
5030 			 * Although "tpte" is mapping a 2MB page, because
5031 			 * this function is called at a 4KB page granularity,
5032 			 * we only update the 4KB page under test.
5033 			 */
5034 			vm_page_dirty(m);
5035 		}
5036 
5037 		if ((tpte & ATTR_AF) != 0) {
5038 			/*
5039 			 * Since this reference bit is shared by 512 4KB pages,
5040 			 * it should not be cleared every time it is tested.
5041 			 * Apply a simple "hash" function on the physical page
5042 			 * number, the virtual superpage number, and the pmap
5043 			 * address to select one 4KB page out of the 512 on
5044 			 * which testing the reference bit will result in
5045 			 * clearing that reference bit.  This function is
5046 			 * designed to avoid the selection of the same 4KB page
5047 			 * for every 2MB page mapping.
5048 			 *
5049 			 * On demotion, a mapping that hasn't been referenced
5050 			 * is simply destroyed.  To avoid the possibility of a
5051 			 * subsequent page fault on a demoted wired mapping,
5052 			 * always leave its reference bit set.  Moreover,
5053 			 * since the superpage is wired, the current state of
5054 			 * its reference bit won't affect page replacement.
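			 *
			 * For example, with 4KB pages this test reduces to
			 * the low nine bits of (pa >> 12) ^ (va >> 21) ^
			 * (uintptr_t)pmap: exactly one of the 512 4KB pages
			 * within a 2MB mapping passes, and which one varies
			 * with the virtual address and the pmap.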
5055 			 */
5056 			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
5057 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
5058 			    (tpte & ATTR_SW_WIRED) == 0) {
5059 				pmap_clear_bits(pte, ATTR_AF);
5060 				pmap_invalidate_page(pmap, pv->pv_va);
5061 				cleared++;
5062 			} else
5063 				not_cleared++;
5064 		}
5065 		PMAP_UNLOCK(pmap);
5066 		/* Rotate the PV list if it has more than one entry. */
5067 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
5068 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5069 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5070 			pvh->pv_gen++;
5071 		}
5072 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
5073 			goto out;
5074 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
5075 small_mappings:
5076 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
5077 		goto out;
5078 	pv = pvf;
5079 	do {
5080 		if (pvf == NULL)
5081 			pvf = pv;
5082 		pmap = PV_PMAP(pv);
5083 		if (!PMAP_TRYLOCK(pmap)) {
5084 			pvh_gen = pvh->pv_gen;
5085 			md_gen = m->md.pv_gen;
5086 			rw_wunlock(lock);
5087 			PMAP_LOCK(pmap);
5088 			rw_wlock(lock);
5089 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5090 				PMAP_UNLOCK(pmap);
5091 				goto retry;
5092 			}
5093 		}
5094 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
5095 		KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
5096 		KASSERT(lvl == 2,
5097 		    ("pmap_ts_referenced: invalid pde level %d", lvl));
5098 		tpde = pmap_load(pde);
5099 		KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
5100 		    ("pmap_ts_referenced: found an invalid l2 table"));
5101 		pte = pmap_l2_to_l3(pde, pv->pv_va);
5102 		tpte = pmap_load(pte);
5103 		if (pmap_pte_dirty(pmap, tpte))
5104 			vm_page_dirty(m);
5105 		if ((tpte & ATTR_AF) != 0) {
5106 			if ((tpte & ATTR_SW_WIRED) == 0) {
5107 				pmap_clear_bits(pte, ATTR_AF);
5108 				pmap_invalidate_page(pmap, pv->pv_va);
5109 				cleared++;
5110 			} else
5111 				not_cleared++;
5112 		}
5113 		PMAP_UNLOCK(pmap);
5114 		/* Rotate the PV list if it has more than one entry. */
5115 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
5116 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5117 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5118 			m->md.pv_gen++;
5119 		}
5120 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
5121 	    not_cleared < PMAP_TS_REFERENCED_MAX);
5122 out:
5123 	rw_wunlock(lock);
5124 	vm_page_free_pages_toq(&free, true);
5125 	return (cleared + not_cleared);
5126 }
5127 
5128 /*
5129  *	Apply the given advice to the specified range of addresses within the
5130  *	given pmap.  Depending on the advice, clear the referenced and/or
5131  *	modified flags in each mapping and set the mapped page's dirty field.
5132  */
5133 void
5134 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
5135 {
5136 	struct rwlock *lock;
5137 	vm_offset_t va, va_next;
5138 	vm_page_t m;
5139 	pd_entry_t *l0, *l1, *l2, oldl2;
5140 	pt_entry_t *l3, oldl3;
5141 
5142 	PMAP_ASSERT_STAGE1(pmap);
5143 
5144 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
5145 		return;
5146 
5147 	PMAP_LOCK(pmap);
5148 	for (; sva < eva; sva = va_next) {
5149 		l0 = pmap_l0(pmap, sva);
5150 		if (pmap_load(l0) == 0) {
5151 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
5152 			if (va_next < sva)
5153 				va_next = eva;
5154 			continue;
5155 		}
5156 		l1 = pmap_l0_to_l1(l0, sva);
5157 		if (pmap_load(l1) == 0) {
5158 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
5159 			if (va_next < sva)
5160 				va_next = eva;
5161 			continue;
5162 		}
5163 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
5164 		if (va_next < sva)
5165 			va_next = eva;
5166 		l2 = pmap_l1_to_l2(l1, sva);
5167 		oldl2 = pmap_load(l2);
5168 		if (oldl2 == 0)
5169 			continue;
5170 		if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) {
5171 			if ((oldl2 & ATTR_SW_MANAGED) == 0)
5172 				continue;
5173 			lock = NULL;
5174 			if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) {
5175 				if (lock != NULL)
5176 					rw_wunlock(lock);
5177 
5178 				/*
5179 				 * The 2MB page mapping was destroyed.
5180 				 */
5181 				continue;
5182 			}
5183 
5184 			/*
5185 			 * Unless the page mappings are wired, remove the
5186 			 * mapping to a single page so that a subsequent
5187 			 * access may repromote.  Choosing the last page
5188 			 * within the address range [sva, min(va_next, eva))
5189 			 * generally results in more repromotions.  Since the
5190 			 * underlying page table page is fully populated, this
5191 			 * removal never frees a page table page.
5192 			 */
5193 			if ((oldl2 & ATTR_SW_WIRED) == 0) {
5194 				va = eva;
5195 				if (va > va_next)
5196 					va = va_next;
5197 				va -= PAGE_SIZE;
5198 				KASSERT(va >= sva,
5199 				    ("pmap_advise: no address gap"));
5200 				l3 = pmap_l2_to_l3(l2, va);
5201 				KASSERT(pmap_load(l3) != 0,
5202 				    ("pmap_advise: invalid PTE"));
5203 				pmap_remove_l3(pmap, l3, va, pmap_load(l2),
5204 				    NULL, &lock);
5205 			}
5206 			if (lock != NULL)
5207 				rw_wunlock(lock);
5208 		}
5209 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
5210 		    ("pmap_advise: invalid L2 entry after demotion"));
5211 		if (va_next > eva)
5212 			va_next = eva;
5213 		va = va_next;
5214 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
5215 		    sva += L3_SIZE) {
5216 			oldl3 = pmap_load(l3);
5217 			if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
5218 			    (ATTR_SW_MANAGED | L3_PAGE))
5219 				goto maybe_invlrng;
5220 			else if (pmap_pte_dirty(pmap, oldl3)) {
5221 				if (advice == MADV_DONTNEED) {
5222 					/*
5223 					 * Future calls to pmap_is_modified()
5224 					 * can be avoided by making the page
5225 					 * dirty now.
5226 					 */
5227 					m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK);
5228 					vm_page_dirty(m);
5229 				}
5230 				while (!atomic_fcmpset_long(l3, &oldl3,
5231 				    (oldl3 & ~ATTR_AF) |
5232 				    ATTR_S1_AP(ATTR_S1_AP_RO)))
5233 					cpu_spinwait();
5234 			} else if ((oldl3 & ATTR_AF) != 0)
5235 				pmap_clear_bits(l3, ATTR_AF);
5236 			else
5237 				goto maybe_invlrng;
5238 			if (va == va_next)
5239 				va = sva;
5240 			continue;
5241 maybe_invlrng:
5242 			if (va != va_next) {
5243 				pmap_invalidate_range(pmap, va, sva);
5244 				va = va_next;
5245 			}
5246 		}
5247 		if (va != va_next)
5248 			pmap_invalidate_range(pmap, va, sva);
5249 	}
5250 	PMAP_UNLOCK(pmap);
5251 }
5252 
5253 /*
5254  *	Clear the modify bits on the specified physical page.
5255  */
5256 void
5257 pmap_clear_modify(vm_page_t m)
5258 {
5259 	struct md_page *pvh;
5260 	struct rwlock *lock;
5261 	pmap_t pmap;
5262 	pv_entry_t next_pv, pv;
5263 	pd_entry_t *l2, oldl2;
5264 	pt_entry_t *l3, oldl3;
5265 	vm_offset_t va;
5266 	int md_gen, pvh_gen;
5267 
5268 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5269 	    ("pmap_clear_modify: page %p is not managed", m));
5270 	vm_page_assert_busied(m);
5271 
5272 	if (!pmap_page_is_write_mapped(m))
5273 		return;
5274 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5275 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
5276 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5277 	rw_wlock(lock);
5278 restart:
5279 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5280 		pmap = PV_PMAP(pv);
5281 		PMAP_ASSERT_STAGE1(pmap);
5282 		if (!PMAP_TRYLOCK(pmap)) {
5283 			pvh_gen = pvh->pv_gen;
5284 			rw_wunlock(lock);
5285 			PMAP_LOCK(pmap);
5286 			rw_wlock(lock);
5287 			if (pvh_gen != pvh->pv_gen) {
5288 				PMAP_UNLOCK(pmap);
5289 				goto restart;
5290 			}
5291 		}
5292 		va = pv->pv_va;
5293 		l2 = pmap_l2(pmap, va);
5294 		oldl2 = pmap_load(l2);
5295 		/* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
5296 		if ((oldl2 & ATTR_SW_DBM) != 0 &&
5297 		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
5298 		    (oldl2 & ATTR_SW_WIRED) == 0) {
5299 			/*
5300 			 * Write protect the mapping to a single page so that
5301 			 * a subsequent write access may repromote.
5302 			 */
5303 			va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK);
5304 			l3 = pmap_l2_to_l3(l2, va);
5305 			oldl3 = pmap_load(l3);
5306 			while (!atomic_fcmpset_long(l3, &oldl3,
5307 			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
5308 				cpu_spinwait();
5309 			vm_page_dirty(m);
5310 			pmap_invalidate_page(pmap, va);
5311 		}
5312 		PMAP_UNLOCK(pmap);
5313 	}
5314 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5315 		pmap = PV_PMAP(pv);
5316 		PMAP_ASSERT_STAGE1(pmap);
5317 		if (!PMAP_TRYLOCK(pmap)) {
5318 			md_gen = m->md.pv_gen;
5319 			pvh_gen = pvh->pv_gen;
5320 			rw_wunlock(lock);
5321 			PMAP_LOCK(pmap);
5322 			rw_wlock(lock);
5323 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5324 				PMAP_UNLOCK(pmap);
5325 				goto restart;
5326 			}
5327 		}
5328 		l2 = pmap_l2(pmap, pv->pv_va);
5329 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
5330 		oldl3 = pmap_load(l3);
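		/*
		 * The mapping is dirty when it is writable (AP == RW) and
		 * ATTR_SW_DBM is set; write-protect it to clear the modified
		 * state.
		 */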
5331 		if (pmap_l3_valid(oldl3) &&
5332 		    (oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){
5333 			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
5334 			pmap_invalidate_page(pmap, pv->pv_va);
5335 		}
5336 		PMAP_UNLOCK(pmap);
5337 	}
5338 	rw_wunlock(lock);
5339 }
5340 
5341 void *
5342 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
5343 {
5344 	struct pmap_preinit_mapping *ppim;
5345 	vm_offset_t va, offset;
5346 	pd_entry_t *pde;
5347 	pt_entry_t *l2;
5348 	int i, lvl, l2_blocks, free_l2_count, start_idx;
5349 
5350 	if (!vm_initialized) {
5351 		/*
5352 		 * No L3 ptables so map entire L2 blocks where start VA is:
5353 		 * 	preinit_map_va + start_idx * L2_SIZE
5354 		 * There may be duplicate mappings (multiple VA -> same PA) but
5355 		 * ARM64 dcache is always PIPT so that's acceptable.
5356 		 */
5357 		if (size == 0)
5358 			return (NULL);
5359 
5360 		/* Calculate how many L2 blocks are needed for the mapping */
5361 		l2_blocks = (roundup2(pa + size, L2_SIZE) -
5362 		    rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
5363 
5364 		offset = pa & L2_OFFSET;
5365 
5366 		if (preinit_map_va == 0)
5367 			return (NULL);
5368 
5369 		/* Map 2MiB L2 blocks from reserved VA space */
5370 
5371 		free_l2_count = 0;
5372 		start_idx = -1;
5373 		/* Find enough free contiguous VA space */
5374 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5375 			ppim = pmap_preinit_mapping + i;
5376 			if (free_l2_count > 0 && ppim->pa != 0) {
5377 				/* Not enough space here */
5378 				free_l2_count = 0;
5379 				start_idx = -1;
5380 				continue;
5381 			}
5382 
5383 			if (ppim->pa == 0) {
5384 				/* Free L2 block */
5385 				if (start_idx == -1)
5386 					start_idx = i;
5387 				free_l2_count++;
5388 				if (free_l2_count == l2_blocks)
5389 					break;
5390 			}
5391 		}
5392 		if (free_l2_count != l2_blocks)
5393 			panic("%s: too many preinit mappings", __func__);
5394 
5395 		va = preinit_map_va + (start_idx * L2_SIZE);
5396 		for (i = start_idx; i < start_idx + l2_blocks; i++) {
5397 			/* Mark entries as allocated */
5398 			ppim = pmap_preinit_mapping + i;
5399 			ppim->pa = pa;
5400 			ppim->va = va + offset;
5401 			ppim->size = size;
5402 		}
5403 
5404 		/* Map L2 blocks */
5405 		pa = rounddown2(pa, L2_SIZE);
5406 		for (i = 0; i < l2_blocks; i++) {
5407 			pde = pmap_pde(kernel_pmap, va, &lvl);
5408 			KASSERT(pde != NULL,
5409 			    ("pmap_mapbios: Invalid page entry, va: 0x%lx",
5410 			    va));
5411 			KASSERT(lvl == 1,
5412 			    ("pmap_mapbios: Invalid level %d", lvl));
5413 
5414 			/* Insert L2_BLOCK */
5415 			l2 = pmap_l1_to_l2(pde, va);
5416 			pmap_load_store(l2,
5417 			    pa | ATTR_DEFAULT | ATTR_S1_XN |
5418 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
5419 
5420 			va += L2_SIZE;
5421 			pa += L2_SIZE;
5422 		}
5423 		pmap_invalidate_all(kernel_pmap);
5424 
5425 		va = preinit_map_va + (start_idx * L2_SIZE);
5426 
5427 	} else {
5428 		/* kva_alloc may be used to map the pages */
5429 		offset = pa & PAGE_MASK;
5430 		size = round_page(offset + size);
5431 
5432 		va = kva_alloc(size);
5433 		if (va == 0)
5434 			panic("%s: Couldn't allocate KVA", __func__);
5435 
5436 		pde = pmap_pde(kernel_pmap, va, &lvl);
5437 		KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
5438 
5439 		/* An L3 table is already linked here, so pmap_kenter() can be used */
5440 		va = trunc_page(va);
5441 		pa = trunc_page(pa);
5442 		pmap_kenter(va, size, pa, VM_MEMATTR_WRITE_BACK);
5443 	}
5444 
5445 	return ((void *)(va + offset));
5446 }
5447 
5448 void
5449 pmap_unmapbios(vm_offset_t va, vm_size_t size)
5450 {
5451 	struct pmap_preinit_mapping *ppim;
5452 	vm_offset_t offset, tmpsize, va_trunc;
5453 	pd_entry_t *pde;
5454 	pt_entry_t *l2;
5455 	int i, lvl, l2_blocks, block;
5456 	bool preinit_map;
5457 
5458 	l2_blocks =
5459 	   (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
5460 	KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
5461 
5462 	/* Remove preinit mapping */
5463 	preinit_map = false;
5464 	block = 0;
5465 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5466 		ppim = pmap_preinit_mapping + i;
5467 		if (ppim->va == va) {
5468 			KASSERT(ppim->size == size,
5469 			    ("pmap_unmapbios: size mismatch"));
5470 			ppim->va = 0;
5471 			ppim->pa = 0;
5472 			ppim->size = 0;
5473 			preinit_map = true;
5474 			offset = block * L2_SIZE;
5475 			va_trunc = rounddown2(va, L2_SIZE) + offset;
5476 
5477 			/* Remove L2_BLOCK */
5478 			pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
5479 			KASSERT(pde != NULL,
5480 			    ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
5481 			    va_trunc));
5482 			l2 = pmap_l1_to_l2(pde, va_trunc);
5483 			pmap_clear(l2);
5484 
5485 			if (block == (l2_blocks - 1))
5486 				break;
5487 			block++;
5488 		}
5489 	}
5490 	if (preinit_map) {
5491 		pmap_invalidate_all(kernel_pmap);
5492 		return;
5493 	}
5494 
5495 	/* Unmap the pages reserved with kva_alloc. */
5496 	if (vm_initialized) {
5497 		offset = va & PAGE_MASK;
5498 		size = round_page(offset + size);
5499 		va = trunc_page(va);
5500 
5501 		pde = pmap_pde(kernel_pmap, va, &lvl);
5502 		KASSERT(pde != NULL,
5503 		    ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va));
5504 		KASSERT(lvl == 2, ("pmap_unmapbios: Invalid level %d", lvl));
5505 
5506 		/* Unmap and invalidate the pages */
5507 		for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
5508 			pmap_kremove(va + tmpsize);
5509 
5510 		kva_free(va, size);
5511 	}
5512 }
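
/*
 * Example (hypothetical names only): firmware-table code might use the
 * pair of functions above along the lines of
 *
 *	tbl = pmap_mapbios(tbl_pa, tbl_len);
 *	...parse the table...
 *	pmap_unmapbios((vm_offset_t)tbl, tbl_len);
 *
 * where "tbl_pa" and "tbl_len" come from the firmware and are purely
 * illustrative.
 */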
5513 
5514 /*
5515  * Sets the memory attribute for the specified page.
5516  */
5517 void
5518 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5519 {
5520 
5521 	m->md.pv_memattr = ma;
5522 
5523 	/*
5524 	 * If "m" is a normal page, update its direct mapping.  This update
5525 	 * can be relied upon to perform any cache operations that are
5526 	 * required for data coherence.
5527 	 */
5528 	if ((m->flags & PG_FICTITIOUS) == 0 &&
5529 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
5530 	    m->md.pv_memattr) != 0)
5531 		panic("memory attribute change on the direct map failed");
5532 }
5533 
5534 /*
5535  * Changes the specified virtual address range's memory type to that given by
5536  * the parameter "mode".  The specified virtual address range must be
5537  * completely contained within either the direct map or the kernel map.  If
5538  * the virtual address range is contained within the kernel map, then the
5539  * memory type for each of the corresponding ranges of the direct map is also
5540  * changed.  (The corresponding ranges of the direct map are those ranges that
5541  * map the same physical pages as the specified virtual address range.)  These
5542  * changes to the direct map are necessary because the hardware may not
5543  * preserve coherency when two or more mappings to the same physical page
5544  * have mismatched memory attributes.
5545  *
5546  * Returns zero if the change completed successfully, and either EINVAL or
5547  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
5548  * of the virtual address range was not mapped, and ENOMEM is returned if
5549  * there was insufficient memory available to complete the change.  In the
5550  * latter case, the memory type may have been changed on some part of the
5551  * virtual address range or the direct map.
5552  */
5553 int
5554 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
5555 {
5556 	int error;
5557 
5558 	PMAP_LOCK(kernel_pmap);
5559 	error = pmap_change_attr_locked(va, size, mode);
5560 	PMAP_UNLOCK(kernel_pmap);
5561 	return (error);
5562 }
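
/*
 * Example (hypothetical): code that owns a direct-map region could switch
 * its attributes with something along the lines of
 *
 *	error = pmap_change_attr(PHYS_TO_DMAP(buf_pa), buf_size,
 *	    VM_MEMATTR_UNCACHEABLE);
 *
 * "buf_pa" and "buf_size" are illustrative names only; the range must lie
 * entirely within the direct map or the kernel map, as described above.
 */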
5563 
5564 static int
5565 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
5566 {
5567 	vm_offset_t base, offset, tmpva;
5568 	pt_entry_t l3, *pte, *newpte;
5569 	int lvl;
5570 
5571 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
5572 	base = trunc_page(va);
5573 	offset = va & PAGE_MASK;
5574 	size = round_page(offset + size);
5575 
5576 	if (!VIRT_IN_DMAP(base) &&
5577 	    !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
5578 		return (EINVAL);
5579 
5580 	for (tmpva = base; tmpva < base + size; ) {
5581 		pte = pmap_pte(kernel_pmap, tmpva, &lvl);
5582 		if (pte == NULL)
5583 			return (EINVAL);
5584 
5585 		if ((pmap_load(pte) & ATTR_S1_IDX_MASK) == ATTR_S1_IDX(mode)) {
5586 			/*
5587 			 * We already have the correct attribute,
5588 			 * ignore this entry.
5589 			 */
5590 			switch (lvl) {
5591 			default:
5592 				panic("Invalid DMAP table level: %d\n", lvl);
5593 			case 1:
5594 				tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
5595 				break;
5596 			case 2:
5597 				tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
5598 				break;
5599 			case 3:
5600 				tmpva += PAGE_SIZE;
5601 				break;
5602 			}
5603 		} else {
5604 			/*
5605 			 * Split the entry into a level 3 table, then
5606 			 * set the new attribute.
5607 			 */
5608 			switch (lvl) {
5609 			default:
5610 				panic("Invalid DMAP table level: %d\n", lvl);
5611 			case 1:
5612 				newpte = pmap_demote_l1(kernel_pmap, pte,
5613 				    tmpva & ~L1_OFFSET);
5614 				if (newpte == NULL)
5615 					return (EINVAL);
5616 				pte = pmap_l1_to_l2(pte, tmpva);
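				/* FALLTHROUGH */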
5617 			case 2:
5618 				newpte = pmap_demote_l2(kernel_pmap, pte,
5619 				    tmpva);
5620 				if (newpte == NULL)
5621 					return (EINVAL);
5622 				pte = pmap_l2_to_l3(pte, tmpva);
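				/* FALLTHROUGH */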
5623 			case 3:
5624 				/* Update the entry */
5625 				l3 = pmap_load(pte);
5626 				l3 &= ~ATTR_S1_IDX_MASK;
5627 				l3 |= ATTR_S1_IDX(mode);
5628 				if (mode == VM_MEMATTR_DEVICE)
5629 					l3 |= ATTR_S1_XN;
5630 
5631 				pmap_update_entry(kernel_pmap, pte, l3, tmpva,
5632 				    PAGE_SIZE);
5633 
5634 				/*
5635 				 * If moving to a non-cacheable entry flush
5636 				 * the cache.
5637 				 */
5638 				if (mode == VM_MEMATTR_UNCACHEABLE)
5639 					cpu_dcache_wbinv_range(tmpva, L3_SIZE);
5640 
5641 				break;
5642 			}
5643 			tmpva += PAGE_SIZE;
5644 		}
5645 	}
5646 
5647 	return (0);
5648 }
5649 
5650 /*
5651  * Create an L2 table to map all addresses within an L1 mapping.
5652  */
5653 static pt_entry_t *
5654 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
5655 {
5656 	pt_entry_t *l2, newl2, oldl1;
5657 	vm_offset_t tmpl1;
5658 	vm_paddr_t l2phys, phys;
5659 	vm_page_t ml2;
5660 	int i;
5661 
5662 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5663 	oldl1 = pmap_load(l1);
5664 	KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
5665 	    ("pmap_demote_l1: Demoting a non-block entry"));
5666 	KASSERT((va & L1_OFFSET) == 0,
5667 	    ("pmap_demote_l1: Invalid virtual address %#lx", va));
5668 	KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
5669 	    ("pmap_demote_l1: Level 1 table shouldn't be managed"));
5670 
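	/*
	 * If the L1 entry lies within the VA range that it maps, reserve KVA
	 * now for a temporary mapping of the entry's page so that the entry
	 * remains accessible while the old mapping is broken (see the
	 * pmap_kenter() below).
	 */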
5671 	tmpl1 = 0;
5672 	if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
5673 		tmpl1 = kva_alloc(PAGE_SIZE);
5674 		if (tmpl1 == 0)
5675 			return (NULL);
5676 	}
5677 
5678 	if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
5679 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
5680 		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
5681 		    " in pmap %p", va, pmap);
		/* Avoid leaking the temporary KVA reserved above. */
		if (tmpl1 != 0)
			kva_free(tmpl1, PAGE_SIZE);
5682 		return (NULL);
5683 	}
5684 
5685 	l2phys = VM_PAGE_TO_PHYS(ml2);
5686 	l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
5687 
5688 	/* Address the range points at */
5689 	phys = oldl1 & ~ATTR_MASK;
5690 	/* The attributes from the old l1 table to be copied */
5691 	newl2 = oldl1 & ATTR_MASK;
5692 
5693 	/* Create the new entries */
5694 	for (i = 0; i < Ln_ENTRIES; i++) {
5695 		l2[i] = newl2 | phys;
5696 		phys += L2_SIZE;
5697 	}
5698 	KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
5699 	    ("Invalid l2 page (%lx != %lx)", l2[0],
5700 	    (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
5701 
5702 	if (tmpl1 != 0) {
5703 		pmap_kenter(tmpl1, PAGE_SIZE,
5704 		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
5705 		    VM_MEMATTR_WRITE_BACK);
5706 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
5707 	}
5708 
5709 	pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
5710 
5711 	if (tmpl1 != 0) {
5712 		pmap_kremove(tmpl1);
5713 		kva_free(tmpl1, PAGE_SIZE);
5714 	}
5715 
5716 	return (l2);
5717 }
5718 
5719 static void
5720 pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
5721 {
5722 	pt_entry_t *l3;
5723 
5724 	for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
5725 		*l3 = newl3;
5726 		newl3 += L3_SIZE;
5727 	}
5728 }
5729 
5730 static void
5731 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
5732     struct rwlock **lockp)
5733 {
5734 	struct spglist free;
5735 
5736 	SLIST_INIT(&free);
5737 	(void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
5738 	    lockp);
5739 	vm_page_free_pages_toq(&free, true);
5740 }
5741 
5742 /*
5743  * Create an L3 table to map all addresses within an L2 mapping.
5744  */
5745 static pt_entry_t *
5746 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
5747     struct rwlock **lockp)
5748 {
5749 	pt_entry_t *l3, newl3, oldl2;
5750 	vm_offset_t tmpl2;
5751 	vm_paddr_t l3phys;
5752 	vm_page_t ml3;
5753 
5754 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5755 	PMAP_ASSERT_STAGE1(pmap);
5756 	l3 = NULL;
5757 	oldl2 = pmap_load(l2);
5758 	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
5759 	    ("pmap_demote_l2: Demoting a non-block entry"));
5760 	va &= ~L2_OFFSET;
5761 
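	/*
	 * As in pmap_demote_l1(), reserve KVA for a temporary mapping of the
	 * page containing the L2 entry in case the entry maps itself.
	 */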
5762 	tmpl2 = 0;
5763 	if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
5764 		tmpl2 = kva_alloc(PAGE_SIZE);
5765 		if (tmpl2 == 0)
5766 			return (NULL);
5767 	}
5768 
5769 	/*
5770 	 * Invalidate the 2MB page mapping and return "failure" if the
5771 	 * mapping was never accessed.
5772 	 */
5773 	if ((oldl2 & ATTR_AF) == 0) {
5774 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5775 		    ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
5776 		pmap_demote_l2_abort(pmap, va, l2, lockp);
5777 		CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
5778 		    va, pmap);
5779 		goto fail;
5780 	}
5781 
5782 	if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
5783 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5784 		    ("pmap_demote_l2: page table page for a wired mapping"
5785 		    " is missing"));
5786 
5787 		/*
5788 		 * If the page table page is missing and the mapping
5789 		 * is for a kernel address, the mapping must belong to
5790 		 * the direct map.  Page table pages are preallocated
5791 		 * for every other part of the kernel address space,
5792 		 * so the direct map region is the only part of the
5793 		 * kernel address space that must be handled here.
5794 		 */
5795 		KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
5796 		    ("pmap_demote_l2: No saved mpte for va %#lx", va));
5797 
5798 		/*
5799 		 * If the 2MB page mapping belongs to the direct map
5800 		 * region of the kernel's address space, then the page
5801 		 * allocation request specifies the highest possible
5802 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
5803 		 * priority is normal.
5804 		 */
5805 		ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
5806 		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
5807 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5808 
5809 		/*
5810 		 * If the allocation of the new page table page fails,
5811 		 * invalidate the 2MB page mapping and return "failure".
5812 		 */
5813 		if (ml3 == NULL) {
5814 			pmap_demote_l2_abort(pmap, va, l2, lockp);
5815 			CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
5816 			    " in pmap %p", va, pmap);
5817 			goto fail;
5818 		}
5819 
5820 		if (va < VM_MAXUSER_ADDRESS) {
5821 			ml3->ref_count = NL3PG;
5822 			pmap_resident_count_inc(pmap, 1);
5823 		}
5824 	}
5825 	l3phys = VM_PAGE_TO_PHYS(ml3);
5826 	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
5827 	newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
5828 	KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
5829 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
5830 	    ("pmap_demote_l2: L2 entry is writeable but not dirty"));
5831 
5832 	/*
5833 	 * If the page table page is not leftover from an earlier promotion,
5834 	 * or the mapping attributes have changed, (re)initialize the L3 table.
5835 	 *
5836 	 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
5837 	 * performs a dsb().  That dsb() ensures that the stores for filling
5838 	 * "l3" are visible before "l3" is added to the page table.
5839 	 */
5840 	if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
5841 		pmap_fill_l3(l3, newl3);
5842 
5843 	/*
5844 	 * Map the temporary page so we don't lose access to the l2 table.
5845 	 */
5846 	if (tmpl2 != 0) {
5847 		pmap_kenter(tmpl2, PAGE_SIZE,
5848 		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
5849 		    VM_MEMATTR_WRITE_BACK);
5850 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
5851 	}
5852 
5853 	/*
5854 	 * The spare PV entries must be reserved prior to demoting the
5855 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
5856 	 * of the L2 and the PV lists will be inconsistent, which can result
5857 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
5858 	 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
5859 	 * PV entry for the 2MB page mapping that is being demoted.
5860 	 */
5861 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
5862 		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
5863 
5864 	/*
5865 	 * Pass PAGE_SIZE so that a single TLB invalidation is performed on
5866 	 * the 2MB page mapping.
5867 	 */
5868 	pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
5869 
5870 	/*
5871 	 * Demote the PV entry.
5872 	 */
5873 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
5874 		pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
5875 
5876 	atomic_add_long(&pmap_l2_demotions, 1);
5877 	CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
5878 	    " in pmap %p %lx", va, pmap, l3[0]);
5879 
5880 fail:
5881 	if (tmpl2 != 0) {
5882 		pmap_kremove(tmpl2);
5883 		kva_free(tmpl2, PAGE_SIZE);
5884 	}
5885 
5886 	return (l3);
5888 }
5889 
5890 static pt_entry_t *
5891 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
5892 {
5893 	struct rwlock *lock;
5894 	pt_entry_t *l3;
5895 
5896 	lock = NULL;
5897 	l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
5898 	if (lock != NULL)
5899 		rw_wunlock(lock);
5900 	return (l3);
5901 }
5902 
5903 /*
5904  * Perform the pmap work for mincore(2).  If the page is not both referenced and
5905  * modified by this pmap, returns its physical address so that the caller can
5906  * find other mappings.
5907  */
5908 int
5909 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
5910 {
5911 	pt_entry_t *pte, tpte;
5912 	vm_paddr_t mask, pa;
5913 	int lvl, val;
5914 	bool managed;
5915 
5916 	PMAP_ASSERT_STAGE1(pmap);
5917 	PMAP_LOCK(pmap);
5918 	pte = pmap_pte(pmap, addr, &lvl);
5919 	if (pte != NULL) {
5920 		tpte = pmap_load(pte);
5921 
5922 		switch (lvl) {
5923 		case 3:
5924 			mask = L3_OFFSET;
5925 			break;
5926 		case 2:
5927 			mask = L2_OFFSET;
5928 			break;
5929 		case 1:
5930 			mask = L1_OFFSET;
5931 			break;
5932 		default:
5933 			panic("pmap_mincore: invalid level %d", lvl);
5934 		}
5935 
5936 		managed = (tpte & ATTR_SW_MANAGED) != 0;
5937 		val = MINCORE_INCORE;
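		/* An L1 or L2 mapping is a superpage. */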
5938 		if (lvl != 3)
5939 			val |= MINCORE_SUPER;
5940 		if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
5941 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
5942 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5943 		if ((tpte & ATTR_AF) == ATTR_AF)
5944 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5945 
5946 		pa = (tpte & ~ATTR_MASK) | (addr & mask);
5947 	} else {
5948 		managed = false;
5949 		val = 0;
5950 	}
5951 
5952 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5953 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
5954 		*pap = pa;
5955 	}
5956 	PMAP_UNLOCK(pmap);
5957 	return (val);
5958 }
5959 
5960 /*
5961  * Garbage collect every ASID that is neither active on a processor nor
5962  * reserved.
5963  */
5964 static void
5965 pmap_reset_asid_set(pmap_t pmap)
5966 {
5967 	pmap_t curpmap;
5968 	int asid, cpuid, epoch;
5969 	struct asid_set *set;
5970 	enum pmap_stage stage;
5971 
5972 	set = pmap->pm_asid_set;
5973 	stage = pmap->pm_stage;
5976 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
5977 	mtx_assert(&set->asid_set_mutex, MA_OWNED);
5978 
5979 	/*
5980 	 * Ensure that the store to asid_epoch is globally visible before the
5981 	 * loads from pc_curpmap are performed.
5982 	 */
5983 	epoch = set->asid_epoch + 1;
5984 	if (epoch == INT_MAX)
5985 		epoch = 0;
5986 	set->asid_epoch = epoch;
5987 	dsb(ishst);
5988 	if (stage == PM_STAGE1) {
5989 		__asm __volatile("tlbi vmalle1is");
5990 	} else {
5991 		KASSERT(pmap_clean_stage2_tlbi != NULL,
5992 		    ("%s: Unset stage 2 tlb invalidation callback\n",
5993 		    __func__));
5994 		pmap_clean_stage2_tlbi();
5995 	}
5996 	dsb(ish);
5997 	bit_nclear(set->asid_set, ASID_FIRST_AVAILABLE,
5998 	    set->asid_set_size - 1);
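	/*
	 * Re-mark as allocated the ASIDs that are still in use by pmaps
	 * active on other CPUs, and advance their cookies to the new epoch.
	 */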
5999 	CPU_FOREACH(cpuid) {
6000 		if (cpuid == curcpu)
6001 			continue;
6002 		if (stage == PM_STAGE1) {
6003 			curpmap = pcpu_find(cpuid)->pc_curpmap;
6004 			PMAP_ASSERT_STAGE1(pmap);
6005 		} else {
6006 			curpmap = pcpu_find(cpuid)->pc_curvmpmap;
6007 			if (curpmap == NULL)
6008 				continue;
6009 			PMAP_ASSERT_STAGE2(pmap);
6010 		}
6011 		KASSERT(curpmap->pm_asid_set == set, ("Incorrect set"));
6012 		asid = COOKIE_TO_ASID(curpmap->pm_cookie);
6013 		if (asid == -1)
6014 			continue;
6015 		bit_set(set->asid_set, asid);
6016 		curpmap->pm_cookie = COOKIE_FROM(asid, epoch);
6017 	}
6018 }
6019 
6020 /*
6021  * Allocate a new ASID for the specified pmap.
6022  */
6023 static void
6024 pmap_alloc_asid(pmap_t pmap)
6025 {
6026 	struct asid_set *set;
6027 	int new_asid;
6028 
6029 	set = pmap->pm_asid_set;
6030 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
6031 
6032 	mtx_lock_spin(&set->asid_set_mutex);
6033 
6034 	/*
6035 	 * While this processor was waiting to acquire the asid set mutex,
6036 	 * pmap_reset_asid_set() running on another processor might have
6037 	 * updated this pmap's cookie to the current epoch.  In which case, we
6038 	 * don't need to allocate a new ASID.
6039 	 */
6040 	if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch)
6041 		goto out;
6042 
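	/*
	 * Search for a free ASID starting at asid_next, then wrap around to
	 * the first available ASID; if the set is exhausted, reset it and
	 * search once more.
	 */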
6043 	bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size,
6044 	    &new_asid);
6045 	if (new_asid == -1) {
6046 		bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
6047 		    set->asid_next, &new_asid);
6048 		if (new_asid == -1) {
6049 			pmap_reset_asid_set(pmap);
6050 			bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
6051 			    set->asid_set_size, &new_asid);
6052 			KASSERT(new_asid != -1, ("ASID allocation failure"));
6053 		}
6054 	}
6055 	bit_set(set->asid_set, new_asid);
6056 	set->asid_next = new_asid + 1;
6057 	pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch);
6058 out:
6059 	mtx_unlock_spin(&set->asid_set_mutex);
6060 }
6061 
6062 /*
6063  * Compute the value that should be stored in ttbr0 to activate the specified
6064  * pmap.  This value may change from time to time.
6065  */
6066 uint64_t
6067 pmap_to_ttbr0(pmap_t pmap)
6068 {
6069 
6070 	return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) |
6071 	    pmap->pm_l0_paddr);
6072 }
6073 
6074 static bool
6075 pmap_activate_int(pmap_t pmap)
6076 {
6077 	struct asid_set *set;
6078 	int epoch;
6079 
6080 	KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
6081 	KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
6082 
6083 	if ((pmap->pm_stage == PM_STAGE1 && pmap == PCPU_GET(curpmap)) ||
6084 	    (pmap->pm_stage == PM_STAGE2 && pmap == PCPU_GET(curvmpmap))) {
6085 		/*
6086 		 * Handle the possibility that the old thread was preempted
6087 		 * after an "ic" or "tlbi" instruction but before it performed
6088 		 * a "dsb" instruction.  If the old thread migrates to a new
6089 		 * processor, its completion of a "dsb" instruction on that
6090 		 * new processor does not guarantee that the "ic" or "tlbi"
6091 		 * instructions performed on the old processor have completed.
6092 		 */
6093 		dsb(ish);
6094 		return (false);
6095 	}
6096 
6097 	set = pmap->pm_asid_set;
6098 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
6099 
6100 	/*
6101 	 * Ensure that the store to curpmap is globally visible before the
6102 	 * load from asid_epoch is performed.
6103 	 */
6104 	if (pmap->pm_stage == PM_STAGE1)
6105 		PCPU_SET(curpmap, pmap);
6106 	else
6107 		PCPU_SET(curvmpmap, pmap);
6108 	dsb(ish);
6109 	epoch = COOKIE_TO_EPOCH(pmap->pm_cookie);
6110 	if (epoch >= 0 && epoch != set->asid_epoch)
6111 		pmap_alloc_asid(pmap);
6112 
6113 	if (pmap->pm_stage == PM_STAGE1) {
6114 		set_ttbr0(pmap_to_ttbr0(pmap));
6115 		if (PCPU_GET(bcast_tlbi_workaround) != 0)
6116 			invalidate_local_icache();
6117 	}
6118 	return (true);
6119 }
6120 
6121 void
6122 pmap_activate_vm(pmap_t pmap)
6123 {
6124 
6125 	PMAP_ASSERT_STAGE2(pmap);
6126 
6127 	(void)pmap_activate_int(pmap);
6128 }
6129 
6130 void
6131 pmap_activate(struct thread *td)
6132 {
6133 	pmap_t	pmap;
6134 
6135 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
6136 	PMAP_ASSERT_STAGE1(pmap);
6137 	critical_enter();
6138 	(void)pmap_activate_int(pmap);
6139 	critical_exit();
6140 }
6141 
6142 /*
6143  * To eliminate the unused parameter "old", we would have to add an instruction
6144  * to cpu_switch().
6145  */
6146 struct pcb *
6147 pmap_switch(struct thread *old __unused, struct thread *new)
6148 {
6149 	pcpu_bp_harden bp_harden;
6150 	struct pcb *pcb;
6151 
6152 	/* Store the new curthread */
6153 	PCPU_SET(curthread, new);
6154 
6155 	/* And the new pcb */
6156 	pcb = new->td_pcb;
6157 	PCPU_SET(curpcb, pcb);
6158 
6159 	/*
6160 	 * TODO: We may need to flush the cache here if switching
6161 	 * to a user process.
6162 	 */
6163 
6164 	if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
6165 		/*
6166 		 * Stop userspace from training the branch predictor against
6167 		 * other processes. This will call into a CPU specific
6168 		 * function that clears the branch predictor state.
6169 		 */
6170 		bp_harden = PCPU_GET(bp_harden);
6171 		if (bp_harden != NULL)
6172 			bp_harden();
6173 	}
6174 
6175 	return (pcb);
6176 }
6177 
6178 void
6179 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
6180 {
6181 
6182 	PMAP_ASSERT_STAGE1(pmap);
6183 	if (va >= VM_MIN_KERNEL_ADDRESS) {
6184 		cpu_icache_sync_range(va, sz);
6185 	} else {
6186 		u_int len, offset;
6187 		vm_paddr_t pa;
6188 
6189 		/* Find the length of data in this page to flush */
6190 		offset = va & PAGE_MASK;
6191 		len = imin(PAGE_SIZE - offset, sz);
6192 
6193 		while (sz != 0) {
6194 			/* Extract the physical address & find it in the DMAP */
6195 			pa = pmap_extract(pmap, va);
6196 			if (pa != 0)
6197 				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
6198 
6199 			/* Move to the next page */
6200 			sz -= len;
6201 			va += len;
6202 			/* Set the length for the next iteration */
6203 			len = imin(PAGE_SIZE, sz);
6204 		}
6205 	}
6206 }
6207 
6208 static int
6209 pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far)
6210 {
6211 	pd_entry_t *pdep;
6212 	pt_entry_t *ptep, pte;
6213 	int rv, lvl, dfsc;
6214 
6215 	PMAP_ASSERT_STAGE2(pmap);
6216 	rv = KERN_FAILURE;
6217 
6218 	/* Data and insn aborts use same encoding for FSC field. */
6219 	dfsc = esr & ISS_DATA_DFSC_MASK;
6220 	switch (dfsc) {
6221 	case ISS_DATA_DFSC_TF_L0:
6222 	case ISS_DATA_DFSC_TF_L1:
6223 	case ISS_DATA_DFSC_TF_L2:
6224 	case ISS_DATA_DFSC_TF_L3:
6225 		PMAP_LOCK(pmap);
6226 		pdep = pmap_pde(pmap, far, &lvl);
6227 		if (pdep == NULL || lvl != (dfsc - ISS_DATA_DFSC_TF_L1)) {
6228 			PMAP_UNLOCK(pmap);
6229 			break;
6230 		}
6231 
6232 		switch (lvl) {
6233 		case 0:
6234 			ptep = pmap_l0_to_l1(pdep, far);
6235 			break;
6236 		case 1:
6237 			ptep = pmap_l1_to_l2(pdep, far);
6238 			break;
6239 		case 2:
6240 			ptep = pmap_l2_to_l3(pdep, far);
6241 			break;
6242 		default:
6243 			panic("%s: Invalid pde level %d", __func__, lvl);
6244 		}
6245 		goto fault_exec;
6246 
6247 	case ISS_DATA_DFSC_AFF_L1:
6248 	case ISS_DATA_DFSC_AFF_L2:
6249 	case ISS_DATA_DFSC_AFF_L3:
6250 		PMAP_LOCK(pmap);
6251 		ptep = pmap_pte(pmap, far, &lvl);
6252 fault_exec:
6253 		if (ptep != NULL && (pte = pmap_load(ptep)) != 0) {
6254 			if (icache_vmid) {
6255 				pmap_invalidate_vpipt_icache();
6256 			} else {
6257 				/*
6258 				 * If accessing an executable page invalidate
6259 				 * the I-cache so it will be valid when we
6260 				 * continue execution in the guest. The D-cache
6261 				 * is assumed to already be clean to the Point
6262 				 * of Coherency.
6263 				 */
6264 				if ((pte & ATTR_S2_XN_MASK) !=
6265 				    ATTR_S2_XN(ATTR_S2_XN_NONE)) {
6266 					invalidate_icache();
6267 				}
6268 			}
6269 			pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID);
6270 			rv = KERN_SUCCESS;
6271 		}
6272 		PMAP_UNLOCK(pmap);
6273 		break;
6274 	}
6275 
6276 	return (rv);
6277 }
6278 
6279 int
6280 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
6281 {
6282 	pt_entry_t pte, *ptep;
6283 	register_t intr;
6284 	uint64_t ec, par;
6285 	int lvl, rv;
6286 
6287 	rv = KERN_FAILURE;
6288 
6289 	ec = ESR_ELx_EXCEPTION(esr);
6290 	switch (ec) {
6291 	case EXCP_INSN_ABORT_L:
6292 	case EXCP_INSN_ABORT:
6293 	case EXCP_DATA_ABORT_L:
6294 	case EXCP_DATA_ABORT:
6295 		break;
6296 	default:
6297 		return (rv);
6298 	}
6299 
6300 	if (pmap->pm_stage == PM_STAGE2)
6301 		return (pmap_stage2_fault(pmap, esr, far));
6302 
6303 	/* Data and insn aborts use same encoding for FSC field. */
6304 	switch (esr & ISS_DATA_DFSC_MASK) {
6305 	case ISS_DATA_DFSC_AFF_L1:
6306 	case ISS_DATA_DFSC_AFF_L2:
6307 	case ISS_DATA_DFSC_AFF_L3:
6308 		PMAP_LOCK(pmap);
6309 		ptep = pmap_pte(pmap, far, &lvl);
6310 		if (ptep != NULL) {
6311 			pmap_set_bits(ptep, ATTR_AF);
6312 			rv = KERN_SUCCESS;
6313 			/*
6314 			 * XXXMJ as an optimization we could mark the entry
6315 			 * dirty if this is a write fault.
6316 			 */
6317 		}
6318 		PMAP_UNLOCK(pmap);
6319 		break;
6320 	case ISS_DATA_DFSC_PF_L1:
6321 	case ISS_DATA_DFSC_PF_L2:
6322 	case ISS_DATA_DFSC_PF_L3:
6323 		if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
6324 		    (esr & ISS_DATA_WnR) == 0)
6325 			return (rv);
6326 		PMAP_LOCK(pmap);
6327 		ptep = pmap_pte(pmap, far, &lvl);
6328 		if (ptep != NULL &&
6329 		    ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
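			/*
			 * The mapping permits writes in software
			 * (ATTR_SW_DBM is set); record the write by clearing
			 * the read-only bit and let the faulting access
			 * retry.
			 */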
6330 			if ((pte & ATTR_S1_AP_RW_BIT) ==
6331 			    ATTR_S1_AP(ATTR_S1_AP_RO)) {
6332 				pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
6333 				pmap_invalidate_page(pmap, far);
6334 			}
6335 			rv = KERN_SUCCESS;
6336 		}
6337 		PMAP_UNLOCK(pmap);
6338 		break;
6339 	case ISS_DATA_DFSC_TF_L0:
6340 	case ISS_DATA_DFSC_TF_L1:
6341 	case ISS_DATA_DFSC_TF_L2:
6342 	case ISS_DATA_DFSC_TF_L3:
6343 		/*
6344 		 * Retry the translation.  A break-before-make sequence can
6345 		 * produce a transient fault.
6346 		 */
6347 		if (pmap == kernel_pmap) {
6348 			/*
6349 			 * The translation fault may have occurred within a
6350 			 * critical section.  Therefore, we must check the
6351 			 * address without acquiring the kernel pmap's lock.
6352 			 */
6353 			if (pmap_kextract(far) != 0)
6354 				rv = KERN_SUCCESS;
6355 		} else {
6356 			PMAP_LOCK(pmap);
6357 			/* Ask the MMU to check the address. */
6358 			intr = intr_disable();
6359 			par = arm64_address_translate_s1e0r(far);
6360 			intr_restore(intr);
6361 			PMAP_UNLOCK(pmap);
6362 
6363 			/*
6364 			 * If the translation was successful, then we can
6365 			 * return success to the trap handler.
6366 			 */
6367 			if (PAR_SUCCESS(par))
6368 				rv = KERN_SUCCESS;
6369 		}
6370 		break;
6371 	}
6372 
6373 	return (rv);
6374 }
6375 
6376 /*
6377  *	Increase the starting virtual address of the given mapping if a
6378  *	different alignment might result in more superpage mappings.
6379  */
6380 void
6381 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
6382     vm_offset_t *addr, vm_size_t size)
6383 {
6384 	vm_offset_t superpage_offset;
6385 
6386 	if (size < L2_SIZE)
6387 		return;
6388 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
6389 		offset += ptoa(object->pg_color);
6390 	superpage_offset = offset & L2_OFFSET;
6391 	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
6392 	    (*addr & L2_OFFSET) == superpage_offset)
6393 		return;
6394 	if ((*addr & L2_OFFSET) < superpage_offset)
6395 		*addr = (*addr & ~L2_OFFSET) + superpage_offset;
6396 	else
6397 		*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
6398 }
6399 
6400 /**
6401  * Get the kernel virtual address of a set of physical pages. If there are
6402  * physical addresses not covered by the DMAP perform a transient mapping
6403  * that will be removed when calling pmap_unmap_io_transient.
6404  *
6405  * \param page        The pages the caller wishes to obtain the virtual
6406  *                    address on the kernel memory map.
6407  * \param vaddr       On return contains the kernel virtual memory address
6408  *                    of the pages passed in the page parameter.
6409  * \param count       Number of pages passed in.
6410  * \param can_fault   TRUE if the thread using the mapped pages can take
6411  *                    page faults, FALSE otherwise.
6412  *
6413  * \returns TRUE if the caller must call pmap_unmap_io_transient when
6414  *          finished or FALSE otherwise.
6415  *
6416  */
6417 boolean_t
6418 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6419     boolean_t can_fault)
6420 {
6421 	vm_paddr_t paddr;
6422 	boolean_t needs_mapping;
6423 	int error, i;
6424 
6425 	/*
6426 	 * Allocate any KVA space that we need, this is done in a separate
6427 	 * loop to prevent calling vmem_alloc while pinned.
6428 	 */
6429 	needs_mapping = FALSE;
6430 	for (i = 0; i < count; i++) {
6431 		paddr = VM_PAGE_TO_PHYS(page[i]);
6432 		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
6433 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
6434 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
6435 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
6436 			needs_mapping = TRUE;
6437 		} else {
6438 			vaddr[i] = PHYS_TO_DMAP(paddr);
6439 		}
6440 	}
6441 
6442 	/* Exit early if everything is covered by the DMAP */
6443 	if (!needs_mapping)
6444 		return (FALSE);
6445 
6446 	if (!can_fault)
6447 		sched_pin();
6448 	for (i = 0; i < count; i++) {
6449 		paddr = VM_PAGE_TO_PHYS(page[i]);
6450 		if (!PHYS_IN_DMAP(paddr)) {
6451 			panic(
6452 			   "pmap_map_io_transient: TODO: Map out of DMAP data");
6453 		}
6454 	}
6455 
6456 	return (needs_mapping);
6457 }
6458 
6459 void
6460 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6461     boolean_t can_fault)
6462 {
6463 	vm_paddr_t paddr;
6464 	int i;
6465 
6466 	if (!can_fault)
6467 		sched_unpin();
6468 	for (i = 0; i < count; i++) {
6469 		paddr = VM_PAGE_TO_PHYS(page[i]);
6470 		if (!PHYS_IN_DMAP(paddr)) {
6471 			panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
6472 		}
6473 	}
6474 }
6475 
6476 boolean_t
6477 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
6478 {
6479 
6480 	return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
6481 }
6482 
6483 /*
6484  * Track a range of the kernel's virtual address space that is contiguous
6485  * in various mapping attributes.
6486  */
6487 struct pmap_kernel_map_range {
6488 	vm_offset_t sva;
6489 	pt_entry_t attrs;
6490 	int l3pages;
6491 	int l3contig;
6492 	int l2blocks;
6493 	int l1blocks;
6494 };
6495 
6496 static void
6497 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
6498     vm_offset_t eva)
6499 {
6500 	const char *mode;
6501 	int index;
6502 
6503 	if (eva <= range->sva)
6504 		return;
6505 
6506 	index = range->attrs & ATTR_S1_IDX_MASK;
6507 	switch (index) {
6508 	case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
6509 		mode = "DEV";
6510 		break;
6511 	case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
6512 		mode = "UC";
6513 		break;
6514 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
6515 		mode = "WB";
6516 		break;
6517 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
6518 		mode = "WT";
6519 		break;
6520 	default:
6521 		printf(
6522 		    "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n",
6523 		    __func__, index, range->sva, eva);
6524 		mode = "??";
6525 		break;
6526 	}
6527 
6528 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c %3s %d %d %d %d\n",
6529 	    range->sva, eva,
6530 	    (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
6531 	    (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
6532 	    (range->attrs & ATTR_S1_AP_USER) != 0 ? 'u' : 's',
6533 	    mode, range->l1blocks, range->l2blocks, range->l3contig,
6534 	    range->l3pages);
6535 
6536 	/* Reset to sentinel value. */
6537 	range->sva = 0xfffffffffffffffful;
6538 }
6539 
6540 /*
6541  * Determine whether the attributes specified by a page table entry match those
6542  * being tracked by the current range.
6543  */
6544 static bool
6545 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
6546 {
6547 
6548 	return (range->attrs == attrs);
6549 }
6550 
6551 static void
6552 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
6553     pt_entry_t attrs)
6554 {
6555 
6556 	memset(range, 0, sizeof(*range));
6557 	range->sva = va;
6558 	range->attrs = attrs;
6559 }
6560 
6561 /*
6562  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
6563  * those of the current run, dump the address range and its attributes, and
6564  * begin a new run.
6565  */
6566 static void
6567 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
6568     vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e,
6569     pt_entry_t l3e)
6570 {
6571 	pt_entry_t attrs;
6572 
6573 	attrs = l0e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6574 	attrs |= l1e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6575 	if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK)
6576 		attrs |= l1e & ATTR_S1_IDX_MASK;
6577 	attrs |= l2e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6578 	if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK)
6579 		attrs |= l2e & ATTR_S1_IDX_MASK;
6580 	attrs |= l3e & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK);
6581 
6582 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
6583 		sysctl_kmaps_dump(sb, range, va);
6584 		sysctl_kmaps_reinit(range, va, attrs);
6585 	}
6586 }
6587 
6588 static int
6589 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
6590 {
6591 	struct pmap_kernel_map_range range;
6592 	struct sbuf sbuf, *sb;
6593 	pd_entry_t l0e, *l1, l1e, *l2, l2e;
6594 	pt_entry_t *l3, l3e;
6595 	vm_offset_t sva;
6596 	vm_paddr_t pa;
6597 	int error, i, j, k, l;
6598 
6599 	error = sysctl_wire_old_buffer(req, 0);
6600 	if (error != 0)
6601 		return (error);
6602 	sb = &sbuf;
6603 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
6604 
6605 	/* Sentinel value. */
6606 	range.sva = 0xfffffffffffffffful;
6607 
6608 	/*
6609 	 * Iterate over the kernel page tables without holding the kernel pmap
6610 	 * lock.  Kernel page table pages are never freed, so at worst we will
6611 	 * observe inconsistencies in the output.
6612 	 */
6613 	for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES;
6614 	    i++) {
6615 		if (i == pmap_l0_index(DMAP_MIN_ADDRESS))
6616 			sbuf_printf(sb, "\nDirect map:\n");
6617 		else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
6618 			sbuf_printf(sb, "\nKernel map:\n");
6619 
6620 		l0e = kernel_pmap->pm_l0[i];
6621 		if ((l0e & ATTR_DESCR_VALID) == 0) {
6622 			sysctl_kmaps_dump(sb, &range, sva);
6623 			sva += L0_SIZE;
6624 			continue;
6625 		}
6626 		pa = l0e & ~ATTR_MASK;
6627 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa);
6628 
6629 		for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
6630 			l1e = l1[j];
6631 			if ((l1e & ATTR_DESCR_VALID) == 0) {
6632 				sysctl_kmaps_dump(sb, &range, sva);
6633 				sva += L1_SIZE;
6634 				continue;
6635 			}
6636 			if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
6637 				sysctl_kmaps_check(sb, &range, sva, l0e, l1e,
6638 				    0, 0);
6639 				range.l1blocks++;
6640 				sva += L1_SIZE;
6641 				continue;
6642 			}
6643 			pa = l1e & ~ATTR_MASK;
6644 			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);
6645 
6646 			for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
6647 				l2e = l2[k];
6648 				if ((l2e & ATTR_DESCR_VALID) == 0) {
6649 					sysctl_kmaps_dump(sb, &range, sva);
6650 					sva += L2_SIZE;
6651 					continue;
6652 				}
6653 				if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
6654 					sysctl_kmaps_check(sb, &range, sva,
6655 					    l0e, l1e, l2e, 0);
6656 					range.l2blocks++;
6657 					sva += L2_SIZE;
6658 					continue;
6659 				}
6660 				pa = l2e & ~ATTR_MASK;
6661 				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
6662 
6663 				for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
6664 				    l++, sva += L3_SIZE) {
6665 					l3e = l3[l];
6666 					if ((l3e & ATTR_DESCR_VALID) == 0) {
6667 						sysctl_kmaps_dump(sb, &range,
6668 						    sva);
6669 						continue;
6670 					}
6671 					sysctl_kmaps_check(sb, &range, sva,
6672 					    l0e, l1e, l2e, l3e);
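					/*
					 * An ATTR_CONTIGUOUS run spans 16 L3
					 * entries here; count each run once,
					 * at its first entry.
					 */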
6673 					if ((l3e & ATTR_CONTIGUOUS) != 0)
6674 						range.l3contig += l % 16 == 0 ?
6675 						    1 : 0;
6676 					else
6677 						range.l3pages++;
6678 				}
6679 			}
6680 		}
6681 	}
6682 
6683 	error = sbuf_finish(sb);
6684 	sbuf_delete(sb);
6685 	return (error);
6686 }
6687 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
6688     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
6689     NULL, 0, sysctl_kmaps, "A",
6690     "Dump kernel address layout");
6691