xref: /freebsd/sys/arm64/arm64/pmap.c (revision e17f5b1d)
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2003 Peter Wemm
9  * All rights reserved.
10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11  * All rights reserved.
12  * Copyright (c) 2014 Andrew Turner
13  * All rights reserved.
14  * Copyright (c) 2014-2016 The FreeBSD Foundation
15  * All rights reserved.
16  *
17  * This code is derived from software contributed to Berkeley by
18  * the Systems Programming Group of the University of Utah Computer
19  * Science Department and William Jolitz of UUNET Technologies Inc.
20  *
21  * This software was developed by Andrew Turner under sponsorship from
22  * the FreeBSD Foundation.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  * 3. All advertising materials mentioning features or use of this software
33  *    must display the following acknowledgement:
34  *	This product includes software developed by the University of
35  *	California, Berkeley and its contributors.
36  * 4. Neither the name of the University nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  *
52  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
53  */
54 /*-
55  * Copyright (c) 2003 Networks Associates Technology, Inc.
56  * All rights reserved.
57  *
58  * This software was developed for the FreeBSD Project by Jake Burkholder,
59  * Safeport Network Services, and Network Associates Laboratories, the
60  * Security Research Division of Network Associates, Inc. under
61  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
62  * CHATS research program.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83  * SUCH DAMAGE.
84  */
85 
86 #include <sys/cdefs.h>
87 __FBSDID("$FreeBSD$");
88 
89 /*
90  *	Manages physical address maps.
91  *
92  *	Since the information managed by this module is
93  *	also stored by the logical address mapping module,
94  *	this module may throw away valid virtual-to-physical
95  *	mappings at almost any time.  However, invalidations
96  *	of virtual-to-physical mappings must be done as
97  *	requested.
98  *
99  *	In order to cope with hardware architectures which
100  *	make virtual-to-physical map invalidates expensive,
101  *	make virtual-to-physical map invalidations expensive,
102  *	this module may delay invalidation or protection-reduction
103  *	necessary.  This module is given full information as
104  *	to which processors are currently using which maps,
105  *	and to when physical maps must be made correct.
106  */
107 
108 #include "opt_vm.h"
109 
110 #include <sys/param.h>
111 #include <sys/bitstring.h>
112 #include <sys/bus.h>
113 #include <sys/systm.h>
114 #include <sys/kernel.h>
115 #include <sys/ktr.h>
116 #include <sys/limits.h>
117 #include <sys/lock.h>
118 #include <sys/malloc.h>
119 #include <sys/mman.h>
120 #include <sys/msgbuf.h>
121 #include <sys/mutex.h>
122 #include <sys/physmem.h>
123 #include <sys/proc.h>
124 #include <sys/rwlock.h>
125 #include <sys/sbuf.h>
126 #include <sys/sx.h>
127 #include <sys/vmem.h>
128 #include <sys/vmmeter.h>
129 #include <sys/sched.h>
130 #include <sys/sysctl.h>
131 #include <sys/_unrhdr.h>
132 #include <sys/smp.h>
133 
134 #include <vm/vm.h>
135 #include <vm/vm_param.h>
136 #include <vm/vm_kern.h>
137 #include <vm/vm_page.h>
138 #include <vm/vm_map.h>
139 #include <vm/vm_object.h>
140 #include <vm/vm_extern.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_pager.h>
143 #include <vm/vm_phys.h>
144 #include <vm/vm_radix.h>
145 #include <vm/vm_reserv.h>
146 #include <vm/uma.h>
147 
148 #include <machine/machdep.h>
149 #include <machine/md_var.h>
150 #include <machine/pcb.h>
151 
152 #define	PMAP_ASSERT_STAGE1(pmap)	MPASS((pmap)->pm_stage == PM_STAGE1)
153 #define	PMAP_ASSERT_STAGE2(pmap)	MPASS((pmap)->pm_stage == PM_STAGE2)
154 
155 #define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
156 #define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
157 #define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
158 #define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))
159 
160 #define	NUL0E		L0_ENTRIES
161 #define	NUL1E		(NUL0E * NL1PG)
162 #define	NUL2E		(NUL1E * NL2PG)
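
/*
 * Worked example (editorial note, not part of the original source): with a
 * 4 KB granule, PAGE_SIZE is 4096 and each table entry is 8 bytes, so NL1PG
 * and NL2PG are 512.  Assuming L0_ENTRIES is also 512, NUL0E = 512,
 * NUL1E = 512 * 512 = 262144, and NUL2E = 262144 * 512 = 134217728, i.e.
 * the total number of L0/L1/L2 entries reachable from a single L0 table.
 */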
163 
164 #if !defined(DIAGNOSTIC)
165 #ifdef __GNUC_GNU_INLINE__
166 #define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
167 #else
168 #define PMAP_INLINE	extern inline
169 #endif
170 #else
171 #define PMAP_INLINE
172 #endif
173 
174 #ifdef PV_STATS
175 #define PV_STAT(x)	do { x ; } while (0)
176 #else
177 #define PV_STAT(x)	do { } while (0)
178 #endif
179 
180 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
181 #define	pa_to_pvh(pa)		(&pv_table[pmap_l2_pindex(pa)])
182 
183 #define	NPV_LIST_LOCKS	MAXCPU
184 
185 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
186 			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
187 
188 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
189 	struct rwlock **_lockp = (lockp);		\
190 	struct rwlock *_new_lock;			\
191 							\
192 	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
193 	if (_new_lock != *_lockp) {			\
194 		if (*_lockp != NULL)			\
195 			rw_wunlock(*_lockp);		\
196 		*_lockp = _new_lock;			\
197 		rw_wlock(*_lockp);			\
198 	}						\
199 } while (0)
200 
201 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
202 			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
203 
204 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
205 	struct rwlock **_lockp = (lockp);		\
206 							\
207 	if (*_lockp != NULL) {				\
208 		rw_wunlock(*_lockp);			\
209 		*_lockp = NULL;				\
210 	}						\
211 } while (0)
212 
213 #define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
214 			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
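
/*
 * Illustrative sketch (editorial note, not part of the original source) of
 * the usual calling pattern for the PV list lock macros above: the caller
 * starts with no lock held and lets the macro drop and reacquire locks as
 * the target page changes.  "example_walk_pages" is a hypothetical helper,
 * shown only to demonstrate the pattern.
 */
#if 0
static void
example_walk_pages(vm_page_t *ma, int count)
{
	struct rwlock *lock;
	int i;

	lock = NULL;
	for (i = 0; i < count; i++) {
		/* Switch to the lock covering this page's PV list. */
		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, ma[i]);
		/* ... operate on ma[i]'s PV list here ... */
	}
	RELEASE_PV_LIST_LOCK(&lock);
}
#endif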
215 
216 /*
217  * The presence of this flag indicates that the mapping is writeable.
218  * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
219  * it is dirty.  This flag may only be set on managed mappings.
220  *
221  * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
222  * as a software managed bit.
223  */
224 #define	ATTR_SW_DBM	ATTR_DBM
225 
226 struct pmap kernel_pmap_store;
227 
228 /* Used for mapping ACPI memory before VM is initialized */
229 #define	PMAP_PREINIT_MAPPING_COUNT	32
230 #define	PMAP_PREINIT_MAPPING_SIZE	(PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
231 static vm_offset_t preinit_map_va;	/* Start VA of pre-init mapping space */
232 static int vm_initialized = 0;		/* No need to use pre-init maps when set */
233 
234 /*
235  * Reserve a few L2 blocks starting from the 'preinit_map_va' pointer.
236  * Always map entire L2 block for simplicity.
237  * VA of L2 block = preinit_map_va + i * L2_SIZE
238  */
239 static struct pmap_preinit_mapping {
240 	vm_paddr_t	pa;
241 	vm_offset_t	va;
242 	vm_size_t	size;
243 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
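
/*
 * Worked example (editorial note, not part of the original source): with a
 * 4 KB granule L2_SIZE is 2 MB, so slot i of pmap_preinit_mapping[] covers
 * the 2 MB window [preinit_map_va + i * L2_SIZE, preinit_map_va +
 * (i + 1) * L2_SIZE), giving 32 slots and 64 MB of pre-init VA in total.
 */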
244 
245 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
246 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
247 vm_offset_t kernel_vm_end = 0;
248 
249 /*
250  * Data for the pv entry allocation mechanism.
251  */
252 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
253 static struct mtx pv_chunks_mutex;
254 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
255 static struct md_page *pv_table;
256 static struct md_page pv_dummy;
257 
258 vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
259 vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
260 vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */
261 
262 /* This code assumes all L1 DMAP entries will be used */
263 CTASSERT((DMAP_MIN_ADDRESS  & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
264 CTASSERT((DMAP_MAX_ADDRESS  & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
265 
266 #define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
267 extern pt_entry_t pagetable_dmap[];
268 
269 #define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
270 static vm_paddr_t physmap[PHYSMAP_SIZE];
271 static u_int physmap_idx;
272 
273 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
274     "VM/pmap parameters");
275 
276 /*
277  * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
278  * it has currently allocated to pmaps, a cursor ("asid_next") to
279  * optimize its search for a free ASID in the bit vector, and an epoch number
280  * ("asid_epoch") to indicate when it has reclaimed all previously allocated
281  * ASIDs that are not currently active on a processor.
282  *
283  * The current epoch number is always in the range [0, INT_MAX).  Negative
284  * numbers and INT_MAX are reserved for special cases that are described
285  * below.
286  */
287 struct asid_set {
288 	int asid_bits;
289 	bitstr_t *asid_set;
290 	int asid_set_size;
291 	int asid_next;
292 	int asid_epoch;
293 	struct mtx asid_set_mutex;
294 };
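
/*
 * Illustrative sketch (editorial note, not part of the original source) of
 * the cursor-based search described above: scan forward from "asid_next"
 * for a clear bit, wrapping back to the first non-reserved ASID if the end
 * of the set is reached.  The real allocator (pmap_alloc_asid()) also
 * handles epoch rollover and TLB invalidation, which are omitted here;
 * "example_find_free_asid" is a hypothetical name.
 */
#if 0
static int
example_find_free_asid(struct asid_set *set)
{
	int new_asid;

	mtx_assert(&set->asid_set_mutex, MA_OWNED);
	bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size,
	    &new_asid);
	if (new_asid == -1)
		bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
		    set->asid_set_size, &new_asid);
	if (new_asid == -1)
		return (-1);	/* Set exhausted; a new epoch is needed. */
	bit_set(set->asid_set, new_asid);
	set->asid_next = new_asid + 1;
	return (new_asid);
}
#endif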
295 
296 static struct asid_set asids;
297 static struct asid_set vmids;
298 
299 static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
300     "ASID allocator");
301 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asids.asid_bits, 0,
302     "The number of bits in an ASID");
303 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asids.asid_next, 0,
304     "The last allocated ASID plus one");
305 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asids.asid_epoch, 0,
306     "The current epoch number");
307 
308 static SYSCTL_NODE(_vm_pmap, OID_AUTO, vmid, CTLFLAG_RD, 0, "VMID allocator");
309 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, bits, CTLFLAG_RD, &vmids.asid_bits, 0,
310     "The number of bits in a VMID");
311 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, next, CTLFLAG_RD, &vmids.asid_next, 0,
312     "The last allocated VMID plus one");
313 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0,
314     "The current epoch number");
315 
316 void (*pmap_clean_stage2_tlbi)(void);
317 void (*pmap_invalidate_vpipt_icache)(void);
318 
319 /*
320  * A pmap's cookie encodes an ASID and epoch number.  Cookies for reserved
321  * ASIDs have a negative epoch number, specifically, INT_MIN.  Cookies for
322  * dynamically allocated ASIDs have a non-negative epoch number.
323  *
324  * An invalid ASID is represented by -1.
325  *
326  * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN),
327  * which indicates that an ASID should never be allocated to the pmap, and
328  * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be
329  * allocated when the pmap is next activated.
330  */
331 #define	COOKIE_FROM(asid, epoch)	((long)((u_int)(asid) |	\
332 					    ((u_long)(epoch) << 32)))
333 #define	COOKIE_TO_ASID(cookie)		((int)(cookie))
334 #define	COOKIE_TO_EPOCH(cookie)		((int)((u_long)(cookie) >> 32))
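
/*
 * Worked example (editorial note, not part of the original source): for a
 * pmap that was assigned ASID 42 during epoch 7,
 *
 *	cookie = COOKIE_FROM(42, 7);
 *	COOKIE_TO_ASID(cookie) == 42
 *	COOKIE_TO_EPOCH(cookie) == 7
 *
 * The reserved value COOKIE_FROM(-1, INT_MIN) decodes to ASID -1 (the
 * invalid ASID described above) and epoch INT_MIN.
 */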
335 
336 static int superpages_enabled = 1;
337 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
338     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
339     "Are large page mappings enabled?");
340 
341 /*
342  * Internal flags for pmap_enter()'s helper functions.
343  */
344 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
345 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
346 
347 static void	free_pv_chunk(struct pv_chunk *pc);
348 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
349 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
350 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
351 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
352 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
353 		    vm_offset_t va);
354 
355 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
356 static bool pmap_activate_int(pmap_t pmap);
357 static void pmap_alloc_asid(pmap_t pmap);
358 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
359 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
360 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
361     vm_offset_t va, struct rwlock **lockp);
362 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
363 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
364     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
365 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
366     u_int flags, vm_page_t m, struct rwlock **lockp);
367 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
368     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
369 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
370     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
371 static void pmap_reset_asid_set(pmap_t pmap);
372 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
373     vm_page_t m, struct rwlock **lockp);
374 
375 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
376 		struct rwlock **lockp);
377 
378 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
379     struct spglist *free);
380 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
381 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
382 
383 /*
384  * These load the old table data and store the new value.
385  * They need to be atomic as the System MMU may write to the table at
386  * the same time as the CPU.
387  */
388 #define	pmap_clear(table)		atomic_store_64(table, 0)
389 #define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
390 #define	pmap_load(table)		(*table)
391 #define	pmap_load_clear(table)		atomic_swap_64(table, 0)
392 #define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
393 #define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
394 #define	pmap_store(table, entry)	atomic_store_64(table, entry)
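
/*
 * Illustrative example (editorial note, not part of the original source):
 * removal paths use the atomic swap so that any access or dirty state set
 * concurrently by the hardware (or SMMU) is observed exactly once.  "pte"
 * and "m" are hypothetical locals.
 */
#if 0
	pt_entry_t old;

	old = pmap_load_clear(pte);	/* Atomically fetch and zero the PTE. */
	if ((old & ATTR_AF) != 0)
		vm_page_aflag_set(m, PGA_REFERENCED);
#endif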
395 
396 /********************/
397 /* Inline functions */
398 /********************/
399 
400 static __inline void
401 pagecopy(void *s, void *d)
402 {
403 
404 	memcpy(d, s, PAGE_SIZE);
405 }
406 
407 static __inline pd_entry_t *
408 pmap_l0(pmap_t pmap, vm_offset_t va)
409 {
410 
411 	return (&pmap->pm_l0[pmap_l0_index(va)]);
412 }
413 
414 static __inline pd_entry_t *
415 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
416 {
417 	pd_entry_t *l1;
418 
419 	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
420 	return (&l1[pmap_l1_index(va)]);
421 }
422 
423 static __inline pd_entry_t *
424 pmap_l1(pmap_t pmap, vm_offset_t va)
425 {
426 	pd_entry_t *l0;
427 
428 	l0 = pmap_l0(pmap, va);
429 	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
430 		return (NULL);
431 
432 	return (pmap_l0_to_l1(l0, va));
433 }
434 
435 static __inline pd_entry_t *
436 pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
437 {
438 	pd_entry_t *l2;
439 
440 	l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
441 	return (&l2[pmap_l2_index(va)]);
442 }
443 
444 static __inline pd_entry_t *
445 pmap_l2(pmap_t pmap, vm_offset_t va)
446 {
447 	pd_entry_t *l1;
448 
449 	l1 = pmap_l1(pmap, va);
450 	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
451 		return (NULL);
452 
453 	return (pmap_l1_to_l2(l1, va));
454 }
455 
456 static __inline pt_entry_t *
457 pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
458 {
459 	pt_entry_t *l3;
460 
461 	l3 = (pt_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
462 	return (&l3[pmap_l3_index(va)]);
463 }
464 
465 /*
466  * Returns the lowest valid pde for a given virtual address.
467  * The next level may or may not point to a valid page or block.
468  */
469 static __inline pd_entry_t *
470 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
471 {
472 	pd_entry_t *l0, *l1, *l2, desc;
473 
474 	l0 = pmap_l0(pmap, va);
475 	desc = pmap_load(l0) & ATTR_DESCR_MASK;
476 	if (desc != L0_TABLE) {
477 		*level = -1;
478 		return (NULL);
479 	}
480 
481 	l1 = pmap_l0_to_l1(l0, va);
482 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
483 	if (desc != L1_TABLE) {
484 		*level = 0;
485 		return (l0);
486 	}
487 
488 	l2 = pmap_l1_to_l2(l1, va);
489 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
490 	if (desc != L2_TABLE) {
491 		*level = 1;
492 		return (l1);
493 	}
494 
495 	*level = 2;
496 	return (l2);
497 }
498 
499 /*
500  * Returns the lowest valid pte block or table entry for a given virtual
501  * address.  If there is no valid entry, return NULL and set the level to
502  * the first invalid level.
503  */
504 static __inline pt_entry_t *
505 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
506 {
507 	pd_entry_t *l1, *l2, desc;
508 	pt_entry_t *l3;
509 
510 	l1 = pmap_l1(pmap, va);
511 	if (l1 == NULL) {
512 		*level = 0;
513 		return (NULL);
514 	}
515 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
516 	if (desc == L1_BLOCK) {
517 		*level = 1;
518 		return (l1);
519 	}
520 
521 	if (desc != L1_TABLE) {
522 		*level = 1;
523 		return (NULL);
524 	}
525 
526 	l2 = pmap_l1_to_l2(l1, va);
527 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
528 	if (desc == L2_BLOCK) {
529 		*level = 2;
530 		return (l2);
531 	}
532 
533 	if (desc != L2_TABLE) {
534 		*level = 2;
535 		return (NULL);
536 	}
537 
538 	*level = 3;
539 	l3 = pmap_l2_to_l3(l2, va);
540 	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
541 		return (NULL);
542 
543 	return (l3);
544 }
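
/*
 * Illustrative example (editorial note, not part of the original source):
 * how a caller turns the entry and level returned by pmap_pte() into a
 * physical address, in the style of pmap_extract() below.  "pa", "va",
 * "lvl" and "pte" are hypothetical locals.
 */
#if 0
	pte = pmap_pte(pmap, va, &lvl);
	if (pte != NULL) {
		pa = pmap_load(pte) & ~ATTR_MASK;
		pa |= va & (lvl == 1 ? L1_OFFSET :
		    (lvl == 2 ? L2_OFFSET : L3_OFFSET));
	}
#endif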
545 
546 bool
547 pmap_ps_enabled(pmap_t pmap __unused)
548 {
549 
550 	return (superpages_enabled != 0);
551 }
552 
553 bool
554 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
555     pd_entry_t **l2, pt_entry_t **l3)
556 {
557 	pd_entry_t *l0p, *l1p, *l2p;
558 
559 	if (pmap->pm_l0 == NULL)
560 		return (false);
561 
562 	l0p = pmap_l0(pmap, va);
563 	*l0 = l0p;
564 
565 	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
566 		return (false);
567 
568 	l1p = pmap_l0_to_l1(l0p, va);
569 	*l1 = l1p;
570 
571 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
572 		*l2 = NULL;
573 		*l3 = NULL;
574 		return (true);
575 	}
576 
577 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
578 		return (false);
579 
580 	l2p = pmap_l1_to_l2(l1p, va);
581 	*l2 = l2p;
582 
583 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
584 		*l3 = NULL;
585 		return (true);
586 	}
587 
588 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
589 		return (false);
590 
591 	*l3 = pmap_l2_to_l3(l2p, va);
592 
593 	return (true);
594 }
595 
596 static __inline int
597 pmap_l3_valid(pt_entry_t l3)
598 {
599 
600 	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
601 }
602 
603 
604 CTASSERT(L1_BLOCK == L2_BLOCK);
605 
606 static pt_entry_t
607 pmap_pte_memattr(pmap_t pmap, vm_memattr_t memattr)
608 {
609 	pt_entry_t val;
610 
611 	if (pmap->pm_stage == PM_STAGE1) {
612 		val = ATTR_S1_IDX(memattr);
613 		if (memattr == VM_MEMATTR_DEVICE)
614 			val |= ATTR_S1_XN;
615 		return (val);
616 	}
617 
618 	val = 0;
619 
620 	switch (memattr) {
621 	case VM_MEMATTR_DEVICE:
622 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_DEVICE_nGnRnE) |
623 		    ATTR_S2_XN(ATTR_S2_XN_ALL));
624 	case VM_MEMATTR_UNCACHEABLE:
625 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_NC));
626 	case VM_MEMATTR_WRITE_BACK:
627 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WB));
628 	case VM_MEMATTR_WRITE_THROUGH:
629 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WT));
630 	default:
631 		panic("%s: invalid memory attribute %x", __func__, memattr);
632 	}
633 }
634 
635 static pt_entry_t
636 pmap_pte_prot(pmap_t pmap, vm_prot_t prot)
637 {
638 	pt_entry_t val;
639 
640 	val = 0;
641 	if (pmap->pm_stage == PM_STAGE1) {
642 		if ((prot & VM_PROT_EXECUTE) == 0)
643 			val |= ATTR_S1_XN;
644 		if ((prot & VM_PROT_WRITE) == 0)
645 			val |= ATTR_S1_AP(ATTR_S1_AP_RO);
646 	} else {
647 		if ((prot & VM_PROT_WRITE) != 0)
648 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
649 		if ((prot & VM_PROT_READ) != 0)
650 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
651 		if ((prot & VM_PROT_EXECUTE) == 0)
652 			val |= ATTR_S2_XN(ATTR_S2_XN_ALL);
653 	}
654 
655 	return (val);
656 }
657 
658 /*
659  * Checks if the PTE is dirty.
660  */
661 static inline int
662 pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
663 {
664 
665 	PMAP_ASSERT_STAGE1(pmap);
666 	KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
667 	KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
668 	    ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
669 
670 	return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
671 	    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
672 }
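
/*
 * Editorial summary (not part of the original source) of the stage 1
 * software dirty-bit states checked above, for a managed mapping:
 *
 *	AP permission	ATTR_SW_DBM	meaning
 *	RW		set		writeable and dirty
 *	RO		set		writeable but currently clean
 *	RO		clear		read-only
 */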
673 
674 static __inline void
675 pmap_resident_count_inc(pmap_t pmap, int count)
676 {
677 
678 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
679 	pmap->pm_stats.resident_count += count;
680 }
681 
682 static __inline void
683 pmap_resident_count_dec(pmap_t pmap, int count)
684 {
685 
686 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
687 	KASSERT(pmap->pm_stats.resident_count >= count,
688 	    ("pmap %p resident count underflow %ld %d", pmap,
689 	    pmap->pm_stats.resident_count, count));
690 	pmap->pm_stats.resident_count -= count;
691 }
692 
693 static pt_entry_t *
694 pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
695     u_int *l2_slot)
696 {
697 	pt_entry_t *l2;
698 	pd_entry_t *l1;
699 
700 	l1 = (pd_entry_t *)l1pt;
701 	*l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
702 
703 	/* Check that locore used an L1 table mapping */
704 	KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
705 	   ("Invalid bootstrap L1 table"));
706 	/* Find the address of the L2 table */
707 	l2 = (pt_entry_t *)init_pt_va;
708 	*l2_slot = pmap_l2_index(va);
709 
710 	return (l2);
711 }
712 
713 static vm_paddr_t
714 pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
715 {
716 	u_int l1_slot, l2_slot;
717 	pt_entry_t *l2;
718 
719 	l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
720 
721 	return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
722 }
723 
724 static vm_offset_t
725 pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
726     vm_offset_t freemempos)
727 {
728 	pt_entry_t *l2;
729 	vm_offset_t va;
730 	vm_paddr_t l2_pa, pa;
731 	u_int l1_slot, l2_slot, prev_l1_slot;
732 	int i;
733 
734 	dmap_phys_base = min_pa & ~L1_OFFSET;
735 	dmap_phys_max = 0;
736 	dmap_max_addr = 0;
737 	l2 = NULL;
738 	prev_l1_slot = -1;
739 
741 	memset(pagetable_dmap, 0, PAGE_SIZE * DMAP_TABLES);
742 
743 	for (i = 0; i < (physmap_idx * 2); i += 2) {
744 		pa = physmap[i] & ~L2_OFFSET;
745 		va = pa - dmap_phys_base + DMAP_MIN_ADDRESS;
746 
747 		/* Create L2 mappings at the start of the region */
748 		if ((pa & L1_OFFSET) != 0) {
749 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
750 			if (l1_slot != prev_l1_slot) {
751 				prev_l1_slot = l1_slot;
752 				l2 = (pt_entry_t *)freemempos;
753 				l2_pa = pmap_early_vtophys(kern_l1,
754 				    (vm_offset_t)l2);
755 				freemempos += PAGE_SIZE;
756 
757 				pmap_store(&pagetable_dmap[l1_slot],
758 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
759 
760 				memset(l2, 0, PAGE_SIZE);
761 			}
762 			KASSERT(l2 != NULL,
763 			    ("pmap_bootstrap_dmap: NULL l2 map"));
764 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
765 			    pa += L2_SIZE, va += L2_SIZE) {
766 				/*
767 				 * We are on a boundary, stop to
768 				 * create a level 1 block
769 				 */
770 				if ((pa & L1_OFFSET) == 0)
771 					break;
772 
773 				l2_slot = pmap_l2_index(va);
774 				KASSERT(l2_slot != 0, ("..."));
775 				pmap_store(&l2[l2_slot],
776 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
777 				    ATTR_S1_XN |
778 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
779 				    L2_BLOCK);
780 			}
781 			KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
782 			    ("..."));
783 		}
784 
785 		for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
786 		    (physmap[i + 1] - pa) >= L1_SIZE;
787 		    pa += L1_SIZE, va += L1_SIZE) {
788 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
789 			pmap_store(&pagetable_dmap[l1_slot],
790 			    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_S1_XN |
791 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
792 		}
793 
794 		/* Create L2 mappings at the end of the region */
795 		if (pa < physmap[i + 1]) {
796 			l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
797 			if (l1_slot != prev_l1_slot) {
798 				prev_l1_slot = l1_slot;
799 				l2 = (pt_entry_t *)freemempos;
800 				l2_pa = pmap_early_vtophys(kern_l1,
801 				    (vm_offset_t)l2);
802 				freemempos += PAGE_SIZE;
803 
804 				pmap_store(&pagetable_dmap[l1_slot],
805 				    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
806 
807 				memset(l2, 0, PAGE_SIZE);
808 			}
809 			KASSERT(l2 != NULL,
810 			    ("pmap_bootstrap_dmap: NULL l2 map"));
811 			for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
812 			    pa += L2_SIZE, va += L2_SIZE) {
813 				l2_slot = pmap_l2_index(va);
814 				pmap_store(&l2[l2_slot],
815 				    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
816 				    ATTR_S1_XN |
817 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
818 				    L2_BLOCK);
819 			}
820 		}
821 
822 		if (pa > dmap_phys_max) {
823 			dmap_phys_max = pa;
824 			dmap_max_addr = va;
825 		}
826 	}
827 
828 	cpu_tlb_flushID();
829 
830 	return (freemempos);
831 }
832 
833 static vm_offset_t
834 pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
835 {
836 	vm_offset_t l2pt;
837 	vm_paddr_t pa;
838 	pd_entry_t *l1;
839 	u_int l1_slot;
840 
841 	KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
842 
843 	l1 = (pd_entry_t *)l1pt;
844 	l1_slot = pmap_l1_index(va);
845 	l2pt = l2_start;
846 
847 	for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
848 		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
849 
850 		pa = pmap_early_vtophys(l1pt, l2pt);
851 		pmap_store(&l1[l1_slot],
852 		    (pa & ~Ln_TABLE_MASK) | L1_TABLE);
853 		l2pt += PAGE_SIZE;
854 	}
855 
856 	/* Clean the L2 page table */
857 	memset((void *)l2_start, 0, l2pt - l2_start);
858 
859 	return (l2pt);
860 }
861 
862 static vm_offset_t
863 pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
864 {
865 	vm_offset_t l3pt;
866 	vm_paddr_t pa;
867 	pd_entry_t *l2;
868 	u_int l2_slot;
869 
870 	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
871 
872 	l2 = pmap_l2(kernel_pmap, va);
873 	l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
874 	l2_slot = pmap_l2_index(va);
875 	l3pt = l3_start;
876 
877 	for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
878 		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
879 
880 		pa = pmap_early_vtophys(l1pt, l3pt);
881 		pmap_store(&l2[l2_slot],
882 		    (pa & ~Ln_TABLE_MASK) | ATTR_S1_UXN | L2_TABLE);
883 		l3pt += PAGE_SIZE;
884 	}
885 
886 	/* Clean the L3 page table */
887 	memset((void *)l3_start, 0, l3pt - l3_start);
888 
889 	return (l3pt);
890 }
891 
892 /*
893  *	Bootstrap the system enough to run with virtual memory.
894  */
895 void
896 pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
897     vm_size_t kernlen)
898 {
899 	vm_offset_t freemempos;
900 	vm_offset_t dpcpu, msgbufpv;
901 	vm_paddr_t start_pa, pa, min_pa;
902 	uint64_t kern_delta;
903 	int i;
904 
905 	/* Verify that the ASID is set through TTBR0. */
906 	KASSERT((READ_SPECIALREG(tcr_el1) & TCR_A1) == 0,
907 	    ("pmap_bootstrap: TCR_EL1.A1 != 0"));
908 
909 	kern_delta = KERNBASE - kernstart;
910 
911 	printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
912 	printf("%lx\n", l1pt);
913 	printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
914 
915 	/* Set this early so we can use the pagetable walking functions */
916 	kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
917 	PMAP_LOCK_INIT(kernel_pmap);
918 	kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
919 	kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
920 	kernel_pmap->pm_stage = PM_STAGE1;
921 	kernel_pmap->pm_asid_set = &asids;
922 
923 	/* Assume the address we were loaded to is a valid physical address */
924 	min_pa = KERNBASE - kern_delta;
925 
926 	physmap_idx = physmem_avail(physmap, nitems(physmap));
927 	physmap_idx /= 2;
928 
929 	/*
930 	 * Find the minimum physical address. physmap is sorted,
931 	 * but may contain empty ranges.
932 	 */
933 	for (i = 0; i < physmap_idx * 2; i += 2) {
934 		if (physmap[i] == physmap[i + 1])
935 			continue;
936 		if (physmap[i] <= min_pa)
937 			min_pa = physmap[i];
938 	}
939 
940 	freemempos = KERNBASE + kernlen;
941 	freemempos = roundup2(freemempos, PAGE_SIZE);
942 
943 	/* Create a direct map region early so we can use it for pa -> va */
944 	freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
945 
946 	start_pa = pa = KERNBASE - kern_delta;
947 
948 	/*
949 	 * Create the l2 tables up to VM_MAX_KERNEL_ADDRESS.  We assume that the
950 	 * loader allocated the first and only l2 page table page used to map
951 	 * the kernel, preloaded files and module metadata.
952 	 */
953 	freemempos = pmap_bootstrap_l2(l1pt, KERNBASE + L1_SIZE, freemempos);
954 	/* And the l3 tables for the early devmap */
955 	freemempos = pmap_bootstrap_l3(l1pt,
956 	    VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
957 
958 	cpu_tlb_flushID();
959 
960 #define alloc_pages(var, np)						\
961 	(var) = freemempos;						\
962 	freemempos += (np * PAGE_SIZE);					\
963 	memset((char *)(var), 0, ((np) * PAGE_SIZE));
964 
965 	/* Allocate dynamic per-cpu area. */
966 	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
967 	dpcpu_init((void *)dpcpu, 0);
968 
969 	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
970 	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
971 	msgbufp = (void *)msgbufpv;
972 
973 	/* Reserve some VA space for early BIOS/ACPI mapping */
974 	preinit_map_va = roundup2(freemempos, L2_SIZE);
975 
976 	virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
977 	virtual_avail = roundup2(virtual_avail, L1_SIZE);
978 	virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
979 	kernel_vm_end = virtual_avail;
980 
981 	pa = pmap_early_vtophys(l1pt, freemempos);
982 
983 	physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
984 
985 	cpu_tlb_flushID();
986 }
987 
988 /*
989  *	Initialize a vm_page's machine-dependent fields.
990  */
991 void
992 pmap_page_init(vm_page_t m)
993 {
994 
995 	TAILQ_INIT(&m->md.pv_list);
996 	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
997 }
998 
999 static void
1000 pmap_init_asids(struct asid_set *set, int bits)
1001 {
1002 	int i;
1003 
1004 	set->asid_bits = bits;
1005 
1006 	/*
1007 	 * We may be too early in the overall initialization process to use
1008 	 * bit_alloc().
1009 	 */
1010 	set->asid_set_size = 1 << set->asid_bits;
1011 	set->asid_set = (bitstr_t *)kmem_malloc(bitstr_size(set->asid_set_size),
1012 	    M_WAITOK | M_ZERO);
1013 	for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
1014 		bit_set(set->asid_set, i);
1015 	set->asid_next = ASID_FIRST_AVAILABLE;
1016 	mtx_init(&set->asid_set_mutex, "asid set", NULL, MTX_SPIN);
1017 }
1018 
1019 /*
1020  *	Initialize the pmap module.
1021  *	Called by vm_init, to initialize any structures that the pmap
1022  *	system needs to map virtual memory.
1023  */
1024 void
1025 pmap_init(void)
1026 {
1027 	vm_size_t s;
1028 	uint64_t mmfr1;
1029 	int i, pv_npg, vmid_bits;
1030 
1031 	/*
1032 	 * Are large page mappings enabled?
1033 	 */
1034 	TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
1035 	if (superpages_enabled) {
1036 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1037 		    ("pmap_init: can't assign to pagesizes[1]"));
1038 		pagesizes[1] = L2_SIZE;
1039 	}
1040 
1041 	/*
1042 	 * Initialize the ASID allocator.
1043 	 */
1044 	pmap_init_asids(&asids,
1045 	    (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8);
1046 
1047 	if (has_hyp()) {
1048 		mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1049 		vmid_bits = 8;
1050 
1051 		if (ID_AA64MMFR1_VMIDBits_VAL(mmfr1) ==
1052 		    ID_AA64MMFR1_VMIDBits_16)
1053 			vmid_bits = 16;
1054 		pmap_init_asids(&vmids, vmid_bits);
1055 	}
1056 
1057 	/*
1058 	 * Initialize the pv chunk list mutex.
1059 	 */
1060 	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
1061 
1062 	/*
1063 	 * Initialize the pool of pv list locks.
1064 	 */
1065 	for (i = 0; i < NPV_LIST_LOCKS; i++)
1066 		rw_init(&pv_list_locks[i], "pmap pv list");
1067 
1068 	/*
1069 	 * Calculate the size of the pv head table for superpages.
1070 	 */
1071 	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
1072 
1073 	/*
1074 	 * Allocate memory for the pv head table for superpages.
1075 	 */
1076 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1077 	s = round_page(s);
1078 	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
1079 	for (i = 0; i < pv_npg; i++)
1080 		TAILQ_INIT(&pv_table[i].pv_list);
1081 	TAILQ_INIT(&pv_dummy.pv_list);
1082 
1083 	vm_initialized = 1;
1084 }
1085 
1086 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1087     "2MB page mapping counters");
1088 
1089 static u_long pmap_l2_demotions;
1090 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
1091     &pmap_l2_demotions, 0, "2MB page demotions");
1092 
1093 static u_long pmap_l2_mappings;
1094 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
1095     &pmap_l2_mappings, 0, "2MB page mappings");
1096 
1097 static u_long pmap_l2_p_failures;
1098 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
1099     &pmap_l2_p_failures, 0, "2MB page promotion failures");
1100 
1101 static u_long pmap_l2_promotions;
1102 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
1103     &pmap_l2_promotions, 0, "2MB page promotions");
1104 
1105 /*
1106  * Invalidate a single TLB entry.
1107  */
1108 static __inline void
1109 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1110 {
1111 	uint64_t r;
1112 
1113 	PMAP_ASSERT_STAGE1(pmap);
1114 
1115 	dsb(ishst);
1116 	if (pmap == kernel_pmap) {
1117 		r = atop(va);
1118 		__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1119 	} else {
1120 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
1121 		__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1122 	}
1123 	dsb(ish);
1124 	isb();
1125 }
1126 
1127 static __inline void
1128 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1129 {
1130 	uint64_t end, r, start;
1131 
1132 	PMAP_ASSERT_STAGE1(pmap);
1133 
1134 	dsb(ishst);
1135 	if (pmap == kernel_pmap) {
1136 		start = atop(sva);
1137 		end = atop(eva);
1138 		for (r = start; r < end; r++)
1139 			__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1140 	} else {
1141 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1142 		start |= atop(sva);
1143 		end |= atop(eva);
1144 		for (r = start; r < end; r++)
1145 			__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1146 	}
1147 	dsb(ish);
1148 	isb();
1149 }
1150 
1151 static __inline void
1152 pmap_invalidate_all(pmap_t pmap)
1153 {
1154 	uint64_t r;
1155 
1156 	PMAP_ASSERT_STAGE1(pmap);
1157 
1158 	dsb(ishst);
1159 	if (pmap == kernel_pmap) {
1160 		__asm __volatile("tlbi vmalle1is");
1161 	} else {
1162 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1163 		__asm __volatile("tlbi aside1is, %0" : : "r" (r));
1164 	}
1165 	dsb(ish);
1166 	isb();
1167 }
1168 
1169 /*
1170  *	Routine:	pmap_extract
1171  *	Function:
1172  *		Extract the physical page address associated
1173  *		with the given map/virtual_address pair.
1174  */
1175 vm_paddr_t
1176 pmap_extract(pmap_t pmap, vm_offset_t va)
1177 {
1178 	pt_entry_t *pte, tpte;
1179 	vm_paddr_t pa;
1180 	int lvl;
1181 
1182 	pa = 0;
1183 	PMAP_LOCK(pmap);
1184 	/*
1185 	 * Find the block or page map for this virtual address. pmap_pte
1186 	 * will return either a valid block/page entry, or NULL.
1187 	 */
1188 	pte = pmap_pte(pmap, va, &lvl);
1189 	if (pte != NULL) {
1190 		tpte = pmap_load(pte);
1191 		pa = tpte & ~ATTR_MASK;
1192 		switch(lvl) {
1193 		case 1:
1194 			KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1195 			    ("pmap_extract: Invalid L1 pte found: %lx",
1196 			    tpte & ATTR_DESCR_MASK));
1197 			pa |= (va & L1_OFFSET);
1198 			break;
1199 		case 2:
1200 			KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1201 			    ("pmap_extract: Invalid L2 pte found: %lx",
1202 			    tpte & ATTR_DESCR_MASK));
1203 			pa |= (va & L2_OFFSET);
1204 			break;
1205 		case 3:
1206 			KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1207 			    ("pmap_extract: Invalid L3 pte found: %lx",
1208 			    tpte & ATTR_DESCR_MASK));
1209 			pa |= (va & L3_OFFSET);
1210 			break;
1211 		}
1212 	}
1213 	PMAP_UNLOCK(pmap);
1214 	return (pa);
1215 }
1216 
1217 /*
1218  *	Routine:	pmap_extract_and_hold
1219  *	Function:
1220  *		Atomically extract and hold the physical page
1221  *		with the given pmap and virtual address pair
1222  *		if that mapping permits the given protection.
1223  */
1224 vm_page_t
1225 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1226 {
1227 	pt_entry_t *pte, tpte;
1228 	vm_offset_t off;
1229 	vm_page_t m;
1230 	int lvl;
1231 	bool use;
1232 
1233 	m = NULL;
1234 	PMAP_LOCK(pmap);
1235 	pte = pmap_pte(pmap, va, &lvl);
1236 	if (pte != NULL) {
1237 		tpte = pmap_load(pte);
1238 
1239 		KASSERT(lvl > 0 && lvl <= 3,
1240 		    ("pmap_extract_and_hold: Invalid level %d", lvl));
1241 		CTASSERT(L1_BLOCK == L2_BLOCK);
1242 		KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1243 		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1244 		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1245 		     tpte & ATTR_DESCR_MASK));
1246 
1247 		use = false;
1248 		if ((prot & VM_PROT_WRITE) == 0)
1249 			use = true;
1250 		else if (pmap->pm_stage == PM_STAGE1 &&
1251 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))
1252 			use = true;
1253 		else if (pmap->pm_stage == PM_STAGE2 &&
1254 		    ((tpte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
1255 		     ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)))
1256 			use = true;
1257 
1258 		if (use) {
1259 			switch(lvl) {
1260 			case 1:
1261 				off = va & L1_OFFSET;
1262 				break;
1263 			case 2:
1264 				off = va & L2_OFFSET;
1265 				break;
1266 			case 3:
1267 			default:
1268 				off = 0;
1269 			}
1270 			m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
1271 			if (!vm_page_wire_mapped(m))
1272 				m = NULL;
1273 		}
1274 	}
1275 	PMAP_UNLOCK(pmap);
1276 	return (m);
1277 }
1278 
1279 vm_paddr_t
1280 pmap_kextract(vm_offset_t va)
1281 {
1282 	pt_entry_t *pte, tpte;
1283 
1284 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
1285 		return (DMAP_TO_PHYS(va));
1286 	pte = pmap_l1(kernel_pmap, va);
1287 	if (pte == NULL)
1288 		return (0);
1289 
1290 	/*
1291 	 * A concurrent pmap_update_entry() will clear the entry's valid bit
1292 	 * but leave the rest of the entry unchanged.  Therefore, we treat a
1293 	 * non-zero entry as being valid, and we ignore the valid bit when
1294 	 * determining whether the entry maps a block, page, or table.
1295 	 */
1296 	tpte = pmap_load(pte);
1297 	if (tpte == 0)
1298 		return (0);
1299 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
1300 		return ((tpte & ~ATTR_MASK) | (va & L1_OFFSET));
1301 	pte = pmap_l1_to_l2(&tpte, va);
1302 	tpte = pmap_load(pte);
1303 	if (tpte == 0)
1304 		return (0);
1305 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
1306 		return ((tpte & ~ATTR_MASK) | (va & L2_OFFSET));
1307 	pte = pmap_l2_to_l3(&tpte, va);
1308 	tpte = pmap_load(pte);
1309 	if (tpte == 0)
1310 		return (0);
1311 	return ((tpte & ~ATTR_MASK) | (va & L3_OFFSET));
1312 }
1313 
1314 /***************************************************
1315  * Low level mapping routines.....
1316  ***************************************************/
1317 
1318 void
1319 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
1320 {
1321 	pd_entry_t *pde;
1322 	pt_entry_t *pte, attr;
1323 	vm_offset_t va;
1324 	int lvl;
1325 
1326 	KASSERT((pa & L3_OFFSET) == 0,
1327 	   ("pmap_kenter: Invalid physical address"));
1328 	KASSERT((sva & L3_OFFSET) == 0,
1329 	   ("pmap_kenter: Invalid virtual address"));
1330 	KASSERT((size & PAGE_MASK) == 0,
1331 	    ("pmap_kenter: Mapping is not page-sized"));
1332 
1333 	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
1334 	    ATTR_S1_IDX(mode) | L3_PAGE;
1335 	va = sva;
1336 	while (size != 0) {
1337 		pde = pmap_pde(kernel_pmap, va, &lvl);
1338 		KASSERT(pde != NULL,
1339 		    ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
1340 		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
1341 
1342 		pte = pmap_l2_to_l3(pde, va);
1343 		pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
1344 
1345 		va += PAGE_SIZE;
1346 		pa += PAGE_SIZE;
1347 		size -= PAGE_SIZE;
1348 	}
1349 	pmap_invalidate_range(kernel_pmap, sva, va);
1350 }
1351 
1352 void
1353 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
1354 {
1355 
1356 	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
1357 }
1358 
1359 /*
1360  * Remove a page from the kernel pagetables.
1361  */
1362 PMAP_INLINE void
1363 pmap_kremove(vm_offset_t va)
1364 {
1365 	pt_entry_t *pte;
1366 	int lvl;
1367 
1368 	pte = pmap_pte(kernel_pmap, va, &lvl);
1369 	KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
1370 	KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
1371 
1372 	pmap_clear(pte);
1373 	pmap_invalidate_page(kernel_pmap, va);
1374 }
1375 
1376 void
1377 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
1378 {
1379 	pt_entry_t *pte;
1380 	vm_offset_t va;
1381 	int lvl;
1382 
1383 	KASSERT((sva & L3_OFFSET) == 0,
1384 	   ("pmap_kremove_device: Invalid virtual address"));
1385 	KASSERT((size & PAGE_MASK) == 0,
1386 	    ("pmap_kremove_device: Mapping is not page-sized"));
1387 
1388 	va = sva;
1389 	while (size != 0) {
1390 		pte = pmap_pte(kernel_pmap, va, &lvl);
1391 		KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
1392 		KASSERT(lvl == 3,
1393 		    ("Invalid device pagetable level: %d != 3", lvl));
1394 		pmap_clear(pte);
1395 
1396 		va += PAGE_SIZE;
1397 		size -= PAGE_SIZE;
1398 	}
1399 	pmap_invalidate_range(kernel_pmap, sva, va);
1400 }
1401 
1402 /*
1403  *	Used to map a range of physical addresses into kernel
1404  *	virtual address space.
1405  *
1406  *	The value passed in '*virt' is a suggested virtual address for
1407  *	the mapping. Architectures which can support a direct-mapped
1408  *	physical to virtual region can return the appropriate address
1409  *	within that region, leaving '*virt' unchanged. Other
1410  *	architectures should map the pages starting at '*virt' and
1411  *	update '*virt' with the first usable address after the mapped
1412  *	region.
1413  */
1414 vm_offset_t
1415 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1416 {
1417 	return (PHYS_TO_DMAP(start));
1418 }
1419 
1420 
1421 /*
1422  * Add a list of wired pages to the kva.
1423  * This routine is only used for temporary
1424  * kernel mappings that do not need to have
1425  * page modification or references recorded.
1426  * Note that old mappings are simply written
1427  * over.  The page *must* be wired.
1428  * Note: SMP coherent.  Uses a ranged TLB invalidation.
1429  */
1430 void
1431 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1432 {
1433 	pd_entry_t *pde;
1434 	pt_entry_t *pte, pa;
1435 	vm_offset_t va;
1436 	vm_page_t m;
1437 	int i, lvl;
1438 
1439 	va = sva;
1440 	for (i = 0; i < count; i++) {
1441 		pde = pmap_pde(kernel_pmap, va, &lvl);
1442 		KASSERT(pde != NULL,
1443 		    ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
1444 		KASSERT(lvl == 2,
1445 		    ("pmap_qenter: Invalid level %d", lvl));
1446 
1447 		m = ma[i];
1448 		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
1449 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
1450 		    ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
1451 		pte = pmap_l2_to_l3(pde, va);
1452 		pmap_load_store(pte, pa);
1453 
1454 		va += L3_SIZE;
1455 	}
1456 	pmap_invalidate_range(kernel_pmap, sva, va);
1457 }
1458 
1459 /*
1460  * This routine tears out page mappings from the
1461  * kernel -- it is meant only for temporary mappings.
1462  */
1463 void
1464 pmap_qremove(vm_offset_t sva, int count)
1465 {
1466 	pt_entry_t *pte;
1467 	vm_offset_t va;
1468 	int lvl;
1469 
1470 	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
1471 
1472 	va = sva;
1473 	while (count-- > 0) {
1474 		pte = pmap_pte(kernel_pmap, va, &lvl);
1475 		KASSERT(lvl == 3,
1476 		    ("pmap_qremove: Invalid pte level: %d != 3", lvl));
1477 		if (pte != NULL) {
1478 			pmap_clear(pte);
1479 		}
1480 
1481 		va += PAGE_SIZE;
1482 	}
1483 	pmap_invalidate_range(kernel_pmap, sva, va);
1484 }
1485 
1486 /***************************************************
1487  * Page table page management routines.....
1488  ***************************************************/
1489 /*
1490  * Schedule the specified unused page table page to be freed.  Specifically,
1491  * add the page to the specified list of pages that will be released to the
1492  * physical memory manager after the TLB has been updated.
1493  */
1494 static __inline void
1495 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1496     boolean_t set_PG_ZERO)
1497 {
1498 
1499 	if (set_PG_ZERO)
1500 		m->flags |= PG_ZERO;
1501 	else
1502 		m->flags &= ~PG_ZERO;
1503 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1504 }
1505 
1506 /*
1507  * Decrements a page table page's reference count, which is used to record the
1508  * number of valid page table entries within the page.  If the reference count
1509  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1510  * page table page was unmapped and FALSE otherwise.
1511  */
1512 static inline boolean_t
1513 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1514 {
1515 
1516 	--m->ref_count;
1517 	if (m->ref_count == 0) {
1518 		_pmap_unwire_l3(pmap, va, m, free);
1519 		return (TRUE);
1520 	} else
1521 		return (FALSE);
1522 }
1523 
1524 static void
1525 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1526 {
1527 
1528 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1529 	/*
1530 	 * unmap the page table page
1531 	 */
1532 	if (m->pindex >= (NUL2E + NUL1E)) {
1533 		/* l1 page */
1534 		pd_entry_t *l0;
1535 
1536 		l0 = pmap_l0(pmap, va);
1537 		pmap_clear(l0);
1538 	} else if (m->pindex >= NUL2E) {
1539 		/* l2 page */
1540 		pd_entry_t *l1;
1541 
1542 		l1 = pmap_l1(pmap, va);
1543 		pmap_clear(l1);
1544 	} else {
1545 		/* l3 page */
1546 		pd_entry_t *l2;
1547 
1548 		l2 = pmap_l2(pmap, va);
1549 		pmap_clear(l2);
1550 	}
1551 	pmap_resident_count_dec(pmap, 1);
1552 	if (m->pindex < NUL2E) {
1553 		/* We just released an l3, unhold the matching l2 */
1554 		pd_entry_t *l1, tl1;
1555 		vm_page_t l2pg;
1556 
1557 		l1 = pmap_l1(pmap, va);
1558 		tl1 = pmap_load(l1);
1559 		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1560 		pmap_unwire_l3(pmap, va, l2pg, free);
1561 	} else if (m->pindex < (NUL2E + NUL1E)) {
1562 		/* We just released an l2, unhold the matching l1 */
1563 		pd_entry_t *l0, tl0;
1564 		vm_page_t l1pg;
1565 
1566 		l0 = pmap_l0(pmap, va);
1567 		tl0 = pmap_load(l0);
1568 		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1569 		pmap_unwire_l3(pmap, va, l1pg, free);
1570 	}
1571 	pmap_invalidate_page(pmap, va);
1572 
1573 	/*
1574 	 * Put page on a list so that it is released after
1575 	 * *ALL* TLB shootdown is done
1576 	 */
1577 	pmap_add_delayed_free_list(m, free, TRUE);
1578 }
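
/*
 * Editorial summary (not part of the original source) of the page table
 * page index ranges used above and by _pmap_alloc_l3():
 *
 *	[0, NUL2E)			pages holding L3 entries
 *	[NUL2E, NUL2E + NUL1E)		pages holding L2 entries
 *	[NUL2E + NUL1E, ...)		pages holding L1 entries
 */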
1579 
1580 /*
1581  * After removing a page table entry, this routine is used to
1582  * conditionally free the page, and manage the reference count.
1583  */
1584 static int
1585 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1586     struct spglist *free)
1587 {
1588 	vm_page_t mpte;
1589 
1590 	if (va >= VM_MAXUSER_ADDRESS)
1591 		return (0);
1592 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1593 	mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
1594 	return (pmap_unwire_l3(pmap, va, mpte, free));
1595 }
1596 
1597 /*
1598  * Release a page table page reference after a failed attempt to create a
1599  * mapping.
1600  */
1601 static void
1602 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
1603 {
1604 	struct spglist free;
1605 
1606 	SLIST_INIT(&free);
1607 	if (pmap_unwire_l3(pmap, va, mpte, &free)) {
1608 		/*
1609 		 * Although "va" was never mapped, the TLB could nonetheless
1610 		 * have intermediate entries that refer to the freed page
1611 		 * table pages.  Invalidate those entries.
1612 		 *
1613 		 * XXX redundant invalidation (See _pmap_unwire_l3().)
1614 		 */
1615 		pmap_invalidate_page(pmap, va);
1616 		vm_page_free_pages_toq(&free, true);
1617 	}
1618 }
1619 
1620 void
1621 pmap_pinit0(pmap_t pmap)
1622 {
1623 
1624 	PMAP_LOCK_INIT(pmap);
1625 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1626 	pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
1627 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1628 	pmap->pm_root.rt_root = 0;
1629 	pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
1630 	pmap->pm_stage = PM_STAGE1;
1631 	pmap->pm_asid_set = &asids;
1632 
1633 	PCPU_SET(curpmap, pmap);
1634 }
1635 
1636 int
1637 pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage)
1638 {
1639 	vm_page_t l0pt;
1640 
1641 	/*
1642 	 * allocate the l0 page
1643 	 */
1644 	while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
1645 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1646 		vm_wait(NULL);
1647 
1648 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(l0pt);
1649 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1650 
1651 	if ((l0pt->flags & PG_ZERO) == 0)
1652 		pagezero(pmap->pm_l0);
1653 
1654 	pmap->pm_root.rt_root = 0;
1655 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1656 	pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
1657 
1658 	pmap->pm_stage = stage;
1659 	switch (stage) {
1660 	case PM_STAGE1:
1661 		pmap->pm_asid_set = &asids;
1662 		break;
1663 	case PM_STAGE2:
1664 		pmap->pm_asid_set = &vmids;
1665 		break;
1666 	default:
1667 		panic("%s: Invalid pmap type %d", __func__, stage);
1668 		break;
1669 	}
1670 
1671 	/* XXX Temporarily disable deferred ASID allocation. */
1672 	pmap_alloc_asid(pmap);
1673 
1674 	return (1);
1675 }
1676 
1677 int
1678 pmap_pinit(pmap_t pmap)
1679 {
1680 
1681 	return (pmap_pinit_stage(pmap, PM_STAGE1));
1682 }
1683 
1684 /*
1685  * This routine is called if the desired page table page does not exist.
1686  *
1687  * If page table page allocation fails, this routine may sleep before
1688  * returning NULL.  It sleeps only if a lock pointer was given.
1689  *
1690  * Note: If a page allocation fails at page table level two or three,
1691  * one or two pages may be held during the wait, only to be released
1692  * afterwards.  This conservative approach makes it easy to argue that
1693  * race conditions are avoided.
1694  */
1695 static vm_page_t
1696 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1697 {
1698 	vm_page_t m, l1pg, l2pg;
1699 
1700 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1701 
1702 	/*
1703 	 * Allocate a page table page.
1704 	 */
1705 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1706 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1707 		if (lockp != NULL) {
1708 			RELEASE_PV_LIST_LOCK(lockp);
1709 			PMAP_UNLOCK(pmap);
1710 			vm_wait(NULL);
1711 			PMAP_LOCK(pmap);
1712 		}
1713 
1714 		/*
1715 		 * Indicate the need to retry.  While waiting, the page table
1716 		 * page may have been allocated.
1717 		 */
1718 		return (NULL);
1719 	}
1720 	if ((m->flags & PG_ZERO) == 0)
1721 		pmap_zero_page(m);
1722 
1723 	/*
1724 	 * Because of AArch64's weak memory consistency model, we must have a
1725 	 * barrier here to ensure that the stores for zeroing "m", whether by
1726 	 * pmap_zero_page() or an earlier function, are visible before adding
1727 	 * "m" to the page table.  Otherwise, a page table walk by another
1728 	 * processor's MMU could see the mapping to "m" and a stale, non-zero
1729 	 * PTE within "m".
1730 	 */
1731 	dmb(ishst);
1732 
1733 	/*
1734 	 * Map the pagetable page into the process address space, if
1735 	 * it isn't already there.
1736 	 */
1737 
1738 	if (ptepindex >= (NUL2E + NUL1E)) {
1739 		pd_entry_t *l0;
1740 		vm_pindex_t l0index;
1741 
1742 		l0index = ptepindex - (NUL2E + NUL1E);
1743 		l0 = &pmap->pm_l0[l0index];
1744 		pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1745 	} else if (ptepindex >= NUL2E) {
1746 		vm_pindex_t l0index, l1index;
1747 		pd_entry_t *l0, *l1;
1748 		pd_entry_t tl0;
1749 
1750 		l1index = ptepindex - NUL2E;
1751 		l0index = l1index >> L0_ENTRIES_SHIFT;
1752 
1753 		l0 = &pmap->pm_l0[l0index];
1754 		tl0 = pmap_load(l0);
1755 		if (tl0 == 0) {
1756 			/* recurse for allocating page dir */
1757 			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1758 			    lockp) == NULL) {
1759 				vm_page_unwire_noq(m);
1760 				vm_page_free_zero(m);
1761 				return (NULL);
1762 			}
1763 		} else {
1764 			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1765 			l1pg->ref_count++;
1766 		}
1767 
1768 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1769 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
1770 		pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1771 	} else {
1772 		vm_pindex_t l0index, l1index;
1773 		pd_entry_t *l0, *l1, *l2;
1774 		pd_entry_t tl0, tl1;
1775 
1776 		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1777 		l0index = l1index >> L0_ENTRIES_SHIFT;
1778 
1779 		l0 = &pmap->pm_l0[l0index];
1780 		tl0 = pmap_load(l0);
1781 		if (tl0 == 0) {
1782 			/* recurse for allocating page dir */
1783 			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1784 			    lockp) == NULL) {
1785 				vm_page_unwire_noq(m);
1786 				vm_page_free_zero(m);
1787 				return (NULL);
1788 			}
1789 			tl0 = pmap_load(l0);
1790 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1791 			l1 = &l1[l1index & Ln_ADDR_MASK];
1792 		} else {
1793 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1794 			l1 = &l1[l1index & Ln_ADDR_MASK];
1795 			tl1 = pmap_load(l1);
1796 			if (tl1 == 0) {
1797 				/* recurse for allocating page dir */
1798 				if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1799 				    lockp) == NULL) {
1800 					vm_page_unwire_noq(m);
1801 					vm_page_free_zero(m);
1802 					return (NULL);
1803 				}
1804 			} else {
1805 				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1806 				l2pg->ref_count++;
1807 			}
1808 		}
1809 
1810 		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1811 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
1812 		pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1813 	}
1814 
1815 	pmap_resident_count_inc(pmap, 1);
1816 
1817 	return (m);
1818 }
1819 
1820 static pd_entry_t *
1821 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
1822     struct rwlock **lockp)
1823 {
1824 	pd_entry_t *l1, *l2;
1825 	vm_page_t l2pg;
1826 	vm_pindex_t l2pindex;
1827 
1828 retry:
1829 	l1 = pmap_l1(pmap, va);
1830 	if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
1831 		l2 = pmap_l1_to_l2(l1, va);
1832 		if (va < VM_MAXUSER_ADDRESS) {
1833 			/* Add a reference to the L2 page. */
1834 			l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
1835 			l2pg->ref_count++;
1836 		} else
1837 			l2pg = NULL;
1838 	} else if (va < VM_MAXUSER_ADDRESS) {
1839 		/* Allocate an L2 page. */
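		/*
		 * pmap_l2_pindex(va) is the index of the L3 page table page
		 * covering "va"; shifting it by Ln_ENTRIES_SHIFT yields the
		 * index of the corresponding L2 page table page, which is
		 * offset by NUL2E in the pindex space used by
		 * _pmap_alloc_l3().
		 */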
1840 		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
1841 		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
1842 		if (l2pg == NULL) {
1843 			if (lockp != NULL)
1844 				goto retry;
1845 			else
1846 				return (NULL);
1847 		}
1848 		l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
1849 		l2 = &l2[pmap_l2_index(va)];
1850 	} else
1851 		panic("pmap_alloc_l2: missing page table page for va %#lx",
1852 		    va);
1853 	*l2pgp = l2pg;
1854 	return (l2);
1855 }
1856 
1857 static vm_page_t
1858 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1859 {
1860 	vm_pindex_t ptepindex;
1861 	pd_entry_t *pde, tpde;
1862 #ifdef INVARIANTS
1863 	pt_entry_t *pte;
1864 #endif
1865 	vm_page_t m;
1866 	int lvl;
1867 
1868 	/*
1869 	 * Calculate pagetable page index
1870 	 */
1871 	ptepindex = pmap_l2_pindex(va);
1872 retry:
1873 	/*
1874 	 * Get the page directory entry
1875 	 */
1876 	pde = pmap_pde(pmap, va, &lvl);
1877 
1878 	/*
1879 	 * If the page table page is mapped, we just increment the hold count,
1880 	 * and activate it. If we get a level 2 pde it will point to a level 3
1881 	 * table.
1882 	 */
1883 	switch (lvl) {
1884 	case -1:
1885 		break;
1886 	case 0:
1887 #ifdef INVARIANTS
1888 		pte = pmap_l0_to_l1(pde, va);
1889 		KASSERT(pmap_load(pte) == 0,
1890 		    ("pmap_alloc_l3: TODO: l0 superpages"));
1891 #endif
1892 		break;
1893 	case 1:
1894 #ifdef INVARIANTS
1895 		pte = pmap_l1_to_l2(pde, va);
1896 		KASSERT(pmap_load(pte) == 0,
1897 		    ("pmap_alloc_l3: TODO: l1 superpages"));
1898 #endif
1899 		break;
1900 	case 2:
1901 		tpde = pmap_load(pde);
1902 		if (tpde != 0) {
1903 			m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1904 			m->ref_count++;
1905 			return (m);
1906 		}
1907 		break;
1908 	default:
1909 		panic("pmap_alloc_l3: Invalid level %d", lvl);
1910 	}
1911 
1912 	/*
1913 	 * Here if the pte page isn't mapped, or if it has been deallocated.
1914 	 */
1915 	m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1916 	if (m == NULL && lockp != NULL)
1917 		goto retry;
1918 
1919 	return (m);
1920 }
1921 
1922 /***************************************************
1923  * Pmap allocation/deallocation routines.
1924  ***************************************************/
1925 
1926 /*
1927  * Release any resources held by the given physical map.
1928  * Called when a pmap initialized by pmap_pinit is being released.
1929  * Should only be called if the map contains no valid mappings.
1930  */
1931 void
1932 pmap_release(pmap_t pmap)
1933 {
1934 	struct asid_set *set;
1935 	vm_page_t m;
1936 	int asid;
1937 
1938 	KASSERT(pmap->pm_stats.resident_count == 0,
1939 	    ("pmap_release: pmap resident count %ld != 0",
1940 	    pmap->pm_stats.resident_count));
1941 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
1942 	    ("pmap_release: pmap has reserved page table page(s)"));
1943 	PMAP_ASSERT_STAGE1(pmap);
1944 
1945 	set = pmap->pm_asid_set;
1946 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
1947 
1948 	mtx_lock_spin(&set->asid_set_mutex);
1949 	if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) {
1950 		asid = COOKIE_TO_ASID(pmap->pm_cookie);
1951 		KASSERT(asid >= ASID_FIRST_AVAILABLE &&
1952 		    asid < set->asid_set_size,
1953 		    ("pmap_release: pmap cookie has out-of-range asid"));
1954 		bit_clear(set->asid_set, asid);
1955 	}
1956 	mtx_unlock_spin(&set->asid_set_mutex);
1957 
1958 	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
1959 	vm_page_unwire_noq(m);
1960 	vm_page_free_zero(m);
1961 }
1962 
1963 static int
1964 kvm_size(SYSCTL_HANDLER_ARGS)
1965 {
1966 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1967 
1968 	return sysctl_handle_long(oidp, &ksize, 0, req);
1969 }
1970 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
1971     0, 0, kvm_size, "LU",
1972     "Size of KVM");
1973 
1974 static int
1975 kvm_free(SYSCTL_HANDLER_ARGS)
1976 {
1977 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1978 
1979 	return sysctl_handle_long(oidp, &kfree, 0, req);
1980 }
1981 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
1982     0, 0, kvm_free, "LU",
1983     "Amount of KVM free");
1984 
1985 /*
1986  * grow the number of kernel page table entries, if needed
1987  */
1988 void
1989 pmap_growkernel(vm_offset_t addr)
1990 {
1991 	vm_paddr_t paddr;
1992 	vm_page_t nkpg;
1993 	pd_entry_t *l0, *l1, *l2;
1994 
1995 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1996 
1997 	addr = roundup2(addr, L2_SIZE);
1998 	if (addr - 1 >= vm_map_max(kernel_map))
1999 		addr = vm_map_max(kernel_map);
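	/*
	 * The kernel map grows in L2_SIZE (2MB) steps: each iteration
	 * installs one L3 page table page, first allocating an L2 page
	 * table page whenever the covering L1 entry is still empty.
	 */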
2000 	while (kernel_vm_end < addr) {
2001 		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
2002 		KASSERT(pmap_load(l0) != 0,
2003 		    ("pmap_growkernel: No level 0 kernel entry"));
2004 
2005 		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
2006 		if (pmap_load(l1) == 0) {
2007 			/* The L1 entry is empty; allocate a new L2 page table page. */
2008 			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
2009 			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
2010 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2011 			if (nkpg == NULL)
2012 				panic("pmap_growkernel: no memory to grow kernel");
2013 			if ((nkpg->flags & PG_ZERO) == 0)
2014 				pmap_zero_page(nkpg);
2015 			/* See the dmb() in _pmap_alloc_l3(). */
2016 			dmb(ishst);
2017 			paddr = VM_PAGE_TO_PHYS(nkpg);
2018 			pmap_store(l1, paddr | L1_TABLE);
2019 			continue; /* try again */
2020 		}
2021 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
2022 		if (pmap_load(l2) != 0) {
2023 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
2024 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2025 				kernel_vm_end = vm_map_max(kernel_map);
2026 				break;
2027 			}
2028 			continue;
2029 		}
2030 
2031 		nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
2032 		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2033 		    VM_ALLOC_ZERO);
2034 		if (nkpg == NULL)
2035 			panic("pmap_growkernel: no memory to grow kernel");
2036 		if ((nkpg->flags & PG_ZERO) == 0)
2037 			pmap_zero_page(nkpg);
2038 		/* See the dmb() in _pmap_alloc_l3(). */
2039 		dmb(ishst);
2040 		paddr = VM_PAGE_TO_PHYS(nkpg);
2041 		pmap_store(l2, paddr | L2_TABLE);
2042 
2043 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
2044 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2045 			kernel_vm_end = vm_map_max(kernel_map);
2046 			break;
2047 		}
2048 	}
2049 }
2050 
2051 
2052 /***************************************************
2053  * page management routines.
2054  ***************************************************/
2055 
2056 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
2057 CTASSERT(_NPCM == 3);
2058 CTASSERT(_NPCPV == 168);
2059 
2060 static __inline struct pv_chunk *
2061 pv_to_chunk(pv_entry_t pv)
2062 {
2063 
2064 	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
2065 }
2066 
2067 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
2068 
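/*
 * Each pv_chunk provides _NPCPV (168) pv entries, tracked by a three-word
 * free bitmap: 64 + 64 + 40 bits, which is why PC_FREE2 has only its low
 * 40 bits set.  A set bit marks the corresponding pv entry as free.
 */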
2069 #define	PC_FREE0	0xfffffffffffffffful
2070 #define	PC_FREE1	0xfffffffffffffffful
2071 #define	PC_FREE2	0x000000fffffffffful
2072 
2073 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
2074 
2075 #if 0
2076 #ifdef PV_STATS
2077 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2078 
2079 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2080 	"Current number of pv entry chunks");
2081 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2082 	"Current number of pv entry chunks allocated");
2083 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
2084 	"Current number of pv entry chunks frees");
2085 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
2086 	"Number of times tried to get a chunk page but failed.");
2087 
2088 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
2089 static int pv_entry_spare;
2090 
2091 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
2092 	"Current number of pv entry frees");
2093 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
2094 	"Current number of pv entry allocs");
2095 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2096 	"Current number of pv entries");
2097 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2098 	"Current number of spare pv entries");
2099 #endif
2100 #endif /* 0 */
2101 
2102 /*
2103  * We are in a serious low memory condition.  Resort to
2104  * drastic measures to free some pages so we can allocate
2105  * another pv entry chunk.
2106  *
2107  * Returns NULL if PV entries were reclaimed from the specified pmap.
2108  *
2109  * We do not, however, unmap 2mpages because subsequent accesses will
2110  * allocate per-page pv entries until repromotion occurs, thereby
2111  * exacerbating the shortage of free pv entries.
2112  */
2113 static vm_page_t
2114 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
2115 {
2116 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
2117 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
2118 	struct md_page *pvh;
2119 	pd_entry_t *pde;
2120 	pmap_t next_pmap, pmap;
2121 	pt_entry_t *pte, tpte;
2122 	pv_entry_t pv;
2123 	vm_offset_t va;
2124 	vm_page_t m, m_pc;
2125 	struct spglist free;
2126 	uint64_t inuse;
2127 	int bit, field, freed, lvl;
2128 	static int active_reclaims = 0;
2129 
2130 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2131 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
2132 
2133 	pmap = NULL;
2134 	m_pc = NULL;
2135 	SLIST_INIT(&free);
2136 	bzero(&pc_marker_b, sizeof(pc_marker_b));
2137 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
2138 	pc_marker = (struct pv_chunk *)&pc_marker_b;
2139 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
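	/*
	 * The two markers bracket the portion of pc_lru that this call still
	 * has to scan; pc_marker is advanced past each chunk as it is
	 * processed.  Markers are recognized by their NULL pc_pmap, so
	 * concurrent reclaims simply skip over them.
	 */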
2140 
2141 	mtx_lock(&pv_chunks_mutex);
2142 	active_reclaims++;
2143 	TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
2144 	TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
2145 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
2146 	    SLIST_EMPTY(&free)) {
2147 		next_pmap = pc->pc_pmap;
2148 		if (next_pmap == NULL) {
2149 			/*
2150 			 * The next chunk is a marker.  However, it is
2151 			 * not our marker, so active_reclaims must be
2152 			 * > 1.  Consequently, the next_chunk code
2153 			 * will not rotate the pv_chunks list.
2154 			 */
2155 			goto next_chunk;
2156 		}
2157 		mtx_unlock(&pv_chunks_mutex);
2158 
2159 		/*
2160 		 * A pv_chunk can only be removed from the pc_lru list
2161 		 * when both pv_chunks_mutex is owned and the
2162 		 * corresponding pmap is locked.
2163 		 */
2164 		if (pmap != next_pmap) {
2165 			if (pmap != NULL && pmap != locked_pmap)
2166 				PMAP_UNLOCK(pmap);
2167 			pmap = next_pmap;
2168 			/* Avoid deadlock and lock recursion. */
2169 			if (pmap > locked_pmap) {
2170 				RELEASE_PV_LIST_LOCK(lockp);
2171 				PMAP_LOCK(pmap);
2172 				mtx_lock(&pv_chunks_mutex);
2173 				continue;
2174 			} else if (pmap != locked_pmap) {
2175 				if (PMAP_TRYLOCK(pmap)) {
2176 					mtx_lock(&pv_chunks_mutex);
2177 					continue;
2178 				} else {
2179 					pmap = NULL; /* pmap is not locked */
2180 					mtx_lock(&pv_chunks_mutex);
2181 					pc = TAILQ_NEXT(pc_marker, pc_lru);
2182 					if (pc == NULL ||
2183 					    pc->pc_pmap != next_pmap)
2184 						continue;
2185 					goto next_chunk;
2186 				}
2187 			}
2188 		}
2189 
2190 		/*
2191 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
2192 		 */
2193 		freed = 0;
2194 		for (field = 0; field < _NPCM; field++) {
2195 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2196 			    inuse != 0; inuse &= ~(1UL << bit)) {
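				/*
				 * "inuse" has a bit set for every allocated
				 * pv entry in this word of the free map; each
				 * iteration clears the bit it just processed.
				 */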
2197 				bit = ffsl(inuse) - 1;
2198 				pv = &pc->pc_pventry[field * 64 + bit];
2199 				va = pv->pv_va;
2200 				pde = pmap_pde(pmap, va, &lvl);
2201 				if (lvl != 2)
2202 					continue;
2203 				pte = pmap_l2_to_l3(pde, va);
2204 				tpte = pmap_load(pte);
2205 				if ((tpte & ATTR_SW_WIRED) != 0)
2206 					continue;
2207 				tpte = pmap_load_clear(pte);
2208 				m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
2209 				if (pmap_pte_dirty(pmap, tpte))
2210 					vm_page_dirty(m);
2211 				if ((tpte & ATTR_AF) != 0) {
2212 					pmap_invalidate_page(pmap, va);
2213 					vm_page_aflag_set(m, PGA_REFERENCED);
2214 				}
2215 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2216 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2217 				m->md.pv_gen++;
2218 				if (TAILQ_EMPTY(&m->md.pv_list) &&
2219 				    (m->flags & PG_FICTITIOUS) == 0) {
2220 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2221 					if (TAILQ_EMPTY(&pvh->pv_list)) {
2222 						vm_page_aflag_clear(m,
2223 						    PGA_WRITEABLE);
2224 					}
2225 				}
2226 				pc->pc_map[field] |= 1UL << bit;
2227 				pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
2228 				freed++;
2229 			}
2230 		}
2231 		if (freed == 0) {
2232 			mtx_lock(&pv_chunks_mutex);
2233 			goto next_chunk;
2234 		}
2235 		/* Every freed mapping is for a 4 KB page. */
2236 		pmap_resident_count_dec(pmap, freed);
2237 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
2238 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
2239 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
2240 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2241 		if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
2242 		    pc->pc_map[2] == PC_FREE2) {
2243 			PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2244 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2245 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2246 			/* Entire chunk is free; return it. */
2247 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2248 			dump_drop_page(m_pc->phys_addr);
2249 			mtx_lock(&pv_chunks_mutex);
2250 			TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2251 			break;
2252 		}
2253 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2254 		mtx_lock(&pv_chunks_mutex);
2255 		/* One freed pv entry in locked_pmap is sufficient. */
2256 		if (pmap == locked_pmap)
2257 			break;
2258 
2259 next_chunk:
2260 		TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2261 		TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
2262 		if (active_reclaims == 1 && pmap != NULL) {
2263 			/*
2264 			 * Rotate the pv chunks list so that we do not
2265 			 * scan the same pv chunks that could not be
2266 			 * freed (because they contained a wired
2267 			 * and/or superpage mapping) on every
2268 			 * invocation of reclaim_pv_chunk().
2269 			 */
2270 			while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
2271 				MPASS(pc->pc_pmap != NULL);
2272 				TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2273 				TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2274 			}
2275 		}
2276 	}
2277 	TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2278 	TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
2279 	active_reclaims--;
2280 	mtx_unlock(&pv_chunks_mutex);
2281 	if (pmap != NULL && pmap != locked_pmap)
2282 		PMAP_UNLOCK(pmap);
2283 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
2284 		m_pc = SLIST_FIRST(&free);
2285 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2286 		/* Recycle a freed page table page. */
2287 		m_pc->ref_count = 1;
2288 	}
2289 	vm_page_free_pages_toq(&free, true);
2290 	return (m_pc);
2291 }
2292 
2293 /*
2294  * free the pv_entry back to the free list
2295  */
2296 static void
2297 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2298 {
2299 	struct pv_chunk *pc;
2300 	int idx, field, bit;
2301 
2302 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2303 	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
2304 	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
2305 	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
2306 	pc = pv_to_chunk(pv);
2307 	idx = pv - &pc->pc_pventry[0];
2308 	field = idx / 64;
2309 	bit = idx % 64;
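	/* For example, the pv entry at idx 69 lands in bit 5 of pc_map[1]. */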
2310 	pc->pc_map[field] |= 1ul << bit;
2311 	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
2312 	    pc->pc_map[2] != PC_FREE2) {
2313 		/* 98% of the time, pc is already at the head of the list. */
2314 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
2315 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2316 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2317 		}
2318 		return;
2319 	}
2320 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2321 	free_pv_chunk(pc);
2322 }
2323 
2324 static void
2325 free_pv_chunk(struct pv_chunk *pc)
2326 {
2327 	vm_page_t m;
2328 
2329 	mtx_lock(&pv_chunks_mutex);
2330  	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2331 	mtx_unlock(&pv_chunks_mutex);
2332 	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2333 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2334 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2335 	/* entire chunk is free, return it */
2336 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2337 	dump_drop_page(m->phys_addr);
2338 	vm_page_unwire_noq(m);
2339 	vm_page_free(m);
2340 }
2341 
2342 /*
2343  * Returns a new PV entry, allocating a new PV chunk from the system when
2344  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
2345  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
2346  * returned.
2347  *
2348  * The given PV list lock may be released.
2349  */
2350 static pv_entry_t
2351 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
2352 {
2353 	int bit, field;
2354 	pv_entry_t pv;
2355 	struct pv_chunk *pc;
2356 	vm_page_t m;
2357 
2358 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2359 	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
2360 retry:
2361 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2362 	if (pc != NULL) {
2363 		for (field = 0; field < _NPCM; field++) {
2364 			if (pc->pc_map[field]) {
2365 				bit = ffsl(pc->pc_map[field]) - 1;
2366 				break;
2367 			}
2368 		}
2369 		if (field < _NPCM) {
2370 			pv = &pc->pc_pventry[field * 64 + bit];
2371 			pc->pc_map[field] &= ~(1ul << bit);
2372 			/* If this was the last item, move it to tail */
2373 			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
2374 			    pc->pc_map[2] == 0) {
2375 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2376 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
2377 				    pc_list);
2378 			}
2379 			PV_STAT(atomic_add_long(&pv_entry_count, 1));
2380 			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
2381 			return (pv);
2382 		}
2383 	}
2384 	/* No free items, allocate another chunk */
2385 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2386 	    VM_ALLOC_WIRED);
2387 	if (m == NULL) {
2388 		if (lockp == NULL) {
2389 			PV_STAT(pc_chunk_tryfail++);
2390 			return (NULL);
2391 		}
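		/*
		 * Try to recycle a chunk page from another pmap.  A NULL
		 * return means that pv entries were freed without a whole
		 * chunk page becoming free, so rescan pm_pvchunk above.
		 */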
2392 		m = reclaim_pv_chunk(pmap, lockp);
2393 		if (m == NULL)
2394 			goto retry;
2395 	}
2396 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2397 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2398 	dump_add_page(m->phys_addr);
2399 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2400 	pc->pc_pmap = pmap;
2401 	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
2402 	pc->pc_map[1] = PC_FREE1;
2403 	pc->pc_map[2] = PC_FREE2;
2404 	mtx_lock(&pv_chunks_mutex);
2405 	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2406 	mtx_unlock(&pv_chunks_mutex);
2407 	pv = &pc->pc_pventry[0];
2408 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2409 	PV_STAT(atomic_add_long(&pv_entry_count, 1));
2410 	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
2411 	return (pv);
2412 }
2413 
2414 /*
2415  * Ensure that the number of spare PV entries in the specified pmap meets or
2416  * exceeds the given count, "needed".
2417  *
2418  * The given PV list lock may be released.
2419  */
2420 static void
2421 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
2422 {
2423 	struct pch new_tail;
2424 	struct pv_chunk *pc;
2425 	vm_page_t m;
2426 	int avail, free;
2427 	bool reclaimed;
2428 
2429 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2430 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
2431 
2432 	/*
2433 	 * Newly allocated PV chunks must be stored in a private list until
2434 	 * the required number of PV chunks have been allocated.  Otherwise,
2435 	 * reclaim_pv_chunk() could recycle one of these chunks.  In
2436 	 * contrast, these chunks must be added to the pmap upon allocation.
2437 	 */
2438 	TAILQ_INIT(&new_tail);
2439 retry:
2440 	avail = 0;
2441 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
2442 		bit_count((bitstr_t *)pc->pc_map, 0,
2443 		    sizeof(pc->pc_map) * NBBY, &free);
2444 		if (free == 0)
2445 			break;
2446 		avail += free;
2447 		if (avail >= needed)
2448 			break;
2449 	}
2450 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
2451 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2452 		    VM_ALLOC_WIRED);
2453 		if (m == NULL) {
2454 			m = reclaim_pv_chunk(pmap, lockp);
2455 			if (m == NULL)
2456 				goto retry;
2457 			reclaimed = true;
2458 		}
2459 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2460 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2461 		dump_add_page(m->phys_addr);
2462 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2463 		pc->pc_pmap = pmap;
2464 		pc->pc_map[0] = PC_FREE0;
2465 		pc->pc_map[1] = PC_FREE1;
2466 		pc->pc_map[2] = PC_FREE2;
2467 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2468 		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2469 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2470 
2471 		/*
2472 		 * The reclaim might have freed a chunk from the current pmap.
2473 		 * If that chunk contained available entries, we need to
2474 		 * re-count the number of available entries.
2475 		 */
2476 		if (reclaimed)
2477 			goto retry;
2478 	}
2479 	if (!TAILQ_EMPTY(&new_tail)) {
2480 		mtx_lock(&pv_chunks_mutex);
2481 		TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2482 		mtx_unlock(&pv_chunks_mutex);
2483 	}
2484 }
2485 
2486 /*
2487  * First find and then remove the pv entry for the specified pmap and virtual
2488  * address from the specified pv list.  Returns the pv entry if found and NULL
2489  * otherwise.  This operation can be performed on pv lists for either 4KB or
2490  * 2MB page mappings.
2491  */
2492 static __inline pv_entry_t
2493 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2494 {
2495 	pv_entry_t pv;
2496 
2497 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2498 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2499 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2500 			pvh->pv_gen++;
2501 			break;
2502 		}
2503 	}
2504 	return (pv);
2505 }
2506 
2507 /*
2508  * After demotion from a 2MB page mapping to 512 4KB page mappings,
2509  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2510  * entries for each of the 4KB page mappings.
2511  */
2512 static void
2513 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2514     struct rwlock **lockp)
2515 {
2516 	struct md_page *pvh;
2517 	struct pv_chunk *pc;
2518 	pv_entry_t pv;
2519 	vm_offset_t va_last;
2520 	vm_page_t m;
2521 	int bit, field;
2522 
2523 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2524 	KASSERT((va & L2_OFFSET) == 0,
2525 	    ("pmap_pv_demote_l2: va is not 2mpage aligned"));
2526 	KASSERT((pa & L2_OFFSET) == 0,
2527 	    ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2528 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2529 
2530 	/*
2531 	 * Transfer the 2mpage's pv entry for this mapping to the first
2532 	 * page's pv list.  Once this transfer begins, the pv list lock
2533 	 * must not be released until the last pv entry is reinstantiated.
2534 	 */
2535 	pvh = pa_to_pvh(pa);
2536 	pv = pmap_pvh_remove(pvh, pmap, va);
2537 	KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2538 	m = PHYS_TO_VM_PAGE(pa);
2539 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2540 	m->md.pv_gen++;
2541 	/* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2542 	PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2543 	va_last = va + L2_SIZE - PAGE_SIZE;
2544 	for (;;) {
2545 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2546 		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2547 		    pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2548 		for (field = 0; field < _NPCM; field++) {
2549 			while (pc->pc_map[field]) {
2550 				bit = ffsl(pc->pc_map[field]) - 1;
2551 				pc->pc_map[field] &= ~(1ul << bit);
2552 				pv = &pc->pc_pventry[field * 64 + bit];
2553 				va += PAGE_SIZE;
2554 				pv->pv_va = va;
2555 				m++;
2556 				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2557 			    ("pmap_pv_demote_l2: page %p is not managed", m));
2558 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2559 				m->md.pv_gen++;
2560 				if (va == va_last)
2561 					goto out;
2562 			}
2563 		}
2564 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2565 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2566 	}
2567 out:
2568 	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2569 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2570 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2571 	}
2572 	PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2573 	PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2574 }
2575 
2576 /*
2577  * First find and then destroy the pv entry for the specified pmap and virtual
2578  * address.  This operation can be performed on pv lists for either 4KB or 2MB
2579  * page mappings.
2580  */
2581 static void
2582 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2583 {
2584 	pv_entry_t pv;
2585 
2586 	pv = pmap_pvh_remove(pvh, pmap, va);
2587 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2588 	free_pv_entry(pmap, pv);
2589 }
2590 
2591 /*
2592  * Conditionally create the PV entry for a 4KB page mapping if the required
2593  * memory can be allocated without resorting to reclamation.
2594  */
2595 static boolean_t
2596 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2597     struct rwlock **lockp)
2598 {
2599 	pv_entry_t pv;
2600 
2601 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2602 	/* Pass NULL instead of the lock pointer to disable reclamation. */
2603 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2604 		pv->pv_va = va;
2605 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2606 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2607 		m->md.pv_gen++;
2608 		return (TRUE);
2609 	} else
2610 		return (FALSE);
2611 }
2612 
2613 /*
2614  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
2615  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
2616  * false if the PV entry cannot be allocated without resorting to reclamation.
2617  */
2618 static bool
2619 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
2620     struct rwlock **lockp)
2621 {
2622 	struct md_page *pvh;
2623 	pv_entry_t pv;
2624 	vm_paddr_t pa;
2625 
2626 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2627 	/* Pass NULL instead of the lock pointer to disable reclamation. */
2628 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
2629 	    NULL : lockp)) == NULL)
2630 		return (false);
2631 	pv->pv_va = va;
2632 	pa = l2e & ~ATTR_MASK;
2633 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2634 	pvh = pa_to_pvh(pa);
2635 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2636 	pvh->pv_gen++;
2637 	return (true);
2638 }
2639 
2640 static void
2641 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
2642 {
2643 	pt_entry_t newl2, oldl2;
2644 	vm_page_t ml3;
2645 	vm_paddr_t ml3pa;
2646 
2647 	KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
2648 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
2649 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2650 
2651 	ml3 = pmap_remove_pt_page(pmap, va);
2652 	if (ml3 == NULL)
2653 		panic("pmap_remove_kernel_l2: Missing pt page");
2654 
2655 	ml3pa = VM_PAGE_TO_PHYS(ml3);
2656 	newl2 = ml3pa | L2_TABLE;
2657 
2658 	/*
2659 	 * If this page table page was unmapped by a promotion, then it
2660 	 * contains valid mappings.  Zero it to invalidate those mappings.
2661 	 */
2662 	if (ml3->valid != 0)
2663 		pagezero((void *)PHYS_TO_DMAP(ml3pa));
2664 
2665 	/*
2666 	 * Demote the mapping.  The caller must have already invalidated the
2667 	 * mapping (i.e., the "break" in break-before-make).
2668 	 */
2669 	oldl2 = pmap_load_store(l2, newl2);
2670 	KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
2671 	    __func__, l2, oldl2));
2672 }
2673 
2674 /*
2675  * pmap_remove_l2: Do the things to unmap a level 2 superpage.
2676  */
2677 static int
2678 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2679     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
2680 {
2681 	struct md_page *pvh;
2682 	pt_entry_t old_l2;
2683 	vm_offset_t eva, va;
2684 	vm_page_t m, ml3;
2685 
2686 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2687 	KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
2688 	old_l2 = pmap_load_clear(l2);
2689 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2690 	    ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
2691 
2692 	/*
2693 	 * Since a promotion must break the 4KB page mappings before making
2694 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
2695 	 */
2696 	pmap_invalidate_page(pmap, sva);
2697 
2698 	if (old_l2 & ATTR_SW_WIRED)
2699 		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2700 	pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2701 	if (old_l2 & ATTR_SW_MANAGED) {
2702 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
2703 		pvh = pa_to_pvh(old_l2 & ~ATTR_MASK);
2704 		pmap_pvh_free(pvh, pmap, sva);
2705 		eva = sva + L2_SIZE;
2706 		for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2707 		    va < eva; va += PAGE_SIZE, m++) {
2708 			if (pmap_pte_dirty(pmap, old_l2))
2709 				vm_page_dirty(m);
2710 			if (old_l2 & ATTR_AF)
2711 				vm_page_aflag_set(m, PGA_REFERENCED);
2712 			if (TAILQ_EMPTY(&m->md.pv_list) &&
2713 			    TAILQ_EMPTY(&pvh->pv_list))
2714 				vm_page_aflag_clear(m, PGA_WRITEABLE);
2715 		}
2716 	}
2717 	if (pmap == kernel_pmap) {
2718 		pmap_remove_kernel_l2(pmap, l2, sva);
2719 	} else {
2720 		ml3 = pmap_remove_pt_page(pmap, sva);
2721 		if (ml3 != NULL) {
2722 			KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
2723 			    ("pmap_remove_l2: l3 page not promoted"));
2724 			pmap_resident_count_dec(pmap, 1);
2725 			KASSERT(ml3->ref_count == NL3PG,
2726 			    ("pmap_remove_l2: l3 page ref count error"));
2727 			ml3->ref_count = 0;
2728 			pmap_add_delayed_free_list(ml3, free, FALSE);
2729 		}
2730 	}
2731 	return (pmap_unuse_pt(pmap, sva, l1e, free));
2732 }
2733 
2734 /*
2735  * pmap_remove_l3: do the things to unmap a page in a process
2736  */
2737 static int
2738 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2739     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2740 {
2741 	struct md_page *pvh;
2742 	pt_entry_t old_l3;
2743 	vm_page_t m;
2744 
2745 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2746 	old_l3 = pmap_load_clear(l3);
2747 	pmap_invalidate_page(pmap, va);
2748 	if (old_l3 & ATTR_SW_WIRED)
2749 		pmap->pm_stats.wired_count -= 1;
2750 	pmap_resident_count_dec(pmap, 1);
2751 	if (old_l3 & ATTR_SW_MANAGED) {
2752 		m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2753 		if (pmap_pte_dirty(pmap, old_l3))
2754 			vm_page_dirty(m);
2755 		if (old_l3 & ATTR_AF)
2756 			vm_page_aflag_set(m, PGA_REFERENCED);
2757 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2758 		pmap_pvh_free(&m->md, pmap, va);
2759 		if (TAILQ_EMPTY(&m->md.pv_list) &&
2760 		    (m->flags & PG_FICTITIOUS) == 0) {
2761 			pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2762 			if (TAILQ_EMPTY(&pvh->pv_list))
2763 				vm_page_aflag_clear(m, PGA_WRITEABLE);
2764 		}
2765 	}
2766 	return (pmap_unuse_pt(pmap, va, l2e, free));
2767 }
2768 
2769 /*
2770  * Remove the specified range of addresses from the L3 page table that is
2771  * identified by the given L2 entry.
2772  */
2773 static void
2774 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
2775     vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
2776 {
2777 	struct md_page *pvh;
2778 	struct rwlock *new_lock;
2779 	pt_entry_t *l3, old_l3;
2780 	vm_offset_t va;
2781 	vm_page_t l3pg, m;
2782 
2783 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2784 	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
2785 	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
2786 	l3pg = sva < VM_MAXUSER_ADDRESS ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) :
2787 	    NULL;
2788 	va = eva;
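	/*
	 * "va" marks the start of the run of removed mappings whose TLB
	 * invalidation is still pending; va == eva means that no
	 * invalidation is pending.  Batching invalidations over runs of
	 * consecutive pages reduces the number of TLB operations.
	 */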
2789 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
2790 		if (!pmap_l3_valid(pmap_load(l3))) {
2791 			if (va != eva) {
2792 				pmap_invalidate_range(pmap, va, sva);
2793 				va = eva;
2794 			}
2795 			continue;
2796 		}
2797 		old_l3 = pmap_load_clear(l3);
2798 		if ((old_l3 & ATTR_SW_WIRED) != 0)
2799 			pmap->pm_stats.wired_count--;
2800 		pmap_resident_count_dec(pmap, 1);
2801 		if ((old_l3 & ATTR_SW_MANAGED) != 0) {
2802 			m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2803 			if (pmap_pte_dirty(pmap, old_l3))
2804 				vm_page_dirty(m);
2805 			if ((old_l3 & ATTR_AF) != 0)
2806 				vm_page_aflag_set(m, PGA_REFERENCED);
2807 			new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
2808 			if (new_lock != *lockp) {
2809 				if (*lockp != NULL) {
2810 					/*
2811 					 * Pending TLB invalidations must be
2812 					 * performed before the PV list lock is
2813 					 * released.  Otherwise, a concurrent
2814 					 * pmap_remove_all() on a physical page
2815 					 * could return while a stale TLB entry
2816 					 * still provides access to that page.
2817 					 */
2818 					if (va != eva) {
2819 						pmap_invalidate_range(pmap, va,
2820 						    sva);
2821 						va = eva;
2822 					}
2823 					rw_wunlock(*lockp);
2824 				}
2825 				*lockp = new_lock;
2826 				rw_wlock(*lockp);
2827 			}
2828 			pmap_pvh_free(&m->md, pmap, sva);
2829 			if (TAILQ_EMPTY(&m->md.pv_list) &&
2830 			    (m->flags & PG_FICTITIOUS) == 0) {
2831 				pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2832 				if (TAILQ_EMPTY(&pvh->pv_list))
2833 					vm_page_aflag_clear(m, PGA_WRITEABLE);
2834 			}
2835 		}
2836 		if (va == eva)
2837 			va = sva;
2838 		if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) {
2839 			sva += L3_SIZE;
2840 			break;
2841 		}
2842 	}
2843 	if (va != eva)
2844 		pmap_invalidate_range(pmap, va, sva);
2845 }
2846 
2847 /*
2848  *	Remove the given range of addresses from the specified map.
2849  *
2850  *	It is assumed that the start and end are properly
2851  *	rounded to the page size.
2852  */
2853 void
2854 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2855 {
2856 	struct rwlock *lock;
2857 	vm_offset_t va_next;
2858 	pd_entry_t *l0, *l1, *l2;
2859 	pt_entry_t l3_paddr;
2860 	struct spglist free;
2861 
2862 	/*
2863 	 * Perform an unsynchronized read.  This is, however, safe.
2864 	 */
2865 	if (pmap->pm_stats.resident_count == 0)
2866 		return;
2867 
2868 	SLIST_INIT(&free);
2869 
2870 	PMAP_LOCK(pmap);
2871 
2872 	lock = NULL;
2873 	for (; sva < eva; sva = va_next) {
2874 
2875 		if (pmap->pm_stats.resident_count == 0)
2876 			break;
2877 
2878 		l0 = pmap_l0(pmap, sva);
2879 		if (pmap_load(l0) == 0) {
2880 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2881 			if (va_next < sva)
2882 				va_next = eva;
2883 			continue;
2884 		}
2885 
2886 		l1 = pmap_l0_to_l1(l0, sva);
2887 		if (pmap_load(l1) == 0) {
2888 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2889 			if (va_next < sva)
2890 				va_next = eva;
2891 			continue;
2892 		}
2893 
2894 		/*
2895 		 * Calculate index for next page table.
2896 		 */
2897 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2898 		if (va_next < sva)
2899 			va_next = eva;
2900 
2901 		l2 = pmap_l1_to_l2(l1, sva);
2902 		if (l2 == NULL)
2903 			continue;
2904 
2905 		l3_paddr = pmap_load(l2);
2906 
2907 		if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
2908 			if (sva + L2_SIZE == va_next && eva >= va_next) {
2909 				pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
2910 				    &free, &lock);
2911 				continue;
2912 			} else if (pmap_demote_l2_locked(pmap, l2, sva,
2913 			    &lock) == NULL)
2914 				continue;
2915 			l3_paddr = pmap_load(l2);
2916 		}
2917 
2918 		/*
2919 		 * Weed out invalid mappings.
2920 		 */
2921 		if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2922 			continue;
2923 
2924 		/*
2925 		 * Limit our scan to either the end of the va represented
2926 		 * by the current page table page, or to the end of the
2927 		 * range being removed.
2928 		 */
2929 		if (va_next > eva)
2930 			va_next = eva;
2931 
2932 		pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
2933 		    &lock);
2934 	}
2935 	if (lock != NULL)
2936 		rw_wunlock(lock);
2937 	PMAP_UNLOCK(pmap);
2938 	vm_page_free_pages_toq(&free, true);
2939 }
2940 
2941 /*
2942  *	Routine:	pmap_remove_all
2943  *	Function:
2944  *		Removes this physical page from
2945  *		all physical maps in which it resides.
2946  *		Reflects back modify bits to the pager.
2947  *
2948  *	Notes:
2949  *		Original versions of this routine were very
2950  *		inefficient because they iteratively called
2951  *		pmap_remove (slow...)
2952  */
2953 
2954 void
2955 pmap_remove_all(vm_page_t m)
2956 {
2957 	struct md_page *pvh;
2958 	pv_entry_t pv;
2959 	pmap_t pmap;
2960 	struct rwlock *lock;
2961 	pd_entry_t *pde, tpde;
2962 	pt_entry_t *pte, tpte;
2963 	vm_offset_t va;
2964 	struct spglist free;
2965 	int lvl, pvh_gen, md_gen;
2966 
2967 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2968 	    ("pmap_remove_all: page %p is not managed", m));
2969 	SLIST_INIT(&free);
2970 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2971 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2972 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
2973 retry:
2974 	rw_wlock(lock);
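	/*
	 * First demote any 2MB mappings of the page so that the second loop
	 * below only has to remove 4KB mappings from m->md.pv_list.
	 */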
2975 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2976 		pmap = PV_PMAP(pv);
2977 		if (!PMAP_TRYLOCK(pmap)) {
2978 			pvh_gen = pvh->pv_gen;
2979 			rw_wunlock(lock);
2980 			PMAP_LOCK(pmap);
2981 			rw_wlock(lock);
2982 			if (pvh_gen != pvh->pv_gen) {
2983 				rw_wunlock(lock);
2984 				PMAP_UNLOCK(pmap);
2985 				goto retry;
2986 			}
2987 		}
2988 		va = pv->pv_va;
2989 		pte = pmap_pte(pmap, va, &lvl);
2990 		KASSERT(pte != NULL,
2991 		    ("pmap_remove_all: no page table entry found"));
2992 		KASSERT(lvl == 2,
2993 		    ("pmap_remove_all: invalid pte level %d", lvl));
2994 
2995 		pmap_demote_l2_locked(pmap, pte, va, &lock);
2996 		PMAP_UNLOCK(pmap);
2997 	}
2998 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2999 		pmap = PV_PMAP(pv);
3000 		PMAP_ASSERT_STAGE1(pmap);
3001 		if (!PMAP_TRYLOCK(pmap)) {
3002 			pvh_gen = pvh->pv_gen;
3003 			md_gen = m->md.pv_gen;
3004 			rw_wunlock(lock);
3005 			PMAP_LOCK(pmap);
3006 			rw_wlock(lock);
3007 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3008 				rw_wunlock(lock);
3009 				PMAP_UNLOCK(pmap);
3010 				goto retry;
3011 			}
3012 		}
3013 		pmap_resident_count_dec(pmap, 1);
3014 
3015 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
3016 		KASSERT(pde != NULL,
3017 		    ("pmap_remove_all: no page directory entry found"));
3018 		KASSERT(lvl == 2,
3019 		    ("pmap_remove_all: invalid pde level %d", lvl));
3020 		tpde = pmap_load(pde);
3021 
3022 		pte = pmap_l2_to_l3(pde, pv->pv_va);
3023 		tpte = pmap_load_clear(pte);
3024 		if (tpte & ATTR_SW_WIRED)
3025 			pmap->pm_stats.wired_count--;
3026 		if ((tpte & ATTR_AF) != 0) {
3027 			pmap_invalidate_page(pmap, pv->pv_va);
3028 			vm_page_aflag_set(m, PGA_REFERENCED);
3029 		}
3030 
3031 		/*
3032 		 * Update the vm_page_t clean and reference bits.
3033 		 */
3034 		if (pmap_pte_dirty(pmap, tpte))
3035 			vm_page_dirty(m);
3036 		pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
3037 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3038 		m->md.pv_gen++;
3039 		free_pv_entry(pmap, pv);
3040 		PMAP_UNLOCK(pmap);
3041 	}
3042 	vm_page_aflag_clear(m, PGA_WRITEABLE);
3043 	rw_wunlock(lock);
3044 	vm_page_free_pages_toq(&free, true);
3045 }
3046 
3047 /*
3048  * pmap_protect_l2: do the things to protect a 2MB page in a pmap
3049  */
3050 static void
3051 pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
3052     pt_entry_t nbits)
3053 {
3054 	pd_entry_t old_l2;
3055 	vm_page_t m, mt;
3056 
3057 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3058 	PMAP_ASSERT_STAGE1(pmap);
3059 	KASSERT((sva & L2_OFFSET) == 0,
3060 	    ("pmap_protect_l2: sva is not 2mpage aligned"));
3061 	old_l2 = pmap_load(l2);
3062 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
3063 	    ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
3064 
3065 	/*
3066 	 * Return if the L2 entry already has the desired access restrictions
3067 	 * in place.
3068 	 */
3069 retry:
3070 	if ((old_l2 & mask) == nbits)
3071 		return;
3072 
3073 	/*
3074 	 * When a dirty read/write superpage mapping is write protected,
3075 	 * update the dirty field of each of the superpage's constituent 4KB
3076 	 * pages.
3077 	 */
3078 	if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
3079 	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
3080 	    pmap_pte_dirty(pmap, old_l2)) {
3081 		m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
3082 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3083 			vm_page_dirty(mt);
3084 	}
3085 
3086 	if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
3087 		goto retry;
3088 
3089 	/*
3090 	 * Since a promotion must break the 4KB page mappings before making
3091 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
3092 	 */
3093 	pmap_invalidate_page(pmap, sva);
3094 }
3095 
3096 /*
3097  *	Set the physical protection on the
3098  *	specified range of this map as requested.
3099  */
3100 void
3101 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3102 {
3103 	vm_offset_t va, va_next;
3104 	pd_entry_t *l0, *l1, *l2;
3105 	pt_entry_t *l3p, l3, mask, nbits;
3106 
3107 	PMAP_ASSERT_STAGE1(pmap);
3108 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
3109 	if (prot == VM_PROT_NONE) {
3110 		pmap_remove(pmap, sva, eva);
3111 		return;
3112 	}
3113 
3114 	mask = nbits = 0;
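	/*
	 * "mask" selects the PTE bits to be rewritten and "nbits" holds
	 * their new values: revoking write access clears ATTR_SW_DBM and
	 * forces read-only permissions, and revoking execute access sets
	 * the execute-never bit.
	 */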
3115 	if ((prot & VM_PROT_WRITE) == 0) {
3116 		mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
3117 		nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
3118 	}
3119 	if ((prot & VM_PROT_EXECUTE) == 0) {
3120 		mask |= ATTR_S1_XN;
3121 		nbits |= ATTR_S1_XN;
3122 	}
3123 	if (mask == 0)
3124 		return;
3125 
3126 	PMAP_LOCK(pmap);
3127 	for (; sva < eva; sva = va_next) {
3128 
3129 		l0 = pmap_l0(pmap, sva);
3130 		if (pmap_load(l0) == 0) {
3131 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3132 			if (va_next < sva)
3133 				va_next = eva;
3134 			continue;
3135 		}
3136 
3137 		l1 = pmap_l0_to_l1(l0, sva);
3138 		if (pmap_load(l1) == 0) {
3139 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3140 			if (va_next < sva)
3141 				va_next = eva;
3142 			continue;
3143 		}
3144 
3145 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3146 		if (va_next < sva)
3147 			va_next = eva;
3148 
3149 		l2 = pmap_l1_to_l2(l1, sva);
3150 		if (pmap_load(l2) == 0)
3151 			continue;
3152 
3153 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3154 			if (sva + L2_SIZE == va_next && eva >= va_next) {
3155 				pmap_protect_l2(pmap, l2, sva, mask, nbits);
3156 				continue;
3157 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
3158 				continue;
3159 		}
3160 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3161 		    ("pmap_protect: Invalid L2 entry after demotion"));
3162 
3163 		if (va_next > eva)
3164 			va_next = eva;
3165 
3166 		va = va_next;
3167 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
3168 		    sva += L3_SIZE) {
3169 			l3 = pmap_load(l3p);
3170 retry:
3171 			/*
3172 			 * Go to the next L3 entry if the current one is
3173 			 * invalid or already has the desired access
3174 			 * restrictions in place.  (The latter case occurs
3175 			 * frequently.  For example, in a "buildworld"
3176 			 * workload, almost 1 out of 4 L3 entries already
3177 			 * have the desired restrictions.)
3178 			 */
3179 			if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
3180 				if (va != va_next) {
3181 					pmap_invalidate_range(pmap, va, sva);
3182 					va = va_next;
3183 				}
3184 				continue;
3185 			}
3186 
3187 			/*
3188 			 * When a dirty read/write mapping is write protected,
3189 			 * update the page's dirty field.
3190 			 */
3191 			if ((l3 & ATTR_SW_MANAGED) != 0 &&
3192 			    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
3193 			    pmap_pte_dirty(pmap, l3))
3194 				vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
3195 
3196 			if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
3197 				goto retry;
3198 			if (va == va_next)
3199 				va = sva;
3200 		}
3201 		if (va != va_next)
3202 			pmap_invalidate_range(pmap, va, sva);
3203 	}
3204 	PMAP_UNLOCK(pmap);
3205 }
3206 
3207 /*
3208  * Inserts the specified page table page into the specified pmap's collection
3209  * of idle page table pages.  Each of a pmap's page table pages is responsible
3210  * for mapping a distinct range of virtual addresses.  The pmap's collection is
3211  * ordered by this virtual address range.
3212  *
3213  * If "promoted" is false, then the page table page "mpte" must be zero filled.
3214  */
3215 static __inline int
3216 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
3217 {
3218 
3219 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
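	/*
	 * "mpte->valid" records whether the page still contains valid L3
	 * entries left over from a promotion; see, for example, the checks
	 * in pmap_remove_kernel_l2() and pmap_remove_l2().
	 */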
3220 	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
3221 	return (vm_radix_insert(&pmap->pm_root, mpte));
3222 }
3223 
3224 /*
3225  * Removes the page table page mapping the specified virtual address from the
3226  * specified pmap's collection of idle page table pages, and returns it.
3227  * Otherwise, returns NULL if there is no page table page corresponding to the
3228  * specified virtual address.
3229  */
3230 static __inline vm_page_t
3231 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
3232 {
3233 
3234 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3235 	return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
3236 }
3237 
3238 /*
3239  * Performs a break-before-make update of a pmap entry. This is needed when
3240  * either promoting or demoting pages to ensure the TLB doesn't get into an
3241  * inconsistent state.
3242  */
3243 static void
3244 pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
3245     vm_offset_t va, vm_size_t size)
3246 {
3247 	register_t intr;
3248 
3249 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3250 
3251 	/*
3252 	 * Ensure we don't get switched out with the page table in an
3253 	 * inconsistent state. We also need to ensure no interrupts fire
3254 	 * as they may make use of an address we are about to invalidate.
3255 	 */
3256 	intr = intr_disable();
3257 
3258 	/*
3259 	 * Clear the old mapping's valid bit, but leave the rest of the entry
3260 	 * unchanged, so that a lockless, concurrent pmap_kextract() can still
3261 	 * lookup the physical address.
3262 	 */
3263 	pmap_clear_bits(pte, ATTR_DESCR_VALID);
3264 	pmap_invalidate_range(pmap, va, va + size);
3265 
3266 	/* Create the new mapping */
3267 	pmap_store(pte, newpte);
3268 	dsb(ishst);
3269 
3270 	intr_restore(intr);
3271 }
3272 
3273 #if VM_NRESERVLEVEL > 0
3274 /*
3275  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
3276  * replace the many pv entries for the 4KB page mappings by a single pv entry
3277  * for the 2MB page mapping.
3278  */
3279 static void
3280 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3281     struct rwlock **lockp)
3282 {
3283 	struct md_page *pvh;
3284 	pv_entry_t pv;
3285 	vm_offset_t va_last;
3286 	vm_page_t m;
3287 
3288 	KASSERT((pa & L2_OFFSET) == 0,
3289 	    ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
3290 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3291 
3292 	/*
3293 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
3294 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
3295 	 * a transfer avoids the possibility that get_pv_entry() calls
3296 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
3297 	 * mappings that is being promoted.
3298 	 */
3299 	m = PHYS_TO_VM_PAGE(pa);
3300 	va = va & ~L2_OFFSET;
3301 	pv = pmap_pvh_remove(&m->md, pmap, va);
3302 	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
3303 	pvh = pa_to_pvh(pa);
3304 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3305 	pvh->pv_gen++;
3306 	/* Free the remaining Ln_ENTRIES - 1 pv entries. */
3307 	va_last = va + L2_SIZE - PAGE_SIZE;
3308 	do {
3309 		m++;
3310 		va += PAGE_SIZE;
3311 		pmap_pvh_free(&m->md, pmap, va);
3312 	} while (va < va_last);
3313 }
3314 
3315 /*
3316  * Tries to promote the 512, contiguous 4KB page mappings that are within a
3317  * single level 2 table entry to a single 2MB page mapping.  For promotion
3318  * to occur, two conditions must be met: (1) the 4KB page mappings must map
3319  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
3320  * identical characteristics.
3321  */
3322 static void
3323 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
3324     struct rwlock **lockp)
3325 {
3326 	pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
3327 	vm_page_t mpte;
3328 	vm_offset_t sva;
3329 
3330 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3331 	PMAP_ASSERT_STAGE1(pmap);
3332 
3333 	sva = va & ~L2_OFFSET;
3334 	firstl3 = pmap_l2_to_l3(l2, sva);
3335 	newl2 = pmap_load(firstl3);
3336 
3337 setl2:
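	/*
	 * The first L3 entry must have the access flag set and must map a
	 * 2MB-aligned physical address (no address bits may be set within
	 * L2_OFFSET); otherwise the 512 4KB mappings cannot be coalesced.
	 */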
3338 	if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) {
3339 		atomic_add_long(&pmap_l2_p_failures, 1);
3340 		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3341 		    " in pmap %p", va, pmap);
3342 		return;
3343 	}
3344 
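	/*
	 * A writeable-but-clean mapping is encoded as read-only with
	 * ATTR_SW_DBM set.  Clear ATTR_SW_DBM from such a mapping (here for
	 * the first L3 entry, and in the loop below for the rest) before
	 * comparing the 512 entries; the access permissions are unchanged,
	 * so no TLB invalidation is needed.
	 */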
3345 	if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
3346 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
3347 		if (!atomic_fcmpset_64(firstl3, &newl2, newl2 & ~ATTR_SW_DBM))
3348 			goto setl2;
3349 		newl2 &= ~ATTR_SW_DBM;
3350 	}
3351 
3352 	pa = newl2 + L2_SIZE - PAGE_SIZE;
3353 	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
3354 		oldl3 = pmap_load(l3);
3355 setl3:
3356 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
3357 		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
3358 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
3359 			    ~ATTR_SW_DBM))
3360 				goto setl3;
3361 			oldl3 &= ~ATTR_SW_DBM;
3362 		}
3363 		if (oldl3 != pa) {
3364 			atomic_add_long(&pmap_l2_p_failures, 1);
3365 			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3366 			    " in pmap %p", va, pmap);
3367 			return;
3368 		}
3369 		pa -= PAGE_SIZE;
3370 	}
3371 
3372 	/*
3373 	 * Save the page table page in its current state until the L2
3374 	 * mapping the superpage is demoted by pmap_demote_l2() or
3375 	 * destroyed by pmap_remove_l3().
3376 	 */
3377 	mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3378 	KASSERT(mpte >= vm_page_array &&
3379 	    mpte < &vm_page_array[vm_page_array_size],
3380 	    ("pmap_promote_l2: page table page is out of range"));
3381 	KASSERT(mpte->pindex == pmap_l2_pindex(va),
3382 	    ("pmap_promote_l2: page table page's pindex is wrong"));
3383 	if (pmap_insert_pt_page(pmap, mpte, true)) {
3384 		atomic_add_long(&pmap_l2_p_failures, 1);
3385 		CTR2(KTR_PMAP,
3386 		    "pmap_promote_l2: failure for va %#lx in pmap %p", va,
3387 		    pmap);
3388 		return;
3389 	}
3390 
3391 	if ((newl2 & ATTR_SW_MANAGED) != 0)
3392 		pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
3393 
3394 	newl2 &= ~ATTR_DESCR_MASK;
3395 	newl2 |= L2_BLOCK;
3396 
3397 	pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
3398 
3399 	atomic_add_long(&pmap_l2_promotions, 1);
3400 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
3401 		    pmap);
3402 }
3403 #endif /* VM_NRESERVLEVEL > 0 */
3404 
3405 /*
3406  *	Insert the given physical page (p) at
3407  *	the specified virtual address (v) in the
3408  *	target physical map with the protection requested.
3409  *
3410  *	If specified, the page will be wired down, meaning
3411  *	that the related pte can not be reclaimed.
3412  *
3413  *	NB:  This is the only routine which MAY NOT lazy-evaluate
3414  *	or lose information.  That is, this routine must actually
3415  *	insert this page into the given map NOW.
3416  */
3417 int
3418 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3419     u_int flags, int8_t psind)
3420 {
3421 	struct rwlock *lock;
3422 	pd_entry_t *pde;
3423 	pt_entry_t new_l3, orig_l3;
3424 	pt_entry_t *l2, *l3;
3425 	pv_entry_t pv;
3426 	vm_paddr_t opa, pa;
3427 	vm_page_t mpte, om;
3428 	boolean_t nosleep;
3429 	int lvl, rv;
3430 
3431 	va = trunc_page(va);
3432 	if ((m->oflags & VPO_UNMANAGED) == 0)
3433 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
3434 	pa = VM_PAGE_TO_PHYS(m);
3435 	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | L3_PAGE);
3436 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
3437 	new_l3 |= pmap_pte_prot(pmap, prot);
3438 
3439 	if ((flags & PMAP_ENTER_WIRED) != 0)
3440 		new_l3 |= ATTR_SW_WIRED;
3441 	if (pmap->pm_stage == PM_STAGE1) {
3442 		if (va < VM_MAXUSER_ADDRESS)
3443 			new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
3444 		else
3445 			new_l3 |= ATTR_S1_UXN;
3446 		if (pmap != kernel_pmap)
3447 			new_l3 |= ATTR_S1_nG;
3448 	} else {
3449 		/*
3450 		 * Clear the access flag on executable mappings, this will be
3451 		 * set later when the page is accessed. The fault handler is
3452 		 * required to invalidate the I-cache.
3453 		 *
3454 		 * TODO: Switch to the valid flag to allow hardware management
3455 		 * of the access flag. Much of the pmap code assumes the
3456 		 * valid flag is set and fails to destroy the old page tables
3457 		 * correctly if it is clear.
3458 		 */
3459 		if (prot & VM_PROT_EXECUTE)
3460 			new_l3 &= ~ATTR_AF;
3461 	}
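	/*
	 * For a managed, writeable mapping, ATTR_SW_DBM below marks the
	 * mapping as writeable for dirty-bit emulation.  Unless the
	 * triggering access is itself a write, the mapping starts out
	 * read-only; the first write fault upgrades it and dirties the page.
	 */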
3462 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3463 		new_l3 |= ATTR_SW_MANAGED;
3464 		if ((prot & VM_PROT_WRITE) != 0) {
3465 			new_l3 |= ATTR_SW_DBM;
3466 			if ((flags & VM_PROT_WRITE) == 0) {
3467 				PMAP_ASSERT_STAGE1(pmap);
3468 				new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
3469 			}
3470 		}
3471 	}
3472 
3473 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
3474 
3475 	lock = NULL;
3476 	PMAP_LOCK(pmap);
3477 	if (psind == 1) {
3478 		/* Assert the required virtual and physical alignment. */
3479 		KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
3480 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
3481 		rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
3482 		    flags, m, &lock);
3483 		goto out;
3484 	}
3485 	mpte = NULL;
3486 
3487 	/*
3488 	 * In the case that a page table page is not
3489 	 * resident, we are creating it here.
3490 	 */
3491 retry:
3492 	pde = pmap_pde(pmap, va, &lvl);
3493 	if (pde != NULL && lvl == 2) {
3494 		l3 = pmap_l2_to_l3(pde, va);
3495 		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
3496 			mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3497 			mpte->ref_count++;
3498 		}
3499 		goto havel3;
3500 	} else if (pde != NULL && lvl == 1) {
3501 		l2 = pmap_l1_to_l2(pde, va);
3502 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
3503 		    (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
3504 			l3 = &l3[pmap_l3_index(va)];
3505 			if (va < VM_MAXUSER_ADDRESS) {
3506 				mpte = PHYS_TO_VM_PAGE(
3507 				    pmap_load(l2) & ~ATTR_MASK);
3508 				mpte->ref_count++;
3509 			}
3510 			goto havel3;
3511 		}
3512 		/* We need to allocate an L3 table. */
3513 	}
3514 	if (va < VM_MAXUSER_ADDRESS) {
3515 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
3516 
3517 		/*
3518 		 * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
3519 		 * to handle the possibility that a superpage mapping for "va"
3520 		 * was created while we slept.
3521 		 */
3522 		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
3523 		    nosleep ? NULL : &lock);
3524 		if (mpte == NULL && nosleep) {
3525 			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
3526 			rv = KERN_RESOURCE_SHORTAGE;
3527 			goto out;
3528 		}
3529 		goto retry;
3530 	} else
3531 		panic("pmap_enter: missing L3 table for kernel va %#lx", va);
3532 
3533 havel3:
3534 	orig_l3 = pmap_load(l3);
3535 	opa = orig_l3 & ~ATTR_MASK;
3536 	pv = NULL;
3537 
3538 	/*
3539 	 * Is the specified virtual address already mapped?
3540 	 */
3541 	if (pmap_l3_valid(orig_l3)) {
3542 		/*
3543 		 * For now, stage 2 tables only support adding new entries, so
3544 		 * an existing mapping implies stage 1.  This simplifies cache
3545 		 * invalidation, which may require calling into EL2.
3546 		 */
3547 		PMAP_ASSERT_STAGE1(pmap);
3548 		/*
3549 		 * Wiring change, just update stats. We don't worry about
3550 		 * wiring PT pages as they remain resident as long as there
3551 		 * are valid mappings in them. Hence, if a user page is wired,
3552 		 * the PT page will be also.
3553 		 */
3554 		if ((flags & PMAP_ENTER_WIRED) != 0 &&
3555 		    (orig_l3 & ATTR_SW_WIRED) == 0)
3556 			pmap->pm_stats.wired_count++;
3557 		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
3558 		    (orig_l3 & ATTR_SW_WIRED) != 0)
3559 			pmap->pm_stats.wired_count--;
3560 
3561 		/*
3562 		 * Remove the extra PT page reference.
3563 		 */
3564 		if (mpte != NULL) {
3565 			mpte->ref_count--;
3566 			KASSERT(mpte->ref_count > 0,
3567 			    ("pmap_enter: missing reference to page table page,"
3568 			     " va: 0x%lx", va));
3569 		}
3570 
3571 		/*
3572 		 * Has the physical page changed?
3573 		 */
3574 		if (opa == pa) {
3575 			/*
3576 			 * No, might be a protection or wiring change.
3577 			 */
3578 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3579 			    (new_l3 & ATTR_SW_DBM) != 0)
3580 				vm_page_aflag_set(m, PGA_WRITEABLE);
3581 			goto validate;
3582 		}
3583 
3584 		/*
3585 		 * The physical page has changed.  Temporarily invalidate
3586 		 * the mapping.
3587 		 */
3588 		orig_l3 = pmap_load_clear(l3);
3589 		KASSERT((orig_l3 & ~ATTR_MASK) == opa,
3590 		    ("pmap_enter: unexpected pa update for %#lx", va));
3591 		if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
3592 			om = PHYS_TO_VM_PAGE(opa);
3593 
3594 			/*
3595 			 * The pmap lock is sufficient to synchronize with
3596 			 * concurrent calls to pmap_page_test_mappings() and
3597 			 * pmap_ts_referenced().
3598 			 */
3599 			if (pmap_pte_dirty(pmap, orig_l3))
3600 				vm_page_dirty(om);
3601 			if ((orig_l3 & ATTR_AF) != 0) {
3602 				pmap_invalidate_page(pmap, va);
3603 				vm_page_aflag_set(om, PGA_REFERENCED);
3604 			}
3605 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3606 			pv = pmap_pvh_remove(&om->md, pmap, va);
3607 			if ((m->oflags & VPO_UNMANAGED) != 0)
3608 				free_pv_entry(pmap, pv);
3609 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3610 			    TAILQ_EMPTY(&om->md.pv_list) &&
3611 			    ((om->flags & PG_FICTITIOUS) != 0 ||
3612 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3613 				vm_page_aflag_clear(om, PGA_WRITEABLE);
3614 		} else {
3615 			KASSERT((orig_l3 & ATTR_AF) != 0,
3616 			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
3617 			pmap_invalidate_page(pmap, va);
3618 		}
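		/*
		 * The old mapping has been removed; from this point on,
		 * proceed as if a new mapping is being created.
		 */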
3619 		orig_l3 = 0;
3620 	} else {
3621 		/*
3622 		 * Increment the counters.
3623 		 */
3624 		if ((new_l3 & ATTR_SW_WIRED) != 0)
3625 			pmap->pm_stats.wired_count++;
3626 		pmap_resident_count_inc(pmap, 1);
3627 	}
3628 	/*
3629 	 * Enter on the PV list if part of our managed memory.
3630 	 */
3631 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3632 		if (pv == NULL) {
3633 			pv = get_pv_entry(pmap, &lock);
3634 			pv->pv_va = va;
3635 		}
3636 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3637 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3638 		m->md.pv_gen++;
3639 		if ((new_l3 & ATTR_SW_DBM) != 0)
3640 			vm_page_aflag_set(m, PGA_WRITEABLE);
3641 	}
3642 
3643 validate:
3644 	if (pmap->pm_stage == PM_STAGE1) {
3645 		/*
3646 		 * Sync the icache if the mapping has execute permission and the
3647 		 * VM_MEMATTR_WRITE_BACK attribute.  Do it now, before the mapping
3648 		 * is stored and made valid for the hardware table walk.  If done
3649 		 * later, others could access this page before the caches are
3650 		 * properly synced.  Don't do it for kernel memory, which is
3651 		 * mapped with execute permission even if the memory isn't going
3652 		 * to hold executable code.  The only time an icache sync is
3653 		 * needed is after a kernel module is loaded and its relocation
3654 		 * info is processed, and that is done in elf_cpu_load_file().
3655 		 */
3656 		if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3657 		    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
3658 		    (opa != pa || (orig_l3 & ATTR_S1_XN))) {
3659 			PMAP_ASSERT_STAGE1(pmap);
3660 			cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3661 		}
3662 	} else {
3663 		cpu_dcache_wb_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3664 	}
3665 
3666 	/*
3667 	 * Update the L3 entry
3668 	 */
3669 	if (pmap_l3_valid(orig_l3)) {
3670 		PMAP_ASSERT_STAGE1(pmap);
3671 		KASSERT(opa == pa, ("pmap_enter: invalid update"));
3672 		if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
3673 			/* same PA, different attributes */
3674 			orig_l3 = pmap_load_store(l3, new_l3);
3675 			pmap_invalidate_page(pmap, va);
3676 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3677 			    pmap_pte_dirty(pmap, orig_l3))
3678 				vm_page_dirty(m);
3679 		} else {
3680 			/*
3681 			 * orig_l3 == new_l3
3682 			 * This can happen if multiple threads simultaneously
3683 			 * access a not-yet-mapped page.  It is bad for
3684 			 * performance since it can cause a full
3685 			 * demotion-NOP-promotion cycle.
3686 			 * Other possible reasons are:
3687 			 * - the VM and pmap memory layouts have diverged
3688 			 * - a TLB flush is missing somewhere and the CPU doesn't
3689 			 *   see the actual mapping.
3690 			 */
3691 			CTR4(KTR_PMAP, "%s: already mapped page - "
3692 			    "pmap %p va %#lx pte 0x%lx",
3693 			    __func__, pmap, va, new_l3);
3694 		}
3695 	} else {
3696 		/* New mapping */
3697 		pmap_store(l3, new_l3);
3698 		dsb(ishst);
3699 	}
3700 
3701 #if VM_NRESERVLEVEL > 0
3702 	/*
3703 	 * Try to promote from level 3 pages to a level 2 superpage. This
3704 	 * currently only works on stage 1 pmaps as pmap_promote_l2 looks at
3705 	 * stage 1 specific fields and performs a break-before-make sequence
3706 	 * that is incorrect for a stage 2 pmap.
3707 	 */
3708 	if ((mpte == NULL || mpte->ref_count == NL3PG) &&
3709 	    pmap_ps_enabled(pmap) && pmap->pm_stage == PM_STAGE1 &&
3710 	    (m->flags & PG_FICTITIOUS) == 0 &&
3711 	    vm_reserv_level_iffullpop(m) == 0) {
3712 		pmap_promote_l2(pmap, pde, va, &lock);
3713 	}
3714 #endif
3715 
3716 	rv = KERN_SUCCESS;
3717 out:
3718 	if (lock != NULL)
3719 		rw_wunlock(lock);
3720 	PMAP_UNLOCK(pmap);
3721 	return (rv);
3722 }
3723 
3724 /*
3725  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
3726  * if successful.  Returns false if (1) a page table page cannot be allocated
3727  * without sleeping, (2) a mapping already exists at the specified virtual
3728  * address, or (3) a PV entry cannot be allocated without reclaiming another
3729  * PV entry.
3730  */
3731 static bool
3732 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3733     struct rwlock **lockp)
3734 {
3735 	pd_entry_t new_l2;
3736 
3737 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3738 	PMAP_ASSERT_STAGE1(pmap);
3739 
3740 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
3741 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
3742 	    L2_BLOCK);
3743 	if ((m->oflags & VPO_UNMANAGED) == 0) {
3744 		new_l2 |= ATTR_SW_MANAGED;
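		/*
		 * Leave the access flag clear so that this speculatively
		 * created mapping is not counted as referenced until it is
		 * actually accessed.
		 */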
3745 		new_l2 &= ~ATTR_AF;
3746 	}
3747 	if ((prot & VM_PROT_EXECUTE) == 0 ||
3748 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
3749 		new_l2 |= ATTR_S1_XN;
3750 	if (va < VM_MAXUSER_ADDRESS)
3751 		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
3752 	else
3753 		new_l2 |= ATTR_S1_UXN;
3754 	if (pmap != kernel_pmap)
3755 		new_l2 |= ATTR_S1_nG;
3756 	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
3757 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3758 	    KERN_SUCCESS);
3759 }
3760 
3761 /*
3762  * Returns true if every page table entry in the specified page table is
3763  * zero.
3764  */
3765 static bool
3766 pmap_every_pte_zero(vm_paddr_t pa)
3767 {
3768 	pt_entry_t *pt_end, *pte;
3769 
3770 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
3771 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
3772 	for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) {
3773 		if (*pte != 0)
3774 			return (false);
3775 	}
3776 	return (true);
3777 }
3778 
3779 /*
3780  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
3781  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3782  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3783  * a mapping already exists at the specified virtual address.  Returns
3784  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3785  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
3786  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3787  *
3788  * The parameter "m" is only used when creating a managed, writeable mapping.
3789  */
3790 static int
3791 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
3792     vm_page_t m, struct rwlock **lockp)
3793 {
3794 	struct spglist free;
3795 	pd_entry_t *l2, old_l2;
3796 	vm_page_t l2pg, mt;
3797 
3798 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3799 
3800 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
3801 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
3802 		CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
3803 		    va, pmap);
3804 		return (KERN_RESOURCE_SHORTAGE);
3805 	}
3806 
3807 	/*
3808 	 * If there are existing mappings, either abort or remove them.
3809 	 */
3810 	if ((old_l2 = pmap_load(l2)) != 0) {
3811 		KASSERT(l2pg == NULL || l2pg->ref_count > 1,
3812 		    ("pmap_enter_l2: l2pg's ref count is too low"));
3813 		if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
3814 		    VM_MAXUSER_ADDRESS || (old_l2 & ATTR_DESCR_MASK) ==
3815 		    L2_BLOCK || !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
3816 			if (l2pg != NULL)
3817 				l2pg->ref_count--;
3818 			CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx"
3819 			    " in pmap %p", va, pmap);
3820 			return (KERN_FAILURE);
3821 		}
3822 		SLIST_INIT(&free);
3823 		if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
3824 			(void)pmap_remove_l2(pmap, l2, va,
3825 			    pmap_load(pmap_l1(pmap, va)), &free, lockp);
3826 		else
3827 			pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
3828 			    &free, lockp);
3829 		if (va < VM_MAXUSER_ADDRESS) {
3830 			vm_page_free_pages_toq(&free, true);
3831 			KASSERT(pmap_load(l2) == 0,
3832 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
3833 		} else {
3834 			KASSERT(SLIST_EMPTY(&free),
3835 			    ("pmap_enter_l2: freed kernel page table page"));
3836 
3837 			/*
3838 			 * Both pmap_remove_l2() and pmap_remove_l3_range()
3839 			 * will leave the kernel page table page zero filled.
3840 			 * Nonetheless, the TLB could have an intermediate
3841 			 * entry for the kernel page table page.
3842 			 */
3843 			mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3844 			if (pmap_insert_pt_page(pmap, mt, false))
3845 				panic("pmap_enter_l2: trie insert failed");
3846 			pmap_clear(l2);
3847 			pmap_invalidate_page(pmap, va);
3848 		}
3849 	}
3850 
3851 	if ((new_l2 & ATTR_SW_MANAGED) != 0) {
3852 		/*
3853 		 * Abort this mapping if its PV entry could not be created.
3854 		 */
3855 		if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
3856 			if (l2pg != NULL)
3857 				pmap_abort_ptp(pmap, va, l2pg);
3858 			CTR2(KTR_PMAP,
3859 			    "pmap_enter_l2: failure for va %#lx in pmap %p",
3860 			    va, pmap);
3861 			return (KERN_RESOURCE_SHORTAGE);
3862 		}
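		/*
		 * If the mapping is writeable (ATTR_SW_DBM is set), mark each
		 * 4KB page within the superpage as writeable.
		 */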
3863 		if ((new_l2 & ATTR_SW_DBM) != 0)
3864 			for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3865 				vm_page_aflag_set(mt, PGA_WRITEABLE);
3866 	}
3867 
3868 	/*
3869 	 * Increment counters.
3870 	 */
3871 	if ((new_l2 & ATTR_SW_WIRED) != 0)
3872 		pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
3873 	pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
3874 
3875 	/*
3876 	 * Map the superpage.
3877 	 */
3878 	pmap_store(l2, new_l2);
3879 	dsb(ishst);
3880 
3881 	atomic_add_long(&pmap_l2_mappings, 1);
3882 	CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
3883 	    va, pmap);
3884 
3885 	return (KERN_SUCCESS);
3886 }
3887 
3888 /*
3889  * Maps a sequence of resident pages belonging to the same object.
3890  * The sequence begins with the given page m_start.  This page is
3891  * mapped at the given virtual address start.  Each subsequent page is
3892  * mapped at a virtual address that is offset from start by the same
3893  * amount as the page is offset from m_start within the object.  The
3894  * last page in the sequence is the page with the largest offset from
3895  * m_start that can be mapped at a virtual address less than the given
3896  * virtual address end.  Not every virtual page between start and end
3897  * is mapped; only those for which a resident page exists with the
3898  * corresponding offset from m_start are mapped.
3899  */
3900 void
3901 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3902     vm_page_t m_start, vm_prot_t prot)
3903 {
3904 	struct rwlock *lock;
3905 	vm_offset_t va;
3906 	vm_page_t m, mpte;
3907 	vm_pindex_t diff, psize;
3908 
3909 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
3910 
3911 	psize = atop(end - start);
3912 	mpte = NULL;
3913 	m = m_start;
3914 	lock = NULL;
3915 	PMAP_LOCK(pmap);
3916 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3917 		va = start + ptoa(diff);
3918 		if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
3919 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
3920 		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
3921 			m = &m[L2_SIZE / PAGE_SIZE - 1];
3922 		else
3923 			mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
3924 			    &lock);
3925 		m = TAILQ_NEXT(m, listq);
3926 	}
3927 	if (lock != NULL)
3928 		rw_wunlock(lock);
3929 	PMAP_UNLOCK(pmap);
3930 }
3931 
3932 /*
3933  * this code makes some *MAJOR* assumptions:
3934  * 1. The current pmap and the given pmap exist.
3935  * 2. Not wired.
3936  * 3. Read access.
3937  * 4. No page table pages.
3938  * but is *MUCH* faster than pmap_enter...
3939  */
3940 
3941 void
3942 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3943 {
3944 	struct rwlock *lock;
3945 
3946 	lock = NULL;
3947 	PMAP_LOCK(pmap);
3948 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3949 	if (lock != NULL)
3950 		rw_wunlock(lock);
3951 	PMAP_UNLOCK(pmap);
3952 }
3953 
3954 static vm_page_t
3955 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3956     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3957 {
3958 	pd_entry_t *pde;
3959 	pt_entry_t *l2, *l3, l3_val;
3960 	vm_paddr_t pa;
3961 	int lvl;
3962 
3963 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3964 	    (m->oflags & VPO_UNMANAGED) != 0,
3965 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3966 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3967 	PMAP_ASSERT_STAGE1(pmap);
3968 
3969 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3970 	/*
3971 	 * In the case that a page table page is not
3972 	 * resident, we are creating it here.
3973 	 */
3974 	if (va < VM_MAXUSER_ADDRESS) {
3975 		vm_pindex_t l2pindex;
3976 
3977 		/*
3978 		 * Calculate pagetable page index
3979 		 */
3980 		l2pindex = pmap_l2_pindex(va);
3981 		if (mpte && (mpte->pindex == l2pindex)) {
3982 			mpte->ref_count++;
3983 		} else {
3984 			/*
3985 			 * Get the l2 entry
3986 			 */
3987 			pde = pmap_pde(pmap, va, &lvl);
3988 
3989 			/*
3990 			 * If the page table page is mapped, we just increment
3991 			 * the hold count, and activate it.  Otherwise, we
3992 			 * attempt to allocate a page table page.  If this
3993 			 * attempt fails, we don't retry.  Instead, we give up.
3994 			 */
3995 			if (lvl == 1) {
3996 				l2 = pmap_l1_to_l2(pde, va);
3997 				if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
3998 				    L2_BLOCK)
3999 					return (NULL);
4000 			}
4001 			if (lvl == 2 && pmap_load(pde) != 0) {
4002 				mpte =
4003 				    PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
4004 				mpte->ref_count++;
4005 			} else {
4006 				/*
4007 				 * Pass NULL instead of the PV list lock
4008 				 * pointer, because we don't intend to sleep.
4009 				 */
4010 				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
4011 				if (mpte == NULL)
4012 					return (mpte);
4013 			}
4014 		}
4015 		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
4016 		l3 = &l3[pmap_l3_index(va)];
4017 	} else {
4018 		mpte = NULL;
4019 		pde = pmap_pde(kernel_pmap, va, &lvl);
4020 		KASSERT(pde != NULL,
4021 		    ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
4022 		     va));
4023 		KASSERT(lvl == 2,
4024 		    ("pmap_enter_quick_locked: Invalid level %d", lvl));
4025 		l3 = pmap_l2_to_l3(pde, va);
4026 	}
4027 
4028 	/*
4029 	 * Abort if a mapping already exists.
4030 	 */
4031 	if (pmap_load(l3) != 0) {
4032 		if (mpte != NULL)
4033 			mpte->ref_count--;
4034 		return (NULL);
4035 	}
4036 
4037 	/*
4038 	 * Enter on the PV list if part of our managed memory.
4039 	 */
4040 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
4041 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
4042 		if (mpte != NULL)
4043 			pmap_abort_ptp(pmap, va, mpte);
4044 		return (NULL);
4045 	}
4046 
4047 	/*
4048 	 * Increment counters
4049 	 */
4050 	pmap_resident_count_inc(pmap, 1);
4051 
4052 	pa = VM_PAGE_TO_PHYS(m);
4053 	l3_val = pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
4054 	    ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
4055 	if ((prot & VM_PROT_EXECUTE) == 0 ||
4056 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
4057 		l3_val |= ATTR_S1_XN;
4058 	if (va < VM_MAXUSER_ADDRESS)
4059 		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
4060 	else
4061 		l3_val |= ATTR_S1_UXN;
4062 	if (pmap != kernel_pmap)
4063 		l3_val |= ATTR_S1_nG;
4064 
4065 	/*
4066 	 * Now validate mapping with RO protection
4067 	 */
4068 	if ((m->oflags & VPO_UNMANAGED) == 0) {
4069 		l3_val |= ATTR_SW_MANAGED;
4070 		l3_val &= ~ATTR_AF;
4071 	}
4072 
4073 	/* Sync icache before the mapping is stored to PTE */
4074 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
4075 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
4076 		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
4077 
4078 	pmap_store(l3, l3_val);
4079 	dsb(ishst);
4080 
4081 	return (mpte);
4082 }
4083 
4084 /*
4085  * This code maps large physical mmap regions into the
4086  * processor address space.  Note that some shortcuts
4087  * are taken, but the code works.
4088  */
4089 void
4090 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4091     vm_pindex_t pindex, vm_size_t size)
4092 {
4093 
4094 	VM_OBJECT_ASSERT_WLOCKED(object);
4095 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4096 	    ("pmap_object_init_pt: non-device object"));
4097 }
4098 
4099 /*
4100  *	Clear the wired attribute from the mappings for the specified range of
4101  *	addresses in the given pmap.  Every valid mapping within that range
4102  *	must have the wired attribute set.  In contrast, invalid mappings
4103  *	cannot have the wired attribute set, so they are ignored.
4104  *
4105  *	The wired attribute of the page table entry is not a hardware feature,
4106  *	so there is no need to invalidate any TLB entries.
4107  */
4108 void
4109 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4110 {
4111 	vm_offset_t va_next;
4112 	pd_entry_t *l0, *l1, *l2;
4113 	pt_entry_t *l3;
4114 
4115 	PMAP_LOCK(pmap);
4116 	for (; sva < eva; sva = va_next) {
4117 		l0 = pmap_l0(pmap, sva);
4118 		if (pmap_load(l0) == 0) {
4119 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4120 			if (va_next < sva)
4121 				va_next = eva;
4122 			continue;
4123 		}
4124 
4125 		l1 = pmap_l0_to_l1(l0, sva);
4126 		if (pmap_load(l1) == 0) {
4127 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4128 			if (va_next < sva)
4129 				va_next = eva;
4130 			continue;
4131 		}
4132 
4133 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4134 		if (va_next < sva)
4135 			va_next = eva;
4136 
4137 		l2 = pmap_l1_to_l2(l1, sva);
4138 		if (pmap_load(l2) == 0)
4139 			continue;
4140 
4141 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
4142 			if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
4143 				panic("pmap_unwire: l2 %#jx is missing "
4144 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
4145 
4146 			/*
4147 			 * Are we unwiring the entire large page?  If not,
4148 			 * demote the mapping and fall through.
4149 			 */
4150 			if (sva + L2_SIZE == va_next && eva >= va_next) {
4151 				pmap_clear_bits(l2, ATTR_SW_WIRED);
4152 				pmap->pm_stats.wired_count -= L2_SIZE /
4153 				    PAGE_SIZE;
4154 				continue;
4155 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
4156 				panic("pmap_unwire: demotion failed");
4157 		}
4158 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
4159 		    ("pmap_unwire: Invalid l2 entry after demotion"));
4160 
4161 		if (va_next > eva)
4162 			va_next = eva;
4163 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
4164 		    sva += L3_SIZE) {
4165 			if (pmap_load(l3) == 0)
4166 				continue;
4167 			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
4168 				panic("pmap_unwire: l3 %#jx is missing "
4169 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
4170 
4171 			/*
4172 			 * ATTR_SW_WIRED must be cleared atomically.  Although
4173 			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
4174 			 * the System MMU may write to the entry concurrently.
4175 			 */
4176 			pmap_clear_bits(l3, ATTR_SW_WIRED);
4177 			pmap->pm_stats.wired_count--;
4178 		}
4179 	}
4180 	PMAP_UNLOCK(pmap);
4181 }
4182 
4183 /*
4184  *	Copy the range specified by src_addr/len
4185  *	from the source map to the range dst_addr/len
4186  *	in the destination map.
4187  *
4188  *	This routine is only advisory and need not do anything.
4189  *
4190  *	Because the executable mappings created by this routine are copies of
4191  *	existing ones, it should not have to flush the instruction cache.
4192  */
4193 void
4194 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
4195     vm_offset_t src_addr)
4196 {
4197 	struct rwlock *lock;
4198 	pd_entry_t *l0, *l1, *l2, srcptepaddr;
4199 	pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
4200 	vm_offset_t addr, end_addr, va_next;
4201 	vm_page_t dst_l2pg, dstmpte, srcmpte;
4202 
4203 	PMAP_ASSERT_STAGE1(dst_pmap);
4204 	PMAP_ASSERT_STAGE1(src_pmap);
4205 
4206 	if (dst_addr != src_addr)
4207 		return;
4208 	end_addr = src_addr + len;
4209 	lock = NULL;
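	/*
	 * Lock the pmaps in a consistent order (by address) to avoid a
	 * deadlock with another thread acquiring the same two locks in the
	 * opposite order.
	 */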
4210 	if (dst_pmap < src_pmap) {
4211 		PMAP_LOCK(dst_pmap);
4212 		PMAP_LOCK(src_pmap);
4213 	} else {
4214 		PMAP_LOCK(src_pmap);
4215 		PMAP_LOCK(dst_pmap);
4216 	}
4217 	for (addr = src_addr; addr < end_addr; addr = va_next) {
4218 		l0 = pmap_l0(src_pmap, addr);
4219 		if (pmap_load(l0) == 0) {
4220 			va_next = (addr + L0_SIZE) & ~L0_OFFSET;
4221 			if (va_next < addr)
4222 				va_next = end_addr;
4223 			continue;
4224 		}
4225 		l1 = pmap_l0_to_l1(l0, addr);
4226 		if (pmap_load(l1) == 0) {
4227 			va_next = (addr + L1_SIZE) & ~L1_OFFSET;
4228 			if (va_next < addr)
4229 				va_next = end_addr;
4230 			continue;
4231 		}
4232 		va_next = (addr + L2_SIZE) & ~L2_OFFSET;
4233 		if (va_next < addr)
4234 			va_next = end_addr;
4235 		l2 = pmap_l1_to_l2(l1, addr);
4236 		srcptepaddr = pmap_load(l2);
4237 		if (srcptepaddr == 0)
4238 			continue;
4239 		if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
4240 			if ((addr & L2_OFFSET) != 0 ||
4241 			    addr + L2_SIZE > end_addr)
4242 				continue;
4243 			l2 = pmap_alloc_l2(dst_pmap, addr, &dst_l2pg, NULL);
4244 			if (l2 == NULL)
4245 				break;
4246 			if (pmap_load(l2) == 0 &&
4247 			    ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
4248 			    pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
4249 			    PMAP_ENTER_NORECLAIM, &lock))) {
4250 				mask = ATTR_AF | ATTR_SW_WIRED;
4251 				nbits = 0;
4252 				if ((srcptepaddr & ATTR_SW_DBM) != 0)
4253 					nbits |= ATTR_S1_AP_RW_BIT;
4254 				pmap_store(l2, (srcptepaddr & ~mask) | nbits);
4255 				pmap_resident_count_inc(dst_pmap, L2_SIZE /
4256 				    PAGE_SIZE);
4257 				atomic_add_long(&pmap_l2_mappings, 1);
4258 			} else
4259 				pmap_abort_ptp(dst_pmap, addr, dst_l2pg);
4260 			continue;
4261 		}
4262 		KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
4263 		    ("pmap_copy: invalid L2 entry"));
4264 		srcptepaddr &= ~ATTR_MASK;
4265 		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
4266 		KASSERT(srcmpte->ref_count > 0,
4267 		    ("pmap_copy: source page table page is unused"));
4268 		if (va_next > end_addr)
4269 			va_next = end_addr;
4270 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
4271 		src_pte = &src_pte[pmap_l3_index(addr)];
4272 		dstmpte = NULL;
4273 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
4274 			ptetemp = pmap_load(src_pte);
4275 
4276 			/*
4277 			 * Only copy mappings of managed pages.
4278 			 */
4279 			if ((ptetemp & ATTR_SW_MANAGED) == 0)
4280 				continue;
4281 
4282 			if (dstmpte != NULL) {
4283 				KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
4284 				    ("dstmpte pindex/addr mismatch"));
4285 				dstmpte->ref_count++;
4286 			} else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
4287 			    NULL)) == NULL)
4288 				goto out;
4289 			dst_pte = (pt_entry_t *)
4290 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
4291 			dst_pte = &dst_pte[pmap_l3_index(addr)];
4292 			if (pmap_load(dst_pte) == 0 &&
4293 			    pmap_try_insert_pv_entry(dst_pmap, addr,
4294 			    PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
4295 				/*
4296 				 * Clear the wired, modified, and accessed
4297 				 * (referenced) bits during the copy.
4298 				 */
4299 				mask = ATTR_AF | ATTR_SW_WIRED;
4300 				nbits = 0;
4301 				if ((ptetemp & ATTR_SW_DBM) != 0)
4302 					nbits |= ATTR_S1_AP_RW_BIT;
4303 				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
4304 				pmap_resident_count_inc(dst_pmap, 1);
4305 			} else {
4306 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
4307 				goto out;
4308 			}
4309 			/* Have we copied all of the valid mappings? */
4310 			if (dstmpte->ref_count >= srcmpte->ref_count)
4311 				break;
4312 		}
4313 	}
4314 out:
4315 	/*
4316 	 * XXX This barrier may not be needed because the destination pmap is
4317 	 * not active.
4318 	 */
4319 	dsb(ishst);
4320 
4321 	if (lock != NULL)
4322 		rw_wunlock(lock);
4323 	PMAP_UNLOCK(src_pmap);
4324 	PMAP_UNLOCK(dst_pmap);
4325 }
4326 
4327 /*
4328  *	pmap_zero_page zeros the specified hardware page by accessing it
4329  *	through the direct map.
4330  */
4331 void
4332 pmap_zero_page(vm_page_t m)
4333 {
4334 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4335 
4336 	pagezero((void *)va);
4337 }
4338 
4339 /*
4340  *	pmap_zero_page_area zeros the specified area of a hardware page by
4341  *	accessing it through the direct map.
4342  *
4343  *	off and size may not cover an area beyond a single hardware page.
4344  */
4345 void
4346 pmap_zero_page_area(vm_page_t m, int off, int size)
4347 {
4348 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4349 
4350 	if (off == 0 && size == PAGE_SIZE)
4351 		pagezero((void *)va);
4352 	else
4353 		bzero((char *)va + off, size);
4354 }
4355 
4356 /*
4357  *	pmap_copy_page copies the specified (machine independent)
4358  *	page by accessing both the source and the destination pages
4359  *	through the direct map and copying one machine dependent
4360  *	page at a time.
4361  */
4362 void
4363 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
4364 {
4365 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
4366 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
4367 
4368 	pagecopy((void *)src, (void *)dst);
4369 }
4370 
4371 int unmapped_buf_allowed = 1;
4372 
4373 void
4374 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4375     vm_offset_t b_offset, int xfersize)
4376 {
4377 	void *a_cp, *b_cp;
4378 	vm_page_t m_a, m_b;
4379 	vm_paddr_t p_a, p_b;
4380 	vm_offset_t a_pg_offset, b_pg_offset;
4381 	int cnt;
4382 
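	/*
	 * Copy the data in chunks; each chunk is limited by the remaining
	 * transfer size and by the boundaries of the current source and
	 * destination pages.
	 */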
4383 	while (xfersize > 0) {
4384 		a_pg_offset = a_offset & PAGE_MASK;
4385 		m_a = ma[a_offset >> PAGE_SHIFT];
4386 		p_a = m_a->phys_addr;
4387 		b_pg_offset = b_offset & PAGE_MASK;
4388 		m_b = mb[b_offset >> PAGE_SHIFT];
4389 		p_b = m_b->phys_addr;
4390 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4391 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4392 		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
4393 			panic("!DMAP a %lx", p_a);
4394 		} else {
4395 			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
4396 		}
4397 		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
4398 			panic("!DMAP b %lx", p_b);
4399 		} else {
4400 			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
4401 		}
4402 		bcopy(a_cp, b_cp, cnt);
4403 		a_offset += cnt;
4404 		b_offset += cnt;
4405 		xfersize -= cnt;
4406 	}
4407 }
4408 
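/*
 * Temporarily map a page for use inside the kernel.  All physical memory is
 * assumed to be covered by the direct map, so the temporary mapping is simply
 * the page's direct map address and there is nothing to undo afterwards.
 */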
4409 vm_offset_t
4410 pmap_quick_enter_page(vm_page_t m)
4411 {
4412 
4413 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
4414 }
4415 
4416 void
4417 pmap_quick_remove_page(vm_offset_t addr)
4418 {
4419 }
4420 
4421 /*
4422  * Returns true if the pmap's pv is one of the first
4423  * 16 pvs linked to from this page.  This count may
4424  * be changed upwards or downwards in the future; it
4425  * is only necessary that true be returned for a small
4426  * subset of pmaps for proper page aging.
4427  */
4428 boolean_t
4429 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4430 {
4431 	struct md_page *pvh;
4432 	struct rwlock *lock;
4433 	pv_entry_t pv;
4434 	int loops = 0;
4435 	boolean_t rv;
4436 
4437 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4438 	    ("pmap_page_exists_quick: page %p is not managed", m));
4439 	rv = FALSE;
4440 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4441 	rw_rlock(lock);
4442 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4443 		if (PV_PMAP(pv) == pmap) {
4444 			rv = TRUE;
4445 			break;
4446 		}
4447 		loops++;
4448 		if (loops >= 16)
4449 			break;
4450 	}
4451 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4452 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4453 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4454 			if (PV_PMAP(pv) == pmap) {
4455 				rv = TRUE;
4456 				break;
4457 			}
4458 			loops++;
4459 			if (loops >= 16)
4460 				break;
4461 		}
4462 	}
4463 	rw_runlock(lock);
4464 	return (rv);
4465 }
4466 
4467 /*
4468  *	pmap_page_wired_mappings:
4469  *
4470  *	Return the number of managed mappings to the given physical page
4471  *	that are wired.
4472  */
4473 int
4474 pmap_page_wired_mappings(vm_page_t m)
4475 {
4476 	struct rwlock *lock;
4477 	struct md_page *pvh;
4478 	pmap_t pmap;
4479 	pt_entry_t *pte;
4480 	pv_entry_t pv;
4481 	int count, lvl, md_gen, pvh_gen;
4482 
4483 	if ((m->oflags & VPO_UNMANAGED) != 0)
4484 		return (0);
4485 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4486 	rw_rlock(lock);
4487 restart:
4488 	count = 0;
4489 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4490 		pmap = PV_PMAP(pv);
4491 		if (!PMAP_TRYLOCK(pmap)) {
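			/*
			 * To avoid a deadlock, drop the PV list lock before
			 * blocking on the pmap lock, then recheck the
			 * generation count to detect concurrent changes to
			 * the PV list.
			 */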
4492 			md_gen = m->md.pv_gen;
4493 			rw_runlock(lock);
4494 			PMAP_LOCK(pmap);
4495 			rw_rlock(lock);
4496 			if (md_gen != m->md.pv_gen) {
4497 				PMAP_UNLOCK(pmap);
4498 				goto restart;
4499 			}
4500 		}
4501 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4502 		if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4503 			count++;
4504 		PMAP_UNLOCK(pmap);
4505 	}
4506 	if ((m->flags & PG_FICTITIOUS) == 0) {
4507 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4508 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4509 			pmap = PV_PMAP(pv);
4510 			if (!PMAP_TRYLOCK(pmap)) {
4511 				md_gen = m->md.pv_gen;
4512 				pvh_gen = pvh->pv_gen;
4513 				rw_runlock(lock);
4514 				PMAP_LOCK(pmap);
4515 				rw_rlock(lock);
4516 				if (md_gen != m->md.pv_gen ||
4517 				    pvh_gen != pvh->pv_gen) {
4518 					PMAP_UNLOCK(pmap);
4519 					goto restart;
4520 				}
4521 			}
4522 			pte = pmap_pte(pmap, pv->pv_va, &lvl);
4523 			if (pte != NULL &&
4524 			    (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4525 				count++;
4526 			PMAP_UNLOCK(pmap);
4527 		}
4528 	}
4529 	rw_runlock(lock);
4530 	return (count);
4531 }
4532 
4533 /*
4534  * Returns true if the given page is mapped individually or as part of
4535  * a 2mpage.  Otherwise, returns false.
4536  */
4537 bool
4538 pmap_page_is_mapped(vm_page_t m)
4539 {
4540 	struct rwlock *lock;
4541 	bool rv;
4542 
4543 	if ((m->oflags & VPO_UNMANAGED) != 0)
4544 		return (false);
4545 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4546 	rw_rlock(lock);
4547 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
4548 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4549 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
4550 	rw_runlock(lock);
4551 	return (rv);
4552 }
4553 
4554 /*
4555  * Destroy all managed, non-wired mappings in the given user-space
4556  * pmap.  This pmap cannot be active on any processor besides the
4557  * caller.
4558  *
4559  * This function cannot be applied to the kernel pmap.  Moreover, it
4560  * is not intended for general use.  It is only to be used during
4561  * process termination.  Consequently, it can be implemented in ways
4562  * that make it faster than pmap_remove().  First, it can more quickly
4563  * destroy mappings by iterating over the pmap's collection of PV
4564  * entries, rather than searching the page table.  Second, it doesn't
4565  * have to test and clear the page table entries atomically, because
4566  * no processor is currently accessing the user address space.  In
4567  * particular, a page table entry's dirty bit won't change state once
4568  * this function starts.
4569  */
4570 void
4571 pmap_remove_pages(pmap_t pmap)
4572 {
4573 	pd_entry_t *pde;
4574 	pt_entry_t *pte, tpte;
4575 	struct spglist free;
4576 	vm_page_t m, ml3, mt;
4577 	pv_entry_t pv;
4578 	struct md_page *pvh;
4579 	struct pv_chunk *pc, *npc;
4580 	struct rwlock *lock;
4581 	int64_t bit;
4582 	uint64_t inuse, bitmask;
4583 	int allfree, field, freed, idx, lvl;
4584 	vm_paddr_t pa;
4585 
4586 	KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
4587 
4588 	lock = NULL;
4589 
4590 	SLIST_INIT(&free);
4591 	PMAP_LOCK(pmap);
4592 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4593 		allfree = 1;
4594 		freed = 0;
4595 		for (field = 0; field < _NPCM; field++) {
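			/*
			 * A clear bit in pc_map denotes an allocated pv
			 * entry; collect those bits in "inuse" and visit the
			 * corresponding entries one at a time.
			 */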
4596 			inuse = ~pc->pc_map[field] & pc_freemask[field];
4597 			while (inuse != 0) {
4598 				bit = ffsl(inuse) - 1;
4599 				bitmask = 1UL << bit;
4600 				idx = field * 64 + bit;
4601 				pv = &pc->pc_pventry[idx];
4602 				inuse &= ~bitmask;
4603 
4604 				pde = pmap_pde(pmap, pv->pv_va, &lvl);
4605 				KASSERT(pde != NULL,
4606 				    ("Attempting to remove an unmapped page"));
4607 
4608 				switch (lvl) {
4609 				case 1:
4610 					pte = pmap_l1_to_l2(pde, pv->pv_va);
4611 					tpte = pmap_load(pte);
4612 					KASSERT((tpte & ATTR_DESCR_MASK) ==
4613 					    L2_BLOCK,
4614 					    ("Attempting to remove an invalid "
4615 					    "block: %lx", tpte));
4616 					break;
4617 				case 2:
4618 					pte = pmap_l2_to_l3(pde, pv->pv_va);
4619 					tpte = pmap_load(pte);
4620 					KASSERT((tpte & ATTR_DESCR_MASK) ==
4621 					    L3_PAGE,
4622 					    ("Attempting to remove an invalid "
4623 					     "page: %lx", tpte));
4624 					break;
4625 				default:
4626 					panic(
4627 					    "Invalid page directory level: %d",
4628 					    lvl);
4629 				}
4630 
4631 /*
4632  * We cannot remove wired pages from a process' mapping at this time
4633  */
4634 				if (tpte & ATTR_SW_WIRED) {
4635 					allfree = 0;
4636 					continue;
4637 				}
4638 
4639 				pa = tpte & ~ATTR_MASK;
4640 
4641 				m = PHYS_TO_VM_PAGE(pa);
4642 				KASSERT(m->phys_addr == pa,
4643 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4644 				    m, (uintmax_t)m->phys_addr,
4645 				    (uintmax_t)tpte));
4646 
4647 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4648 				    m < &vm_page_array[vm_page_array_size],
4649 				    ("pmap_remove_pages: bad pte %#jx",
4650 				    (uintmax_t)tpte));
4651 
4652 				/*
4653 				 * Because this pmap is not active on other
4654 				 * processors, the dirty bit cannot have
4655 				 * changed state since we last loaded pte.
4656 				 */
4657 				pmap_clear(pte);
4658 
4659 				/*
4660 				 * Update the vm_page_t clean/reference bits.
4661 				 */
4662 				if (pmap_pte_dirty(pmap, tpte)) {
4663 					switch (lvl) {
4664 					case 1:
4665 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4666 							vm_page_dirty(mt);
4667 						break;
4668 					case 2:
4669 						vm_page_dirty(m);
4670 						break;
4671 					}
4672 				}
4673 
4674 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
4675 
4676 				/* Mark free */
4677 				pc->pc_map[field] |= bitmask;
4678 				switch (lvl) {
4679 				case 1:
4680 					pmap_resident_count_dec(pmap,
4681 					    L2_SIZE / PAGE_SIZE);
4682 					pvh = pa_to_pvh(tpte & ~ATTR_MASK);
4683 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4684 					pvh->pv_gen++;
4685 					if (TAILQ_EMPTY(&pvh->pv_list)) {
4686 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4687 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
4688 							    TAILQ_EMPTY(&mt->md.pv_list))
4689 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
4690 					}
4691 					ml3 = pmap_remove_pt_page(pmap,
4692 					    pv->pv_va);
4693 					if (ml3 != NULL) {
4694 						KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
4695 						    ("pmap_remove_pages: l3 page not promoted"));
4696 						pmap_resident_count_dec(pmap, 1);
4697 						KASSERT(ml3->ref_count == NL3PG,
4698 						    ("pmap_remove_pages: l3 page ref count error"));
4699 						ml3->ref_count = 0;
4700 						pmap_add_delayed_free_list(ml3,
4701 						    &free, FALSE);
4702 					}
4703 					break;
4704 				case 2:
4705 					pmap_resident_count_dec(pmap, 1);
4706 					TAILQ_REMOVE(&m->md.pv_list, pv,
4707 					    pv_next);
4708 					m->md.pv_gen++;
4709 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
4710 					    TAILQ_EMPTY(&m->md.pv_list) &&
4711 					    (m->flags & PG_FICTITIOUS) == 0) {
4712 						pvh = pa_to_pvh(
4713 						    VM_PAGE_TO_PHYS(m));
4714 						if (TAILQ_EMPTY(&pvh->pv_list))
4715 							vm_page_aflag_clear(m,
4716 							    PGA_WRITEABLE);
4717 					}
4718 					break;
4719 				}
4720 				pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
4721 				    &free);
4722 				freed++;
4723 			}
4724 		}
4725 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
4726 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
4727 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
4728 		if (allfree) {
4729 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4730 			free_pv_chunk(pc);
4731 		}
4732 	}
4733 	if (lock != NULL)
4734 		rw_wunlock(lock);
4735 	pmap_invalidate_all(pmap);
4736 	PMAP_UNLOCK(pmap);
4737 	vm_page_free_pages_toq(&free, true);
4738 }
4739 
4740 /*
4741  * This is used to check if a page has been accessed or modified.
4742  */
4743 static boolean_t
4744 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
4745 {
4746 	struct rwlock *lock;
4747 	pv_entry_t pv;
4748 	struct md_page *pvh;
4749 	pt_entry_t *pte, mask, value;
4750 	pmap_t pmap;
4751 	int lvl, md_gen, pvh_gen;
4752 	boolean_t rv;
4753 
4754 	rv = FALSE;
4755 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4756 	rw_rlock(lock);
4757 restart:
4758 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4759 		pmap = PV_PMAP(pv);
4760 		PMAP_ASSERT_STAGE1(pmap);
4761 		if (!PMAP_TRYLOCK(pmap)) {
4762 			md_gen = m->md.pv_gen;
4763 			rw_runlock(lock);
4764 			PMAP_LOCK(pmap);
4765 			rw_rlock(lock);
4766 			if (md_gen != m->md.pv_gen) {
4767 				PMAP_UNLOCK(pmap);
4768 				goto restart;
4769 			}
4770 		}
4771 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4772 		KASSERT(lvl == 3,
4773 		    ("pmap_page_test_mappings: Invalid level %d", lvl));
4774 		mask = 0;
4775 		value = 0;
4776 		if (modified) {
4777 			mask |= ATTR_S1_AP_RW_BIT;
4778 			value |= ATTR_S1_AP(ATTR_S1_AP_RW);
4779 		}
4780 		if (accessed) {
4781 			mask |= ATTR_AF | ATTR_DESCR_MASK;
4782 			value |= ATTR_AF | L3_PAGE;
4783 		}
4784 		rv = (pmap_load(pte) & mask) == value;
4785 		PMAP_UNLOCK(pmap);
4786 		if (rv)
4787 			goto out;
4788 	}
4789 	if ((m->flags & PG_FICTITIOUS) == 0) {
4790 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4791 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4792 			pmap = PV_PMAP(pv);
4793 			PMAP_ASSERT_STAGE1(pmap);
4794 			if (!PMAP_TRYLOCK(pmap)) {
4795 				md_gen = m->md.pv_gen;
4796 				pvh_gen = pvh->pv_gen;
4797 				rw_runlock(lock);
4798 				PMAP_LOCK(pmap);
4799 				rw_rlock(lock);
4800 				if (md_gen != m->md.pv_gen ||
4801 				    pvh_gen != pvh->pv_gen) {
4802 					PMAP_UNLOCK(pmap);
4803 					goto restart;
4804 				}
4805 			}
4806 			pte = pmap_pte(pmap, pv->pv_va, &lvl);
4807 			KASSERT(lvl == 2,
4808 			    ("pmap_page_test_mappings: Invalid level %d", lvl));
4809 			mask = 0;
4810 			value = 0;
4811 			if (modified) {
4812 				mask |= ATTR_S1_AP_RW_BIT;
4813 				value |= ATTR_S1_AP(ATTR_S1_AP_RW);
4814 			}
4815 			if (accessed) {
4816 				mask |= ATTR_AF | ATTR_DESCR_MASK;
4817 				value |= ATTR_AF | L2_BLOCK;
4818 			}
4819 			rv = (pmap_load(pte) & mask) == value;
4820 			PMAP_UNLOCK(pmap);
4821 			if (rv)
4822 				goto out;
4823 		}
4824 	}
4825 out:
4826 	rw_runlock(lock);
4827 	return (rv);
4828 }
4829 
4830 /*
4831  *	pmap_is_modified:
4832  *
4833  *	Return whether or not the specified physical page was modified
4834  *	in any physical maps.
4835  */
4836 boolean_t
4837 pmap_is_modified(vm_page_t m)
4838 {
4839 
4840 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4841 	    ("pmap_is_modified: page %p is not managed", m));
4842 
4843 	/*
4844 	 * If the page is not busied then this check is racy.
4845 	 */
4846 	if (!pmap_page_is_write_mapped(m))
4847 		return (FALSE);
4848 	return (pmap_page_test_mappings(m, FALSE, TRUE));
4849 }
4850 
4851 /*
4852  *	pmap_is_prefaultable:
4853  *
4854  *	Return whether or not the specified virtual address is eligible
4855  *	for prefault.
4856  */
4857 boolean_t
4858 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4859 {
4860 	pt_entry_t *pte;
4861 	boolean_t rv;
4862 	int lvl;
4863 
4864 	rv = FALSE;
4865 	PMAP_LOCK(pmap);
4866 	pte = pmap_pte(pmap, addr, &lvl);
4867 	if (pte != NULL && pmap_load(pte) != 0) {
4868 		rv = TRUE;
4869 	}
4870 	PMAP_UNLOCK(pmap);
4871 	return (rv);
4872 }
4873 
4874 /*
4875  *	pmap_is_referenced:
4876  *
4877  *	Return whether or not the specified physical page was referenced
4878  *	in any physical maps.
4879  */
4880 boolean_t
4881 pmap_is_referenced(vm_page_t m)
4882 {
4883 
4884 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4885 	    ("pmap_is_referenced: page %p is not managed", m));
4886 	return (pmap_page_test_mappings(m, TRUE, FALSE));
4887 }
4888 
4889 /*
4890  * Clear the write and modified bits in each of the given page's mappings.
4891  */
4892 void
4893 pmap_remove_write(vm_page_t m)
4894 {
4895 	struct md_page *pvh;
4896 	pmap_t pmap;
4897 	struct rwlock *lock;
4898 	pv_entry_t next_pv, pv;
4899 	pt_entry_t oldpte, *pte;
4900 	vm_offset_t va;
4901 	int lvl, md_gen, pvh_gen;
4902 
4903 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4904 	    ("pmap_remove_write: page %p is not managed", m));
4905 	vm_page_assert_busied(m);
4906 
4907 	if (!pmap_page_is_write_mapped(m))
4908 		return;
4909 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4910 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4911 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
4912 retry_pv_loop:
4913 	rw_wlock(lock);
4914 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4915 		pmap = PV_PMAP(pv);
4916 		PMAP_ASSERT_STAGE1(pmap);
4917 		if (!PMAP_TRYLOCK(pmap)) {
4918 			pvh_gen = pvh->pv_gen;
4919 			rw_wunlock(lock);
4920 			PMAP_LOCK(pmap);
4921 			rw_wlock(lock);
4922 			if (pvh_gen != pvh->pv_gen) {
4923 				PMAP_UNLOCK(pmap);
4924 				rw_wunlock(lock);
4925 				goto retry_pv_loop;
4926 			}
4927 		}
4928 		va = pv->pv_va;
4929 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4930 		if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
4931 			(void)pmap_demote_l2_locked(pmap, pte, va, &lock);
4932 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4933 		    ("inconsistent pv lock %p %p for page %p",
4934 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4935 		PMAP_UNLOCK(pmap);
4936 	}
4937 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4938 		pmap = PV_PMAP(pv);
4939 		PMAP_ASSERT_STAGE1(pmap);
4940 		if (!PMAP_TRYLOCK(pmap)) {
4941 			pvh_gen = pvh->pv_gen;
4942 			md_gen = m->md.pv_gen;
4943 			rw_wunlock(lock);
4944 			PMAP_LOCK(pmap);
4945 			rw_wlock(lock);
4946 			if (pvh_gen != pvh->pv_gen ||
4947 			    md_gen != m->md.pv_gen) {
4948 				PMAP_UNLOCK(pmap);
4949 				rw_wunlock(lock);
4950 				goto retry_pv_loop;
4951 			}
4952 		}
4953 		pte = pmap_pte(pmap, pv->pv_va, &lvl);
4954 		oldpte = pmap_load(pte);
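		/*
		 * Atomically write protect the mapping and clear
		 * ATTR_SW_DBM, retrying if the entry changed concurrently.
		 * If the old access permission was read-write, the page may
		 * have been modified, so mark it dirty.
		 */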
4955 retry:
4956 		if ((oldpte & ATTR_SW_DBM) != 0) {
4957 			if (!atomic_fcmpset_long(pte, &oldpte,
4958 			    (oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM))
4959 				goto retry;
4960 			if ((oldpte & ATTR_S1_AP_RW_BIT) ==
4961 			    ATTR_S1_AP(ATTR_S1_AP_RW))
4962 				vm_page_dirty(m);
4963 			pmap_invalidate_page(pmap, pv->pv_va);
4964 		}
4965 		PMAP_UNLOCK(pmap);
4966 	}
4967 	rw_wunlock(lock);
4968 	vm_page_aflag_clear(m, PGA_WRITEABLE);
4969 }
4970 
4971 /*
4972  *	pmap_ts_referenced:
4973  *
4974  *	Return a count of reference bits for a page, clearing those bits.
4975  *	It is not necessary for every reference bit to be cleared, but it
4976  *	is necessary that 0 only be returned when there are truly no
4977  *	reference bits set.
4978  *
4979  *	As an optimization, update the page's dirty field if a modified bit is
4980  *	found while counting reference bits.  This opportunistic update can be
4981  *	performed at low cost and can eliminate the need for some future calls
4982  *	to pmap_is_modified().  However, since this function stops after
4983  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
4984  *	dirty pages.  Those dirty pages will only be detected by a future call
4985  *	to pmap_is_modified().
4986  */
4987 int
4988 pmap_ts_referenced(vm_page_t m)
4989 {
4990 	struct md_page *pvh;
4991 	pv_entry_t pv, pvf;
4992 	pmap_t pmap;
4993 	struct rwlock *lock;
4994 	pd_entry_t *pde, tpde;
4995 	pt_entry_t *pte, tpte;
4996 	vm_offset_t va;
4997 	vm_paddr_t pa;
4998 	int cleared, lvl, md_gen, not_cleared, pvh_gen;
4999 	struct spglist free;
5000 
5001 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5002 	    ("pmap_ts_referenced: page %p is not managed", m));
5003 	SLIST_INIT(&free);
5004 	cleared = 0;
5005 	pa = VM_PAGE_TO_PHYS(m);
5006 	lock = PHYS_TO_PV_LIST_LOCK(pa);
5007 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
5008 	rw_wlock(lock);
5009 retry:
5010 	not_cleared = 0;
5011 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
5012 		goto small_mappings;
5013 	pv = pvf;
5014 	do {
5015 		if (pvf == NULL)
5016 			pvf = pv;
5017 		pmap = PV_PMAP(pv);
5018 		if (!PMAP_TRYLOCK(pmap)) {
5019 			pvh_gen = pvh->pv_gen;
5020 			rw_wunlock(lock);
5021 			PMAP_LOCK(pmap);
5022 			rw_wlock(lock);
5023 			if (pvh_gen != pvh->pv_gen) {
5024 				PMAP_UNLOCK(pmap);
5025 				goto retry;
5026 			}
5027 		}
5028 		va = pv->pv_va;
5029 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
5030 		KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
5031 		KASSERT(lvl == 1,
5032 		    ("pmap_ts_referenced: invalid pde level %d", lvl));
5033 		tpde = pmap_load(pde);
5034 		KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
5035 		    ("pmap_ts_referenced: found an invalid l1 table"));
5036 		pte = pmap_l1_to_l2(pde, pv->pv_va);
5037 		tpte = pmap_load(pte);
5038 		if (pmap_pte_dirty(pmap, tpte)) {
5039 			/*
5040 			 * Although "tpte" is mapping a 2MB page, because
5041 			 * this function is called at a 4KB page granularity,
5042 			 * we only update the 4KB page under test.
5043 			 */
5044 			vm_page_dirty(m);
5045 		}
5046 
5047 		if ((tpte & ATTR_AF) != 0) {
5048 			/*
5049 			 * Since this reference bit is shared by 512 4KB pages,
5050 			 * it should not be cleared every time it is tested.
5051 			 * Apply a simple "hash" function on the physical page
5052 			 * number, the virtual superpage number, and the pmap
5053 			 * address to select one 4KB page out of the 512 on
5054 			 * which testing the reference bit will result in
5055 			 * clearing that reference bit.  This function is
5056 			 * designed to avoid the selection of the same 4KB page
5057 			 * for every 2MB page mapping.
5058 			 *
5059 			 * On demotion, a mapping that hasn't been referenced
5060 			 * is simply destroyed.  To avoid the possibility of a
5061 			 * subsequent page fault on a demoted wired mapping,
5062 			 * always leave its reference bit set.  Moreover,
5063 			 * since the superpage is wired, the current state of
5064 			 * its reference bit won't affect page replacement.
5065 			 */
5066 			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
5067 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
5068 			    (tpte & ATTR_SW_WIRED) == 0) {
5069 				pmap_clear_bits(pte, ATTR_AF);
5070 				pmap_invalidate_page(pmap, pv->pv_va);
5071 				cleared++;
5072 			} else
5073 				not_cleared++;
5074 		}
5075 		PMAP_UNLOCK(pmap);
5076 		/* Rotate the PV list if it has more than one entry. */
5077 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
5078 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5079 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5080 			pvh->pv_gen++;
5081 		}
5082 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
5083 			goto out;
5084 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
5085 small_mappings:
5086 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
5087 		goto out;
5088 	pv = pvf;
5089 	do {
5090 		if (pvf == NULL)
5091 			pvf = pv;
5092 		pmap = PV_PMAP(pv);
5093 		if (!PMAP_TRYLOCK(pmap)) {
5094 			pvh_gen = pvh->pv_gen;
5095 			md_gen = m->md.pv_gen;
5096 			rw_wunlock(lock);
5097 			PMAP_LOCK(pmap);
5098 			rw_wlock(lock);
5099 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5100 				PMAP_UNLOCK(pmap);
5101 				goto retry;
5102 			}
5103 		}
5104 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
5105 		KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
5106 		KASSERT(lvl == 2,
5107 		    ("pmap_ts_referenced: invalid pde level %d", lvl));
5108 		tpde = pmap_load(pde);
5109 		KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
5110 		    ("pmap_ts_referenced: found an invalid l2 table"));
5111 		pte = pmap_l2_to_l3(pde, pv->pv_va);
5112 		tpte = pmap_load(pte);
5113 		if (pmap_pte_dirty(pmap, tpte))
5114 			vm_page_dirty(m);
5115 		if ((tpte & ATTR_AF) != 0) {
5116 			if ((tpte & ATTR_SW_WIRED) == 0) {
5117 				pmap_clear_bits(pte, ATTR_AF);
5118 				pmap_invalidate_page(pmap, pv->pv_va);
5119 				cleared++;
5120 			} else
5121 				not_cleared++;
5122 		}
5123 		PMAP_UNLOCK(pmap);
5124 		/* Rotate the PV list if it has more than one entry. */
5125 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
5126 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5127 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5128 			m->md.pv_gen++;
5129 		}
5130 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
5131 	    not_cleared < PMAP_TS_REFERENCED_MAX);
5132 out:
5133 	rw_wunlock(lock);
5134 	vm_page_free_pages_toq(&free, true);
5135 	return (cleared + not_cleared);
5136 }
5137 
5138 /*
5139  *	Apply the given advice to the specified range of addresses within the
5140  *	given pmap.  Depending on the advice, clear the referenced and/or
5141  *	modified flags in each mapping and set the mapped page's dirty field.
5142  */
5143 void
5144 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
5145 {
5146 	struct rwlock *lock;
5147 	vm_offset_t va, va_next;
5148 	vm_page_t m;
5149 	pd_entry_t *l0, *l1, *l2, oldl2;
5150 	pt_entry_t *l3, oldl3;
5151 
5152 	PMAP_ASSERT_STAGE1(pmap);
5153 
5154 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
5155 		return;
5156 
5157 	PMAP_LOCK(pmap);
5158 	for (; sva < eva; sva = va_next) {
5159 		l0 = pmap_l0(pmap, sva);
5160 		if (pmap_load(l0) == 0) {
5161 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
5162 			if (va_next < sva)
5163 				va_next = eva;
5164 			continue;
5165 		}
5166 		l1 = pmap_l0_to_l1(l0, sva);
5167 		if (pmap_load(l1) == 0) {
5168 			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
5169 			if (va_next < sva)
5170 				va_next = eva;
5171 			continue;
5172 		}
5173 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
5174 		if (va_next < sva)
5175 			va_next = eva;
5176 		l2 = pmap_l1_to_l2(l1, sva);
5177 		oldl2 = pmap_load(l2);
5178 		if (oldl2 == 0)
5179 			continue;
5180 		if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) {
5181 			if ((oldl2 & ATTR_SW_MANAGED) == 0)
5182 				continue;
5183 			lock = NULL;
5184 			if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) {
5185 				if (lock != NULL)
5186 					rw_wunlock(lock);
5187 
5188 				/*
5189 				 * The 2MB page mapping was destroyed.
5190 				 */
5191 				continue;
5192 			}
5193 
5194 			/*
5195 			 * Unless the page mappings are wired, remove the
5196 			 * mapping to a single page so that a subsequent
5197 			 * access may repromote.  Choosing the last page
5198 			 * within the address range [sva, min(va_next, eva))
5199 			 * generally results in more repromotions.  Since the
5200 			 * underlying page table page is fully populated, this
5201 			 * removal never frees a page table page.
5202 			 */
5203 			if ((oldl2 & ATTR_SW_WIRED) == 0) {
5204 				va = eva;
5205 				if (va > va_next)
5206 					va = va_next;
5207 				va -= PAGE_SIZE;
5208 				KASSERT(va >= sva,
5209 				    ("pmap_advise: no address gap"));
5210 				l3 = pmap_l2_to_l3(l2, va);
5211 				KASSERT(pmap_load(l3) != 0,
5212 				    ("pmap_advise: invalid PTE"));
5213 				pmap_remove_l3(pmap, l3, va, pmap_load(l2),
5214 				    NULL, &lock);
5215 			}
5216 			if (lock != NULL)
5217 				rw_wunlock(lock);
5218 		}
5219 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
5220 		    ("pmap_advise: invalid L2 entry after demotion"));
5221 		if (va_next > eva)
5222 			va_next = eva;
5223 		va = va_next;
5224 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
5225 		    sva += L3_SIZE) {
5226 			oldl3 = pmap_load(l3);
5227 			if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
5228 			    (ATTR_SW_MANAGED | L3_PAGE))
5229 				goto maybe_invlrng;
5230 			else if (pmap_pte_dirty(pmap, oldl3)) {
5231 				if (advice == MADV_DONTNEED) {
5232 					/*
5233 					 * Future calls to pmap_is_modified()
5234 					 * can be avoided by making the page
5235 					 * dirty now.
5236 					 */
5237 					m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK);
5238 					vm_page_dirty(m);
5239 				}
5240 				while (!atomic_fcmpset_long(l3, &oldl3,
5241 				    (oldl3 & ~ATTR_AF) |
5242 				    ATTR_S1_AP(ATTR_S1_AP_RO)))
5243 					cpu_spinwait();
5244 			} else if ((oldl3 & ATTR_AF) != 0)
5245 				pmap_clear_bits(l3, ATTR_AF);
5246 			else
5247 				goto maybe_invlrng;
5248 			if (va == va_next)
5249 				va = sva;
5250 			continue;
5251 maybe_invlrng:
5252 			if (va != va_next) {
5253 				pmap_invalidate_range(pmap, va, sva);
5254 				va = va_next;
5255 			}
5256 		}
5257 		if (va != va_next)
5258 			pmap_invalidate_range(pmap, va, sva);
5259 	}
5260 	PMAP_UNLOCK(pmap);
5261 }
5262 
5263 /*
5264  *	Clear the modify bits on the specified physical page.
5265  */
5266 void
5267 pmap_clear_modify(vm_page_t m)
5268 {
5269 	struct md_page *pvh;
5270 	struct rwlock *lock;
5271 	pmap_t pmap;
5272 	pv_entry_t next_pv, pv;
5273 	pd_entry_t *l2, oldl2;
5274 	pt_entry_t *l3, oldl3;
5275 	vm_offset_t va;
5276 	int md_gen, pvh_gen;
5277 
5278 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5279 	    ("pmap_clear_modify: page %p is not managed", m));
5280 	vm_page_assert_busied(m);
5281 
5282 	if (!pmap_page_is_write_mapped(m))
5283 		return;
5284 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5285 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
5286 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5287 	rw_wlock(lock);
5288 restart:
5289 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5290 		pmap = PV_PMAP(pv);
5291 		PMAP_ASSERT_STAGE1(pmap);
5292 		if (!PMAP_TRYLOCK(pmap)) {
5293 			pvh_gen = pvh->pv_gen;
5294 			rw_wunlock(lock);
5295 			PMAP_LOCK(pmap);
5296 			rw_wlock(lock);
5297 			if (pvh_gen != pvh->pv_gen) {
5298 				PMAP_UNLOCK(pmap);
5299 				goto restart;
5300 			}
5301 		}
5302 		va = pv->pv_va;
5303 		l2 = pmap_l2(pmap, va);
5304 		oldl2 = pmap_load(l2);
5305 		/* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
5306 		if ((oldl2 & ATTR_SW_DBM) != 0 &&
5307 		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
5308 		    (oldl2 & ATTR_SW_WIRED) == 0) {
5309 			/*
5310 			 * Write protect the mapping to a single page so that
5311 			 * a subsequent write access may repromote.
5312 			 */
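			/*
			 * For example, with hypothetical addresses: if the
			 * demoted 2MB mapping started at PA 0x40200000 and
			 * "m" is the page at PA 0x40234000, "va" is advanced
			 * by 0x34000 so that pmap_l2_to_l3() returns the L3
			 * entry that maps "m".
			 */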
5313 			va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK);
5314 			l3 = pmap_l2_to_l3(l2, va);
5315 			oldl3 = pmap_load(l3);
5316 			while (!atomic_fcmpset_long(l3, &oldl3,
5317 			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
5318 				cpu_spinwait();
5319 			vm_page_dirty(m);
5320 			pmap_invalidate_page(pmap, va);
5321 		}
5322 		PMAP_UNLOCK(pmap);
5323 	}
5324 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5325 		pmap = PV_PMAP(pv);
5326 		PMAP_ASSERT_STAGE1(pmap);
5327 		if (!PMAP_TRYLOCK(pmap)) {
5328 			md_gen = m->md.pv_gen;
5329 			pvh_gen = pvh->pv_gen;
5330 			rw_wunlock(lock);
5331 			PMAP_LOCK(pmap);
5332 			rw_wlock(lock);
5333 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5334 				PMAP_UNLOCK(pmap);
5335 				goto restart;
5336 			}
5337 		}
5338 		l2 = pmap_l2(pmap, pv->pv_va);
5339 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
5340 		oldl3 = pmap_load(l3);
5341 		if (pmap_l3_valid(oldl3) &&
5342 		    (oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){
5343 			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
5344 			pmap_invalidate_page(pmap, pv->pv_va);
5345 		}
5346 		PMAP_UNLOCK(pmap);
5347 	}
5348 	rw_wunlock(lock);
5349 }
5350 
5351 void *
5352 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
5353 {
5354 	struct pmap_preinit_mapping *ppim;
5355 	vm_offset_t va, offset;
5356 	pd_entry_t *pde;
5357 	pt_entry_t *l2;
5358 	int i, lvl, l2_blocks, free_l2_count, start_idx;
5359 
5360 	if (!vm_initialized) {
5361 		/*
5362 		 * No L3 page tables yet, so map whole L2 blocks; start VA is:
5363 		 * 	preinit_map_va + start_idx * L2_SIZE
5364 		 * There may be duplicate mappings (multiple VA -> same PA) but
5365 		 * ARM64 dcache is always PIPT so that's acceptable.
5366 		 */
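		/*
		 * For example, with hypothetical values: pa = 0x80001000 and
		 * size = 0x300000 span two 2MiB blocks, so l2_blocks = 2,
		 * offset = 0x1000, and the caller gets back
		 * preinit_map_va + start_idx * L2_SIZE + 0x1000.
		 */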
5367 		if (size == 0)
5368 			return (NULL);
5369 
5370 		/* Calculate how many L2 blocks are needed for the mapping */
5371 		l2_blocks = (roundup2(pa + size, L2_SIZE) -
5372 		    rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
5373 
5374 		offset = pa & L2_OFFSET;
5375 
5376 		if (preinit_map_va == 0)
5377 			return (NULL);
5378 
5379 		/* Map 2MiB L2 blocks from reserved VA space */
5380 
5381 		free_l2_count = 0;
5382 		start_idx = -1;
5383 		/* Find enough free contiguous VA space */
5384 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5385 			ppim = pmap_preinit_mapping + i;
5386 			if (free_l2_count > 0 && ppim->pa != 0) {
5387 				/* Not enough space here */
5388 				free_l2_count = 0;
5389 				start_idx = -1;
5390 				continue;
5391 			}
5392 
5393 			if (ppim->pa == 0) {
5394 				/* Free L2 block */
5395 				if (start_idx == -1)
5396 					start_idx = i;
5397 				free_l2_count++;
5398 				if (free_l2_count == l2_blocks)
5399 					break;
5400 			}
5401 		}
5402 		if (free_l2_count != l2_blocks)
5403 			panic("%s: too many preinit mappings", __func__);
5404 
5405 		va = preinit_map_va + (start_idx * L2_SIZE);
5406 		for (i = start_idx; i < start_idx + l2_blocks; i++) {
5407 			/* Mark entries as allocated */
5408 			ppim = pmap_preinit_mapping + i;
5409 			ppim->pa = pa;
5410 			ppim->va = va + offset;
5411 			ppim->size = size;
5412 		}
5413 
5414 		/* Map L2 blocks */
5415 		pa = rounddown2(pa, L2_SIZE);
5416 		for (i = 0; i < l2_blocks; i++) {
5417 			pde = pmap_pde(kernel_pmap, va, &lvl);
5418 			KASSERT(pde != NULL,
5419 			    ("pmap_mapbios: Invalid page entry, va: 0x%lx",
5420 			    va));
5421 			KASSERT(lvl == 1,
5422 			    ("pmap_mapbios: Invalid level %d", lvl));
5423 
5424 			/* Insert L2_BLOCK */
5425 			l2 = pmap_l1_to_l2(pde, va);
5426 			pmap_load_store(l2,
5427 			    pa | ATTR_DEFAULT | ATTR_S1_XN |
5428 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
5429 
5430 			va += L2_SIZE;
5431 			pa += L2_SIZE;
5432 		}
5433 		pmap_invalidate_all(kernel_pmap);
5434 
5435 		va = preinit_map_va + (start_idx * L2_SIZE);
5436 
5437 	} else {
5438 		/* kva_alloc may be used to map the pages */
5439 		offset = pa & PAGE_MASK;
5440 		size = round_page(offset + size);
5441 
5442 		va = kva_alloc(size);
5443 		if (va == 0)
5444 			panic("%s: Couldn't allocate KVA", __func__);
5445 
5446 		pde = pmap_pde(kernel_pmap, va, &lvl);
5447 		KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
5448 
5449 		/* L3 table is linked */
5450 		va = trunc_page(va);
5451 		pa = trunc_page(pa);
5452 		pmap_kenter(va, size, pa, memory_mapping_mode(pa));
5453 	}
5454 
5455 	return ((void *)(va + offset));
5456 }
5457 
5458 void
5459 pmap_unmapbios(vm_offset_t va, vm_size_t size)
5460 {
5461 	struct pmap_preinit_mapping *ppim;
5462 	vm_offset_t offset, tmpsize, va_trunc;
5463 	pd_entry_t *pde;
5464 	pt_entry_t *l2;
5465 	int i, lvl, l2_blocks, block;
5466 	bool preinit_map;
5467 
5468 	l2_blocks =
5469 	   (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
5470 	KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
5471 
5472 	/* Remove preinit mapping */
5473 	preinit_map = false;
5474 	block = 0;
5475 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5476 		ppim = pmap_preinit_mapping + i;
5477 		if (ppim->va == va) {
5478 			KASSERT(ppim->size == size,
5479 			    ("pmap_unmapbios: size mismatch"));
5480 			ppim->va = 0;
5481 			ppim->pa = 0;
5482 			ppim->size = 0;
5483 			preinit_map = true;
5484 			offset = block * L2_SIZE;
5485 			va_trunc = rounddown2(va, L2_SIZE) + offset;
5486 
5487 			/* Remove L2_BLOCK */
5488 			pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
5489 			KASSERT(pde != NULL,
5490 			    ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
5491 			    va_trunc));
5492 			l2 = pmap_l1_to_l2(pde, va_trunc);
5493 			pmap_clear(l2);
5494 
5495 			if (block == (l2_blocks - 1))
5496 				break;
5497 			block++;
5498 		}
5499 	}
5500 	if (preinit_map) {
5501 		pmap_invalidate_all(kernel_pmap);
5502 		return;
5503 	}
5504 
5505 	/* Unmap the pages reserved with kva_alloc. */
5506 	if (vm_initialized) {
5507 		offset = va & PAGE_MASK;
5508 		size = round_page(offset + size);
5509 		va = trunc_page(va);
5510 
5511 		pde = pmap_pde(kernel_pmap, va, &lvl);
5512 		KASSERT(pde != NULL,
5513 		    ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va));
5514 		KASSERT(lvl == 2, ("pmap_unmapbios: Invalid level %d", lvl));
5515 
5516 		/* Unmap and invalidate the pages */
5517 		for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
5518 			pmap_kremove(va + tmpsize);
5519 
5520 		kva_free(va, size);
5521 	}
5522 }
5523 
5524 /*
5525  * Sets the memory attribute for the specified page.
5526  */
5527 void
5528 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5529 {
5530 
5531 	m->md.pv_memattr = ma;
5532 
5533 	/*
5534 	 * If "m" is a normal page, update its direct mapping.  This update
5535 	 * can be relied upon to perform any cache operations that are
5536 	 * required for data coherence.
5537 	 */
5538 	if ((m->flags & PG_FICTITIOUS) == 0 &&
5539 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
5540 	    m->md.pv_memattr) != 0)
5541 		panic("memory attribute change on the direct map failed");
5542 }
5543 
5544 /*
5545  * Changes the specified virtual address range's memory type to that given by
5546  * the parameter "mode".  The specified virtual address range must be
5547  * completely contained within either the direct map or the kernel map.  If
5548  * the virtual address range is contained within the kernel map, then the
5549  * memory type for each of the corresponding ranges of the direct map is also
5550  * changed.  (The corresponding ranges of the direct map are those ranges that
5551  * map the same physical pages as the specified virtual address range.)  These
5552  * changes to the direct map are necessary because the architecture does not
5553  * guarantee coherent behavior when two or more mappings to the same physical
5554  * page have mismatched memory attributes.
5555  *
5556  * Returns zero if the change completed successfully, and either EINVAL or
5557  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
5558  * of the virtual address range was not mapped, and ENOMEM is returned if
5559  * there was insufficient memory available to complete the change.  In the
5560  * latter case, the memory type may have been changed on some part of the
5561  * virtual address range or the direct map.
5562  */
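/*
 * Illustrative use only: a caller needing an uncached view of a kernel
 * buffer might do something like
 *
 *	error = pmap_change_attr(va, len, VM_MEMATTR_UNCACHEABLE);
 *
 * where "va" and "len" describe the caller's mapping, and handle a non-zero
 * return as described above.
 */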
5563 int
5564 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
5565 {
5566 	int error;
5567 
5568 	PMAP_LOCK(kernel_pmap);
5569 	error = pmap_change_attr_locked(va, size, mode);
5570 	PMAP_UNLOCK(kernel_pmap);
5571 	return (error);
5572 }
5573 
5574 static int
5575 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
5576 {
5577 	vm_offset_t base, offset, tmpva;
5578 	pt_entry_t l3, *pte, *newpte;
5579 	int lvl;
5580 
5581 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
5582 	base = trunc_page(va);
5583 	offset = va & PAGE_MASK;
5584 	size = round_page(offset + size);
5585 
5586 	if (!VIRT_IN_DMAP(base) &&
5587 	    !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
5588 		return (EINVAL);
5589 
5590 	for (tmpva = base; tmpva < base + size; ) {
5591 		pte = pmap_pte(kernel_pmap, tmpva, &lvl);
5592 		if (pte == NULL)
5593 			return (EINVAL);
5594 
5595 		if ((pmap_load(pte) & ATTR_S1_IDX_MASK) == ATTR_S1_IDX(mode)) {
5596 			/*
5597 			 * We already have the correct attribute,
5598 			 * ignore this entry.
5599 			 */
5600 			switch (lvl) {
5601 			default:
5602 				panic("Invalid DMAP table level: %d\n", lvl);
5603 			case 1:
5604 				tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
5605 				break;
5606 			case 2:
5607 				tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
5608 				break;
5609 			case 3:
5610 				tmpva += PAGE_SIZE;
5611 				break;
5612 			}
5613 		} else {
5614 			/*
5615 			 * Split the entry to a level 3 table, then
5616 			 * set the new attribute.
5617 			 */
5618 			switch (lvl) {
5619 			default:
5620 				panic("Invalid DMAP table level: %d\n", lvl);
5621 			case 1:
5622 				newpte = pmap_demote_l1(kernel_pmap, pte,
5623 				    tmpva & ~L1_OFFSET);
5624 				if (newpte == NULL)
5625 					return (EINVAL);
5626 				pte = pmap_l1_to_l2(pte, tmpva);
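				/* FALLTHROUGH */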
5627 			case 2:
5628 				newpte = pmap_demote_l2(kernel_pmap, pte,
5629 				    tmpva);
5630 				if (newpte == NULL)
5631 					return (EINVAL);
5632 				pte = pmap_l2_to_l3(pte, tmpva);
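				/* FALLTHROUGH */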
5633 			case 3:
5634 				/* Update the entry */
5635 				l3 = pmap_load(pte);
5636 				l3 &= ~ATTR_S1_IDX_MASK;
5637 				l3 |= ATTR_S1_IDX(mode);
5638 				if (mode == VM_MEMATTR_DEVICE)
5639 					l3 |= ATTR_S1_XN;
5640 
5641 				pmap_update_entry(kernel_pmap, pte, l3, tmpva,
5642 				    PAGE_SIZE);
5643 
5644 				/*
5645 				 * If moving to a non-cacheable entry, flush
5646 				 * the cache.
5647 				 */
5648 				if (mode == VM_MEMATTR_UNCACHEABLE)
5649 					cpu_dcache_wbinv_range(tmpva, L3_SIZE);
5650 
5651 				break;
5652 			}
5653 			tmpva += PAGE_SIZE;
5654 		}
5655 	}
5656 
5657 	return (0);
5658 }
5659 
5660 /*
5661  * Create an L2 table to map all addresses within an L1 mapping.
5662  */
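/*
 * Sketch of the demotion: a single 1GB L1_BLOCK entry is replaced by an
 * L1_TABLE entry pointing at a freshly allocated page of Ln_ENTRIES (512)
 * L2_BLOCK entries, each covering L2_SIZE (2MB) with the attributes copied
 * from the old L1 entry, so the same 1GB range remains mapped.
 */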
5663 static pt_entry_t *
5664 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
5665 {
5666 	pt_entry_t *l2, newl2, oldl1;
5667 	vm_offset_t tmpl1;
5668 	vm_paddr_t l2phys, phys;
5669 	vm_page_t ml2;
5670 	int i;
5671 
5672 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5673 	oldl1 = pmap_load(l1);
5674 	KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
5675 	    ("pmap_demote_l1: Demoting a non-block entry"));
5676 	KASSERT((va & L1_OFFSET) == 0,
5677 	    ("pmap_demote_l1: Invalid virtual address %#lx", va));
5678 	KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
5679 	    ("pmap_demote_l1: Level 1 table shouldn't be managed"));
5680 
5681 	tmpl1 = 0;
5682 	if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
5683 		tmpl1 = kva_alloc(PAGE_SIZE);
5684 		if (tmpl1 == 0)
5685 			return (NULL);
5686 	}
5687 
5688 	if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
5689 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
5690 		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
5691 		    " in pmap %p", va, pmap);
5692 		return (NULL);
5693 	}
5694 
5695 	l2phys = VM_PAGE_TO_PHYS(ml2);
5696 	l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
5697 
5698 	/* The physical address the range points at */
5699 	phys = oldl1 & ~ATTR_MASK;
5700 	/* The attributes from the old l1 entry to be copied */
5701 	newl2 = oldl1 & ATTR_MASK;
5702 
5703 	/* Create the new entries */
5704 	for (i = 0; i < Ln_ENTRIES; i++) {
5705 		l2[i] = newl2 | phys;
5706 		phys += L2_SIZE;
5707 	}
5708 	KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
5709 	    ("Invalid l2 page (%lx != %lx)", l2[0],
5710 	    (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
5711 
5712 	if (tmpl1 != 0) {
5713 		pmap_kenter(tmpl1, PAGE_SIZE,
5714 		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
5715 		    VM_MEMATTR_WRITE_BACK);
5716 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
5717 	}
5718 
5719 	pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
5720 
5721 	if (tmpl1 != 0) {
5722 		pmap_kremove(tmpl1);
5723 		kva_free(tmpl1, PAGE_SIZE);
5724 	}
5725 
5726 	return (l2);
5727 }
5728 
5729 static void
5730 pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
5731 {
5732 	pt_entry_t *l3;
5733 
5734 	for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
5735 		*l3 = newl3;
5736 		newl3 += L3_SIZE;
5737 	}
5738 }
5739 
5740 static void
5741 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
5742     struct rwlock **lockp)
5743 {
5744 	struct spglist free;
5745 
5746 	SLIST_INIT(&free);
5747 	(void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
5748 	    lockp);
5749 	vm_page_free_pages_toq(&free, true);
5750 }
5751 
5752 /*
5753  * Create an L3 table to map all addresses within an L2 mapping.
5754  */
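/*
 * Sketch of the demotion, analogous to pmap_demote_l1(): one 2MB L2_BLOCK
 * entry is replaced by an L2_TABLE entry pointing at a page of Ln_ENTRIES
 * (512) L3_PAGE entries of PAGE_SIZE each, filled by pmap_fill_l3().
 */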
5755 static pt_entry_t *
5756 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
5757     struct rwlock **lockp)
5758 {
5759 	pt_entry_t *l3, newl3, oldl2;
5760 	vm_offset_t tmpl2;
5761 	vm_paddr_t l3phys;
5762 	vm_page_t ml3;
5763 
5764 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5765 	PMAP_ASSERT_STAGE1(pmap);
5766 	l3 = NULL;
5767 	oldl2 = pmap_load(l2);
5768 	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
5769 	    ("pmap_demote_l2: Demoting a non-block entry"));
5770 	va &= ~L2_OFFSET;
5771 
5772 	tmpl2 = 0;
5773 	if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
5774 		tmpl2 = kva_alloc(PAGE_SIZE);
5775 		if (tmpl2 == 0)
5776 			return (NULL);
5777 	}
5778 
5779 	/*
5780 	 * Invalidate the 2MB page mapping and return "failure" if the
5781 	 * mapping was never accessed.
5782 	 */
5783 	if ((oldl2 & ATTR_AF) == 0) {
5784 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5785 		    ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
5786 		pmap_demote_l2_abort(pmap, va, l2, lockp);
5787 		CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
5788 		    va, pmap);
5789 		goto fail;
5790 	}
5791 
5792 	if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
5793 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5794 		    ("pmap_demote_l2: page table page for a wired mapping"
5795 		    " is missing"));
5796 
5797 		/*
5798 		 * If the page table page is missing and the mapping
5799 		 * is for a kernel address, the mapping must belong to
5800 		 * the direct map.  Page table pages are preallocated
5801 		 * for every other part of the kernel address space,
5802 		 * so the direct map region is the only part of the
5803 		 * kernel address space that must be handled here.
5804 		 */
5805 		KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
5806 		    ("pmap_demote_l2: No saved mpte for va %#lx", va));
5807 
5808 		/*
5809 		 * If the 2MB page mapping belongs to the direct map
5810 		 * region of the kernel's address space, then the page
5811 		 * allocation request specifies the highest possible
5812 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
5813 		 * priority is normal.
5814 		 */
5815 		ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
5816 		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
5817 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5818 
5819 		/*
5820 		 * If the allocation of the new page table page fails,
5821 		 * invalidate the 2MB page mapping and return "failure".
5822 		 */
5823 		if (ml3 == NULL) {
5824 			pmap_demote_l2_abort(pmap, va, l2, lockp);
5825 			CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
5826 			    " in pmap %p", va, pmap);
5827 			goto fail;
5828 		}
5829 
5830 		if (va < VM_MAXUSER_ADDRESS) {
5831 			ml3->ref_count = NL3PG;
5832 			pmap_resident_count_inc(pmap, 1);
5833 		}
5834 	}
5835 	l3phys = VM_PAGE_TO_PHYS(ml3);
5836 	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
5837 	newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
5838 	KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
5839 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
5840 	    ("pmap_demote_l2: L2 entry is writeable but not dirty"));
5841 
5842 	/*
5843 	 * If the page table page is not leftover from an earlier promotion,
5844 	 * or the mapping attributes have changed, (re)initialize the L3 table.
5845 	 *
5846 	 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
5847 	 * performs a dsb().  That dsb() ensures that the stores for filling
5848 	 * "l3" are visible before "l3" is added to the page table.
5849 	 */
5850 	if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
5851 		pmap_fill_l3(l3, newl3);
5852 
5853 	/*
5854 	 * Map the temporary page so we don't lose access to the l2 table.
5855 	 */
5856 	if (tmpl2 != 0) {
5857 		pmap_kenter(tmpl2, PAGE_SIZE,
5858 		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
5859 		    VM_MEMATTR_WRITE_BACK);
5860 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
5861 	}
5862 
5863 	/*
5864 	 * The spare PV entries must be reserved prior to demoting the
5865 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
5866 	 * of the L2 and the PV lists will be inconsistent, which can result
5867 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
5868 	 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
5869 	 * PV entry for the 2MB page mapping that is being demoted.
5870 	 */
5871 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
5872 		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
5873 
5874 	/*
5875 	 * Pass PAGE_SIZE so that a single TLB invalidation is performed on
5876 	 * the 2MB page mapping.
5877 	 */
5878 	pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
5879 
5880 	/*
5881 	 * Demote the PV entry.
5882 	 */
5883 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
5884 		pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
5885 
5886 	atomic_add_long(&pmap_l2_demotions, 1);
5887 	CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
5888 	    " in pmap %p %lx", va, pmap, l3[0]);
5889 
5890 fail:
5891 	if (tmpl2 != 0) {
5892 		pmap_kremove(tmpl2);
5893 		kva_free(tmpl2, PAGE_SIZE);
5894 	}
5895 
5896 	return (l3);
5898 }
5899 
5900 static pt_entry_t *
5901 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
5902 {
5903 	struct rwlock *lock;
5904 	pt_entry_t *l3;
5905 
5906 	lock = NULL;
5907 	l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
5908 	if (lock != NULL)
5909 		rw_wunlock(lock);
5910 	return (l3);
5911 }
5912 
5913 /*
5914  * Perform the pmap work for mincore(2).  If the page is not both referenced and
5915  * modified by this pmap, returns its physical address so that the caller can
5916  * find other mappings.
5917  */
5918 int
5919 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
5920 {
5921 	pt_entry_t *pte, tpte;
5922 	vm_paddr_t mask, pa;
5923 	int lvl, val;
5924 	bool managed;
5925 
5926 	PMAP_ASSERT_STAGE1(pmap);
5927 	PMAP_LOCK(pmap);
5928 	pte = pmap_pte(pmap, addr, &lvl);
5929 	if (pte != NULL) {
5930 		tpte = pmap_load(pte);
5931 
5932 		switch (lvl) {
5933 		case 3:
5934 			mask = L3_OFFSET;
5935 			break;
5936 		case 2:
5937 			mask = L2_OFFSET;
5938 			break;
5939 		case 1:
5940 			mask = L1_OFFSET;
5941 			break;
5942 		default:
5943 			panic("pmap_mincore: invalid level %d", lvl);
5944 		}
5945 
5946 		managed = (tpte & ATTR_SW_MANAGED) != 0;
5947 		val = MINCORE_INCORE;
5948 		if (lvl != 3)
5949 			val |= MINCORE_SUPER;
5950 		if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
5951 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
5952 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5953 		if ((tpte & ATTR_AF) == ATTR_AF)
5954 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5955 
5956 		pa = (tpte & ~ATTR_MASK) | (addr & mask);
5957 	} else {
5958 		managed = false;
5959 		val = 0;
5960 	}
5961 
5962 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5963 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
5964 		*pap = pa;
5965 	}
5966 	PMAP_UNLOCK(pmap);
5967 	return (val);
5968 }
5969 
5970 /*
5971  * Garbage collect every ASID that is neither active on a processor nor
5972  * reserved.
5973  */
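/*
 * A sketch of the scheme: pm_cookie packs an ASID together with the epoch
 * in which it was allocated (COOKIE_FROM(asid, epoch)).  Advancing
 * asid_epoch below implicitly invalidates every cookie from the previous
 * epoch, so pmap_activate_int() will request a fresh ASID via
 * pmap_alloc_asid() the next time such a pmap is activated.
 */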
5974 static void
5975 pmap_reset_asid_set(pmap_t pmap)
5976 {
5977 	pmap_t curpmap;
5978 	int asid, cpuid, epoch;
5979 	struct asid_set *set;
5980 	enum pmap_stage stage;
5981 
5982 	set = pmap->pm_asid_set;
5983 	stage = pmap->pm_stage;
5984 
5986 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
5987 	mtx_assert(&set->asid_set_mutex, MA_OWNED);
5988 
5989 	/*
5990 	 * Ensure that the store to asid_epoch is globally visible before the
5991 	 * loads from pc_curpmap are performed.
5992 	 */
5993 	epoch = set->asid_epoch + 1;
5994 	if (epoch == INT_MAX)
5995 		epoch = 0;
5996 	set->asid_epoch = epoch;
5997 	dsb(ishst);
5998 	if (stage == PM_STAGE1) {
5999 		__asm __volatile("tlbi vmalle1is");
6000 	} else {
6001 		KASSERT(pmap_clean_stage2_tlbi != NULL,
6002 		    ("%s: Unset stage 2 tlb invalidation callback\n",
6003 		    __func__));
6004 		pmap_clean_stage2_tlbi();
6005 	}
6006 	dsb(ish);
6007 	bit_nclear(set->asid_set, ASID_FIRST_AVAILABLE,
6008 	    set->asid_set_size - 1);
6009 	CPU_FOREACH(cpuid) {
6010 		if (cpuid == curcpu)
6011 			continue;
6012 		if (stage == PM_STAGE1) {
6013 			curpmap = pcpu_find(cpuid)->pc_curpmap;
6014 			PMAP_ASSERT_STAGE1(pmap);
6015 		} else {
6016 			curpmap = pcpu_find(cpuid)->pc_curvmpmap;
6017 			if (curpmap == NULL)
6018 				continue;
6019 			PMAP_ASSERT_STAGE2(pmap);
6020 		}
6021 		KASSERT(curpmap->pm_asid_set == set, ("Incorrect set"));
6022 		asid = COOKIE_TO_ASID(curpmap->pm_cookie);
6023 		if (asid == -1)
6024 			continue;
6025 		bit_set(set->asid_set, asid);
6026 		curpmap->pm_cookie = COOKIE_FROM(asid, epoch);
6027 	}
6028 }
6029 
6030 /*
6031  * Allocate a new ASID for the specified pmap.
6032  */
6033 static void
6034 pmap_alloc_asid(pmap_t pmap)
6035 {
6036 	struct asid_set *set;
6037 	int new_asid;
6038 
6039 	set = pmap->pm_asid_set;
6040 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
6041 
6042 	mtx_lock_spin(&set->asid_set_mutex);
6043 
6044 	/*
6045 	 * While this processor was waiting to acquire the asid set mutex,
6046 	 * pmap_reset_asid_set() running on another processor might have
6047 	 * updated this pmap's cookie to the current epoch, in which case we
6048 	 * don't need to allocate a new ASID.
6049 	 */
6050 	if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch)
6051 		goto out;
6052 
6053 	bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size,
6054 	    &new_asid);
6055 	if (new_asid == -1) {
6056 		bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
6057 		    set->asid_next, &new_asid);
6058 		if (new_asid == -1) {
6059 			pmap_reset_asid_set(pmap);
6060 			bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
6061 			    set->asid_set_size, &new_asid);
6062 			KASSERT(new_asid != -1, ("ASID allocation failure"));
6063 		}
6064 	}
6065 	bit_set(set->asid_set, new_asid);
6066 	set->asid_next = new_asid + 1;
6067 	pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch);
6068 out:
6069 	mtx_unlock_spin(&set->asid_set_mutex);
6070 }
6071 
6072 /*
6073  * Compute the value that should be stored in ttbr0 to activate the specified
6074  * pmap.  This value may change from time to time.
6075  */
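/*
 * For illustration: ASID_TO_OPERAND() places the ASID in the upper bits of
 * the TTBR0_EL1 value and pm_l0_paddr supplies the table base, so a single
 * register write switches both the translation table and the ASID.
 */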
6076 uint64_t
6077 pmap_to_ttbr0(pmap_t pmap)
6078 {
6079 
6080 	return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) |
6081 	    pmap->pm_l0_paddr);
6082 }
6083 
6084 static bool
6085 pmap_activate_int(pmap_t pmap)
6086 {
6087 	struct asid_set *set;
6088 	int epoch;
6089 
6090 	KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
6091 	KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
6092 
6093 	if ((pmap->pm_stage == PM_STAGE1 && pmap == PCPU_GET(curpmap)) ||
6094 	    (pmap->pm_stage == PM_STAGE2 && pmap == PCPU_GET(curvmpmap))) {
6095 		/*
6096 		 * Handle the possibility that the old thread was preempted
6097 		 * after an "ic" or "tlbi" instruction but before it performed
6098 		 * a "dsb" instruction.  If the old thread migrates to a new
6099 		 * processor, its completion of a "dsb" instruction on that
6100 		 * new processor does not guarantee that the "ic" or "tlbi"
6101 		 * instructions performed on the old processor have completed.
6102 		 */
6103 		dsb(ish);
6104 		return (false);
6105 	}
6106 
6107 	set = pmap->pm_asid_set;
6108 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
6109 
6110 	/*
6111 	 * Ensure that the store to curpmap is globally visible before the
6112 	 * load from asid_epoch is performed.
6113 	 */
6114 	if (pmap->pm_stage == PM_STAGE1)
6115 		PCPU_SET(curpmap, pmap);
6116 	else
6117 		PCPU_SET(curvmpmap, pmap);
6118 	dsb(ish);
6119 	epoch = COOKIE_TO_EPOCH(pmap->pm_cookie);
6120 	if (epoch >= 0 && epoch != set->asid_epoch)
6121 		pmap_alloc_asid(pmap);
6122 
6123 	if (pmap->pm_stage == PM_STAGE1) {
6124 		set_ttbr0(pmap_to_ttbr0(pmap));
6125 		if (PCPU_GET(bcast_tlbi_workaround) != 0)
6126 			invalidate_local_icache();
6127 	}
6128 	return (true);
6129 }
6130 
6131 void
6132 pmap_activate_vm(pmap_t pmap)
6133 {
6134 
6135 	PMAP_ASSERT_STAGE2(pmap);
6136 
6137 	(void)pmap_activate_int(pmap);
6138 }
6139 
6140 void
6141 pmap_activate(struct thread *td)
6142 {
6143 	pmap_t	pmap;
6144 
6145 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
6146 	PMAP_ASSERT_STAGE1(pmap);
6147 	critical_enter();
6148 	(void)pmap_activate_int(pmap);
6149 	critical_exit();
6150 }
6151 
6152 /*
6153  * To eliminate the unused parameter "old", we would have to add an instruction
6154  * to cpu_switch().
6155  */
6156 struct pcb *
6157 pmap_switch(struct thread *old __unused, struct thread *new)
6158 {
6159 	pcpu_bp_harden bp_harden;
6160 	struct pcb *pcb;
6161 
6162 	/* Store the new curthread */
6163 	PCPU_SET(curthread, new);
6164 
6165 	/* And the new pcb */
6166 	pcb = new->td_pcb;
6167 	PCPU_SET(curpcb, pcb);
6168 
6169 	/*
6170 	 * TODO: We may need to flush the cache here if switching
6171 	 * to a user process.
6172 	 */
6173 
6174 	if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
6175 		/*
6176 		 * Stop userspace from training the branch predictor against
6177 		 * other processes. This will call into a CPU specific
6178 		 * function that clears the branch predictor state.
6179 		 */
6180 		bp_harden = PCPU_GET(bp_harden);
6181 		if (bp_harden != NULL)
6182 			bp_harden();
6183 	}
6184 
6185 	return (pcb);
6186 }
6187 
6188 void
6189 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
6190 {
6191 
6192 	PMAP_ASSERT_STAGE1(pmap);
6193 	if (va >= VM_MIN_KERNEL_ADDRESS) {
6194 		cpu_icache_sync_range(va, sz);
6195 	} else {
6196 		u_int len, offset;
6197 		vm_paddr_t pa;
6198 
6199 		/* Find the length of data in this page to flush */
6200 		offset = va & PAGE_MASK;
6201 		len = imin(PAGE_SIZE - offset, sz);
6202 
6203 		while (sz != 0) {
6204 			/* Extract the physical address & find it in the DMAP */
6205 			pa = pmap_extract(pmap, va);
6206 			if (pa != 0)
6207 				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
6208 
6209 			/* Move to the next page */
6210 			sz -= len;
6211 			va += len;
6212 			/* Set the length for the next iteration */
6213 			len = imin(PAGE_SIZE, sz);
6214 		}
6215 	}
6216 }
6217 
6218 static int
6219 pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far)
6220 {
6221 	pd_entry_t *pdep;
6222 	pt_entry_t *ptep, pte;
6223 	int rv, lvl, dfsc;
6224 
6225 	PMAP_ASSERT_STAGE2(pmap);
6226 	rv = KERN_FAILURE;
6227 
6228 	/* Data and insn aborts use the same encoding for the FSC field. */
6229 	dfsc = esr & ISS_DATA_DFSC_MASK;
6230 	switch (dfsc) {
6231 	case ISS_DATA_DFSC_TF_L0:
6232 	case ISS_DATA_DFSC_TF_L1:
6233 	case ISS_DATA_DFSC_TF_L2:
6234 	case ISS_DATA_DFSC_TF_L3:
6235 		PMAP_LOCK(pmap);
6236 		pdep = pmap_pde(pmap, far, &lvl);
6237 		if (pdep == NULL || lvl != (dfsc - ISS_DATA_DFSC_TF_L1)) {
6238 			PMAP_UNLOCK(pmap);
6239 			break;
6240 		}
6241 
6242 		switch (lvl) {
6243 		case 0:
6244 			ptep = pmap_l0_to_l1(pdep, far);
6245 			break;
6246 		case 1:
6247 			ptep = pmap_l1_to_l2(pdep, far);
6248 			break;
6249 		case 2:
6250 			ptep = pmap_l2_to_l3(pdep, far);
6251 			break;
6252 		default:
6253 			panic("%s: Invalid pde level %d", __func__, lvl);
6254 		}
6255 		goto fault_exec;
6256 
6257 	case ISS_DATA_DFSC_AFF_L1:
6258 	case ISS_DATA_DFSC_AFF_L2:
6259 	case ISS_DATA_DFSC_AFF_L3:
6260 		PMAP_LOCK(pmap);
6261 		ptep = pmap_pte(pmap, far, &lvl);
6262 fault_exec:
6263 		if (ptep != NULL && (pte = pmap_load(ptep)) != 0) {
6264 			if (icache_vmid) {
6265 				pmap_invalidate_vpipt_icache();
6266 			} else {
6267 				/*
6268 				 * If accessing an executable page invalidate
6269 				 * the I-cache so it will be valid when we
6270 				 * continue execution in the guest. The D-cache
6271 				 * is assumed to already be clean to the Point
6272 				 * of Coherency.
6273 				 */
6274 				if ((pte & ATTR_S2_XN_MASK) !=
6275 				    ATTR_S2_XN(ATTR_S2_XN_NONE)) {
6276 					invalidate_icache();
6277 				}
6278 			}
6279 			pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID);
6280 			rv = KERN_SUCCESS;
6281 		}
6282 		PMAP_UNLOCK(pmap);
6283 		break;
6284 	}
6285 
6286 	return (rv);
6287 }
6288 
6289 int
6290 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
6291 {
6292 	pt_entry_t pte, *ptep;
6293 	register_t intr;
6294 	uint64_t ec, par;
6295 	int lvl, rv;
6296 
6297 	rv = KERN_FAILURE;
6298 
6299 	ec = ESR_ELx_EXCEPTION(esr);
6300 	switch (ec) {
6301 	case EXCP_INSN_ABORT_L:
6302 	case EXCP_INSN_ABORT:
6303 	case EXCP_DATA_ABORT_L:
6304 	case EXCP_DATA_ABORT:
6305 		break;
6306 	default:
6307 		return (rv);
6308 	}
6309 
6310 	if (pmap->pm_stage == PM_STAGE2)
6311 		return (pmap_stage2_fault(pmap, esr, far));
6312 
6313 	/* Data and insn aborts use the same encoding for the FSC field. */
6314 	switch (esr & ISS_DATA_DFSC_MASK) {
6315 	case ISS_DATA_DFSC_AFF_L1:
6316 	case ISS_DATA_DFSC_AFF_L2:
6317 	case ISS_DATA_DFSC_AFF_L3:
6318 		PMAP_LOCK(pmap);
6319 		ptep = pmap_pte(pmap, far, &lvl);
6320 		if (ptep != NULL) {
6321 			pmap_set_bits(ptep, ATTR_AF);
6322 			rv = KERN_SUCCESS;
6323 			/*
6324 			 * XXXMJ as an optimization we could mark the entry
6325 			 * dirty if this is a write fault.
6326 			 */
6327 		}
6328 		PMAP_UNLOCK(pmap);
6329 		break;
6330 	case ISS_DATA_DFSC_PF_L1:
6331 	case ISS_DATA_DFSC_PF_L2:
6332 	case ISS_DATA_DFSC_PF_L3:
6333 		if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
6334 		    (esr & ISS_DATA_WnR) == 0)
6335 			return (rv);
6336 		PMAP_LOCK(pmap);
6337 		ptep = pmap_pte(pmap, far, &lvl);
6338 		if (ptep != NULL &&
6339 		    ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
6340 			if ((pte & ATTR_S1_AP_RW_BIT) ==
6341 			    ATTR_S1_AP(ATTR_S1_AP_RO)) {
6342 				pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
6343 				pmap_invalidate_page(pmap, far);
6344 			}
6345 			rv = KERN_SUCCESS;
6346 		}
6347 		PMAP_UNLOCK(pmap);
6348 		break;
6349 	case ISS_DATA_DFSC_TF_L0:
6350 	case ISS_DATA_DFSC_TF_L1:
6351 	case ISS_DATA_DFSC_TF_L2:
6352 	case ISS_DATA_DFSC_TF_L3:
6353 		/*
6354 		 * Retry the translation.  A break-before-make sequence can
6355 		 * produce a transient fault.
6356 		 */
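		/*
		 * (pmap_update_entry() briefly clears an entry before
		 * installing its replacement, so another CPU may take a
		 * transient translation fault on an address that is in fact
		 * mapped.)
		 */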
6357 		if (pmap == kernel_pmap) {
6358 			/*
6359 			 * The translation fault may have occurred within a
6360 			 * critical section.  Therefore, we must check the
6361 			 * address without acquiring the kernel pmap's lock.
6362 			 */
6363 			if (pmap_kextract(far) != 0)
6364 				rv = KERN_SUCCESS;
6365 		} else {
6366 			PMAP_LOCK(pmap);
6367 			/* Ask the MMU to check the address. */
6368 			intr = intr_disable();
6369 			par = arm64_address_translate_s1e0r(far);
6370 			intr_restore(intr);
6371 			PMAP_UNLOCK(pmap);
6372 
6373 			/*
6374 			 * If the translation was successful, then we can
6375 			 * return success to the trap handler.
6376 			 */
6377 			if (PAR_SUCCESS(par))
6378 				rv = KERN_SUCCESS;
6379 		}
6380 		break;
6381 	}
6382 
6383 	return (rv);
6384 }
6385 
6386 /*
6387  *	Increase the starting virtual address of the given mapping if a
6388  *	different alignment might result in more superpage mappings.
6389  */
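/*
 * Worked example with hypothetical values: for an object offset of
 * 0x1234000, superpage_offset is 0x34000; a sufficiently large request at
 * *addr = 0x7ffff0000000 is moved to 0x7ffff0034000 so that 2MB-aligned
 * portions of the mapping correspond to 2MB-aligned chunks of the object.
 */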
6390 void
6391 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
6392     vm_offset_t *addr, vm_size_t size)
6393 {
6394 	vm_offset_t superpage_offset;
6395 
6396 	if (size < L2_SIZE)
6397 		return;
6398 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
6399 		offset += ptoa(object->pg_color);
6400 	superpage_offset = offset & L2_OFFSET;
6401 	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
6402 	    (*addr & L2_OFFSET) == superpage_offset)
6403 		return;
6404 	if ((*addr & L2_OFFSET) < superpage_offset)
6405 		*addr = (*addr & ~L2_OFFSET) + superpage_offset;
6406 	else
6407 		*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
6408 }
6409 
6410 /**
6411  * Get the kernel virtual address of a set of physical pages. If there are
6412  * physical addresses not covered by the DMAP perform a transient mapping
6413  * that will be removed when calling pmap_unmap_io_transient.
6414  *
6415  * \param page        The pages for which the caller wishes to obtain
6416  *                    kernel virtual addresses.
6417  * \param vaddr       On return contains the kernel virtual memory address
6418  *                    of the pages passed in the page parameter.
6419  * \param count       Number of pages passed in.
6420  * \param can_fault   TRUE if the thread using the mapped pages can take
6421  *                    page faults, FALSE otherwise.
6422  *
6423  * \returns TRUE if the caller must call pmap_unmap_io_transient when
6424  *          finished or FALSE otherwise.
6425  *
6426  */
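/*
 * Hypothetical usage sketch, with caller-provided "pages", "vaddrs", and
 * "npages":
 *
 *	mapped = pmap_map_io_transient(pages, vaddrs, npages, FALSE);
 *	... access the pages through vaddrs[] ...
 *	if (mapped)
 *		pmap_unmap_io_transient(pages, vaddrs, npages, FALSE);
 */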
6427 boolean_t
6428 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6429     boolean_t can_fault)
6430 {
6431 	vm_paddr_t paddr;
6432 	boolean_t needs_mapping;
6433 	int error, i;
6434 
6435 	/*
6436 	 * Allocate any KVA space that we need.  This is done in a separate
6437 	 * loop to prevent calling vmem_alloc while pinned.
6438 	 */
6439 	needs_mapping = FALSE;
6440 	for (i = 0; i < count; i++) {
6441 		paddr = VM_PAGE_TO_PHYS(page[i]);
6442 		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
6443 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
6444 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
6445 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
6446 			needs_mapping = TRUE;
6447 		} else {
6448 			vaddr[i] = PHYS_TO_DMAP(paddr);
6449 		}
6450 	}
6451 
6452 	/* Exit early if everything is covered by the DMAP */
6453 	if (!needs_mapping)
6454 		return (FALSE);
6455 
6456 	if (!can_fault)
6457 		sched_pin();
6458 	for (i = 0; i < count; i++) {
6459 		paddr = VM_PAGE_TO_PHYS(page[i]);
6460 		if (!PHYS_IN_DMAP(paddr)) {
6461 			panic(
6462 			   "pmap_map_io_transient: TODO: Map out of DMAP data");
6463 		}
6464 	}
6465 
6466 	return (needs_mapping);
6467 }
6468 
6469 void
6470 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6471     boolean_t can_fault)
6472 {
6473 	vm_paddr_t paddr;
6474 	int i;
6475 
6476 	if (!can_fault)
6477 		sched_unpin();
6478 	for (i = 0; i < count; i++) {
6479 		paddr = VM_PAGE_TO_PHYS(page[i]);
6480 		if (!PHYS_IN_DMAP(paddr)) {
6481 			panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
6482 		}
6483 	}
6484 }
6485 
6486 boolean_t
6487 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
6488 {
6489 
6490 	return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
6491 }
6492 
6493 /*
6494  * Track a range of the kernel's virtual address space that is contiguous
6495  * in various mapping attributes.
6496  */
6497 struct pmap_kernel_map_range {
6498 	vm_offset_t sva;
6499 	pt_entry_t attrs;
6500 	int l3pages;
6501 	int l3contig;
6502 	int l2blocks;
6503 	int l1blocks;
6504 };
6505 
6506 static void
6507 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
6508     vm_offset_t eva)
6509 {
6510 	const char *mode;
6511 	int index;
6512 
6513 	if (eva <= range->sva)
6514 		return;
6515 
6516 	index = range->attrs & ATTR_S1_IDX_MASK;
6517 	switch (index) {
6518 	case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
6519 		mode = "DEV";
6520 		break;
6521 	case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
6522 		mode = "UC";
6523 		break;
6524 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
6525 		mode = "WB";
6526 		break;
6527 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
6528 		mode = "WT";
6529 		break;
6530 	default:
6531 		printf(
6532 		    "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n",
6533 		    __func__, index, range->sva, eva);
6534 		mode = "??";
6535 		break;
6536 	}
6537 
6538 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c %3s %d %d %d %d\n",
6539 	    range->sva, eva,
6540 	    (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
6541 	    (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
6542 	    (range->attrs & ATTR_S1_AP_USER) != 0 ? 'u' : 's',
6543 	    mode, range->l1blocks, range->l2blocks, range->l3contig,
6544 	    range->l3pages);
6545 
6546 	/* Reset to sentinel value. */
6547 	range->sva = 0xfffffffffffffffful;
6548 }
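
/*
 * A dumped line might look like (hypothetical values):
 *
 *	0xffff000000000000-0xffff000040000000 rw-s  WB 1 0 0 0
 *
 * i.e. the range, its permissions and memory type, then the counts of L1
 * blocks, L2 blocks, contiguous L3 runs, and individual L3 pages.
 */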
6549 
6550 /*
6551  * Determine whether the attributes specified by a page table entry match those
6552  * being tracked by the current range.
6553  */
6554 static bool
6555 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
6556 {
6557 
6558 	return (range->attrs == attrs);
6559 }
6560 
6561 static void
6562 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
6563     pt_entry_t attrs)
6564 {
6565 
6566 	memset(range, 0, sizeof(*range));
6567 	range->sva = va;
6568 	range->attrs = attrs;
6569 }
6570 
6571 /*
6572  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
6573  * those of the current run, dump the address range and its attributes, and
6574  * begin a new run.
6575  */
6576 static void
6577 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
6578     vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e,
6579     pt_entry_t l3e)
6580 {
6581 	pt_entry_t attrs;
6582 
6583 	attrs = l0e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6584 	attrs |= l1e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6585 	if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK)
6586 		attrs |= l1e & ATTR_S1_IDX_MASK;
6587 	attrs |= l2e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
6588 	if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK)
6589 		attrs |= l2e & ATTR_S1_IDX_MASK;
6590 	attrs |= l3e & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK);
6591 
6592 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
6593 		sysctl_kmaps_dump(sb, range, va);
6594 		sysctl_kmaps_reinit(range, va, attrs);
6595 	}
6596 }
6597 
6598 static int
6599 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
6600 {
6601 	struct pmap_kernel_map_range range;
6602 	struct sbuf sbuf, *sb;
6603 	pd_entry_t l0e, *l1, l1e, *l2, l2e;
6604 	pt_entry_t *l3, l3e;
6605 	vm_offset_t sva;
6606 	vm_paddr_t pa;
6607 	int error, i, j, k, l;
6608 
6609 	error = sysctl_wire_old_buffer(req, 0);
6610 	if (error != 0)
6611 		return (error);
6612 	sb = &sbuf;
6613 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
6614 
6615 	/* Sentinel value. */
6616 	range.sva = 0xfffffffffffffffful;
6617 
6618 	/*
6619 	 * Iterate over the kernel page tables without holding the kernel pmap
6620 	 * lock.  Kernel page table pages are never freed, so at worst we will
6621 	 * observe inconsistencies in the output.
6622 	 */
6623 	for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES;
6624 	    i++) {
6625 		if (i == pmap_l0_index(DMAP_MIN_ADDRESS))
6626 			sbuf_printf(sb, "\nDirect map:\n");
6627 		else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
6628 			sbuf_printf(sb, "\nKernel map:\n");
6629 
6630 		l0e = kernel_pmap->pm_l0[i];
6631 		if ((l0e & ATTR_DESCR_VALID) == 0) {
6632 			sysctl_kmaps_dump(sb, &range, sva);
6633 			sva += L0_SIZE;
6634 			continue;
6635 		}
6636 		pa = l0e & ~ATTR_MASK;
6637 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa);
6638 
6639 		for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
6640 			l1e = l1[j];
6641 			if ((l1e & ATTR_DESCR_VALID) == 0) {
6642 				sysctl_kmaps_dump(sb, &range, sva);
6643 				sva += L1_SIZE;
6644 				continue;
6645 			}
6646 			if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
6647 				sysctl_kmaps_check(sb, &range, sva, l0e, l1e,
6648 				    0, 0);
6649 				range.l1blocks++;
6650 				sva += L1_SIZE;
6651 				continue;
6652 			}
6653 			pa = l1e & ~ATTR_MASK;
6654 			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);
6655 
6656 			for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
6657 				l2e = l2[k];
6658 				if ((l2e & ATTR_DESCR_VALID) == 0) {
6659 					sysctl_kmaps_dump(sb, &range, sva);
6660 					sva += L2_SIZE;
6661 					continue;
6662 				}
6663 				if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
6664 					sysctl_kmaps_check(sb, &range, sva,
6665 					    l0e, l1e, l2e, 0);
6666 					range.l2blocks++;
6667 					sva += L2_SIZE;
6668 					continue;
6669 				}
6670 				pa = l2e & ~ATTR_MASK;
6671 				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
6672 
6673 				for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
6674 				    l++, sva += L3_SIZE) {
6675 					l3e = l3[l];
6676 					if ((l3e & ATTR_DESCR_VALID) == 0) {
6677 						sysctl_kmaps_dump(sb, &range,
6678 						    sva);
6679 						continue;
6680 					}
6681 					sysctl_kmaps_check(sb, &range, sva,
6682 					    l0e, l1e, l2e, l3e);
6683 					if ((l3e & ATTR_CONTIGUOUS) != 0)
6684 						range.l3contig += l % 16 == 0 ?
6685 						    1 : 0;
6686 					else
6687 						range.l3pages++;
6688 				}
6689 			}
6690 		}
6691 	}
6692 
6693 	error = sbuf_finish(sb);
6694 	sbuf_delete(sb);
6695 	return (error);
6696 }
6697 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
6698     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
6699     NULL, 0, sysctl_kmaps, "A",
6700     "Dump kernel address layout");
6701