xref: /freebsd/sys/amd64/amd64/pmap.c (revision e17f5b1d)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2003 Peter Wemm
11  * All rights reserved.
12  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
13  * All rights reserved.
14  *
15  * This code is derived from software contributed to Berkeley by
16  * the Systems Programming Group of the University of Utah Computer
17  * Science Department and William Jolitz of UUNET Technologies Inc.
18  *
19  * Redistribution and use in source and binary forms, with or without
20  * modification, are permitted provided that the following conditions
21  * are met:
22  * 1. Redistributions of source code must retain the above copyright
23  *    notice, this list of conditions and the following disclaimer.
24  * 2. Redistributions in binary form must reproduce the above copyright
25  *    notice, this list of conditions and the following disclaimer in the
26  *    documentation and/or other materials provided with the distribution.
27  * 3. All advertising materials mentioning features or use of this software
28  *    must display the following acknowledgement:
29  *	This product includes software developed by the University of
30  *	California, Berkeley and its contributors.
31  * 4. Neither the name of the University nor the names of its contributors
32  *    may be used to endorse or promote products derived from this software
33  *    without specific prior written permission.
34  *
35  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45  * SUCH DAMAGE.
46  *
47  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
48  */
49 /*-
50  * Copyright (c) 2003 Networks Associates Technology, Inc.
51  * Copyright (c) 2014-2019 The FreeBSD Foundation
52  * All rights reserved.
53  *
54  * This software was developed for the FreeBSD Project by Jake Burkholder,
55  * Safeport Network Services, and Network Associates Laboratories, the
56  * Security Research Division of Network Associates, Inc. under
57  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
58  * CHATS research program.
59  *
60  * Portions of this software were developed by
61  * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
62  * the FreeBSD Foundation.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83  * SUCH DAMAGE.
84  */
85 
86 #define	AMD64_NPT_AWARE
87 
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD$");
90 
91 /*
92  *	Manages physical address maps.
93  *
94  *	Since the information managed by this module is
95  *	also stored by the logical address mapping module,
96  *	this module may throw away valid virtual-to-physical
97  *	mappings at almost any time.  However, invalidations
98  *	of virtual-to-physical mappings must be done as
99  *	requested.
100  *
101  *	In order to cope with hardware architectures that make
102  *	virtual-to-physical map invalidations expensive, this
103  *	module may delay invalidation or reduced-protection
104  *	operations until such time as they are actually
105  *	necessary.  This module is given full information as
106  *	to which processors are currently using which maps,
107  *	and as to when physical maps must be made correct.
108  */
109 
110 #include "opt_ddb.h"
111 #include "opt_pmap.h"
112 #include "opt_vm.h"
113 
114 #include <sys/param.h>
115 #include <sys/bitstring.h>
116 #include <sys/bus.h>
117 #include <sys/systm.h>
118 #include <sys/kernel.h>
119 #include <sys/ktr.h>
120 #include <sys/lock.h>
121 #include <sys/malloc.h>
122 #include <sys/mman.h>
123 #include <sys/mutex.h>
124 #include <sys/proc.h>
125 #include <sys/rangeset.h>
126 #include <sys/rwlock.h>
127 #include <sys/sbuf.h>
128 #include <sys/sx.h>
129 #include <sys/turnstile.h>
130 #include <sys/vmem.h>
131 #include <sys/vmmeter.h>
132 #include <sys/sched.h>
133 #include <sys/sysctl.h>
134 #include <sys/smp.h>
135 #ifdef DDB
136 #include <sys/kdb.h>
137 #include <ddb/ddb.h>
138 #endif
139 
140 #include <vm/vm.h>
141 #include <vm/vm_param.h>
142 #include <vm/vm_kern.h>
143 #include <vm/vm_page.h>
144 #include <vm/vm_map.h>
145 #include <vm/vm_object.h>
146 #include <vm/vm_extern.h>
147 #include <vm/vm_pageout.h>
148 #include <vm/vm_pager.h>
149 #include <vm/vm_phys.h>
150 #include <vm/vm_radix.h>
151 #include <vm/vm_reserv.h>
152 #include <vm/uma.h>
153 
154 #include <machine/intr_machdep.h>
155 #include <x86/apicvar.h>
156 #include <x86/ifunc.h>
157 #include <machine/cpu.h>
158 #include <machine/cputypes.h>
159 #include <machine/md_var.h>
160 #include <machine/pcb.h>
161 #include <machine/specialreg.h>
162 #ifdef SMP
163 #include <machine/smp.h>
164 #endif
165 #include <machine/sysarch.h>
166 #include <machine/tss.h>
167 
168 #ifdef NUMA
169 #define	PMAP_MEMDOM	MAXMEMDOM
170 #else
171 #define	PMAP_MEMDOM	1
172 #endif
173 
174 static __inline boolean_t
175 pmap_type_guest(pmap_t pmap)
176 {
177 
178 	return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
179 }
180 
181 static __inline boolean_t
182 pmap_emulate_ad_bits(pmap_t pmap)
183 {
184 
185 	return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);
186 }
187 
188 static __inline pt_entry_t
189 pmap_valid_bit(pmap_t pmap)
190 {
191 	pt_entry_t mask;
192 
193 	switch (pmap->pm_type) {
194 	case PT_X86:
195 	case PT_RVI:
196 		mask = X86_PG_V;
197 		break;
198 	case PT_EPT:
199 		if (pmap_emulate_ad_bits(pmap))
200 			mask = EPT_PG_EMUL_V;
201 		else
202 			mask = EPT_PG_READ;
203 		break;
204 	default:
205 		panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);
206 	}
207 
208 	return (mask);
209 }
210 
211 static __inline pt_entry_t
212 pmap_rw_bit(pmap_t pmap)
213 {
214 	pt_entry_t mask;
215 
216 	switch (pmap->pm_type) {
217 	case PT_X86:
218 	case PT_RVI:
219 		mask = X86_PG_RW;
220 		break;
221 	case PT_EPT:
222 		if (pmap_emulate_ad_bits(pmap))
223 			mask = EPT_PG_EMUL_RW;
224 		else
225 			mask = EPT_PG_WRITE;
226 		break;
227 	default:
228 		panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);
229 	}
230 
231 	return (mask);
232 }
233 
234 static pt_entry_t pg_g;
235 
236 static __inline pt_entry_t
237 pmap_global_bit(pmap_t pmap)
238 {
239 	pt_entry_t mask;
240 
241 	switch (pmap->pm_type) {
242 	case PT_X86:
243 		mask = pg_g;
244 		break;
245 	case PT_RVI:
246 	case PT_EPT:
247 		mask = 0;
248 		break;
249 	default:
250 		panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);
251 	}
252 
253 	return (mask);
254 }
255 
256 static __inline pt_entry_t
257 pmap_accessed_bit(pmap_t pmap)
258 {
259 	pt_entry_t mask;
260 
261 	switch (pmap->pm_type) {
262 	case PT_X86:
263 	case PT_RVI:
264 		mask = X86_PG_A;
265 		break;
266 	case PT_EPT:
267 		if (pmap_emulate_ad_bits(pmap))
268 			mask = EPT_PG_READ;
269 		else
270 			mask = EPT_PG_A;
271 		break;
272 	default:
273 		panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
274 	}
275 
276 	return (mask);
277 }
278 
279 static __inline pt_entry_t
280 pmap_modified_bit(pmap_t pmap)
281 {
282 	pt_entry_t mask;
283 
284 	switch (pmap->pm_type) {
285 	case PT_X86:
286 	case PT_RVI:
287 		mask = X86_PG_M;
288 		break;
289 	case PT_EPT:
290 		if (pmap_emulate_ad_bits(pmap))
291 			mask = EPT_PG_WRITE;
292 		else
293 			mask = EPT_PG_M;
294 		break;
295 	default:
296 		panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
297 	}
298 
299 	return (mask);
300 }
301 
302 static __inline pt_entry_t
303 pmap_pku_mask_bit(pmap_t pmap)
304 {
305 
306 	return (pmap->pm_type == PT_X86 ? X86_PG_PKU_MASK : 0);
307 }
308 
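/*
 * Callers of the per-type mask accessors above typically cache the
 * result in a local variable named after the corresponding x86 bit
 * (e.g. PG_V, PG_RW, PG_A, PG_M) so that the pm_type switch is
 * evaluated only once per function; see pmap_pdpe() and pmap_pte()
 * below for examples of this convention.
 */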
309 #if !defined(DIAGNOSTIC)
310 #ifdef __GNUC_GNU_INLINE__
311 #define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
312 #else
313 #define PMAP_INLINE	extern inline
314 #endif
315 #else
316 #define PMAP_INLINE
317 #endif
318 
319 #ifdef PV_STATS
320 #define PV_STAT(x)	do { x ; } while (0)
321 #else
322 #define PV_STAT(x)	do { } while (0)
323 #endif
324 
325 #undef pa_index
326 #ifdef NUMA
327 #define	pa_index(pa)	({					\
328 	KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end,	\
329 	    ("address %lx beyond the last segment", (pa)));	\
330 	(pa) >> PDRSHIFT;					\
331 })
332 #define	pa_to_pmdp(pa)	(&pv_table[pa_index(pa)])
333 #define	pa_to_pvh(pa)	(&(pa_to_pmdp(pa)->pv_page))
334 #define	PHYS_TO_PV_LIST_LOCK(pa)	({			\
335 	struct rwlock *_lock;					\
336 	if (__predict_false((pa) > pmap_last_pa))		\
337 		_lock = &pv_dummy_large.pv_lock;		\
338 	else							\
339 		_lock = &(pa_to_pmdp(pa)->pv_lock);		\
340 	_lock;							\
341 })
342 #else
343 #define	pa_index(pa)	((pa) >> PDRSHIFT)
344 #define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
345 
346 #define	NPV_LIST_LOCKS	MAXCPU
347 
348 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
349 			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
350 #endif
351 
352 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
353 	struct rwlock **_lockp = (lockp);		\
354 	struct rwlock *_new_lock;			\
355 							\
356 	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
357 	if (_new_lock != *_lockp) {			\
358 		if (*_lockp != NULL)			\
359 			rw_wunlock(*_lockp);		\
360 		*_lockp = _new_lock;			\
361 		rw_wlock(*_lockp);			\
362 	}						\
363 } while (0)
364 
365 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
366 			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
367 
368 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
369 	struct rwlock **_lockp = (lockp);		\
370 							\
371 	if (*_lockp != NULL) {				\
372 		rw_wunlock(*_lockp);			\
373 		*_lockp = NULL;				\
374 	}						\
375 } while (0)
376 
377 #define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
378 			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
379 
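/*
 * Illustrative sketch (hypothetical helper, not part of the upstream
 * file, guarded out of the build) of the calling convention for the PV
 * list lock macros above: a caller starts with no lock held, lets
 * CHANGE_PV_LIST_LOCK_TO_VM_PAGE() switch locks as the pages it visits
 * hash to different PV list locks, and drops whatever lock remains at
 * the end.
 */
#if 0
static void
pv_list_lock_sketch(vm_page_t *ma, int count)
{
	struct rwlock *lock;
	int i;

	lock = NULL;
	for (i = 0; i < count; i++) {
		/* Re-locks only when ma[i] hashes to a different lock. */
		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, ma[i]);
		/* ... manipulate ma[i]'s PV list here ... */
	}
	RELEASE_PV_LIST_LOCK(&lock);
}
#endif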
380 struct pmap kernel_pmap_store;
381 
382 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
383 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
384 
385 int nkpt;
386 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
387     "Number of kernel page table pages allocated on bootup");
388 
389 static int ndmpdp;
390 vm_paddr_t dmaplimit;
391 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
392 pt_entry_t pg_nx;
393 
394 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
395     "VM/pmap parameters");
396 
397 static int pg_ps_enabled = 1;
398 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
399     &pg_ps_enabled, 0, "Are large page mappings enabled?");
400 
401 #define	PAT_INDEX_SIZE	8
402 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
403 
404 static u_int64_t	KPTphys;	/* phys addr of kernel level 1 */
405 static u_int64_t	KPDphys;	/* phys addr of kernel level 2 */
406 u_int64_t		KPDPphys;	/* phys addr of kernel level 3 */
407 u_int64_t		KPML4phys;	/* phys addr of kernel level 4 */
408 
409 static u_int64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
410 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
411 static int		ndmpdpphys;	/* number of DMPDPphys pages */
412 
413 static vm_paddr_t	KERNend;	/* phys addr of end of bootstrap data */
414 
415 /*
416  * pmap_mapdev() support prior to pmap initialization (i.e., for the console)
417  */
418 #define	PMAP_PREINIT_MAPPING_COUNT	8
419 static struct pmap_preinit_mapping {
420 	vm_paddr_t	pa;
421 	vm_offset_t	va;
422 	vm_size_t	sz;
423 	int		mode;
424 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
425 static int pmap_initialized;
426 
427 /*
428  * Data for the pv entry allocation mechanism.
429  * Updates to pv_invl_gen are protected by the pv list lock but reads are not.
430  */
431 #ifdef NUMA
432 static __inline int
433 pc_to_domain(struct pv_chunk *pc)
434 {
435 
436 	return (_vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
437 }
438 #else
439 static __inline int
440 pc_to_domain(struct pv_chunk *pc __unused)
441 {
442 
443 	return (0);
444 }
445 #endif
446 
447 struct pv_chunks_list {
448 	struct mtx pvc_lock;
449 	TAILQ_HEAD(pch, pv_chunk) pvc_list;
450 	int active_reclaims;
451 } __aligned(CACHE_LINE_SIZE);
452 
453 struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM];
454 
455 #ifdef	NUMA
456 struct pmap_large_md_page {
457 	struct rwlock   pv_lock;
458 	struct md_page  pv_page;
459 	u_long pv_invl_gen;
460 };
461 __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
462 #define pv_dummy pv_dummy_large.pv_page
463 __read_mostly static struct pmap_large_md_page *pv_table;
464 __read_mostly vm_paddr_t pmap_last_pa;
465 #else
466 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
467 static u_long pv_invl_gen[NPV_LIST_LOCKS];
468 static struct md_page *pv_table;
469 static struct md_page pv_dummy;
470 #endif
471 
472 /*
473  * All those kernel PT submaps that BSD is so fond of
474  */
475 pt_entry_t *CMAP1 = NULL;
476 caddr_t CADDR1 = 0;
477 static vm_offset_t qframe = 0;
478 static struct mtx qframe_mtx;
479 
480 static int pmap_flags = PMAP_PDE_SUPERPAGE;	/* flags for x86 pmaps */
481 
482 static vmem_t *large_vmem;
483 static u_int lm_ents;
484 #define	PMAP_ADDRESS_IN_LARGEMAP(va)	((va) >= LARGEMAP_MIN_ADDRESS && \
485 	(va) < LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
486 
487 int pmap_pcid_enabled = 1;
488 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
489     &pmap_pcid_enabled, 0, "Is TLB Context ID enabled?");
490 int invpcid_works = 0;
491 SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
492     "Is the invpcid instruction available ?");
493 
494 int __read_frequently pti = 0;
495 SYSCTL_INT(_vm_pmap, OID_AUTO, pti, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
496     &pti, 0,
497     "Page Table Isolation enabled");
498 static vm_object_t pti_obj;
499 static pml4_entry_t *pti_pml4;
500 static vm_pindex_t pti_pg_idx;
501 static bool pti_finalized;
502 
503 struct pmap_pkru_range {
504 	struct rs_el	pkru_rs_el;
505 	u_int		pkru_keyidx;
506 	int		pkru_flags;
507 };
508 
509 static uma_zone_t pmap_pkru_ranges_zone;
510 static bool pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
511 static pt_entry_t pmap_pkru_get(pmap_t pmap, vm_offset_t va);
512 static void pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
513 static void *pkru_dup_range(void *ctx, void *data);
514 static void pkru_free_range(void *ctx, void *node);
515 static int pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap);
516 static int pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
517 static void pmap_pkru_deassign_all(pmap_t pmap);
518 
519 static int
520 pmap_pcid_save_cnt_proc(SYSCTL_HANDLER_ARGS)
521 {
522 	int i;
523 	uint64_t res;
524 
525 	res = 0;
526 	CPU_FOREACH(i) {
527 		res += cpuid_to_pcpu[i]->pc_pm_save_cnt;
528 	}
529 	return (sysctl_handle_64(oidp, &res, 0, req));
530 }
531 SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RD |
532     CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU",
533     "Count of saved TLB context on switch");
534 
535 static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
536     LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
537 static struct mtx invl_gen_mtx;
538 /* Fake lock object to satisfy turnstiles interface. */
539 static struct lock_object invl_gen_ts = {
540 	.lo_name = "invlts",
541 };
542 static struct pmap_invl_gen pmap_invl_gen_head = {
543 	.gen = 1,
544 	.next = NULL,
545 };
546 static u_long pmap_invl_gen = 1;
547 static int pmap_invl_waiters;
548 static struct callout pmap_invl_callout;
549 static bool pmap_invl_callout_inited;
550 
551 #define	PMAP_ASSERT_NOT_IN_DI() \
552     KASSERT(pmap_not_in_di(), ("DI already started"))
553 
554 static bool
555 pmap_di_locked(void)
556 {
557 	int tun;
558 
559 	if ((cpu_feature2 & CPUID2_CX16) == 0)
560 		return (true);
561 	tun = 0;
562 	TUNABLE_INT_FETCH("vm.pmap.di_locked", &tun);
563 	return (tun != 0);
564 }
565 
566 static int
567 sysctl_pmap_di_locked(SYSCTL_HANDLER_ARGS)
568 {
569 	int locked;
570 
571 	locked = pmap_di_locked();
572 	return (sysctl_handle_int(oidp, &locked, 0, req));
573 }
574 SYSCTL_PROC(_vm_pmap, OID_AUTO, di_locked, CTLTYPE_INT | CTLFLAG_RDTUN |
575     CTLFLAG_MPSAFE, 0, 0, sysctl_pmap_di_locked, "",
576     "Locked delayed invalidation");
577 
578 static bool pmap_not_in_di_l(void);
579 static bool pmap_not_in_di_u(void);
580 DEFINE_IFUNC(, bool, pmap_not_in_di, (void))
581 {
582 
583 	return (pmap_di_locked() ? pmap_not_in_di_l : pmap_not_in_di_u);
584 }
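/*
 * The resolver above selects the delayed invalidation flavor once at
 * boot: the lockless ("_u") implementation is used when the CPU
 * supports cmpxchg16b and the vm.pmap.di_locked tunable is not set to
 * a nonzero value; otherwise the mutex-based ("_l") implementation is
 * used.  The same selection is repeated below for the thread init,
 * start, finish, and wait entry points.
 */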
585 
586 static bool
587 pmap_not_in_di_l(void)
588 {
589 	struct pmap_invl_gen *invl_gen;
590 
591 	invl_gen = &curthread->td_md.md_invl_gen;
592 	return (invl_gen->gen == 0);
593 }
594 
595 static void
596 pmap_thread_init_invl_gen_l(struct thread *td)
597 {
598 	struct pmap_invl_gen *invl_gen;
599 
600 	invl_gen = &td->td_md.md_invl_gen;
601 	invl_gen->gen = 0;
602 }
603 
604 static void
605 pmap_delayed_invl_wait_block(u_long *m_gen, u_long *invl_gen)
606 {
607 	struct turnstile *ts;
608 
609 	ts = turnstile_trywait(&invl_gen_ts);
610 	if (*m_gen > atomic_load_long(invl_gen))
611 		turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
612 	else
613 		turnstile_cancel(ts);
614 }
615 
616 static void
617 pmap_delayed_invl_finish_unblock(u_long new_gen)
618 {
619 	struct turnstile *ts;
620 
621 	turnstile_chain_lock(&invl_gen_ts);
622 	ts = turnstile_lookup(&invl_gen_ts);
623 	if (new_gen != 0)
624 		pmap_invl_gen = new_gen;
625 	if (ts != NULL) {
626 		turnstile_broadcast(ts, TS_SHARED_QUEUE);
627 		turnstile_unpend(ts);
628 	}
629 	turnstile_chain_unlock(&invl_gen_ts);
630 }
631 
632 /*
633  * Start a new Delayed Invalidation (DI) block of code, executed by
634  * the current thread.  Within a DI block, the current thread may
635  * destroy both the page table and PV list entries for a mapping and
636  * then release the corresponding PV list lock before ensuring that
637  * the mapping is flushed from the TLBs of any processors with the
638  * pmap active.
639  */
640 static void
641 pmap_delayed_invl_start_l(void)
642 {
643 	struct pmap_invl_gen *invl_gen;
644 	u_long currgen;
645 
646 	invl_gen = &curthread->td_md.md_invl_gen;
647 	PMAP_ASSERT_NOT_IN_DI();
648 	mtx_lock(&invl_gen_mtx);
649 	if (LIST_EMPTY(&pmap_invl_gen_tracker))
650 		currgen = pmap_invl_gen;
651 	else
652 		currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
653 	invl_gen->gen = currgen + 1;
654 	LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
655 	mtx_unlock(&invl_gen_mtx);
656 }
657 
658 /*
659  * Finish the DI block, previously started by the current thread.  All
660  * required TLB flushes for the pages marked by
661  * pmap_delayed_invl_page() must be finished before this function is
662  * called.
663  *
664  * This function works by bumping the global DI generation number to
665  * the generation number of the current thread's DI, unless there is a
666  * pending DI that started earlier.  In the latter case, bumping the
667  * global DI generation number would incorrectly signal that the
668  * earlier DI had finished.  Instead, this function bumps the earlier
669  * DI's generation number to match the generation number of the
670  * current thread's DI.
671  */
672 static void
673 pmap_delayed_invl_finish_l(void)
674 {
675 	struct pmap_invl_gen *invl_gen, *next;
676 
677 	invl_gen = &curthread->td_md.md_invl_gen;
678 	KASSERT(invl_gen->gen != 0, ("missed invl_start"));
679 	mtx_lock(&invl_gen_mtx);
680 	next = LIST_NEXT(invl_gen, link);
681 	if (next == NULL)
682 		pmap_delayed_invl_finish_unblock(invl_gen->gen);
683 	else
684 		next->gen = invl_gen->gen;
685 	LIST_REMOVE(invl_gen, link);
686 	mtx_unlock(&invl_gen_mtx);
687 	invl_gen->gen = 0;
688 }
689 
690 static bool
691 pmap_not_in_di_u(void)
692 {
693 	struct pmap_invl_gen *invl_gen;
694 
695 	invl_gen = &curthread->td_md.md_invl_gen;
696 	return (((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) != 0);
697 }
698 
699 static void
700 pmap_thread_init_invl_gen_u(struct thread *td)
701 {
702 	struct pmap_invl_gen *invl_gen;
703 
704 	invl_gen = &td->td_md.md_invl_gen;
705 	invl_gen->gen = 0;
706 	invl_gen->next = (void *)PMAP_INVL_GEN_NEXT_INVALID;
707 }
708 
709 static bool
710 pmap_di_load_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *out)
711 {
712 	uint64_t new_high, new_low, old_high, old_low;
713 	char res;
714 
715 	old_low = new_low = 0;
716 	old_high = new_high = (uintptr_t)0;
717 
718 	__asm volatile("lock;cmpxchg16b\t%1"
719 	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
720 	    : "b"(new_low), "c" (new_high)
721 	    : "memory", "cc");
722 	if (res == 0) {
723 		if ((old_high & PMAP_INVL_GEN_NEXT_INVALID) != 0)
724 			return (false);
725 		out->gen = old_low;
726 		out->next = (void *)old_high;
727 	} else {
728 		out->gen = new_low;
729 		out->next = (void *)new_high;
730 	}
731 	return (true);
732 }
733 
734 static bool
735 pmap_di_store_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *old_val,
736     struct pmap_invl_gen *new_val)
737 {
738 	uint64_t new_high, new_low, old_high, old_low;
739 	char res;
740 
741 	new_low = new_val->gen;
742 	new_high = (uintptr_t)new_val->next;
743 	old_low = old_val->gen;
744 	old_high = (uintptr_t)old_val->next;
745 
746 	__asm volatile("lock;cmpxchg16b\t%1"
747 	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
748 	    : "b"(new_low), "c" (new_high)
749 	    : "memory", "cc");
750 	return (res);
751 }
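/*
 * Illustrative sketch (hypothetical function, guarded out of the
 * build) of how the two cmpxchg16b helpers above combine into the
 * usual lock-free read-modify-write loop over a {gen, next} pair:
 * re-read and retry whenever the element is marked invalid or another
 * thread wins the compare-and-swap.
 */
#if 0
static void
pmap_di_cas_loop_sketch(struct pmap_invl_gen *p)
{
	struct pmap_invl_gen old, new;

	for (;;) {
		if (!pmap_di_load_invl(p, &old))
			continue;	/* marked PMAP_INVL_GEN_NEXT_INVALID */
		new.gen = old.gen + 1;
		new.next = old.next;
		if (pmap_di_store_invl(p, &old, &new))
			break;
	}
}
#endif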
752 
753 #ifdef PV_STATS
754 static long invl_start_restart;
755 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_start_restart, CTLFLAG_RD,
756     &invl_start_restart, 0,
757     "");
758 static long invl_finish_restart;
759 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_finish_restart, CTLFLAG_RD,
760     &invl_finish_restart, 0,
761     "");
762 static int invl_max_qlen;
763 SYSCTL_INT(_vm_pmap, OID_AUTO, invl_max_qlen, CTLFLAG_RD,
764     &invl_max_qlen, 0,
765     "");
766 #endif
767 
768 #define di_delay	locks_delay
769 
770 static void
771 pmap_delayed_invl_start_u(void)
772 {
773 	struct pmap_invl_gen *invl_gen, *p, prev, new_prev;
774 	struct thread *td;
775 	struct lock_delay_arg lda;
776 	uintptr_t prevl;
777 	u_char pri;
778 #ifdef PV_STATS
779 	int i, ii;
780 #endif
781 
782 	td = curthread;
783 	invl_gen = &td->td_md.md_invl_gen;
784 	PMAP_ASSERT_NOT_IN_DI();
785 	lock_delay_arg_init(&lda, &di_delay);
786 	invl_gen->saved_pri = 0;
787 	pri = td->td_base_pri;
788 	if (pri > PVM) {
789 		thread_lock(td);
790 		pri = td->td_base_pri;
791 		if (pri > PVM) {
792 			invl_gen->saved_pri = pri;
793 			sched_prio(td, PVM);
794 		}
795 		thread_unlock(td);
796 	}
797 again:
798 	PV_STAT(i = 0);
799 	for (p = &pmap_invl_gen_head;; p = prev.next) {
800 		PV_STAT(i++);
801 		prevl = (uintptr_t)atomic_load_ptr(&p->next);
802 		if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
803 			PV_STAT(atomic_add_long(&invl_start_restart, 1));
804 			lock_delay(&lda);
805 			goto again;
806 		}
807 		if (prevl == 0)
808 			break;
809 		prev.next = (void *)prevl;
810 	}
811 #ifdef PV_STATS
812 	if ((ii = invl_max_qlen) < i)
813 		atomic_cmpset_int(&invl_max_qlen, ii, i);
814 #endif
815 
816 	if (!pmap_di_load_invl(p, &prev) || prev.next != NULL) {
817 		PV_STAT(atomic_add_long(&invl_start_restart, 1));
818 		lock_delay(&lda);
819 		goto again;
820 	}
821 
822 	new_prev.gen = prev.gen;
823 	new_prev.next = invl_gen;
824 	invl_gen->gen = prev.gen + 1;
825 
826 	/* Formal fence between the store to invl_gen->gen and updating *p. */
827 	atomic_thread_fence_rel();
828 
829 	/*
830 	 * After inserting an invl_gen element with the invalid bit set,
831 	 * this thread blocks any other thread trying to enter the
832 	 * delayed invalidation block.  Do not allow ourselves to be
833 	 * preempted off the CPU, as that starves the other threads.
834 	 */
835 	critical_enter();
836 
837 	/*
838 	 * An ABA problem for *p is not possible here, since p->gen can
839 	 * only increase.  So if the *p thread finished its DI, then
840 	 * started a new one and got inserted into the list at the
841 	 * same place, its gen will appear greater than the previously
842 	 * read gen.
843 	 */
844 	if (!pmap_di_store_invl(p, &prev, &new_prev)) {
845 		critical_exit();
846 		PV_STAT(atomic_add_long(&invl_start_restart, 1));
847 		lock_delay(&lda);
848 		goto again;
849 	}
850 
851 	/*
852 	 * Here we clear PMAP_INVL_GEN_NEXT_INVALID in
853 	 * invl_gen->next, allowing other threads to iterate past us.
854 	 * pmap_di_store_invl() provides a fence between the generation
855 	 * write and the update of next.
856 	 */
857 	invl_gen->next = NULL;
858 	critical_exit();
859 }
860 
861 static bool
862 pmap_delayed_invl_finish_u_crit(struct pmap_invl_gen *invl_gen,
863     struct pmap_invl_gen *p)
864 {
865 	struct pmap_invl_gen prev, new_prev;
866 	u_long mygen;
867 
868 	/*
869 	 * Load invl_gen->gen after setting PMAP_INVL_GEN_NEXT_INVALID
870 	 * in invl_gen->next.  This prevents larger
871 	 * generations from propagating to our invl_gen->gen.  The lock
872 	 * prefix in atomic_set_ptr() works as a seq_cst fence.
873 	 */
874 	mygen = atomic_load_long(&invl_gen->gen);
875 
876 	if (!pmap_di_load_invl(p, &prev) || prev.next != invl_gen)
877 		return (false);
878 
879 	KASSERT(prev.gen < mygen,
880 	    ("invalid di gen sequence %lu %lu", prev.gen, mygen));
881 	new_prev.gen = mygen;
882 	new_prev.next = (void *)((uintptr_t)invl_gen->next &
883 	    ~PMAP_INVL_GEN_NEXT_INVALID);
884 
885 	/* Formal fence between loading prev and storing the update to it. */
886 	atomic_thread_fence_rel();
887 
888 	return (pmap_di_store_invl(p, &prev, &new_prev));
889 }
890 
891 static void
892 pmap_delayed_invl_finish_u(void)
893 {
894 	struct pmap_invl_gen *invl_gen, *p;
895 	struct thread *td;
896 	struct lock_delay_arg lda;
897 	uintptr_t prevl;
898 
899 	td = curthread;
900 	invl_gen = &td->td_md.md_invl_gen;
901 	KASSERT(invl_gen->gen != 0, ("missed invl_start: gen 0"));
902 	KASSERT(((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) == 0,
903 	    ("missed invl_start: INVALID"));
904 	lock_delay_arg_init(&lda, &di_delay);
905 
906 again:
907 	for (p = &pmap_invl_gen_head; p != NULL; p = (void *)prevl) {
908 		prevl = (uintptr_t)atomic_load_ptr(&p->next);
909 		if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
910 			PV_STAT(atomic_add_long(&invl_finish_restart, 1));
911 			lock_delay(&lda);
912 			goto again;
913 		}
914 		if ((void *)prevl == invl_gen)
915 			break;
916 	}
917 
918 	/*
919 	 * It is legitimate to not find ourselves on the list if a
920 	 * thread before us finished its DI and started it again.
921 	 */
922 	if (__predict_false(p == NULL)) {
923 		PV_STAT(atomic_add_long(&invl_finish_restart, 1));
924 		lock_delay(&lda);
925 		goto again;
926 	}
927 
928 	critical_enter();
929 	atomic_set_ptr((uintptr_t *)&invl_gen->next,
930 	    PMAP_INVL_GEN_NEXT_INVALID);
931 	if (!pmap_delayed_invl_finish_u_crit(invl_gen, p)) {
932 		atomic_clear_ptr((uintptr_t *)&invl_gen->next,
933 		    PMAP_INVL_GEN_NEXT_INVALID);
934 		critical_exit();
935 		PV_STAT(atomic_add_long(&invl_finish_restart, 1));
936 		lock_delay(&lda);
937 		goto again;
938 	}
939 	critical_exit();
940 	if (atomic_load_int(&pmap_invl_waiters) > 0)
941 		pmap_delayed_invl_finish_unblock(0);
942 	if (invl_gen->saved_pri != 0) {
943 		thread_lock(td);
944 		sched_prio(td, invl_gen->saved_pri);
945 		thread_unlock(td);
946 	}
947 }
948 
949 #ifdef DDB
950 DB_SHOW_COMMAND(di_queue, pmap_di_queue)
951 {
952 	struct pmap_invl_gen *p, *pn;
953 	struct thread *td;
954 	uintptr_t nextl;
955 	bool first;
956 
957 	for (p = &pmap_invl_gen_head, first = true; p != NULL; p = pn,
958 	    first = false) {
959 		nextl = (uintptr_t)atomic_load_ptr(&p->next);
960 		pn = (void *)(nextl & ~PMAP_INVL_GEN_NEXT_INVALID);
961 		td = first ? NULL : __containerof(p, struct thread,
962 		    td_md.md_invl_gen);
963 		db_printf("gen %lu inv %d td %p tid %d\n", p->gen,
964 		    (nextl & PMAP_INVL_GEN_NEXT_INVALID) != 0, td,
965 		    td != NULL ? td->td_tid : -1);
966 	}
967 }
968 #endif
969 
970 #ifdef PV_STATS
971 static long invl_wait;
972 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait, CTLFLAG_RD, &invl_wait, 0,
973     "Number of times DI invalidation blocked pmap_remove_all/write");
974 static long invl_wait_slow;
975 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait_slow, CTLFLAG_RD, &invl_wait_slow, 0,
976     "Number of slow invalidation waits for lockless DI");
977 #endif
978 
979 #ifdef NUMA
980 static u_long *
981 pmap_delayed_invl_genp(vm_page_t m)
982 {
983 	vm_paddr_t pa;
984 	u_long *gen;
985 
986 	pa = VM_PAGE_TO_PHYS(m);
987 	if (__predict_false((pa) > pmap_last_pa))
988 		gen = &pv_dummy_large.pv_invl_gen;
989 	else
990 		gen = &(pa_to_pmdp(pa)->pv_invl_gen);
991 
992 	return (gen);
993 }
994 #else
995 static u_long *
996 pmap_delayed_invl_genp(vm_page_t m)
997 {
998 
999 	return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
1000 }
1001 #endif
1002 
1003 static void
1004 pmap_delayed_invl_callout_func(void *arg __unused)
1005 {
1006 
1007 	if (atomic_load_int(&pmap_invl_waiters) == 0)
1008 		return;
1009 	pmap_delayed_invl_finish_unblock(0);
1010 }
1011 
1012 static void
1013 pmap_delayed_invl_callout_init(void *arg __unused)
1014 {
1015 
1016 	if (pmap_di_locked())
1017 		return;
1018 	callout_init(&pmap_invl_callout, 1);
1019 	pmap_invl_callout_inited = true;
1020 }
1021 SYSINIT(pmap_di_callout, SI_SUB_CPU + 1, SI_ORDER_ANY,
1022     pmap_delayed_invl_callout_init, NULL);
1023 
1024 /*
1025  * Ensure that all currently executing DI blocks that need to flush
1026  * the TLB for the given page m have actually flushed it by the time
1027  * this function returns.  If the page m has an empty PV list and we call
1028  * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
1029  * valid mapping for the page m in either its page table or TLB.
1030  *
1031  * This function works by blocking until the global DI generation
1032  * number catches up with the generation number associated with the
1033  * given page m and its PV list.  Since this function's callers
1034  * typically own an object lock and sometimes own a page lock, it
1035  * cannot sleep.  Instead, it blocks on a turnstile to relinquish the
1036  * processor.
1037  */
1038 static void
1039 pmap_delayed_invl_wait_l(vm_page_t m)
1040 {
1041 	u_long *m_gen;
1042 #ifdef PV_STATS
1043 	bool accounted = false;
1044 #endif
1045 
1046 	m_gen = pmap_delayed_invl_genp(m);
1047 	while (*m_gen > pmap_invl_gen) {
1048 #ifdef PV_STATS
1049 		if (!accounted) {
1050 			atomic_add_long(&invl_wait, 1);
1051 			accounted = true;
1052 		}
1053 #endif
1054 		pmap_delayed_invl_wait_block(m_gen, &pmap_invl_gen);
1055 	}
1056 }
1057 
1058 static void
1059 pmap_delayed_invl_wait_u(vm_page_t m)
1060 {
1061 	u_long *m_gen;
1062 	struct lock_delay_arg lda;
1063 	bool fast;
1064 
1065 	fast = true;
1066 	m_gen = pmap_delayed_invl_genp(m);
1067 	lock_delay_arg_init(&lda, &di_delay);
1068 	while (*m_gen > atomic_load_long(&pmap_invl_gen_head.gen)) {
1069 		if (fast || !pmap_invl_callout_inited) {
1070 			PV_STAT(atomic_add_long(&invl_wait, 1));
1071 			lock_delay(&lda);
1072 			fast = false;
1073 		} else {
1074 			/*
1075 			 * The page's invalidation generation number
1076 			 * is still below the current thread's number.
1077 			 * Prepare to block so that we do not waste
1078 			 * CPU cycles or worse, suffer livelock.
1079 			 *
1080 			 * Since it is impossible to block without
1081 			 * racing with pmap_delayed_invl_finish_u(),
1082 			 * prepare for the race by incrementing
1083 			 * pmap_invl_waiters and arming a 1-tick
1084 			 * callout which will unblock us if we lose
1085 			 * the race.
1086 			 */
1087 			atomic_add_int(&pmap_invl_waiters, 1);
1088 
1089 			/*
1090 			 * Re-check the current thread's invalidation
1091 			 * generation after incrementing
1092 			 * pmap_invl_waiters, so that there is no race
1093 			 * with pmap_delayed_invl_finish_u() setting
1094 			 * the page generation and checking
1095 			 * pmap_invl_waiters.  The only race allowed
1096 			 * is for a missed unblock, which is handled
1097 			 * by the callout.
1098 			 */
1099 			if (*m_gen >
1100 			    atomic_load_long(&pmap_invl_gen_head.gen)) {
1101 				callout_reset(&pmap_invl_callout, 1,
1102 				    pmap_delayed_invl_callout_func, NULL);
1103 				PV_STAT(atomic_add_long(&invl_wait_slow, 1));
1104 				pmap_delayed_invl_wait_block(m_gen,
1105 				    &pmap_invl_gen_head.gen);
1106 			}
1107 			atomic_add_int(&pmap_invl_waiters, -1);
1108 		}
1109 	}
1110 }
1111 
1112 DEFINE_IFUNC(, void, pmap_thread_init_invl_gen, (struct thread *))
1113 {
1114 
1115 	return (pmap_di_locked() ? pmap_thread_init_invl_gen_l :
1116 	    pmap_thread_init_invl_gen_u);
1117 }
1118 
1119 DEFINE_IFUNC(static, void, pmap_delayed_invl_start, (void))
1120 {
1121 
1122 	return (pmap_di_locked() ? pmap_delayed_invl_start_l :
1123 	    pmap_delayed_invl_start_u);
1124 }
1125 
1126 DEFINE_IFUNC(static, void, pmap_delayed_invl_finish, (void))
1127 {
1128 
1129 	return (pmap_di_locked() ? pmap_delayed_invl_finish_l :
1130 	    pmap_delayed_invl_finish_u);
1131 }
1132 
1133 DEFINE_IFUNC(static, void, pmap_delayed_invl_wait, (vm_page_t))
1134 {
1135 
1136 	return (pmap_di_locked() ? pmap_delayed_invl_wait_l :
1137 	    pmap_delayed_invl_wait_u);
1138 }
1139 
1140 /*
1141  * Mark the page m's PV list as participating in the current thread's
1142  * DI block.  Any threads concurrently using m's PV list to remove or
1143  * restrict all mappings to m will wait for the current thread's DI
1144  * block to complete before proceeding.
1145  *
1146  * The function works by setting the DI generation number for m's PV
1147  * list to at least the DI generation number of the current thread.
1148  * This forces a caller of pmap_delayed_invl_wait() to block until
1149  * the current thread calls pmap_delayed_invl_finish().
1150  */
1151 static void
1152 pmap_delayed_invl_page(vm_page_t m)
1153 {
1154 	u_long gen, *m_gen;
1155 
1156 	rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
1157 	gen = curthread->td_md.md_invl_gen.gen;
1158 	if (gen == 0)
1159 		return;
1160 	m_gen = pmap_delayed_invl_genp(m);
1161 	if (*m_gen < gen)
1162 		*m_gen = gen;
1163 }
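/*
 * Illustrative sketch (hypothetical function, guarded out of the
 * build) of the DI protocol implemented above, as used by the pmap
 * removal paths: open a DI block, tear down the mapping, record the
 * page while the PV list lock is still write-held, drop the lock,
 * shoot down the TLB, and only then close the block so that
 * pmap_delayed_invl_wait() callers may proceed.
 */
#if 0
static void
pmap_di_protocol_sketch(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pmap_delayed_invl_start();
	/* ... clear the PTE and free the PV entry for (pmap, va) ... */
	pmap_delayed_invl_page(m);	/* PV list lock must be write-held */
	/* ... release the PV list lock, then invalidate the TLBs ... */
	pmap_delayed_invl_finish();
}
#endif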
1164 
1165 /*
1166  * Crashdump maps.
1167  */
1168 static caddr_t crashdumpmap;
1169 
1170 /*
1171  * Internal flags for pmap_enter()'s helper functions.
1172  */
1173 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
1174 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
1175 
1176 /*
1177  * Internal flags for pmap_mapdev_internal() and
1178  * pmap_change_props_locked().
1179  */
1180 #define	MAPDEV_FLUSHCACHE	0x00000001	/* Flush cache after mapping. */
1181 #define	MAPDEV_SETATTR		0x00000002	/* Modify existing attrs. */
1182 #define	MAPDEV_ASSERTVALID	0x00000004	/* Assert mapping validity. */
1183 
1184 TAILQ_HEAD(pv_chunklist, pv_chunk);
1185 
1186 static void	free_pv_chunk(struct pv_chunk *pc);
1187 static void	free_pv_chunk_batch(struct pv_chunklist *batch);
1188 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
1189 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
1190 static int	popcnt_pc_map_pq(uint64_t *map);
1191 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
1192 static void	reserve_pv_entries(pmap_t pmap, int needed,
1193 		    struct rwlock **lockp);
1194 static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1195 		    struct rwlock **lockp);
1196 static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
1197 		    u_int flags, struct rwlock **lockp);
1198 #if VM_NRESERVLEVEL > 0
1199 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1200 		    struct rwlock **lockp);
1201 #endif
1202 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
1203 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
1204 		    vm_offset_t va);
1205 
1206 static void	pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
1207 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
1208     vm_prot_t prot, int mode, int flags);
1209 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
1210 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
1211     vm_offset_t va, struct rwlock **lockp);
1212 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
1213     vm_offset_t va);
1214 static bool	pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
1215 		    vm_prot_t prot, struct rwlock **lockp);
1216 static int	pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
1217 		    u_int flags, vm_page_t m, struct rwlock **lockp);
1218 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
1219     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
1220 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
1221 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
1222 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
1223     vm_offset_t eva);
1224 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
1225     vm_offset_t eva);
1226 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
1227 		    pd_entry_t pde);
1228 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
1229 static vm_page_t pmap_large_map_getptp_unlocked(void);
1230 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
1231 #if VM_NRESERVLEVEL > 0
1232 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
1233     struct rwlock **lockp);
1234 #endif
1235 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
1236     vm_prot_t prot);
1237 static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
1238 static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
1239     bool exec);
1240 static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
1241 static pd_entry_t *pmap_pti_pde(vm_offset_t va);
1242 static void pmap_pti_wire_pte(void *pte);
1243 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
1244     struct spglist *free, struct rwlock **lockp);
1245 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
1246     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
1247 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
1248 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1249     struct spglist *free);
1250 static bool	pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1251 		    pd_entry_t *pde, struct spglist *free,
1252 		    struct rwlock **lockp);
1253 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
1254     vm_page_t m, struct rwlock **lockp);
1255 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1256     pd_entry_t newpde);
1257 static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
1258 
1259 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
1260 		struct rwlock **lockp);
1261 static pd_entry_t *pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
1262 		struct rwlock **lockp);
1263 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
1264 		struct rwlock **lockp);
1265 
1266 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
1267     struct spglist *free);
1268 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
1269 
1270 /********************/
1271 /* Inline functions */
1272 /********************/
1273 
1274 /* Return a non-clipped PD index for a given VA */
1275 static __inline vm_pindex_t
1276 pmap_pde_pindex(vm_offset_t va)
1277 {
1278 	return (va >> PDRSHIFT);
1279 }
1280 
1281 
1282 /* Return a pointer to the PML4 slot that corresponds to a VA */
1283 static __inline pml4_entry_t *
1284 pmap_pml4e(pmap_t pmap, vm_offset_t va)
1285 {
1286 
1287 	return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
1288 }
1289 
1290 /* Return a pointer to the PDP slot that corresponds to a VA */
1291 static __inline pdp_entry_t *
1292 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
1293 {
1294 	pdp_entry_t *pdpe;
1295 
1296 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
1297 	return (&pdpe[pmap_pdpe_index(va)]);
1298 }
1299 
1300 /* Return a pointer to the PDP slot that corresponds to a VA */
1301 static __inline pdp_entry_t *
1302 pmap_pdpe(pmap_t pmap, vm_offset_t va)
1303 {
1304 	pml4_entry_t *pml4e;
1305 	pt_entry_t PG_V;
1306 
1307 	PG_V = pmap_valid_bit(pmap);
1308 	pml4e = pmap_pml4e(pmap, va);
1309 	if ((*pml4e & PG_V) == 0)
1310 		return (NULL);
1311 	return (pmap_pml4e_to_pdpe(pml4e, va));
1312 }
1313 
1314 /* Return a pointer to the PD slot that corresponds to a VA */
1315 static __inline pd_entry_t *
1316 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
1317 {
1318 	pd_entry_t *pde;
1319 
1320 	KASSERT((*pdpe & PG_PS) == 0,
1321 	    ("%s: pdpe %#lx is a leaf", __func__, *pdpe));
1322 	pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
1323 	return (&pde[pmap_pde_index(va)]);
1324 }
1325 
1326 /* Return a pointer to the PD slot that corresponds to a VA */
1327 static __inline pd_entry_t *
1328 pmap_pde(pmap_t pmap, vm_offset_t va)
1329 {
1330 	pdp_entry_t *pdpe;
1331 	pt_entry_t PG_V;
1332 
1333 	PG_V = pmap_valid_bit(pmap);
1334 	pdpe = pmap_pdpe(pmap, va);
1335 	if (pdpe == NULL || (*pdpe & PG_V) == 0)
1336 		return (NULL);
1337 	return (pmap_pdpe_to_pde(pdpe, va));
1338 }
1339 
1340 /* Return a pointer to the PT slot that corresponds to a VA */
1341 static __inline pt_entry_t *
1342 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
1343 {
1344 	pt_entry_t *pte;
1345 
1346 	KASSERT((*pde & PG_PS) == 0,
1347 	    ("%s: pde %#lx is a leaf", __func__, *pde));
1348 	pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
1349 	return (&pte[pmap_pte_index(va)]);
1350 }
1351 
1352 /* Return a pointer to the PT slot that corresponds to a VA */
1353 static __inline pt_entry_t *
1354 pmap_pte(pmap_t pmap, vm_offset_t va)
1355 {
1356 	pd_entry_t *pde;
1357 	pt_entry_t PG_V;
1358 
1359 	PG_V = pmap_valid_bit(pmap);
1360 	pde = pmap_pde(pmap, va);
1361 	if (pde == NULL || (*pde & PG_V) == 0)
1362 		return (NULL);
1363 	if ((*pde & PG_PS) != 0)	/* compat with i386 pmap_pte() */
1364 		return ((pt_entry_t *)pde);
1365 	return (pmap_pde_to_pte(pde, va));
1366 }
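/*
 * Illustrative sketch (hypothetical function, guarded out of the
 * build) of a complete VA-to-PA walk using the accessors above; real
 * callers use pmap_extract() with the pmap lock held.  The sketch
 * assumes the mapping exists and is at most a 2M superpage.
 */
#if 0
static vm_paddr_t
pmap_walk_sketch(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;

	pde = pmap_pde(pmap, va);
	if (pde == NULL)
		return (0);
	if ((*pde & PG_PS) != 0)	/* 2M leaf mapping */
		return ((*pde & PG_PS_FRAME) | (va & PDRMASK));
	pte = pmap_pde_to_pte(pde, va);
	return ((*pte & PG_FRAME) | (va & PAGE_MASK));
}
#endif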
1367 
1368 static __inline void
1369 pmap_resident_count_inc(pmap_t pmap, int count)
1370 {
1371 
1372 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1373 	pmap->pm_stats.resident_count += count;
1374 }
1375 
1376 static __inline void
1377 pmap_resident_count_dec(pmap_t pmap, int count)
1378 {
1379 
1380 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1381 	KASSERT(pmap->pm_stats.resident_count >= count,
1382 	    ("pmap %p resident count underflow %ld %d", pmap,
1383 	    pmap->pm_stats.resident_count, count));
1384 	pmap->pm_stats.resident_count -= count;
1385 }
1386 
1387 PMAP_INLINE pt_entry_t *
1388 vtopte(vm_offset_t va)
1389 {
1390 	u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
1391 
1392 	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));
1393 
1394 	return (PTmap + ((va >> PAGE_SHIFT) & mask));
1395 }
1396 
1397 static __inline pd_entry_t *
1398 vtopde(vm_offset_t va)
1399 {
1400 	u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
1401 
1402 	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));
1403 
1404 	return (PDmap + ((va >> PDRSHIFT) & mask));
1405 }
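/*
 * vtopte() and vtopde() rely on the recursive PML4 slot (PML4PML4I)
 * installed by create_pagetables() below: PTmap and PDmap are the
 * virtual addresses at which the page tables map themselves, so adding
 * the masked, shifted VA to them yields a pointer directly into the
 * kernel's PTE or PDE for that address.
 */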
1406 
1407 static u_int64_t
1408 allocpages(vm_paddr_t *firstaddr, int n)
1409 {
1410 	u_int64_t ret;
1411 
1412 	ret = *firstaddr;
1413 	bzero((void *)ret, n * PAGE_SIZE);
1414 	*firstaddr += n * PAGE_SIZE;
1415 	return (ret);
1416 }
1417 
1418 CTASSERT(powerof2(NDMPML4E));
1419 
1420 /* number of kernel PDP entries needed to map the given PT pages */
1421 #define	NKPDPE(ptpgs)		howmany(ptpgs, NPDEPG)
1422 
1423 static void
1424 nkpt_init(vm_paddr_t addr)
1425 {
1426 	int pt_pages;
1427 
1428 #ifdef NKPT
1429 	pt_pages = NKPT;
1430 #else
1431 	pt_pages = howmany(addr, 1 << PDRSHIFT);
1432 	pt_pages += NKPDPE(pt_pages);
1433 
1434 	/*
1435 	 * Add some slop beyond the bare minimum required for bootstrapping
1436 	 * the kernel.
1437 	 *
1438 	 * This is quite important when allocating KVA for kernel modules.
1439 	 * The modules are required to be linked in the negative 2GB of
1440 	 * the address space.  If we run out of KVA in this region then
1441 	 * pmap_growkernel() will need to allocate page table pages to map
1442 	 * the entire 512GB of KVA space which is an unnecessary tax on
1443 	 * physical memory.
1444 	 *
1445 	 * Secondly, device memory mapped as part of setting up the low-
1446 	 * level console(s) is taken from KVA, starting at virtual_avail.
1447 	 * This is because cninit() is called after pmap_bootstrap() but
1448 	 * before vm_init() and pmap_init(). 20MB for a frame buffer is
1449 	 * not uncommon.
1450 	 */
1451 	pt_pages += 32;		/* 64MB additional slop. */
1452 #endif
1453 	nkpt = pt_pages;
1454 }
1455 
1456 /*
1457  * Returns the proper write/execute permission for a physical page that is
1458  * part of the initial boot allocations.
1459  *
1460  * If the page has kernel text, it is marked as read-only. If the page has
1461  * kernel read-only data, it is marked as read-only/not-executable. If the
1462  * page has only read-write data, it is marked as read-write/not-executable.
1463  * If the page is below/above the kernel range, it is marked as read-write.
1464  *
1465  * This function operates on 2M pages, since we map the kernel space that
1466  * way.
1467  */
1468 static inline pt_entry_t
1469 bootaddr_rwx(vm_paddr_t pa)
1470 {
1471 
1472 	/*
1473 	 * The kernel is loaded at a 2MB-aligned address, and memory below that
1474 	 * need not be executable.  The .bss section is padded to a 2MB
1475 	 * boundary, so memory following the kernel need not be executable
1476 	 * either.  Preloaded kernel modules have their mapping permissions
1477 	 * fixed up by the linker.
1478 	 */
1479 	if (pa < trunc_2mpage(btext - KERNBASE) ||
1480 	    pa >= trunc_2mpage(_end - KERNBASE))
1481 		return (X86_PG_RW | pg_nx);
1482 
1483 	/*
1484 	 * The linker should ensure that the read-only and read-write
1485 	 * portions don't share the same 2M page, so this shouldn't
1486 	 * impact read-only data.  In any case, any page with
1487 	 * read-write data needs to be read-write.
1488 	 */
1489 	if (pa >= trunc_2mpage(brwsection - KERNBASE))
1490 		return (X86_PG_RW | pg_nx);
1491 
1492 	/*
1493 	 * Mark any 2M page containing kernel text as read-only. Mark
1494 	 * other pages with read-only data as read-only and not executable.
1495 	 * (It is likely a small portion of the read-only data section will
1496 	 * be marked as read-only, but executable. This should be acceptable
1497 	 * since the read-only protection will keep the data from changing.)
1498 	 * Note that fixups to the .text section will still work until we
1499 	 * set CR0.WP.
1500 	 */
1501 	if (pa < round_2mpage(etext - KERNBASE))
1502 		return (0);
1503 	return (pg_nx);
1504 }
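/*
 * Worked examples of the returns above: a 2M page holding kernel text
 * gets 0 (read-only and executable), a page holding only read-only
 * data gets pg_nx (read-only, not executable), and a page holding
 * read-write data or lying outside the kernel image gets
 * X86_PG_RW | pg_nx (read-write, not executable).
 */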
1505 
1506 static void
1507 create_pagetables(vm_paddr_t *firstaddr)
1508 {
1509 	int i, j, ndm1g, nkpdpe, nkdmpde;
1510 	pd_entry_t *pd_p;
1511 	pdp_entry_t *pdp_p;
1512 	pml4_entry_t *p4_p;
1513 	uint64_t DMPDkernphys;
1514 
1515 	/* Allocate page table pages for the direct map */
1516 	ndmpdp = howmany(ptoa(Maxmem), NBPDP);
1517 	if (ndmpdp < 4)		/* Minimum 4GB of dirmap */
1518 		ndmpdp = 4;
1519 	ndmpdpphys = howmany(ndmpdp, NPDPEPG);
1520 	if (ndmpdpphys > NDMPML4E) {
1521 		/*
1522 		 * Each NDMPML4E allows 512 GB, so limit to that,
1523 		 * and then readjust ndmpdp and ndmpdpphys.
1524 		 */
1525 		printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
1526 		Maxmem = atop(NDMPML4E * NBPML4);
1527 		ndmpdpphys = NDMPML4E;
1528 		ndmpdp = NDMPML4E * NPDEPG;
1529 	}
1530 	DMPDPphys = allocpages(firstaddr, ndmpdpphys);
1531 	ndm1g = 0;
1532 	if ((amd_feature & AMDID_PAGE1GB) != 0) {
1533 		/*
1534 		 * Calculate the number of 1G pages that will fully fit in
1535 		 * Maxmem.
1536 		 */
1537 		ndm1g = ptoa(Maxmem) >> PDPSHIFT;
1538 
1539 		/*
1540 		 * Allocate 2M pages for the kernel. These will be used in
1541 		 * place of the first one or more 1G pages from ndm1g.
1542 		 */
1543 		nkdmpde = howmany((vm_offset_t)(brwsection - KERNBASE), NBPDP);
1544 		DMPDkernphys = allocpages(firstaddr, nkdmpde);
1545 	}
1546 	if (ndm1g < ndmpdp)
1547 		DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
1548 	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
1549 
1550 	/* Allocate pages for the kernel PML4 and PDP levels */
1551 	KPML4phys = allocpages(firstaddr, 1);
1552 	KPDPphys = allocpages(firstaddr, NKPML4E);
1553 
1554 	/*
1555 	 * Allocate the initial number of kernel page table pages required to
1556 	 * bootstrap.  We defer this until after all memory-size dependent
1557 	 * allocations are done (e.g. direct map), so that we don't have to
1558 	 * build in too much slop in our estimate.
1559 	 *
1560 	 * Note that when NKPML4E > 1, we have an empty page underneath
1561 	 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
1562 	 * pages.  (pmap_enter requires a PD page to exist for each KPML4E.)
1563 	 */
1564 	nkpt_init(*firstaddr);
1565 	nkpdpe = NKPDPE(nkpt);
1566 
1567 	KPTphys = allocpages(firstaddr, nkpt);
1568 	KPDphys = allocpages(firstaddr, nkpdpe);
1569 
1570 	/*
1571 	 * Connect the zero-filled PT pages to their PD entries.  This
1572 	 * implicitly maps the PT pages at their correct locations within
1573 	 * the PTmap.
1574 	 */
1575 	pd_p = (pd_entry_t *)KPDphys;
1576 	for (i = 0; i < nkpt; i++)
1577 		pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1578 
1579 	/*
1580 	 * Map from physical address zero to the end of loader preallocated
1581 	 * memory using 2MB pages.  This replaces some of the PD entries
1582 	 * created above.
1583 	 */
1584 	for (i = 0; (i << PDRSHIFT) < KERNend; i++)
1585 		/* Preset PG_M and PG_A because demotion expects it. */
1586 		pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g |
1587 		    X86_PG_M | X86_PG_A | bootaddr_rwx(i << PDRSHIFT);
1588 
1589 	/*
1590 	 * Because we map the physical blocks in 2M pages, adjust firstaddr
1591 	 * to record the physical blocks we've actually mapped into kernel
1592 	 * virtual address space.
1593 	 */
1594 	if (*firstaddr < round_2mpage(KERNend))
1595 		*firstaddr = round_2mpage(KERNend);
1596 
1597 	/* And connect up the PD to the PDP (leaving room for L4 pages) */
1598 	pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
1599 	for (i = 0; i < nkpdpe; i++)
1600 		pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1601 
1602 	/*
1603 	 * Now, set up the direct map region using 2MB and/or 1GB pages.  If
1604 	 * the end of physical memory is not aligned to a 1GB page boundary,
1605 	 * then the residual physical memory is mapped with 2MB pages.  Later,
1606 	 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
1607 	 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
1608 	 * that are partially used.
1609 	 */
1610 	pd_p = (pd_entry_t *)DMPDphys;
1611 	for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
1612 		pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
1613 		/* Preset PG_M and PG_A because demotion expects it. */
1614 		pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1615 		    X86_PG_M | X86_PG_A | pg_nx;
1616 	}
1617 	pdp_p = (pdp_entry_t *)DMPDPphys;
1618 	for (i = 0; i < ndm1g; i++) {
1619 		pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
1620 		/* Preset PG_M and PG_A because demotion expects it. */
1621 		pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1622 		    X86_PG_M | X86_PG_A | pg_nx;
1623 	}
1624 	for (j = 0; i < ndmpdp; i++, j++) {
1625 		pdp_p[i] = DMPDphys + ptoa(j);
1626 		pdp_p[i] |= X86_PG_RW | X86_PG_V | pg_nx;
1627 	}
1628 
1629 	/*
1630 	 * Instead of using a 1G page for the memory containing the kernel,
1631 	 * use 2M pages with read-only and no-execute permissions.  (If using 1G
1632 	 * pages, this will partially overwrite the PDPEs above.)
1633 	 */
1634 	if (ndm1g) {
1635 		pd_p = (pd_entry_t *)DMPDkernphys;
1636 		for (i = 0; i < (NPDEPG * nkdmpde); i++)
1637 			pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g |
1638 			    X86_PG_M | X86_PG_A | pg_nx |
1639 			    bootaddr_rwx(i << PDRSHIFT);
1640 		for (i = 0; i < nkdmpde; i++)
1641 			pdp_p[i] = (DMPDkernphys + ptoa(i)) | X86_PG_RW |
1642 			    X86_PG_V | pg_nx;
1643 	}
1644 
1645 	/* And recursively map PML4 to itself in order to get PTmap */
1646 	p4_p = (pml4_entry_t *)KPML4phys;
1647 	p4_p[PML4PML4I] = KPML4phys;
1648 	p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | pg_nx;
1649 
1650 	/* Connect the Direct Map slot(s) up to the PML4. */
1651 	for (i = 0; i < ndmpdpphys; i++) {
1652 		p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
1653 		p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1654 	}
1655 
1656 	/* Connect the KVA slots up to the PML4 */
1657 	for (i = 0; i < NKPML4E; i++) {
1658 		p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
1659 		p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V;
1660 	}
1661 }
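/*
 * At this point KPML4phys contains three groups of entries: the
 * recursive slot at PML4PML4I (which backs PTmap/PDmap and vtopte()),
 * ndmpdpphys direct-map slots starting at DMPML4I, and NKPML4E
 * kernel-VA slots starting at KPML4BASE, with the bootstrap kernel
 * page tables hanging off the KPML4I slot within that last group.
 */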
1662 
1663 /*
1664  *	Bootstrap the system enough to run with virtual memory.
1665  *
1666  *	On amd64 this is called after mapping has already been enabled
1667  *	and just syncs the pmap module with what has already been done.
1668  *	[We can't easily call it with the mapping off, since the kernel is
1669  *	not mapped with PA == VA; we would have to relocate every address
1670  *	from the linked (virtual) base address "KERNBASE" to the actual
1671  *	(physical) address, starting relative to 0.]
1672  */
1673 void
1674 pmap_bootstrap(vm_paddr_t *firstaddr)
1675 {
1676 	vm_offset_t va;
1677 	pt_entry_t *pte, *pcpu_pte;
1678 	struct region_descriptor r_gdt;
1679 	uint64_t cr4, pcpu_phys;
1680 	u_long res;
1681 	int i;
1682 
1683 	KERNend = *firstaddr;
1684 	res = atop(KERNend - (vm_paddr_t)kernphys);
1685 
1686 	if (!pti)
1687 		pg_g = X86_PG_G;
1688 
1689 	/*
1690 	 * Create an initial set of page tables to run the kernel in.
1691 	 */
1692 	create_pagetables(firstaddr);
1693 
1694 	pcpu_phys = allocpages(firstaddr, MAXCPU);
1695 
1696 	/*
1697 	 * Add a physical memory segment (vm_phys_seg) corresponding to the
1698 	 * preallocated kernel page table pages so that vm_page structures
1699 	 * representing these pages will be created.  The vm_page structures
1700 	 * are required for promotion of the corresponding kernel virtual
1701 	 * addresses to superpage mappings.
1702 	 */
1703 	vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1704 
1705 	/*
1706 	 * Account for the virtual addresses mapped by create_pagetables().
1707 	 */
1708 	virtual_avail = (vm_offset_t)KERNBASE + round_2mpage(KERNend);
1709 	virtual_end = VM_MAX_KERNEL_ADDRESS;
1710 
1711 	/*
1712 	 * Enable PG_G global pages, then switch to the kernel page
1713 	 * table from the bootstrap page table.  After the switch, it
1714 	 * is possible to enable SMEP and SMAP since PG_U bits are
1715 	 * correct now.
1716 	 */
1717 	cr4 = rcr4();
1718 	cr4 |= CR4_PGE;
1719 	load_cr4(cr4);
1720 	load_cr3(KPML4phys);
1721 	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1722 		cr4 |= CR4_SMEP;
1723 	if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
1724 		cr4 |= CR4_SMAP;
1725 	load_cr4(cr4);
1726 
1727 	/*
1728 	 * Initialize the kernel pmap (which is statically allocated).
1729 	 * Count bootstrap data as being resident in case any of this data is
1730 	 * later unmapped (using pmap_remove()) and freed.
1731 	 */
1732 	PMAP_LOCK_INIT(kernel_pmap);
1733 	kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
1734 	kernel_pmap->pm_cr3 = KPML4phys;
1735 	kernel_pmap->pm_ucr3 = PMAP_NO_CR3;
1736 	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
1737 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1738 	kernel_pmap->pm_stats.resident_count = res;
1739 	kernel_pmap->pm_flags = pmap_flags;
1740 
1741  	/*
1742 	 * Initialize the TLB invalidations generation number lock.
1743 	 */
1744 	mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
1745 
1746 	/*
1747 	 * Reserve some special page table entries/VA space for temporary
1748 	 * mapping of pages.
1749 	 */
1750 #define	SYSMAP(c, p, v, n)	\
1751 	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
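	/*
	 * For example, the crashdump invocation below expands to roughly:
	 *
	 *	crashdumpmap = (caddr_t)va;
	 *	va += MAXDUMPPGS * PAGE_SIZE;
	 *	CMAP1 = pte;
	 *	pte += MAXDUMPPGS;
	 *
	 * i.e. it reserves "n" pages of KVA starting at "va" and records
	 * the first PTE mapping them, without entering any mapping yet.
	 */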
1752 
1753 	va = virtual_avail;
1754 	pte = vtopte(va);
1755 
1756 	/*
1757 	 * Crashdump maps.  The first page is reused as CMAP1 for the
1758 	 * memory test.
1759 	 */
1760 	SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
1761 	CADDR1 = crashdumpmap;
1762 
1763 	SYSMAP(struct pcpu *, pcpu_pte, __pcpu, MAXCPU);
1764 	virtual_avail = va;
1765 
1766 	for (i = 0; i < MAXCPU; i++) {
1767 		pcpu_pte[i] = (pcpu_phys + ptoa(i)) | X86_PG_V | X86_PG_RW |
1768 		    pg_g | pg_nx | X86_PG_M | X86_PG_A;
1769 	}
1770 
1771 	/*
1772 	 * Re-initialize PCPU area for BSP after switching.
1773 	 * Make hardware use gdt and common_tss from the new PCPU.
1774 	 */
1775 	STAILQ_INIT(&cpuhead);
1776 	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
1777 	pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
1778 	amd64_bsp_pcpu_init1(&__pcpu[0]);
1779 	amd64_bsp_ist_init(&__pcpu[0]);
1780 	__pcpu[0].pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
1781 	    IOPERM_BITMAP_SIZE;
1782 	memcpy(__pcpu[0].pc_gdt, temp_bsp_pcpu.pc_gdt, NGDT *
1783 	    sizeof(struct user_segment_descriptor));
1784 	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&__pcpu[0].pc_common_tss;
1785 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
1786 	    (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
1787 	r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
1788 	r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
1789 	lgdt(&r_gdt);
1790 	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
1791 	ltr(GSEL(GPROC0_SEL, SEL_KPL));
1792 	__pcpu[0].pc_dynamic = temp_bsp_pcpu.pc_dynamic;
1793 	__pcpu[0].pc_acpi_id = temp_bsp_pcpu.pc_acpi_id;
1794 
1795 	/*
1796 	 * Initialize the PAT MSR.
1797 	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
1798 	 * side-effect, invalidates stale PG_G TLB entries that might
1799 	 * have been created in our pre-boot environment.
1800 	 */
1801 	pmap_init_pat();
1802 
1803 	/* Initialize TLB Context Id. */
1804 	if (pmap_pcid_enabled) {
1805 		for (i = 0; i < MAXCPU; i++) {
1806 			kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN;
1807 			kernel_pmap->pm_pcids[i].pm_gen = 1;
1808 		}
1809 
1810 		/*
1811 		 * PMAP_PCID_KERN + 1 is used for initialization of
1812 		 * proc0 pmap.  The pmap' pcid state might be used by
1813 		 * EFIRT entry before first context switch, so it
1814 		 * needs to be valid.
1815 		 */
1816 		PCPU_SET(pcid_next, PMAP_PCID_KERN + 2);
1817 		PCPU_SET(pcid_gen, 1);
1818 
1819 		/*
1820 		 * pcpu area for APs is zeroed during AP startup.
1821 		 * pc_pcid_next and pc_pcid_gen are initialized by AP
1822 		 * during pcpu setup.
1823 		 */
1824 		load_cr4(rcr4() | CR4_PCIDE);
1825 	}
1826 }
1827 
1828 /*
1829  * Setup the PAT MSR.
1830  */
1831 void
1832 pmap_init_pat(void)
1833 {
1834 	uint64_t pat_msr;
1835 	u_long cr0, cr4;
1836 	int i;
1837 
1838 	/* Bail if this CPU doesn't implement PAT. */
1839 	if ((cpu_feature & CPUID_PAT) == 0)
1840 		panic("no PAT??");
1841 
1842 	/* Set default PAT index table. */
1843 	for (i = 0; i < PAT_INDEX_SIZE; i++)
1844 		pat_index[i] = -1;
1845 	pat_index[PAT_WRITE_BACK] = 0;
1846 	pat_index[PAT_WRITE_THROUGH] = 1;
1847 	pat_index[PAT_UNCACHEABLE] = 3;
1848 	pat_index[PAT_WRITE_COMBINING] = 6;
1849 	pat_index[PAT_WRITE_PROTECTED] = 5;
1850 	pat_index[PAT_UNCACHED] = 2;
1851 
1852 	/*
1853 	 * Initialize default PAT entries.
1854 	 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
1855 	 * Program 5 and 6 as WP and WC.
1856 	 *
1857 	 * Leave 4 and 7 as WB and UC.  Note that a recursive page table
1858 	 * mapping for a 2M page uses a PAT value with the bit 3 set due
1859 	 * to its overload with PG_PS.
1860 	 */
1861 	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
1862 	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
1863 	    PAT_VALUE(2, PAT_UNCACHED) |
1864 	    PAT_VALUE(3, PAT_UNCACHEABLE) |
1865 	    PAT_VALUE(4, PAT_WRITE_BACK) |
1866 	    PAT_VALUE(5, PAT_WRITE_PROTECTED) |
1867 	    PAT_VALUE(6, PAT_WRITE_COMBINING) |
1868 	    PAT_VALUE(7, PAT_UNCACHEABLE);
1869 
1870 	/* Disable PGE. */
1871 	cr4 = rcr4();
1872 	load_cr4(cr4 & ~CR4_PGE);
1873 
1874 	/* Disable caches (CD = 1, NW = 0). */
1875 	cr0 = rcr0();
1876 	load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1877 
1878 	/* Flushes caches and TLBs. */
1879 	wbinvd();
1880 	invltlb();
1881 
1882 	/* Update PAT and index table. */
1883 	wrmsr(MSR_PAT, pat_msr);
1884 
1885 	/* Flush caches and TLBs again. */
1886 	wbinvd();
1887 	invltlb();
1888 
1889 	/* Restore caches and PGE. */
1890 	load_cr0(cr0);
1891 	load_cr4(cr4);
1892 }
1893 
1894 /*
1895  *	Initialize a vm_page's machine-dependent fields.
1896  */
1897 void
1898 pmap_page_init(vm_page_t m)
1899 {
1900 
1901 	TAILQ_INIT(&m->md.pv_list);
1902 	m->md.pat_mode = PAT_WRITE_BACK;
1903 }
1904 
1905 static int pmap_allow_2m_x_ept;
1906 SYSCTL_INT(_vm_pmap, OID_AUTO, allow_2m_x_ept, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
1907     &pmap_allow_2m_x_ept, 0,
1908     "Allow executable superpage mappings in EPT");
1909 
1910 void
1911 pmap_allow_2m_x_ept_recalculate(void)
1912 {
1913 	/*
1914 	 * SKL002, SKL012S.  Since the EPT format is only used by
1915 	 * Intel CPUs, the vendor check is merely a formality.
1916 	 */
1917 	if (!(cpu_vendor_id != CPU_VENDOR_INTEL ||
1918 	    (cpu_ia32_arch_caps & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0 ||
1919 	    (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1920 	    (CPUID_TO_MODEL(cpu_id) == 0x26 ||	/* Atoms */
1921 	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
1922 	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
1923 	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
1924 	    CPUID_TO_MODEL(cpu_id) == 0x37 ||
1925 	    CPUID_TO_MODEL(cpu_id) == 0x86 ||
1926 	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
1927 	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
1928 	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
1929 	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
1930 	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
1931 	    CPUID_TO_MODEL(cpu_id) == 0x5c ||
1932 	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
1933 	    CPUID_TO_MODEL(cpu_id) == 0x5f ||
1934 	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
1935 	    CPUID_TO_MODEL(cpu_id) == 0x7a ||
1936 	    CPUID_TO_MODEL(cpu_id) == 0x57 ||	/* Knights */
1937 	    CPUID_TO_MODEL(cpu_id) == 0x85))))
1938 		pmap_allow_2m_x_ept = 1;
1939 	TUNABLE_INT_FETCH("hw.allow_2m_x_ept", &pmap_allow_2m_x_ept);
1940 }
1941 
1942 static bool
1943 pmap_allow_2m_x_page(pmap_t pmap, bool executable)
1944 {
1945 
1946 	return (pmap->pm_type != PT_EPT || !executable ||
1947 	    !pmap_allow_2m_x_ept);
1948 }
1949 
1950 #ifdef NUMA
1951 static void
1952 pmap_init_pv_table(void)
1953 {
1954 	struct pmap_large_md_page *pvd;
1955 	vm_size_t s;
1956 	long start, end, highest, pv_npg;
1957 	int domain, i, j, pages;
1958 
1959 	/*
1960 	 * We strongly depend on the size being a power of two, so the assert
1961 	 * is overzealous. However, should the struct be resized to a
1962 	 * different power of two, the code below needs to be revisited.
1963 	 */
1964 	CTASSERT((sizeof(*pvd) == 64));
1965 
1966 	/*
1967 	 * Calculate the size of the array.
1968 	 */
1969 	pmap_last_pa = vm_phys_segs[vm_phys_nsegs - 1].end;
1970 	pv_npg = howmany(pmap_last_pa, NBPDR);
1971 	s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
1972 	s = round_page(s);
1973 	pv_table = (struct pmap_large_md_page *)kva_alloc(s);
1974 	if (pv_table == NULL)
1975 		panic("%s: kva_alloc failed\n", __func__);
1976 
1977 	/*
1978 	 * Iterate physical segments to allocate space for respective pages.
1979 	 */
1980 	highest = -1;
1981 	s = 0;
1982 	for (i = 0; i < vm_phys_nsegs; i++) {
1983 		end = vm_phys_segs[i].end / NBPDR;
1984 		domain = vm_phys_segs[i].domain;
1985 
1986 		if (highest >= end)
1987 			continue;
1988 
1989 		start = highest + 1;
1990 		pvd = &pv_table[start];
1991 
1992 		pages = end - start + 1;
1993 		s = round_page(pages * sizeof(*pvd));
1994 		highest = start + (s / sizeof(*pvd)) - 1;
1995 
1996 		for (j = 0; j < s; j += PAGE_SIZE) {
1997 			vm_page_t m = vm_page_alloc_domain(NULL, 0,
1998 			    domain, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
1999 			if (m == NULL)
2000 				panic("vm_page_alloc_domain failed for %lx\n", (vm_offset_t)pvd + j);
2001 			pmap_qenter((vm_offset_t)pvd + j, &m, 1);
2002 		}
2003 
2004 		for (j = 0; j < s / sizeof(*pvd); j++) {
2005 			rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW);
2006 			TAILQ_INIT(&pvd->pv_page.pv_list);
2007 			pvd->pv_page.pv_gen = 0;
2008 			pvd->pv_page.pat_mode = 0;
2009 			pvd->pv_invl_gen = 0;
2010 			pvd++;
2011 		}
2012 	}
2013 	pvd = &pv_dummy_large;
2014 	rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
2015 	TAILQ_INIT(&pvd->pv_page.pv_list);
2016 	pvd->pv_page.pv_gen = 0;
2017 	pvd->pv_page.pat_mode = 0;
2018 	pvd->pv_invl_gen = 0;
2019 }
2020 #else
2021 static void
2022 pmap_init_pv_table(void)
2023 {
2024 	vm_size_t s;
2025 	long i, pv_npg;
2026 
2027 	/*
2028 	 * Initialize the pool of pv list locks.
2029 	 */
2030 	for (i = 0; i < NPV_LIST_LOCKS; i++)
2031 		rw_init(&pv_list_locks[i], "pmap pv list");
2032 
2033 	/*
2034 	 * Calculate the size of the pv head table for superpages.
2035 	 */
2036 	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
2037 
2038 	/*
2039 	 * Allocate memory for the pv head table for superpages.
2040 	 */
2041 	s = (vm_size_t)pv_npg * sizeof(struct md_page);
2042 	s = round_page(s);
2043 	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
2044 	for (i = 0; i < pv_npg; i++)
2045 		TAILQ_INIT(&pv_table[i].pv_list);
2046 	TAILQ_INIT(&pv_dummy.pv_list);
2047 }
2048 #endif
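/*
 * In either variant, one pv list head covers each 2MB (NBPDR) chunk of
 * physical address space.  For example, a machine whose highest physical
 * segment ends at 16GB gets howmany(16GB, 2MB) = 8192 entries.
 */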
2049 
2050 /*
2051  *	Initialize the pmap module.
2052  *	Called by vm_init, to initialize any structures that the pmap
2053  *	system needs to map virtual memory.
2054  */
2055 void
2056 pmap_init(void)
2057 {
2058 	struct pmap_preinit_mapping *ppim;
2059 	vm_page_t m, mpte;
2060 	int error, i, ret, skz63;
2061 
2062 	/* L1TF, reserve page @0 unconditionally */
2063 	vm_page_blacklist_add(0, bootverbose);
2064 
2065 	/* Detect bare-metal Skylake Server and Skylake-X. */
2066 	if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_INTEL &&
2067 	    CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x55) {
2068 		/*
2069 		 * Skylake-X erratum SKZ63: Processor May Hang When
2070 		 * Executing Code In an HLE Transaction Region between
2071 		 * 40000000H and 403FFFFFH.
2072 		 *
2073 		 * Mark the pages in the range as preallocated.  It
2074 		 * seems to be impossible to distinguish between
2075 		 * Skylake Server and Skylake X.
2076 		 */
2077 		skz63 = 1;
2078 		TUNABLE_INT_FETCH("hw.skz63_enable", &skz63);
2079 		if (skz63 != 0) {
2080 			if (bootverbose)
2081 				printf("SKZ63: skipping 4M RAM starting "
2082 				    "at physical 1G\n");
2083 			for (i = 0; i < atop(0x400000); i++) {
2084 				ret = vm_page_blacklist_add(0x40000000 +
2085 				    ptoa(i), FALSE);
2086 				if (!ret && bootverbose)
2087 					printf("page at %#lx already used\n",
2088 					    0x40000000 + ptoa(i));
2089 			}
2090 		}
2091 	}
2092 
2093 	/* IFU: recompute whether executable 2MB EPT mappings are allowed. */
2094 	pmap_allow_2m_x_ept_recalculate();
2095 
2096 	/*
2097 	 * Initialize the vm page array entries for the kernel pmap's
2098 	 * page table pages.
2099 	 */
2100 	PMAP_LOCK(kernel_pmap);
2101 	for (i = 0; i < nkpt; i++) {
2102 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
2103 		KASSERT(mpte >= vm_page_array &&
2104 		    mpte < &vm_page_array[vm_page_array_size],
2105 		    ("pmap_init: page table page is out of range"));
2106 		mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
2107 		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
2108 		mpte->ref_count = 1;
2109 
2110 		/*
2111 		 * Collect the page table pages that were replaced by a 2MB
2112 		 * page in create_pagetables().  They are zero filled.
2113 		 */
2114 		if (i << PDRSHIFT < KERNend &&
2115 		    pmap_insert_pt_page(kernel_pmap, mpte, false))
2116 			panic("pmap_init: pmap_insert_pt_page failed");
2117 	}
2118 	PMAP_UNLOCK(kernel_pmap);
2119 	vm_wire_add(nkpt);
2120 
2121 	/*
2122 	 * If the kernel is running on a virtual machine, then it must assume
2123 	 * that MCA is enabled by the hypervisor.  Moreover, the kernel must
2124 	 * be prepared for the hypervisor changing the vendor and family that
2125 	 * are reported by CPUID.  Consequently, the workaround for AMD Family
2126 	 * 10h Erratum 383 is enabled if the processor's feature set does not
2127 	 * include at least one feature that is only supported by older Intel
2128 	 * or newer AMD processors.
2129 	 */
2130 	if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
2131 	    (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
2132 	    CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
2133 	    AMDID2_FMA4)) == 0)
2134 		workaround_erratum383 = 1;
2135 
2136 	/*
2137 	 * Are large page mappings enabled?
2138 	 */
2139 	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
2140 	if (pg_ps_enabled) {
2141 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
2142 		    ("pmap_init: can't assign to pagesizes[1]"));
2143 		pagesizes[1] = NBPDR;
2144 	}
2145 
2146 	/*
2147 	 * Initialize pv chunk lists.
2148 	 */
2149 	for (i = 0; i < PMAP_MEMDOM; i++) {
2150 		mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL, MTX_DEF);
2151 		TAILQ_INIT(&pv_chunks[i].pvc_list);
2152 	}
2153 	pmap_init_pv_table();
2154 
2155 	pmap_initialized = 1;
2156 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
2157 		ppim = pmap_preinit_mapping + i;
2158 		if (ppim->va == 0)
2159 			continue;
2160 		/* Make the direct map consistent */
2161 		if (ppim->pa < dmaplimit && ppim->pa + ppim->sz <= dmaplimit) {
2162 			(void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
2163 			    ppim->sz, ppim->mode);
2164 		}
2165 		if (!bootverbose)
2166 			continue;
2167 		printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
2168 		    ppim->pa, ppim->va, ppim->sz, ppim->mode);
2169 	}
2170 
2171 	mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
2172 	error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
2173 	    (vmem_addr_t *)&qframe);
2174 	if (error != 0)
2175 		panic("qframe allocation failed");
2176 
2177 	lm_ents = 8;
2178 	TUNABLE_INT_FETCH("vm.pmap.large_map_pml4_entries", &lm_ents);
2179 	if (lm_ents > LMEPML4I - LMSPML4I + 1)
2180 		lm_ents = LMEPML4I - LMSPML4I + 1;
2181 	if (bootverbose)
2182 		printf("pmap: large map %u PML4 slots (%lu GB)\n",
2183 		    lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024));
2184 	if (lm_ents != 0) {
2185 		large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS,
2186 		    (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK);
2187 		if (large_vmem == NULL) {
2188 			printf("pmap: cannot create large map\n");
2189 			lm_ents = 0;
2190 		}
2191 		for (i = 0; i < lm_ents; i++) {
2192 			m = pmap_large_map_getptp_unlocked();
2193 			kernel_pmap->pm_pml4[LMSPML4I + i] = X86_PG_V |
2194 			    X86_PG_RW | X86_PG_A | X86_PG_M | pg_nx |
2195 			    VM_PAGE_TO_PHYS(m);
2196 		}
2197 	}
2198 }
2199 
2200 SYSCTL_UINT(_vm_pmap, OID_AUTO, large_map_pml4_entries,
2201     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lm_ents, 0,
2202     "Maximum number of PML4 entries for use by large map (tunable).  "
2203     "Each entry corresponds to 512GB of address space.");
2204 
2205 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2206     "2MB page mapping counters");
2207 
2208 static u_long pmap_pde_demotions;
2209 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
2210     &pmap_pde_demotions, 0, "2MB page demotions");
2211 
2212 static u_long pmap_pde_mappings;
2213 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
2214     &pmap_pde_mappings, 0, "2MB page mappings");
2215 
2216 static u_long pmap_pde_p_failures;
2217 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
2218     &pmap_pde_p_failures, 0, "2MB page promotion failures");
2219 
2220 static u_long pmap_pde_promotions;
2221 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
2222     &pmap_pde_promotions, 0, "2MB page promotions");
2223 
2224 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2225     "1GB page mapping counters");
2226 
2227 static u_long pmap_pdpe_demotions;
2228 SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
2229     &pmap_pdpe_demotions, 0, "1GB page demotions");
2230 
2231 /***************************************************
2232  * Low level helper routines.....
2233  ***************************************************/
2234 
2235 static pt_entry_t
2236 pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
2237 {
2238 	int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;
2239 
2240 	switch (pmap->pm_type) {
2241 	case PT_X86:
2242 	case PT_RVI:
2243 		/* Verify that both PAT bits are not set at the same time */
2244 		KASSERT((entry & x86_pat_bits) != x86_pat_bits,
2245 		    ("Invalid PAT bits in entry %#lx", entry));
2246 
2247 		/* Swap the PAT bits if one of them is set */
2248 		if ((entry & x86_pat_bits) != 0)
2249 			entry ^= x86_pat_bits;
2250 		break;
2251 	case PT_EPT:
2252 		/*
2253 		 * Nothing to do - the memory attributes are represented
2254 		 * the same way for regular pages and superpages.
2255 		 */
2256 		break;
2257 	default:
2258 		panic("pmap_swap_pat: bad pm_type %d", pmap->pm_type);
2259 	}
2260 
2261 	return (entry);
2262 }
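/*
 * For example, a 4KB write-combining mapping has X86_PG_PTE_PAT (bit 7) set
 * in its PTE.  When that mapping is promoted to a 2MB page, bit 7 becomes
 * PG_PS, so the PAT selection must move to X86_PG_PDE_PAT (bit 12); XOR-ing
 * the entry with both bits performs exactly that swap whenever one, and only
 * one, of them is set.
 */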
2263 
2264 boolean_t
2265 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
2266 {
2267 
2268 	return (mode >= 0 && mode < PAT_INDEX_SIZE &&
2269 	    pat_index[(int)mode] >= 0);
2270 }
2271 
2272 /*
2273  * Determine the appropriate bits to set in a PTE or PDE for a specified
2274  * caching mode.
2275  */
2276 int
2277 pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
2278 {
2279 	int cache_bits, pat_flag, pat_idx;
2280 
2281 	if (!pmap_is_valid_memattr(pmap, mode))
2282 		panic("Unknown caching mode %d\n", mode);
2283 
2284 	switch (pmap->pm_type) {
2285 	case PT_X86:
2286 	case PT_RVI:
2287 		/* The PAT bit is different for PTEs and PDEs. */
2288 		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2289 
2290 		/* Map the caching mode to a PAT index. */
2291 		pat_idx = pat_index[mode];
2292 
2293 		/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
2294 		cache_bits = 0;
2295 		if (pat_idx & 0x4)
2296 			cache_bits |= pat_flag;
2297 		if (pat_idx & 0x2)
2298 			cache_bits |= PG_NC_PCD;
2299 		if (pat_idx & 0x1)
2300 			cache_bits |= PG_NC_PWT;
2301 		break;
2302 
2303 	case PT_EPT:
2304 		cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
2305 		break;
2306 
2307 	default:
2308 		panic("unsupported pmap type %d", pmap->pm_type);
2309 	}
2310 
2311 	return (cache_bits);
2312 }
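/*
 * Worked example: for mode == PAT_WRITE_COMBINING, pmap_init_pat() programmed
 * pat_index[PAT_WRITE_COMBINING] = 6 (binary 110), so the PAT flag and
 * PG_NC_PCD are set while PG_NC_PWT stays clear.  The PAT flag itself is
 * X86_PG_PTE_PAT for a 4KB PTE but X86_PG_PDE_PAT for a 2MB PDE, since bit 7
 * of a PDE is PG_PS.
 */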
2313 
2314 static int
2315 pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
2316 {
2317 	int mask;
2318 
2319 	switch (pmap->pm_type) {
2320 	case PT_X86:
2321 	case PT_RVI:
2322 		mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
2323 		break;
2324 	case PT_EPT:
2325 		mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
2326 		break;
2327 	default:
2328 		panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
2329 	}
2330 
2331 	return (mask);
2332 }
2333 
2334 static int
2335 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde)
2336 {
2337 	int pat_flag, pat_idx;
2338 
2339 	pat_idx = 0;
2340 	switch (pmap->pm_type) {
2341 	case PT_X86:
2342 	case PT_RVI:
2343 		/* The PAT bit is different for PTEs and PDEs. */
2344 		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2345 
2346 		if ((pte & pat_flag) != 0)
2347 			pat_idx |= 0x4;
2348 		if ((pte & PG_NC_PCD) != 0)
2349 			pat_idx |= 0x2;
2350 		if ((pte & PG_NC_PWT) != 0)
2351 			pat_idx |= 0x1;
2352 		break;
2353 	case PT_EPT:
2354 		if ((pte & EPT_PG_IGNORE_PAT) != 0)
2355 			panic("EPT PTE %#lx has no PAT memory type", pte);
2356 		pat_idx = (pte & EPT_PG_MEMORY_TYPE(0x7)) >> 3;
2357 		break;
2358 	}
2359 
2360 	/* Entries 4 and 7 duplicate 0 and 3 (WB and UC); see pmap_init_pat(). */
2361 	if (pat_idx == 4)
2362 		pat_idx = 0;
2363 	if (pat_idx == 7)
2364 		pat_idx = 3;
2365 
2366 	return (pat_idx);
2367 }
2368 
2369 bool
2370 pmap_ps_enabled(pmap_t pmap)
2371 {
2372 
2373 	return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
2374 }
2375 
2376 static void
2377 pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
2378 {
2379 
2380 	switch (pmap->pm_type) {
2381 	case PT_X86:
2382 		break;
2383 	case PT_RVI:
2384 	case PT_EPT:
2385 		/*
2386 		 * XXX
2387 		 * This is a little bogus since the generation number is
2388 		 * supposed to be bumped up when a region of the address
2389 		 * space is invalidated in the page tables.
2390 		 *
2391 		 * In this case the old PDE entry is still valid, yet we want
2392 		 * to make sure that any mappings using the old entry are
2393 		 * invalidated in the TLB.
2394 		 *
2395 		 * The reason this works as expected is because we rendezvous
2396 		 * "all" host cpus and force any vcpu context to exit as a
2397 		 * side-effect.
2398 		 */
2399 		atomic_add_acq_long(&pmap->pm_eptgen, 1);
2400 		break;
2401 	default:
2402 		panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
2403 	}
2404 	pde_store(pde, newpde);
2405 }
2406 
2407 /*
2408  * After changing the page size for the specified virtual address in the page
2409  * table, flush the corresponding entries from the processor's TLB.  Only the
2410  * calling processor's TLB is affected.
2411  *
2412  * The calling thread must be pinned to a processor.
2413  */
2414 static void
2415 pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
2416 {
2417 	pt_entry_t PG_G;
2418 
2419 	if (pmap_type_guest(pmap))
2420 		return;
2421 
2422 	KASSERT(pmap->pm_type == PT_X86,
2423 	    ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
2424 
2425 	PG_G = pmap_global_bit(pmap);
2426 
2427 	if ((newpde & PG_PS) == 0)
2428 		/* Demotion: flush a specific 2MB page mapping. */
2429 		invlpg(va);
2430 	else if ((newpde & PG_G) == 0)
2431 		/*
2432 		 * Promotion: flush every 4KB page mapping from the TLB
2433 		 * because there are too many to flush individually.
2434 		 */
2435 		invltlb();
2436 	else {
2437 		/*
2438 		 * Promotion: flush every 4KB page mapping from the TLB,
2439 		 * including any global (PG_G) mappings.
2440 		 */
2441 		invltlb_glob();
2442 	}
2443 }
2444 #ifdef SMP
2445 
2446 /*
2447  * For SMP, these functions have to use the IPI mechanism for coherence.
2448  *
2449  * N.B.: Before calling any of the following TLB invalidation functions,
2450  * the calling processor must ensure that all stores updating a non-
2451  * kernel page table are globally performed.  Otherwise, another
2452  * processor could cache an old, pre-update entry without being
2453  * invalidated.  This can happen one of two ways: (1) The pmap becomes
2454  * active on another processor after its pm_active field is checked by
2455  * one of the following functions but before a store updating the page
2456  * table is globally performed. (2) The pmap becomes active on another
2457  * processor before its pm_active field is checked but due to
2458  * speculative loads one of the following functions still reads the
2459  * pmap as inactive on the other processor.
2460  *
2461  * The kernel page table is exempt because its pm_active field is
2462  * immutable.  The kernel page table is always active on every
2463  * processor.
2464  */
2465 
2466 /*
2467  * Interrupt the cpus that are executing in the guest context.
2468  * This will force the vcpu to exit and the cached EPT mappings
2469  * will be invalidated by the host before the next vmresume.
2470  */
2471 static __inline void
2472 pmap_invalidate_ept(pmap_t pmap)
2473 {
2474 	int ipinum;
2475 
2476 	sched_pin();
2477 	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
2478 	    ("pmap_invalidate_ept: absurd pm_active"));
2479 
2480 	/*
2481 	 * The TLB mappings associated with a vcpu context are not
2482 	 * flushed each time a different vcpu is chosen to execute.
2483 	 *
2484 	 * This is in contrast with a process's vtop mappings that
2485 	 * are flushed from the TLB on each context switch.
2486 	 *
2487 	 * Therefore we need to do more than just a TLB shootdown on
2488 	 * the active cpus in 'pmap->pm_active'. To do this we keep
2489 	 * track of the number of invalidations performed on this pmap.
2490 	 *
2491 	 * Each vcpu keeps a cache of this counter and compares it
2492 	 * just before a vmresume. If the counter is out-of-date an
2493 	 * invept will be done to flush stale mappings from the TLB.
2494 	 */
2495 	atomic_add_acq_long(&pmap->pm_eptgen, 1);
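	/*
	 * The consumer side of this counter lives in the hypervisor; a
	 * hypothetical sketch of the check performed before vmresume:
	 *
	 *	if (vcpu->cached_eptgen != pmap->pm_eptgen) {
	 *		flush the guest-physical mappings (invept);
	 *		vcpu->cached_eptgen = pmap->pm_eptgen;
	 *	}
	 *
	 * The field names above are illustrative only, not the actual
	 * hypervisor data structures.
	 */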
2496 
2497 	/*
2498 	 * Force the vcpu to exit and trap back into the hypervisor.
2499 	 */
2500 	ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
2501 	ipi_selected(pmap->pm_active, ipinum);
2502 	sched_unpin();
2503 }
2504 
2505 static cpuset_t
2506 pmap_invalidate_cpu_mask(pmap_t pmap)
2507 {
2508 
2509 	return (pmap == kernel_pmap ? all_cpus : pmap->pm_active);
2510 }
2511 
2512 static inline void
2513 pmap_invalidate_page_pcid(pmap_t pmap, vm_offset_t va,
2514     const bool invpcid_works1)
2515 {
2516 	struct invpcid_descr d;
2517 	uint64_t kcr3, ucr3;
2518 	uint32_t pcid;
2519 	u_int cpuid, i;
2520 
2521 	cpuid = PCPU_GET(cpuid);
2522 	if (pmap == PCPU_GET(curpmap)) {
2523 		if (pmap->pm_ucr3 != PMAP_NO_CR3 &&
2524 		    /*
2525 		     * If we context-switched right after
2526 		     * PCPU_GET(ucr3_load_mask), we could read the
2527 		     * ~CR3_PCID_SAVE mask, which causes us to skip
2528 		     * the code below to invalidate user pages.  This
2529 		     * is handled in pmap_activate_sw_pcid_pti() by
2530 		     * clearing pm_gen if ucr3_load_mask is ~CR3_PCID_SAVE.
2531 		     */
2532 		    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
2533 			/*
2534 			 * Because pm_pcid is recalculated on a
2535 			 * context switch, we must disable switching.
2536 			 * Otherwise, we might use a stale value
2537 			 * below.
2538 			 */
2539 			critical_enter();
2540 			pcid = pmap->pm_pcids[cpuid].pm_pcid;
2541 			if (invpcid_works1) {
2542 				d.pcid = pcid | PMAP_PCID_USER_PT;
2543 				d.pad = 0;
2544 				d.addr = va;
2545 				invpcid(&d, INVPCID_ADDR);
2546 			} else {
2547 				kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
2548 				ucr3 = pmap->pm_ucr3 | pcid |
2549 				    PMAP_PCID_USER_PT | CR3_PCID_SAVE;
2550 				pmap_pti_pcid_invlpg(ucr3, kcr3, va);
2551 			}
2552 			critical_exit();
2553 		}
2554 	} else
2555 		pmap->pm_pcids[cpuid].pm_gen = 0;
2556 
2557 	CPU_FOREACH(i) {
2558 		if (cpuid != i)
2559 			pmap->pm_pcids[i].pm_gen = 0;
2560 	}
2561 
2562 	/*
2563 	 * The fence is between stores to pm_gen and the read of the
2564 	 * pm_active mask.  We need to ensure that it is impossible
2565 	 * for us to miss the bit update in pm_active and
2566 	 * simultaneously observe a non-zero pm_gen in
2567 	 * pmap_activate_sw(), otherwise TLB update is missed.
2568 	 * Without the fence, IA32 allows such an outcome.  Note that
2569 	 * pm_active is updated by a locked operation, which provides
2570 	 * the reciprocal fence.
2571 	 */
2572 	atomic_thread_fence_seq_cst();
2573 }
2574 
2575 static void
2576 pmap_invalidate_page_pcid_invpcid(pmap_t pmap, vm_offset_t va)
2577 {
2578 
2579 	pmap_invalidate_page_pcid(pmap, va, true);
2580 }
2581 
2582 static void
2583 pmap_invalidate_page_pcid_noinvpcid(pmap_t pmap, vm_offset_t va)
2584 {
2585 
2586 	pmap_invalidate_page_pcid(pmap, va, false);
2587 }
2588 
2589 static void
2590 pmap_invalidate_page_nopcid(pmap_t pmap, vm_offset_t va)
2591 {
2592 }
2593 
2594 DEFINE_IFUNC(static, void, pmap_invalidate_page_mode, (pmap_t, vm_offset_t))
2595 {
2596 
2597 	if (pmap_pcid_enabled)
2598 		return (invpcid_works ? pmap_invalidate_page_pcid_invpcid :
2599 		    pmap_invalidate_page_pcid_noinvpcid);
2600 	return (pmap_invalidate_page_nopcid);
2601 }
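/*
 * The DEFINE_IFUNC() resolver above runs once, early during boot, so the
 * PCID/INVPCID capability checks are evaluated a single time instead of on
 * every invalidation; the same pattern is used for the range and "all"
 * variants below.
 */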
2602 
2603 static void
2604 pmap_invalidate_page_curcpu_cb(pmap_t pmap, vm_offset_t va,
2605     vm_offset_t addr2 __unused)
2606 {
2607 
2608 	if (pmap == kernel_pmap) {
2609 		invlpg(va);
2610 	} else {
2611 		if (pmap == PCPU_GET(curpmap))
2612 			invlpg(va);
2613 		pmap_invalidate_page_mode(pmap, va);
2614 	}
2615 }
2616 
2617 void
2618 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
2619 {
2620 
2621 	if (pmap_type_guest(pmap)) {
2622 		pmap_invalidate_ept(pmap);
2623 		return;
2624 	}
2625 
2626 	KASSERT(pmap->pm_type == PT_X86,
2627 	    ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
2628 
2629 	smp_masked_invlpg(pmap_invalidate_cpu_mask(pmap), va, pmap,
2630 	    pmap_invalidate_page_curcpu_cb);
2631 }
2632 
2633 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
2634 #define	PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)
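/*
 * That is 4096 PTEs, i.e. 16MB worth of 4KB mappings; pmap_invalidate_range()
 * falls back to a full pmap_invalidate_all() for any range at least this
 * large, on the assumption that issuing thousands of INVLPGs costs more than
 * simply refilling the TLB.
 */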
2635 
2636 static void
2637 pmap_invalidate_range_pcid(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2638     const bool invpcid_works1)
2639 {
2640 	struct invpcid_descr d;
2641 	uint64_t kcr3, ucr3;
2642 	uint32_t pcid;
2643 	u_int cpuid, i;
2644 
2645 	cpuid = PCPU_GET(cpuid);
2646 	if (pmap == PCPU_GET(curpmap)) {
2647 		if (pmap->pm_ucr3 != PMAP_NO_CR3 &&
2648 		    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
2649 			critical_enter();
2650 			pcid = pmap->pm_pcids[cpuid].pm_pcid;
2651 			if (invpcid_works1) {
2652 				d.pcid = pcid | PMAP_PCID_USER_PT;
2653 				d.pad = 0;
2654 				d.addr = sva;
2655 				for (; d.addr < eva; d.addr += PAGE_SIZE)
2656 					invpcid(&d, INVPCID_ADDR);
2657 			} else {
2658 				kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
2659 				ucr3 = pmap->pm_ucr3 | pcid |
2660 				    PMAP_PCID_USER_PT | CR3_PCID_SAVE;
2661 				pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
2662 			}
2663 			critical_exit();
2664 		}
2665 	} else
2666 		pmap->pm_pcids[cpuid].pm_gen = 0;
2667 
2668 	CPU_FOREACH(i) {
2669 		if (cpuid != i)
2670 			pmap->pm_pcids[i].pm_gen = 0;
2671 	}
2672 	/* See the comment in pmap_invalidate_page_pcid(). */
2673 	atomic_thread_fence_seq_cst();
2674 }
2675 
2676 static void
2677 pmap_invalidate_range_pcid_invpcid(pmap_t pmap, vm_offset_t sva,
2678     vm_offset_t eva)
2679 {
2680 
2681 	pmap_invalidate_range_pcid(pmap, sva, eva, true);
2682 }
2683 
2684 static void
2685 pmap_invalidate_range_pcid_noinvpcid(pmap_t pmap, vm_offset_t sva,
2686     vm_offset_t eva)
2687 {
2688 
2689 	pmap_invalidate_range_pcid(pmap, sva, eva, false);
2690 }
2691 
2692 static void
2693 pmap_invalidate_range_nopcid(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2694 {
2695 }
2696 
2697 DEFINE_IFUNC(static, void, pmap_invalidate_range_mode, (pmap_t, vm_offset_t,
2698     vm_offset_t))
2699 {
2700 
2701 	if (pmap_pcid_enabled)
2702 		return (invpcid_works ? pmap_invalidate_range_pcid_invpcid :
2703 		    pmap_invalidate_range_pcid_noinvpcid);
2704 	return (pmap_invalidate_range_nopcid);
2705 }
2706 
2707 static void
2708 pmap_invalidate_range_curcpu_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2709 {
2710 	vm_offset_t addr;
2711 
2712 	if (pmap == kernel_pmap) {
2713 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
2714 			invlpg(addr);
2715 	} else {
2716 		if (pmap == PCPU_GET(curpmap)) {
2717 			for (addr = sva; addr < eva; addr += PAGE_SIZE)
2718 				invlpg(addr);
2719 		}
2720 		pmap_invalidate_range_mode(pmap, sva, eva);
2721 	}
2722 }
2723 
2724 void
2725 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2726 {
2727 
2728 	if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
2729 		pmap_invalidate_all(pmap);
2730 		return;
2731 	}
2732 
2733 	if (pmap_type_guest(pmap)) {
2734 		pmap_invalidate_ept(pmap);
2735 		return;
2736 	}
2737 
2738 	KASSERT(pmap->pm_type == PT_X86,
2739 	    ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
2740 
2741 	smp_masked_invlpg_range(pmap_invalidate_cpu_mask(pmap), sva, eva, pmap,
2742 	    pmap_invalidate_range_curcpu_cb);
2743 }
2744 
2745 static inline void
2746 pmap_invalidate_all_pcid(pmap_t pmap, bool invpcid_works1)
2747 {
2748 	struct invpcid_descr d;
2749 	uint64_t kcr3;
2750 	uint32_t pcid;
2751 	u_int cpuid, i;
2752 
2753 	if (pmap == kernel_pmap) {
2754 		if (invpcid_works1) {
2755 			bzero(&d, sizeof(d));
2756 			invpcid(&d, INVPCID_CTXGLOB);
2757 		} else {
2758 			invltlb_glob();
2759 		}
2760 	} else {
2761 		cpuid = PCPU_GET(cpuid);
2762 		if (pmap == PCPU_GET(curpmap)) {
2763 			critical_enter();
2764 			pcid = pmap->pm_pcids[cpuid].pm_pcid;
2765 			if (invpcid_works1) {
2766 				d.pcid = pcid;
2767 				d.pad = 0;
2768 				d.addr = 0;
2769 				invpcid(&d, INVPCID_CTX);
2770 			} else {
2771 				kcr3 = pmap->pm_cr3 | pcid;
2772 				load_cr3(kcr3);
2773 			}
2774 			if (pmap->pm_ucr3 != PMAP_NO_CR3)
2775 				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
2776 			critical_exit();
2777 		} else
2778 			pmap->pm_pcids[cpuid].pm_gen = 0;
2779 		CPU_FOREACH(i) {
2780 			if (cpuid != i)
2781 				pmap->pm_pcids[i].pm_gen = 0;
2782 		}
2783 	}
2784 	/* See the comment in pmap_invalidate_page_pcid(). */
2785 	atomic_thread_fence_seq_cst();
2786 }
2787 
2788 static void
2789 pmap_invalidate_all_pcid_invpcid(pmap_t pmap)
2790 {
2791 
2792 	pmap_invalidate_all_pcid(pmap, true);
2793 }
2794 
2795 static void
2796 pmap_invalidate_all_pcid_noinvpcid(pmap_t pmap)
2797 {
2798 
2799 	pmap_invalidate_all_pcid(pmap, false);
2800 }
2801 
2802 static void
2803 pmap_invalidate_all_nopcid(pmap_t pmap)
2804 {
2805 
2806 	if (pmap == kernel_pmap)
2807 		invltlb_glob();
2808 	else if (pmap == PCPU_GET(curpmap))
2809 		invltlb();
2810 }
2811 
2812 DEFINE_IFUNC(static, void, pmap_invalidate_all_mode, (pmap_t))
2813 {
2814 
2815 	if (pmap_pcid_enabled)
2816 		return (invpcid_works ? pmap_invalidate_all_pcid_invpcid :
2817 		    pmap_invalidate_all_pcid_noinvpcid);
2818 	return (pmap_invalidate_all_nopcid);
2819 }
2820 
2821 static void
2822 pmap_invalidate_all_curcpu_cb(pmap_t pmap, vm_offset_t addr1 __unused,
2823     vm_offset_t addr2 __unused)
2824 {
2825 
2826 	pmap_invalidate_all_mode(pmap);
2827 }
2828 
2829 void
2830 pmap_invalidate_all(pmap_t pmap)
2831 {
2832 
2833 	if (pmap_type_guest(pmap)) {
2834 		pmap_invalidate_ept(pmap);
2835 		return;
2836 	}
2837 
2838 	KASSERT(pmap->pm_type == PT_X86,
2839 	    ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
2840 
2841 	smp_masked_invltlb(pmap_invalidate_cpu_mask(pmap), pmap,
2842 	    pmap_invalidate_all_curcpu_cb);
2843 }
2844 
2845 static void
2846 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, vm_offset_t va __unused,
2847     vm_offset_t addr2 __unused)
2848 {
2849 
2850 	wbinvd();
2851 }
2852 
2853 void
2854 pmap_invalidate_cache(void)
2855 {
2856 
2857 	smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
2858 }
2859 
2860 struct pde_action {
2861 	cpuset_t invalidate;	/* processors that invalidate their TLB */
2862 	pmap_t pmap;
2863 	vm_offset_t va;
2864 	pd_entry_t *pde;
2865 	pd_entry_t newpde;
2866 	u_int store;		/* processor that updates the PDE */
2867 };
2868 
2869 static void
2870 pmap_update_pde_action(void *arg)
2871 {
2872 	struct pde_action *act = arg;
2873 
2874 	if (act->store == PCPU_GET(cpuid))
2875 		pmap_update_pde_store(act->pmap, act->pde, act->newpde);
2876 }
2877 
2878 static void
2879 pmap_update_pde_teardown(void *arg)
2880 {
2881 	struct pde_action *act = arg;
2882 
2883 	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
2884 		pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
2885 }
2886 
2887 /*
2888  * Change the page size for the specified virtual address in a way that
2889  * prevents any possibility of the TLB ever having two entries that map the
2890  * same virtual address using different page sizes.  This is the recommended
2891  * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
2892  * machine check exception for a TLB state that is improperly diagnosed as a
2893  * hardware error.
2894  */
2895 static void
2896 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
2897 {
2898 	struct pde_action act;
2899 	cpuset_t active, other_cpus;
2900 	u_int cpuid;
2901 
2902 	sched_pin();
2903 	cpuid = PCPU_GET(cpuid);
2904 	other_cpus = all_cpus;
2905 	CPU_CLR(cpuid, &other_cpus);
2906 	if (pmap == kernel_pmap || pmap_type_guest(pmap))
2907 		active = all_cpus;
2908 	else {
2909 		active = pmap->pm_active;
2910 	}
2911 	if (CPU_OVERLAP(&active, &other_cpus)) {
2912 		act.store = cpuid;
2913 		act.invalidate = active;
2914 		act.va = va;
2915 		act.pmap = pmap;
2916 		act.pde = pde;
2917 		act.newpde = newpde;
2918 		CPU_SET(cpuid, &active);
2919 		smp_rendezvous_cpus(active,
2920 		    smp_no_rendezvous_barrier, pmap_update_pde_action,
2921 		    pmap_update_pde_teardown, &act);
2922 	} else {
2923 		pmap_update_pde_store(pmap, pde, newpde);
2924 		if (CPU_ISSET(cpuid, &active))
2925 			pmap_update_pde_invalidate(pmap, va, newpde);
2926 	}
2927 	sched_unpin();
2928 }
2929 #else /* !SMP */
2930 /*
2931  * Normal, non-SMP, invalidation functions.
2932  */
2933 void
2934 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
2935 {
2936 	struct invpcid_descr d;
2937 	uint64_t kcr3, ucr3;
2938 	uint32_t pcid;
2939 
2940 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
2941 		pmap->pm_eptgen++;
2942 		return;
2943 	}
2944 	KASSERT(pmap->pm_type == PT_X86,
2945 	    ("pmap_invalidate_page: unknown type %d", pmap->pm_type));
2946 
2947 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
2948 		invlpg(va);
2949 		if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
2950 		    pmap->pm_ucr3 != PMAP_NO_CR3) {
2951 			critical_enter();
2952 			pcid = pmap->pm_pcids[0].pm_pcid;
2953 			if (invpcid_works) {
2954 				d.pcid = pcid | PMAP_PCID_USER_PT;
2955 				d.pad = 0;
2956 				d.addr = va;
2957 				invpcid(&d, INVPCID_ADDR);
2958 			} else {
2959 				kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
2960 				ucr3 = pmap->pm_ucr3 | pcid |
2961 				    PMAP_PCID_USER_PT | CR3_PCID_SAVE;
2962 				pmap_pti_pcid_invlpg(ucr3, kcr3, va);
2963 			}
2964 			critical_exit();
2965 		}
2966 	} else if (pmap_pcid_enabled)
2967 		pmap->pm_pcids[0].pm_gen = 0;
2968 }
2969 
2970 void
2971 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2972 {
2973 	struct invpcid_descr d;
2974 	vm_offset_t addr;
2975 	uint64_t kcr3, ucr3;
2976 
2977 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
2978 		pmap->pm_eptgen++;
2979 		return;
2980 	}
2981 	KASSERT(pmap->pm_type == PT_X86,
2982 	    ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
2983 
2984 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
2985 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
2986 			invlpg(addr);
2987 		if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
2988 		    pmap->pm_ucr3 != PMAP_NO_CR3) {
2989 			critical_enter();
2990 			if (invpcid_works) {
2991 				d.pcid = pmap->pm_pcids[0].pm_pcid |
2992 				    PMAP_PCID_USER_PT;
2993 				d.pad = 0;
2994 				d.addr = sva;
2995 				for (; d.addr < eva; d.addr += PAGE_SIZE)
2996 					invpcid(&d, INVPCID_ADDR);
2997 			} else {
2998 				kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].
2999 				    pm_pcid | CR3_PCID_SAVE;
3000 				ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[0].
3001 				    pm_pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3002 				pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3003 			}
3004 			critical_exit();
3005 		}
3006 	} else if (pmap_pcid_enabled) {
3007 		pmap->pm_pcids[0].pm_gen = 0;
3008 	}
3009 }
3010 
3011 void
3012 pmap_invalidate_all(pmap_t pmap)
3013 {
3014 	struct invpcid_descr d;
3015 	uint64_t kcr3, ucr3;
3016 
3017 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3018 		pmap->pm_eptgen++;
3019 		return;
3020 	}
3021 	KASSERT(pmap->pm_type == PT_X86,
3022 	    ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
3023 
3024 	if (pmap == kernel_pmap) {
3025 		if (pmap_pcid_enabled && invpcid_works) {
3026 			bzero(&d, sizeof(d));
3027 			invpcid(&d, INVPCID_CTXGLOB);
3028 		} else {
3029 			invltlb_glob();
3030 		}
3031 	} else if (pmap == PCPU_GET(curpmap)) {
3032 		if (pmap_pcid_enabled) {
3033 			critical_enter();
3034 			if (invpcid_works) {
3035 				d.pcid = pmap->pm_pcids[0].pm_pcid;
3036 				d.pad = 0;
3037 				d.addr = 0;
3038 				invpcid(&d, INVPCID_CTX);
3039 				if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3040 					d.pcid |= PMAP_PCID_USER_PT;
3041 					invpcid(&d, INVPCID_CTX);
3042 				}
3043 			} else {
3044 				kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].pm_pcid;
3045 				if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3046 					ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[
3047 					    0].pm_pcid | PMAP_PCID_USER_PT;
3048 					pmap_pti_pcid_invalidate(ucr3, kcr3);
3049 				} else
3050 					load_cr3(kcr3);
3051 			}
3052 			critical_exit();
3053 		} else {
3054 			invltlb();
3055 		}
3056 	} else if (pmap_pcid_enabled) {
3057 		pmap->pm_pcids[0].pm_gen = 0;
3058 	}
3059 }
3060 
3061 PMAP_INLINE void
3062 pmap_invalidate_cache(void)
3063 {
3064 
3065 	wbinvd();
3066 }
3067 
3068 static void
3069 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3070 {
3071 
3072 	pmap_update_pde_store(pmap, pde, newpde);
3073 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
3074 		pmap_update_pde_invalidate(pmap, va, newpde);
3075 	else
3076 		pmap->pm_pcids[0].pm_gen = 0;
3077 }
3078 #endif /* !SMP */
3079 
3080 static void
3081 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
3082 {
3083 
3084 	/*
3085 	 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
3086 	 * by a promotion that did not invalidate the 512 4KB page mappings
3087 	 * that might exist in the TLB.  Consequently, at this point, the TLB
3088 	 * may hold both 4KB and 2MB page mappings for the address range [va,
3089 	 * va + NBPDR).  Therefore, the entire range must be invalidated here.
3090 	 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
3091 	 * 4KB page mappings for the address range [va, va + NBPDR), and so a
3092 	 * single INVLPG suffices to invalidate the 2MB page mapping from the
3093 	 * TLB.
3094 	 */
3095 	if ((pde & PG_PROMOTED) != 0)
3096 		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
3097 	else
3098 		pmap_invalidate_page(pmap, va);
3099 }
3100 
3101 DEFINE_IFUNC(, void, pmap_invalidate_cache_range,
3102     (vm_offset_t sva, vm_offset_t eva))
3103 {
3104 
3105 	if ((cpu_feature & CPUID_SS) != 0)
3106 		return (pmap_invalidate_cache_range_selfsnoop);
3107 	if ((cpu_feature & CPUID_CLFSH) != 0)
3108 		return (pmap_force_invalidate_cache_range);
3109 	return (pmap_invalidate_cache_range_all);
3110 }
3111 
3112 #define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
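/*
 * 2MB, i.e. 512 4KB pages: pmap_invalidate_cache_pages() below flushes
 * line-by-line with CLFLUSH/CLFLUSHOPT for smaller sets and falls back to a
 * full WBINVD once at least this much memory is involved, where touching
 * every cache line would likely cost more than writing back the whole cache.
 */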
3113 
3114 static void
3115 pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
3116 {
3117 
3118 	KASSERT((sva & PAGE_MASK) == 0,
3119 	    ("pmap_invalidate_cache_range: sva not page-aligned"));
3120 	KASSERT((eva & PAGE_MASK) == 0,
3121 	    ("pmap_invalidate_cache_range: eva not page-aligned"));
3122 }
3123 
3124 static void
3125 pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
3126 {
3127 
3128 	pmap_invalidate_cache_range_check_align(sva, eva);
3129 }
3130 
3131 void
3132 pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
3133 {
3134 
3135 	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
3136 
3137 	/*
3138 	 * XXX: Some CPUs fault, hang, or trash the local APIC
3139 	 * registers if we use CLFLUSH on the local APIC range.  The
3140 	 * local APIC is always uncached, so we don't need to flush
3141 	 * for that range anyway.
3142 	 */
3143 	if (pmap_kextract(sva) == lapic_paddr)
3144 		return;
3145 
3146 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
3147 		/*
3148 		 * Do per-cache line flush.  Use a locked
3149 		 * instruction to ensure that previous stores are
3150 		 * included in the write-back.  The processor
3151 		 * propagates flush to other processors in the cache
3152 		 * coherence domain.
3153 		 */
3154 		atomic_thread_fence_seq_cst();
3155 		for (; sva < eva; sva += cpu_clflush_line_size)
3156 			clflushopt(sva);
3157 		atomic_thread_fence_seq_cst();
3158 	} else {
3159 		/*
3160 		 * Writes are ordered by CLFLUSH on Intel CPUs.
3161 		 */
3162 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
3163 			mfence();
3164 		for (; sva < eva; sva += cpu_clflush_line_size)
3165 			clflush(sva);
3166 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
3167 			mfence();
3168 	}
3169 }
3170 
3171 static void
3172 pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
3173 {
3174 
3175 	pmap_invalidate_cache_range_check_align(sva, eva);
3176 	pmap_invalidate_cache();
3177 }
3178 
3179 /*
3180  * Remove the specified set of pages from the data and instruction caches.
3181  *
3182  * In contrast to pmap_invalidate_cache_range(), this function does not
3183  * rely on the CPU's self-snoop feature, because it is intended for use
3184  * when moving pages into a different cache domain.
3185  */
3186 void
3187 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
3188 {
3189 	vm_offset_t daddr, eva;
3190 	int i;
3191 	bool useclflushopt;
3192 
3193 	useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
3194 	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
3195 	    ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
3196 		pmap_invalidate_cache();
3197 	else {
3198 		if (useclflushopt)
3199 			atomic_thread_fence_seq_cst();
3200 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3201 			mfence();
3202 		for (i = 0; i < count; i++) {
3203 			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
3204 			eva = daddr + PAGE_SIZE;
3205 			for (; daddr < eva; daddr += cpu_clflush_line_size) {
3206 				if (useclflushopt)
3207 					clflushopt(daddr);
3208 				else
3209 					clflush(daddr);
3210 			}
3211 		}
3212 		if (useclflushopt)
3213 			atomic_thread_fence_seq_cst();
3214 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3215 			mfence();
3216 	}
3217 }
3218 
3219 void
3220 pmap_flush_cache_range(vm_offset_t sva, vm_offset_t eva)
3221 {
3222 
3223 	pmap_invalidate_cache_range_check_align(sva, eva);
3224 
3225 	if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) == 0) {
3226 		pmap_force_invalidate_cache_range(sva, eva);
3227 		return;
3228 	}
3229 
3230 	/* See comment in pmap_force_invalidate_cache_range(). */
3231 	if (pmap_kextract(sva) == lapic_paddr)
3232 		return;
3233 
3234 	atomic_thread_fence_seq_cst();
3235 	for (; sva < eva; sva += cpu_clflush_line_size)
3236 		clwb(sva);
3237 	atomic_thread_fence_seq_cst();
3238 }
3239 
3240 void
3241 pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
3242 {
3243 	pt_entry_t *pte;
3244 	vm_offset_t vaddr;
3245 	int error, pte_bits;
3246 
3247 	KASSERT((spa & PAGE_MASK) == 0,
3248 	    ("pmap_flush_cache_phys_range: spa not page-aligned"));
3249 	KASSERT((epa & PAGE_MASK) == 0,
3250 	    ("pmap_flush_cache_phys_range: epa not page-aligned"));
3251 
3252 	if (spa < dmaplimit) {
3253 		pmap_flush_cache_range(PHYS_TO_DMAP(spa), PHYS_TO_DMAP(MIN(
3254 		    dmaplimit, epa)));
3255 		if (dmaplimit >= epa)
3256 			return;
3257 		spa = dmaplimit;
3258 	}
3259 
3260 	pte_bits = pmap_cache_bits(kernel_pmap, mattr, 0) | X86_PG_RW |
3261 	    X86_PG_V;
3262 	error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3263 	    &vaddr);
3264 	KASSERT(error == 0, ("vmem_alloc failed: %d", error));
3265 	pte = vtopte(vaddr);
3266 	for (; spa < epa; spa += PAGE_SIZE) {
3267 		sched_pin();
3268 		pte_store(pte, spa | pte_bits);
3269 		invlpg(vaddr);
3270 		/* XXXKIB the atomics inside pmap_flush_cache_range are excessive */
3271 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
3272 		sched_unpin();
3273 	}
3274 	vmem_free(kernel_arena, vaddr, PAGE_SIZE);
3275 }
3276 
3277 /*
3278  *	Routine:	pmap_extract
3279  *	Function:
3280  *		Extract the physical page address associated
3281  *		with the given map/virtual_address pair.
3282  */
3283 vm_paddr_t
3284 pmap_extract(pmap_t pmap, vm_offset_t va)
3285 {
3286 	pdp_entry_t *pdpe;
3287 	pd_entry_t *pde;
3288 	pt_entry_t *pte, PG_V;
3289 	vm_paddr_t pa;
3290 
3291 	pa = 0;
3292 	PG_V = pmap_valid_bit(pmap);
3293 	PMAP_LOCK(pmap);
3294 	pdpe = pmap_pdpe(pmap, va);
3295 	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
3296 		if ((*pdpe & PG_PS) != 0)
3297 			pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
3298 		else {
3299 			pde = pmap_pdpe_to_pde(pdpe, va);
3300 			if ((*pde & PG_V) != 0) {
3301 				if ((*pde & PG_PS) != 0) {
3302 					pa = (*pde & PG_PS_FRAME) |
3303 					    (va & PDRMASK);
3304 				} else {
3305 					pte = pmap_pde_to_pte(pde, va);
3306 					pa = (*pte & PG_FRAME) |
3307 					    (va & PAGE_MASK);
3308 				}
3309 			}
3310 		}
3311 	}
3312 	PMAP_UNLOCK(pmap);
3313 	return (pa);
3314 }
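/*
 * For instance, for a 2MB superpage the address returned above is
 * (*pde & PG_PS_FRAME) | (va & PDRMASK): the PDE provides the 2MB-aligned
 * frame and the low 21 bits of the virtual address provide the offset within
 * it.  The 1GB case is analogous, using PG_PS_FRAME and PDPMASK (low 30 bits).
 */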
3315 
3316 /*
3317  *	Routine:	pmap_extract_and_hold
3318  *	Function:
3319  *		Atomically extract and hold the physical page
3320  *		with the given pmap and virtual address pair
3321  *		if that mapping permits the given protection.
3322  */
3323 vm_page_t
3324 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3325 {
3326 	pd_entry_t pde, *pdep;
3327 	pt_entry_t pte, PG_RW, PG_V;
3328 	vm_page_t m;
3329 
3330 	m = NULL;
3331 	PG_RW = pmap_rw_bit(pmap);
3332 	PG_V = pmap_valid_bit(pmap);
3333 
3334 	PMAP_LOCK(pmap);
3335 	pdep = pmap_pde(pmap, va);
3336 	if (pdep != NULL && (pde = *pdep)) {
3337 		if (pde & PG_PS) {
3338 			if ((pde & PG_RW) != 0 || (prot & VM_PROT_WRITE) == 0)
3339 				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
3340 				    (va & PDRMASK));
3341 		} else {
3342 			pte = *pmap_pde_to_pte(pdep, va);
3343 			if ((pte & PG_V) != 0 &&
3344 			    ((pte & PG_RW) != 0 || (prot & VM_PROT_WRITE) == 0))
3345 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3346 		}
3347 		if (m != NULL && !vm_page_wire_mapped(m))
3348 			m = NULL;
3349 	}
3350 	PMAP_UNLOCK(pmap);
3351 	return (m);
3352 }
3353 
3354 vm_paddr_t
3355 pmap_kextract(vm_offset_t va)
3356 {
3357 	pd_entry_t pde;
3358 	vm_paddr_t pa;
3359 
3360 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
3361 		pa = DMAP_TO_PHYS(va);
3362 	} else if (PMAP_ADDRESS_IN_LARGEMAP(va)) {
3363 		pa = pmap_large_map_kextract(va);
3364 	} else {
3365 		pde = *vtopde(va);
3366 		if (pde & PG_PS) {
3367 			pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
3368 		} else {
3369 			/*
3370 			 * Beware of a concurrent promotion that changes the
3371 			 * PDE at this point!  For example, vtopte() must not
3372 			 * be used to access the PTE because it would use the
3373 			 * new PDE.  It is, however, safe to use the old PDE
3374 			 * because the page table page is preserved by the
3375 			 * promotion.
3376 			 */
3377 			pa = *pmap_pde_to_pte(&pde, va);
3378 			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3379 		}
3380 	}
3381 	return (pa);
3382 }
3383 
3384 /***************************************************
3385  * Low level mapping routines.....
3386  ***************************************************/
3387 
3388 /*
3389  * Add a wired page to the kva.
3390  * Note: not SMP coherent.
3391  */
3392 PMAP_INLINE void
3393 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
3394 {
3395 	pt_entry_t *pte;
3396 
3397 	pte = vtopte(va);
3398 	pte_store(pte, pa | X86_PG_RW | X86_PG_V | pg_g | pg_nx);
3399 }
3400 
3401 static __inline void
3402 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
3403 {
3404 	pt_entry_t *pte;
3405 	int cache_bits;
3406 
3407 	pte = vtopte(va);
3408 	cache_bits = pmap_cache_bits(kernel_pmap, mode, 0);
3409 	pte_store(pte, pa | X86_PG_RW | X86_PG_V | pg_g | pg_nx | cache_bits);
3410 }
3411 
3412 /*
3413  * Remove a page from the kernel pagetables.
3414  * Note: not SMP coherent.
3415  */
3416 PMAP_INLINE void
3417 pmap_kremove(vm_offset_t va)
3418 {
3419 	pt_entry_t *pte;
3420 
3421 	pte = vtopte(va);
3422 	pte_clear(pte);
3423 }
3424 
3425 /*
3426  *	Used to map a range of physical addresses into kernel
3427  *	virtual address space.
3428  *
3429  *	The value passed in '*virt' is a suggested virtual address for
3430  *	the mapping. Architectures which can support a direct-mapped
3431  *	physical to virtual region can return the appropriate address
3432  *	within that region, leaving '*virt' unchanged. Other
3433  *	architectures should map the pages starting at '*virt' and
3434  *	update '*virt' with the first usable address after the mapped
3435  *	region.
3436  */
3437 vm_offset_t
3438 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
3439 {
3440 	return (PHYS_TO_DMAP(start));
3441 }
3442 
3443 
3444 /*
3445  * Add a list of wired pages to the kva.  This routine is
3446  * only used for temporary
3447  * kernel mappings that do not need to have
3448  * page modification or references recorded.
3449  * Note that old mappings are simply written
3450  * over.  The page *must* be wired.
3451  * Note: SMP coherent.  Uses a ranged shootdown IPI.
3452  */
3453 void
3454 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
3455 {
3456 	pt_entry_t *endpte, oldpte, pa, *pte;
3457 	vm_page_t m;
3458 	int cache_bits;
3459 
3460 	oldpte = 0;
3461 	pte = vtopte(sva);
3462 	endpte = pte + count;
3463 	while (pte < endpte) {
3464 		m = *ma++;
3465 		cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
3466 		pa = VM_PAGE_TO_PHYS(m) | cache_bits;
3467 		if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
3468 			oldpte |= *pte;
3469 			pte_store(pte, pa | pg_g | pg_nx | X86_PG_RW | X86_PG_V);
3470 		}
3471 		pte++;
3472 	}
3473 	if (__predict_false((oldpte & X86_PG_V) != 0))
3474 		pmap_invalidate_range(kernel_pmap, sva, sva + count *
3475 		    PAGE_SIZE);
3476 }
3477 
3478 /*
3479  * This routine tears out page mappings from the
3480  * kernel -- it is meant only for temporary mappings.
3481  * Note: SMP coherent.  Uses a ranged shootdown IPI.
3482  */
3483 void
3484 pmap_qremove(vm_offset_t sva, int count)
3485 {
3486 	vm_offset_t va;
3487 
3488 	va = sva;
3489 	while (count-- > 0) {
3490 		KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
3491 		pmap_kremove(va);
3492 		va += PAGE_SIZE;
3493 	}
3494 	pmap_invalidate_range(kernel_pmap, sva, va);
3495 }
3496 
3497 /***************************************************
3498  * Page table page management routines.....
3499  ***************************************************/
3500 /*
3501  * Schedule the specified unused page table page to be freed.  Specifically,
3502  * add the page to the specified list of pages that will be released to the
3503  * physical memory manager after the TLB has been updated.
3504  */
3505 static __inline void
3506 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
3507     boolean_t set_PG_ZERO)
3508 {
3509 
3510 	if (set_PG_ZERO)
3511 		m->flags |= PG_ZERO;
3512 	else
3513 		m->flags &= ~PG_ZERO;
3514 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
3515 }
3516 
3517 /*
3518  * Inserts the specified page table page into the specified pmap's collection
3519  * of idle page table pages.  Each of a pmap's page table pages is responsible
3520  * for mapping a distinct range of virtual addresses.  The pmap's collection is
3521  * ordered by this virtual address range.
3522  *
3523  * If "promoted" is false, then the page table page "mpte" must be zero filled.
3524  */
3525 static __inline int
3526 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
3527 {
3528 
3529 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3530 	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
3531 	return (vm_radix_insert(&pmap->pm_root, mpte));
3532 }
3533 
3534 /*
3535  * Removes the page table page mapping the specified virtual address from the
3536  * specified pmap's collection of idle page table pages, and returns it.
3537  * Otherwise, returns NULL if there is no page table page corresponding to the
3538  * specified virtual address.
3539  */
3540 static __inline vm_page_t
3541 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
3542 {
3543 
3544 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3545 	return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
3546 }
3547 
3548 /*
3549  * Decrements a page table page's reference count, which is used to record the
3550  * number of valid page table entries within the page.  If the reference count
3551  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
3552  * page table page was unmapped and FALSE otherwise.
3553  */
3554 static inline boolean_t
3555 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
3556 {
3557 
3558 	--m->ref_count;
3559 	if (m->ref_count == 0) {
3560 		_pmap_unwire_ptp(pmap, va, m, free);
3561 		return (TRUE);
3562 	} else
3563 		return (FALSE);
3564 }
3565 
3566 static void
3567 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
3568 {
3569 
3570 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3571 	/*
3572 	 * unmap the page table page
3573 	 */
3574 	if (m->pindex >= NUPDE + NUPDPE) {
3575 		/* PDP page */
3576 		pml4_entry_t *pml4;
3577 		pml4 = pmap_pml4e(pmap, va);
3578 		*pml4 = 0;
3579 		if (pmap->pm_pml4u != NULL && va <= VM_MAXUSER_ADDRESS) {
3580 			pml4 = &pmap->pm_pml4u[pmap_pml4e_index(va)];
3581 			*pml4 = 0;
3582 		}
3583 	} else if (m->pindex >= NUPDE) {
3584 		/* PD page */
3585 		pdp_entry_t *pdp;
3586 		pdp = pmap_pdpe(pmap, va);
3587 		*pdp = 0;
3588 	} else {
3589 		/* PTE page */
3590 		pd_entry_t *pd;
3591 		pd = pmap_pde(pmap, va);
3592 		*pd = 0;
3593 	}
3594 	pmap_resident_count_dec(pmap, 1);
3595 	if (m->pindex < NUPDE) {
3596 		/* We just released a PT, unhold the matching PD */
3597 		vm_page_t pdpg;
3598 
3599 		pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
3600 		pmap_unwire_ptp(pmap, va, pdpg, free);
3601 	} else if (m->pindex < NUPDE + NUPDPE) {
3602 		/* We just released a PD, unhold the matching PDP */
3603 		vm_page_t pdppg;
3604 
3605 		pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
3606 		pmap_unwire_ptp(pmap, va, pdppg, free);
3607 	}
3608 
3609 	/*
3610 	 * Put page on a list so that it is released after
3611 	 * *ALL* TLB shootdown is done
3612 	 */
3613 	pmap_add_delayed_free_list(m, free, TRUE);
3614 }
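
/*
 * Note that freeing a page table page above may cascade upward: dropping
 * the last reference on a PT page unwires its PD page, which in turn may
 * unwire its PDP page, so the recursion through pmap_unwire_ptp() is at
 * most two levels deep.
 */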
3615 
3616 /*
3617  * After removing a page table entry, this routine is used to
3618  * conditionally free the page, and manage the reference count.
3619  */
3620 static int
3621 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
3622     struct spglist *free)
3623 {
3624 	vm_page_t mpte;
3625 
3626 	if (va >= VM_MAXUSER_ADDRESS)
3627 		return (0);
3628 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
3629 	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
3630 	return (pmap_unwire_ptp(pmap, va, mpte, free));
3631 }
3632 
3633 /*
3634  * Release a page table page reference after a failed attempt to create a
3635  * mapping.
3636  */
3637 static void
3638 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
3639 {
3640 	struct spglist free;
3641 
3642 	SLIST_INIT(&free);
3643 	if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3644 		/*
3645 		 * Although "va" was never mapped, paging-structure caches
3646 		 * could nonetheless have entries that refer to the freed
3647 		 * page table pages.  Invalidate those entries.
3648 		 */
3649 		pmap_invalidate_page(pmap, va);
3650 		vm_page_free_pages_toq(&free, true);
3651 	}
3652 }
3653 
3654 void
3655 pmap_pinit0(pmap_t pmap)
3656 {
3657 	struct proc *p;
3658 	struct thread *td;
3659 	int i;
3660 
3661 	PMAP_LOCK_INIT(pmap);
3662 	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
3663 	pmap->pm_pml4u = NULL;
3664 	pmap->pm_cr3 = KPML4phys;
3665 	/* hack to keep pmap_pti_pcid_invalidate() alive */
3666 	pmap->pm_ucr3 = PMAP_NO_CR3;
3667 	pmap->pm_root.rt_root = 0;
3668 	CPU_ZERO(&pmap->pm_active);
3669 	TAILQ_INIT(&pmap->pm_pvchunk);
3670 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
3671 	pmap->pm_flags = pmap_flags;
3672 	CPU_FOREACH(i) {
3673 		pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN + 1;
3674 		pmap->pm_pcids[i].pm_gen = 1;
3675 	}
3676 	pmap_activate_boot(pmap);
3677 	td = curthread;
3678 	if (pti) {
3679 		p = td->td_proc;
3680 		PROC_LOCK(p);
3681 		p->p_md.md_flags |= P_MD_KPTI;
3682 		PROC_UNLOCK(p);
3683 	}
3684 	pmap_thread_init_invl_gen(td);
3685 
3686 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
3687 		pmap_pkru_ranges_zone = uma_zcreate("pkru ranges",
3688 		    sizeof(struct pmap_pkru_range), NULL, NULL, NULL, NULL,
3689 		    UMA_ALIGN_PTR, 0);
3690 	}
3691 }
3692 
3693 void
3694 pmap_pinit_pml4(vm_page_t pml4pg)
3695 {
3696 	pml4_entry_t *pm_pml4;
3697 	int i;
3698 
3699 	pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
3700 
3701 	/* Wire in kernel global address entries. */
3702 	for (i = 0; i < NKPML4E; i++) {
3703 		pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
3704 		    X86_PG_V;
3705 	}
3706 	for (i = 0; i < ndmpdpphys; i++) {
3707 		pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
3708 		    X86_PG_V;
3709 	}
3710 
3711 	/* install self-referential address mapping entry(s) */
3712 	pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
3713 	    X86_PG_A | X86_PG_M;
3714 
3715 	/* install large map entries if configured */
3716 	for (i = 0; i < lm_ents; i++)
3717 		pm_pml4[LMSPML4I + i] = kernel_pmap->pm_pml4[LMSPML4I + i];
3718 }
3719 
3720 static void
3721 pmap_pinit_pml4_pti(vm_page_t pml4pg)
3722 {
3723 	pml4_entry_t *pm_pml4;
3724 	int i;
3725 
3726 	pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
3727 	for (i = 0; i < NPML4EPG; i++)
3728 		pm_pml4[i] = pti_pml4[i];
3729 }
3730 
3731 /*
3732  * Initialize a preallocated and zeroed pmap structure,
3733  * such as one in a vmspace structure.
3734  */
3735 int
3736 pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
3737 {
3738 	vm_page_t pml4pg, pml4pgu;
3739 	vm_paddr_t pml4phys;
3740 	int i;
3741 
3742 	/*
3743 	 * allocate the page directory page
3744 	 */
3745 	pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
3746 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
3747 
3748 	pml4phys = VM_PAGE_TO_PHYS(pml4pg);
3749 	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys);
3750 	CPU_FOREACH(i) {
3751 		pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
3752 		pmap->pm_pcids[i].pm_gen = 0;
3753 	}
3754 	pmap->pm_cr3 = PMAP_NO_CR3;	/* initialize to an invalid value */
3755 	pmap->pm_ucr3 = PMAP_NO_CR3;
3756 	pmap->pm_pml4u = NULL;
3757 
3758 	pmap->pm_type = pm_type;
3759 	if ((pml4pg->flags & PG_ZERO) == 0)
3760 		pagezero(pmap->pm_pml4);
3761 
3762 	/*
3763 	 * Do not install the host kernel mappings in the nested page
3764 	 * tables. These mappings are meaningless in the guest physical
3765 	 * address space.
3766 	 * Install minimal kernel mappings in PTI case.
3767 	 */
3768 	if (pm_type == PT_X86) {
3769 		pmap->pm_cr3 = pml4phys;
3770 		pmap_pinit_pml4(pml4pg);
3771 		if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
3772 			pml4pgu = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
3773 			    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
3774 			pmap->pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(
3775 			    VM_PAGE_TO_PHYS(pml4pgu));
3776 			pmap_pinit_pml4_pti(pml4pgu);
3777 			pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pml4pgu);
3778 		}
3779 		if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
3780 			rangeset_init(&pmap->pm_pkru, pkru_dup_range,
3781 			    pkru_free_range, pmap, M_NOWAIT);
3782 		}
3783 	}
3784 
3785 	pmap->pm_root.rt_root = 0;
3786 	CPU_ZERO(&pmap->pm_active);
3787 	TAILQ_INIT(&pmap->pm_pvchunk);
3788 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
3789 	pmap->pm_flags = flags;
3790 	pmap->pm_eptgen = 0;
3791 
3792 	return (1);
3793 }
3794 
3795 int
3796 pmap_pinit(pmap_t pmap)
3797 {
3798 
3799 	return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
3800 }
3801 
3802 /*
3803  * This routine is called if the desired page table page does not exist.
3804  *
3805  * If page table page allocation fails, this routine may sleep before
3806  * returning NULL.  It sleeps only if a lock pointer was given.
3807  *
3808  * Note: If a page allocation fails at page table level two or three,
3809  * one or two pages may be held during the wait, only to be released
3810  * afterwards.  This conservative approach is easily argued to avoid
3811  * race conditions.
3812  *
3813  * The ptepindexes, i.e. page indices, of the page table pages encountered
3814  * while translating virtual address va are defined as follows:
3815  * - for the page table page (last level),
3816  *      ptepindex = pmap_pde_pindex(va) = va >> PDRSHIFT,
3817  *   in other words, it is just the index of the PDE that maps the page
3818  *   table page.
3819  * - for the page directory page,
3820  *      ptepindex = NUPDE (number of userland PD entries) +
3821  *          (pmap_pde_pindex(va) >> NPDEPGSHIFT)
3822  *   i.e. index of PDPE is put after the last index of PDE,
3823  * - for the page directory pointer page,
3824  *      ptepindex = NUPDE + NUPDPE + (pmap_pde_pindex(va) >> (NPDEPGSHIFT +
3825  *          NPML4EPGSHIFT)),
3826  *   i.e. index of pml4e is put after the last index of PDPE.
3827  *
3828  * Define an order on the paging entries, where all entries of the
3829  * same height are put together, then heights are put from deepest to
3830  * root.  Then ptepindex is the sequential number of the
3831  * corresponding paging entry in this order.
3832  *
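 * For example, with this ordering the page table page mapping va = 0
 * has ptepindex 0, the page directory page above it has ptepindex
 * NUPDE, and the page directory pointer page above that has ptepindex
 * NUPDE + NUPDPE.
 *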
3833  * The root page at PML4 does not participate in this indexing scheme, since
3834  * it is statically allocated by pmap_pinit() and not by _pmap_allocpte().
3835  */
3836 static vm_page_t
3837 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
3838 {
3839 	vm_page_t m, pdppg, pdpg;
3840 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
3841 
3842 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3843 
3844 	PG_A = pmap_accessed_bit(pmap);
3845 	PG_M = pmap_modified_bit(pmap);
3846 	PG_V = pmap_valid_bit(pmap);
3847 	PG_RW = pmap_rw_bit(pmap);
3848 
3849 	/*
3850 	 * Allocate a page table page.
3851 	 */
3852 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
3853 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
3854 		if (lockp != NULL) {
3855 			RELEASE_PV_LIST_LOCK(lockp);
3856 			PMAP_UNLOCK(pmap);
3857 			PMAP_ASSERT_NOT_IN_DI();
3858 			vm_wait(NULL);
3859 			PMAP_LOCK(pmap);
3860 		}
3861 
3862 		/*
3863 		 * Indicate the need to retry.  While waiting, the page table
3864 		 * page may have been allocated.
3865 		 */
3866 		return (NULL);
3867 	}
3868 	if ((m->flags & PG_ZERO) == 0)
3869 		pmap_zero_page(m);
3870 
3871 	/*
3872 	 * Map the pagetable page into the process address space, if
3873 	 * it isn't already there.
3874 	 */
3875 
3876 	if (ptepindex >= (NUPDE + NUPDPE)) {
3877 		pml4_entry_t *pml4, *pml4u;
3878 		vm_pindex_t pml4index;
3879 
3880 		/* Wire up a new PDPE page */
3881 		pml4index = ptepindex - (NUPDE + NUPDPE);
3882 		pml4 = &pmap->pm_pml4[pml4index];
3883 		*pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
3884 		if (pmap->pm_pml4u != NULL && pml4index < NUPML4E) {
3885 			/*
3886 			 * PTI: Make all user-space mappings in the
3887 			 * kernel-mode page table no-execute so that
3888 			 * we detect any programming errors that leave
3889 			 * the kernel-mode page table active on return
3890 			 * to user space.
3891 			 */
3892 			if (pmap->pm_ucr3 != PMAP_NO_CR3)
3893 				*pml4 |= pg_nx;
3894 
3895 			pml4u = &pmap->pm_pml4u[pml4index];
3896 			*pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
3897 			    PG_A | PG_M;
3898 		}
3899 
3900 	} else if (ptepindex >= NUPDE) {
3901 		vm_pindex_t pml4index;
3902 		vm_pindex_t pdpindex;
3903 		pml4_entry_t *pml4;
3904 		pdp_entry_t *pdp;
3905 
3906 		/* Wire up a new PDE page */
3907 		pdpindex = ptepindex - NUPDE;
3908 		pml4index = pdpindex >> NPML4EPGSHIFT;
3909 
3910 		pml4 = &pmap->pm_pml4[pml4index];
3911 		if ((*pml4 & PG_V) == 0) {
3912 			/* Have to allocate a new pdp, recurse */
3913 			if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
3914 			    lockp) == NULL) {
3915 				vm_page_unwire_noq(m);
3916 				vm_page_free_zero(m);
3917 				return (NULL);
3918 			}
3919 		} else {
3920 			/* Add reference to pdp page */
3921 			pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
3922 			pdppg->ref_count++;
3923 		}
3924 		pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
3925 
3926 		/* Now find the pdp page */
3927 		pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
3928 		*pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
3929 
3930 	} else {
3931 		vm_pindex_t pml4index;
3932 		vm_pindex_t pdpindex;
3933 		pml4_entry_t *pml4;
3934 		pdp_entry_t *pdp;
3935 		pd_entry_t *pd;
3936 
3937 		/* Wire up a new PTE page */
3938 		pdpindex = ptepindex >> NPDPEPGSHIFT;
3939 		pml4index = pdpindex >> NPML4EPGSHIFT;
3940 
3941 		/* First, find the pdp and check that it's valid. */
3942 		pml4 = &pmap->pm_pml4[pml4index];
3943 		if ((*pml4 & PG_V) == 0) {
3944 			/* Have to allocate a new pd, recurse */
3945 			if (_pmap_allocpte(pmap, NUPDE + pdpindex,
3946 			    lockp) == NULL) {
3947 				vm_page_unwire_noq(m);
3948 				vm_page_free_zero(m);
3949 				return (NULL);
3950 			}
3951 			pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
3952 			pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
3953 		} else {
3954 			pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
3955 			pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
3956 			if ((*pdp & PG_V) == 0) {
3957 				/* Have to allocate a new pd, recurse */
3958 				if (_pmap_allocpte(pmap, NUPDE + pdpindex,
3959 				    lockp) == NULL) {
3960 					vm_page_unwire_noq(m);
3961 					vm_page_free_zero(m);
3962 					return (NULL);
3963 				}
3964 			} else {
3965 				/* Add reference to the pd page */
3966 				pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
3967 				pdpg->ref_count++;
3968 			}
3969 		}
3970 		pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
3971 
3972 		/* Now we know where the page directory page is */
3973 		pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
3974 		*pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
3975 	}
3976 
3977 	pmap_resident_count_inc(pmap, 1);
3978 
3979 	return (m);
3980 }
3981 
3982 static pd_entry_t *
3983 pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
3984     struct rwlock **lockp)
3985 {
3986 	pdp_entry_t *pdpe, PG_V;
3987 	pd_entry_t *pde;
3988 	vm_page_t pdpg;
3989 	vm_pindex_t pdpindex;
3990 
3991 	PG_V = pmap_valid_bit(pmap);
3992 
3993 retry:
3994 	pdpe = pmap_pdpe(pmap, va);
3995 	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
3996 		pde = pmap_pdpe_to_pde(pdpe, va);
3997 		if (va < VM_MAXUSER_ADDRESS) {
3998 			/* Add a reference to the pd page. */
3999 			pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
4000 			pdpg->ref_count++;
4001 		} else
4002 			pdpg = NULL;
4003 	} else if (va < VM_MAXUSER_ADDRESS) {
4004 		/* Allocate a pd page. */
4005 		pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT;
4006 		pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
4007 		if (pdpg == NULL) {
4008 			if (lockp != NULL)
4009 				goto retry;
4010 			else
4011 				return (NULL);
4012 		}
4013 		pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4014 		pde = &pde[pmap_pde_index(va)];
4015 	} else
4016 		panic("pmap_alloc_pde: missing page table page for va %#lx",
4017 		    va);
4018 	*pdpgp = pdpg;
4019 	return (pde);
4020 }
4021 
4022 static vm_page_t
4023 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4024 {
4025 	vm_pindex_t ptepindex;
4026 	pd_entry_t *pd, PG_V;
4027 	vm_page_t m;
4028 
4029 	PG_V = pmap_valid_bit(pmap);
4030 
4031 	/*
4032 	 * Calculate pagetable page index
4033 	 */
4034 	ptepindex = pmap_pde_pindex(va);
4035 retry:
4036 	/*
4037 	 * Get the page directory entry
4038 	 */
4039 	pd = pmap_pde(pmap, va);
4040 
4041 	/*
4042 	 * This supports switching from a 2MB page to a
4043 	 * normal 4K page.
4044 	 */
4045 	if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
4046 		if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
4047 			/*
4048 			 * Invalidation of the 2MB page mapping may have caused
4049 			 * the deallocation of the underlying PD page.
4050 			 */
4051 			pd = NULL;
4052 		}
4053 	}
4054 
4055 	/*
4056 	 * If the page table page is mapped, we just increment its
4057 	 * reference count.
4058 	 */
4059 	if (pd != NULL && (*pd & PG_V) != 0) {
4060 		m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
4061 		m->ref_count++;
4062 	} else {
4063 		/*
4064 		 * Here if the pte page isn't mapped, or if it has been
4065 		 * deallocated.
4066 		 */
4067 		m = _pmap_allocpte(pmap, ptepindex, lockp);
4068 		if (m == NULL && lockp != NULL)
4069 			goto retry;
4070 	}
4071 	return (m);
4072 }
4073 
4074 
4075 /***************************************************
4076  * Pmap allocation/deallocation routines.
4077  ***************************************************/
4078 
4079 /*
4080  * Release any resources held by the given physical map.
4081  * Called when a pmap initialized by pmap_pinit is being released.
4082  * Should only be called if the map contains no valid mappings.
4083  */
4084 void
4085 pmap_release(pmap_t pmap)
4086 {
4087 	vm_page_t m;
4088 	int i;
4089 
4090 	KASSERT(pmap->pm_stats.resident_count == 0,
4091 	    ("pmap_release: pmap resident count %ld != 0",
4092 	    pmap->pm_stats.resident_count));
4093 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
4094 	    ("pmap_release: pmap has reserved page table page(s)"));
4095 	KASSERT(CPU_EMPTY(&pmap->pm_active),
4096 	    ("releasing active pmap %p", pmap));
4097 
4098 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4));
4099 
4100 	for (i = 0; i < NKPML4E; i++)	/* KVA */
4101 		pmap->pm_pml4[KPML4BASE + i] = 0;
4102 	for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
4103 		pmap->pm_pml4[DMPML4I + i] = 0;
4104 	pmap->pm_pml4[PML4PML4I] = 0;	/* Recursive Mapping */
4105 	for (i = 0; i < lm_ents; i++)	/* Large Map */
4106 		pmap->pm_pml4[LMSPML4I + i] = 0;
4107 
4108 	vm_page_unwire_noq(m);
4109 	vm_page_free_zero(m);
4110 
4111 	if (pmap->pm_pml4u != NULL) {
4112 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4u));
4113 		vm_page_unwire_noq(m);
4114 		vm_page_free(m);
4115 	}
4116 	if (pmap->pm_type == PT_X86 &&
4117 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
4118 		rangeset_fini(&pmap->pm_pkru);
4119 }
4120 
4121 static int
4122 kvm_size(SYSCTL_HANDLER_ARGS)
4123 {
4124 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
4125 
4126 	return sysctl_handle_long(oidp, &ksize, 0, req);
4127 }
4128 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4129     0, 0, kvm_size, "LU",
4130     "Size of KVM");
4131 
4132 static int
4133 kvm_free(SYSCTL_HANDLER_ARGS)
4134 {
4135 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
4136 
4137 	return sysctl_handle_long(oidp, &kfree, 0, req);
4138 }
4139 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4140     0, 0, kvm_free, "LU",
4141     "Amount of KVM free");
4142 
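/*
 * The two read-only sysctls above can be queried from userland, e.g.
 * "sysctl vm.kvm_size vm.kvm_free"; the names follow from the _vm
 * parent and the OID names given above.
 */
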
4143 /*
4144  * Allocate physical memory for the vm_page array and map it into KVA,
4145  * attempting to back the vm_pages with domain-local memory.
4146  */
4147 void
4148 pmap_page_array_startup(long pages)
4149 {
4150 	pdp_entry_t *pdpe;
4151 	pd_entry_t *pde, newpdir;
4152 	vm_offset_t va, start, end;
4153 	vm_paddr_t pa;
4154 	long pfn;
4155 	int domain, i;
4156 
4157 	vm_page_array_size = pages;
4158 
4159 	start = VM_MIN_KERNEL_ADDRESS;
4160 	end = start + pages * sizeof(struct vm_page);
4161 	for (va = start; va < end; va += NBPDR) {
4162 		pfn = first_page + (va - start) / sizeof(struct vm_page);
4163 		domain = _vm_phys_domain(ptoa(pfn));
4164 		pdpe = pmap_pdpe(kernel_pmap, va);
4165 		if ((*pdpe & X86_PG_V) == 0) {
4166 			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
4167 			dump_add_page(pa);
4168 			pagezero((void *)PHYS_TO_DMAP(pa));
4169 			*pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
4170 			    X86_PG_A | X86_PG_M);
4171 		}
4172 		pde = pmap_pdpe_to_pde(pdpe, va);
4173 		if ((*pde & X86_PG_V) != 0)
4174 			panic("Unexpected pde");
4175 		pa = vm_phys_early_alloc(domain, NBPDR);
4176 		for (i = 0; i < NPDEPG; i++)
4177 			dump_add_page(pa + i * PAGE_SIZE);
4178 		newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
4179 		    X86_PG_M | PG_PS | pg_g | pg_nx);
4180 		pde_store(pde, newpdir);
4181 	}
4182 	vm_page_array = (vm_page_t)start;
4183 }
4184 
4185 /*
4186  * grow the number of kernel page table entries, if needed
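 *
 * Each NBPDR (2MB) step of growth below installs a zero-filled page
 * table page for that 2MB range (unless its PDE is already valid) and,
 * when the covering PDP entry is not yet valid, first allocates a new
 * page directory page as well.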
4187  */
4188 void
4189 pmap_growkernel(vm_offset_t addr)
4190 {
4191 	vm_paddr_t paddr;
4192 	vm_page_t nkpg;
4193 	pd_entry_t *pde, newpdir;
4194 	pdp_entry_t *pdpe;
4195 
4196 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
4197 
4198 	/*
4199 	 * Return if "addr" is within the range of kernel page table pages
4200 	 * that were preallocated during pmap bootstrap.  Moreover, leave
4201 	 * "kernel_vm_end" and the kernel page table as they were.
4202 	 *
4203 	 * The correctness of this action is based on the following
4204 	 * argument: vm_map_insert() allocates contiguous ranges of the
4205 	 * kernel virtual address space.  It calls this function if a range
4206 	 * ends after "kernel_vm_end".  If the kernel is mapped between
4207 	 * "kernel_vm_end" and "addr", then the range cannot begin at
4208 	 * "kernel_vm_end".  In fact, its beginning address cannot be less
4209 	 * than the kernel.  Thus, there is no immediate need to allocate
4210 	 * any new kernel page table pages between "kernel_vm_end" and
4211 	 * "KERNBASE".
4212 	 */
4213 	if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
4214 		return;
4215 
4216 	addr = roundup2(addr, NBPDR);
4217 	if (addr - 1 >= vm_map_max(kernel_map))
4218 		addr = vm_map_max(kernel_map);
4219 	while (kernel_vm_end < addr) {
4220 		pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
4221 		if ((*pdpe & X86_PG_V) == 0) {
4222 			/* We need a new PDP entry */
4223 			nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
4224 			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
4225 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
4226 			if (nkpg == NULL)
4227 				panic("pmap_growkernel: no memory to grow kernel");
4228 			if ((nkpg->flags & PG_ZERO) == 0)
4229 				pmap_zero_page(nkpg);
4230 			paddr = VM_PAGE_TO_PHYS(nkpg);
4231 			*pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
4232 			    X86_PG_A | X86_PG_M);
4233 			continue; /* try again */
4234 		}
4235 		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
4236 		if ((*pde & X86_PG_V) != 0) {
4237 			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
4238 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
4239 				kernel_vm_end = vm_map_max(kernel_map);
4240 				break;
4241 			}
4242 			continue;
4243 		}
4244 
4245 		nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
4246 		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
4247 		    VM_ALLOC_ZERO);
4248 		if (nkpg == NULL)
4249 			panic("pmap_growkernel: no memory to grow kernel");
4250 		if ((nkpg->flags & PG_ZERO) == 0)
4251 			pmap_zero_page(nkpg);
4252 		paddr = VM_PAGE_TO_PHYS(nkpg);
4253 		newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
4254 		pde_store(pde, newpdir);
4255 
4256 		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
4257 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
4258 			kernel_vm_end = vm_map_max(kernel_map);
4259 			break;
4260 		}
4261 	}
4262 }
4263 
4264 
4265 /***************************************************
4266  * page management routines.
4267  ***************************************************/
4268 
4269 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
4270 CTASSERT(_NPCM == 3);
4271 CTASSERT(_NPCPV == 168);
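
/*
 * A rough accounting of the pv_chunk layout behind the CTASSERTs above:
 * with a 24-byte pv_entry and a 64-byte chunk header (pmap pointer, list
 * linkage, and the three-word pc_map[] bitmap), 168 entries fill a 4KB
 * page exactly (64 + 168 * 24 == 4096).  Since 168 < 3 * 64, only the
 * low 40 bits of the last bitmap word are ever used, which is why
 * PC_FREE2 below is 0x000000ffffffffff.
 */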
4272 
4273 static __inline struct pv_chunk *
4274 pv_to_chunk(pv_entry_t pv)
4275 {
4276 
4277 	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
4278 }
4279 
4280 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
4281 
4282 #define	PC_FREE0	0xfffffffffffffffful
4283 #define	PC_FREE1	0xfffffffffffffffful
4284 #define	PC_FREE2	0x000000fffffffffful
4285 
4286 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
4287 
4288 #ifdef PV_STATS
4289 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
4290 
4291 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
4292 	"Current number of pv entry chunks");
4293 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
4294 	"Current number of pv entry chunks allocated");
4295 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
4296 	"Current number of pv entry chunks frees");
4297 	"Current number of pv entry chunk frees");
4298 	"Number of times tried to get a chunk page but failed.");
4299 
4300 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
4301 static int pv_entry_spare;
4302 
4303 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
4304 	"Current number of pv entry frees");
4305 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
4306 	"Current number of pv entry allocs");
4307 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
4308 	"Current number of pv entries");
4309 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
4310 	"Current number of spare pv entries");
4311 #endif
4312 
4313 static void
4314 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
4315 {
4316 
4317 	if (pmap == NULL)
4318 		return;
4319 	pmap_invalidate_all(pmap);
4320 	if (pmap != locked_pmap)
4321 		PMAP_UNLOCK(pmap);
4322 	if (start_di)
4323 		pmap_delayed_invl_finish();
4324 }
4325 
4326 /*
4327  * We are in a serious low memory condition.  Resort to
4328  * drastic measures to free some pages so we can allocate
4329  * another pv entry chunk.
4330  *
4331  * Returns NULL if PV entries were reclaimed from the specified pmap.
4332  *
4333  * We do not, however, unmap 2mpages because subsequent accesses will
4334  * allocate per-page pv entries until repromotion occurs, thereby
4335  * exacerbating the shortage of free pv entries.
4336  */
4337 static vm_page_t
4338 reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
4339 {
4340 	struct pv_chunks_list *pvc;
4341 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
4342 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
4343 	struct md_page *pvh;
4344 	pd_entry_t *pde;
4345 	pmap_t next_pmap, pmap;
4346 	pt_entry_t *pte, tpte;
4347 	pt_entry_t PG_G, PG_A, PG_M, PG_RW;
4348 	pv_entry_t pv;
4349 	vm_offset_t va;
4350 	vm_page_t m, m_pc;
4351 	struct spglist free;
4352 	uint64_t inuse;
4353 	int bit, field, freed;
4354 	bool start_di, restart;
4355 
4356 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
4357 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
4358 	pmap = NULL;
4359 	m_pc = NULL;
4360 	PG_G = PG_A = PG_M = PG_RW = 0;
4361 	SLIST_INIT(&free);
4362 	bzero(&pc_marker_b, sizeof(pc_marker_b));
4363 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
4364 	pc_marker = (struct pv_chunk *)&pc_marker_b;
4365 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
4366 
4367 	/*
4368 	 * A delayed invalidation block should already be active if
4369 	 * pmap_advise() or pmap_remove() called this function by way
4370 	 * of pmap_demote_pde_locked().
4371 	 */
4372 	start_di = pmap_not_in_di();
4373 
4374 	pvc = &pv_chunks[domain];
4375 	mtx_lock(&pvc->pvc_lock);
4376 	pvc->active_reclaims++;
4377 	TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru);
4378 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru);
4379 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
4380 	    SLIST_EMPTY(&free)) {
4381 		next_pmap = pc->pc_pmap;
4382 		if (next_pmap == NULL) {
4383 			/*
4384 			 * The next chunk is a marker.  However, it is
4385 			 * not our marker, so active_reclaims must be
4386 			 * > 1.  Consequently, the next_chunk code
4387 			 * will not rotate the pv_chunks list.
4388 			 */
4389 			goto next_chunk;
4390 		}
4391 		mtx_unlock(&pvc->pvc_lock);
4392 
4393 		/*
4394 		 * A pv_chunk can only be removed from the pc_lru list
4395 		 * when both the per-domain pvc->pvc_lock is owned and the
4396 		 * corresponding pmap is locked.
4397 		 */
4398 		if (pmap != next_pmap) {
4399 			restart = false;
4400 			reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
4401 			    start_di);
4402 			pmap = next_pmap;
4403 			/* Avoid deadlock and lock recursion. */
4404 			if (pmap > locked_pmap) {
4405 				RELEASE_PV_LIST_LOCK(lockp);
4406 				PMAP_LOCK(pmap);
4407 				if (start_di)
4408 					pmap_delayed_invl_start();
4409 				mtx_lock(&pvc->pvc_lock);
4410 				restart = true;
4411 			} else if (pmap != locked_pmap) {
4412 				if (PMAP_TRYLOCK(pmap)) {
4413 					if (start_di)
4414 						pmap_delayed_invl_start();
4415 					mtx_lock(&pvc->pvc_lock);
4416 					restart = true;
4417 				} else {
4418 					pmap = NULL; /* pmap is not locked */
4419 					mtx_lock(&pvc->pvc_lock);
4420 					pc = TAILQ_NEXT(pc_marker, pc_lru);
4421 					if (pc == NULL ||
4422 					    pc->pc_pmap != next_pmap)
4423 						continue;
4424 					goto next_chunk;
4425 				}
4426 			} else if (start_di)
4427 				pmap_delayed_invl_start();
4428 			PG_G = pmap_global_bit(pmap);
4429 			PG_A = pmap_accessed_bit(pmap);
4430 			PG_M = pmap_modified_bit(pmap);
4431 			PG_RW = pmap_rw_bit(pmap);
4432 			if (restart)
4433 				continue;
4434 		}
4435 
4436 		/*
4437 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
4438 		 */
4439 		freed = 0;
4440 		for (field = 0; field < _NPCM; field++) {
4441 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
4442 			    inuse != 0; inuse &= ~(1UL << bit)) {
4443 				bit = bsfq(inuse);
4444 				pv = &pc->pc_pventry[field * 64 + bit];
4445 				va = pv->pv_va;
4446 				pde = pmap_pde(pmap, va);
4447 				if ((*pde & PG_PS) != 0)
4448 					continue;
4449 				pte = pmap_pde_to_pte(pde, va);
4450 				if ((*pte & PG_W) != 0)
4451 					continue;
4452 				tpte = pte_load_clear(pte);
4453 				if ((tpte & PG_G) != 0)
4454 					pmap_invalidate_page(pmap, va);
4455 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
4456 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4457 					vm_page_dirty(m);
4458 				if ((tpte & PG_A) != 0)
4459 					vm_page_aflag_set(m, PGA_REFERENCED);
4460 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
4461 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4462 				m->md.pv_gen++;
4463 				if (TAILQ_EMPTY(&m->md.pv_list) &&
4464 				    (m->flags & PG_FICTITIOUS) == 0) {
4465 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4466 					if (TAILQ_EMPTY(&pvh->pv_list)) {
4467 						vm_page_aflag_clear(m,
4468 						    PGA_WRITEABLE);
4469 					}
4470 				}
4471 				pmap_delayed_invl_page(m);
4472 				pc->pc_map[field] |= 1UL << bit;
4473 				pmap_unuse_pt(pmap, va, *pde, &free);
4474 				freed++;
4475 			}
4476 		}
4477 		if (freed == 0) {
4478 			mtx_lock(&pvc->pvc_lock);
4479 			goto next_chunk;
4480 		}
4481 		/* Every freed mapping is for a 4 KB page. */
4482 		pmap_resident_count_dec(pmap, freed);
4483 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
4484 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
4485 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
4486 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4487 		if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
4488 		    pc->pc_map[2] == PC_FREE2) {
4489 			PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
4490 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
4491 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
4492 			/* Entire chunk is free; return it. */
4493 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
4494 			dump_drop_page(m_pc->phys_addr);
4495 			mtx_lock(&pvc->pvc_lock);
4496 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
4497 			break;
4498 		}
4499 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
4500 		mtx_lock(&pvc->pvc_lock);
4501 		/* One freed pv entry in locked_pmap is sufficient. */
4502 		if (pmap == locked_pmap)
4503 			break;
4504 next_chunk:
4505 		TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
4506 		TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru);
4507 		if (pvc->active_reclaims == 1 && pmap != NULL) {
4508 			/*
4509 			 * Rotate the pv chunks list so that we do not
4510 			 * scan the same pv chunks that could not be
4511 			 * freed (because they contained a wired
4512 			 * and/or superpage mapping) on every
4513 			 * invocation of reclaim_pv_chunk().
4514 			 */
4515 			while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker) {
4516 				MPASS(pc->pc_pmap != NULL);
4517 				TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
4518 				TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
4519 			}
4520 		}
4521 	}
4522 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
4523 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru);
4524 	pvc->active_reclaims--;
4525 	mtx_unlock(&pvc->pvc_lock);
4526 	reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
4527 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
4528 		m_pc = SLIST_FIRST(&free);
4529 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
4530 		/* Recycle a freed page table page. */
4531 		m_pc->ref_count = 1;
4532 	}
4533 	vm_page_free_pages_toq(&free, true);
4534 	return (m_pc);
4535 }
4536 
4537 static vm_page_t
4538 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
4539 {
4540 	vm_page_t m;
4541 	int i, domain;
4542 
4543 	domain = PCPU_GET(domain);
4544 	for (i = 0; i < vm_ndomains; i++) {
4545 		m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain);
4546 		if (m != NULL)
4547 			break;
4548 		domain = (domain + 1) % vm_ndomains;
4549 	}
4550 
4551 	return (m);
4552 }
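
/*
 * The wrapper above starts the reclaim in the calling CPU's memory
 * domain and falls back to the remaining domains round-robin, so a
 * recycled chunk page is preferentially taken from local memory.
 */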
4553 
4554 /*
4555  * free the pv_entry back to the free list
4556  */
4557 static void
4558 free_pv_entry(pmap_t pmap, pv_entry_t pv)
4559 {
4560 	struct pv_chunk *pc;
4561 	int idx, field, bit;
4562 
4563 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4564 	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
4565 	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
4566 	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
4567 	pc = pv_to_chunk(pv);
4568 	idx = pv - &pc->pc_pventry[0];
4569 	field = idx / 64;
4570 	bit = idx % 64;
4571 	pc->pc_map[field] |= 1ul << bit;
4572 	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
4573 	    pc->pc_map[2] != PC_FREE2) {
4574 		/* 98% of the time, pc is already at the head of the list. */
4575 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
4576 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4577 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
4578 		}
4579 		return;
4580 	}
4581 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4582 	free_pv_chunk(pc);
4583 }
4584 
4585 static void
4586 free_pv_chunk_dequeued(struct pv_chunk *pc)
4587 {
4588 	vm_page_t m;
4589 
4590 	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
4591 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
4592 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
4593 	/* entire chunk is free, return it */
4594 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
4595 	dump_drop_page(m->phys_addr);
4596 	vm_page_unwire_noq(m);
4597 	vm_page_free(m);
4598 }
4599 
4600 static void
4601 free_pv_chunk(struct pv_chunk *pc)
4602 {
4603 	struct pv_chunks_list *pvc;
4604 
4605 	pvc = &pv_chunks[pc_to_domain(pc)];
4606 	mtx_lock(&pvc->pvc_lock);
4607 	TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
4608 	mtx_unlock(&pvc->pvc_lock);
4609 	free_pv_chunk_dequeued(pc);
4610 }
4611 
4612 static void
4613 free_pv_chunk_batch(struct pv_chunklist *batch)
4614 {
4615 	struct pv_chunks_list *pvc;
4616 	struct pv_chunk *pc, *npc;
4617 	int i;
4618 
4619 	for (i = 0; i < vm_ndomains; i++) {
4620 		if (TAILQ_EMPTY(&batch[i]))
4621 			continue;
4622 		pvc = &pv_chunks[i];
4623 		mtx_lock(&pvc->pvc_lock);
4624 		TAILQ_FOREACH(pc, &batch[i], pc_list) {
4625 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
4626 		}
4627 		mtx_unlock(&pvc->pvc_lock);
4628 	}
4629 
4630 	for (i = 0; i < vm_ndomains; i++) {
4631 		TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) {
4632 			free_pv_chunk_dequeued(pc);
4633 		}
4634 	}
4635 }
4636 
4637 /*
4638  * Returns a new PV entry, allocating a new PV chunk from the system when
4639  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
4640  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
4641  * returned.
4642  *
4643  * The given PV list lock may be released.
4644  */
4645 static pv_entry_t
4646 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
4647 {
4648 	struct pv_chunks_list *pvc;
4649 	int bit, field;
4650 	pv_entry_t pv;
4651 	struct pv_chunk *pc;
4652 	vm_page_t m;
4653 
4654 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4655 	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
4656 retry:
4657 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
4658 	if (pc != NULL) {
4659 		for (field = 0; field < _NPCM; field++) {
4660 			if (pc->pc_map[field]) {
4661 				bit = bsfq(pc->pc_map[field]);
4662 				break;
4663 			}
4664 		}
4665 		if (field < _NPCM) {
4666 			pv = &pc->pc_pventry[field * 64 + bit];
4667 			pc->pc_map[field] &= ~(1ul << bit);
4668 			/* If this was the last item, move it to tail */
4669 			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
4670 			    pc->pc_map[2] == 0) {
4671 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4672 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
4673 				    pc_list);
4674 			}
4675 			PV_STAT(atomic_add_long(&pv_entry_count, 1));
4676 			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
4677 			return (pv);
4678 		}
4679 	}
4680 	/* No free items, allocate another chunk */
4681 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
4682 	    VM_ALLOC_WIRED);
4683 	if (m == NULL) {
4684 		if (lockp == NULL) {
4685 			PV_STAT(pc_chunk_tryfail++);
4686 			return (NULL);
4687 		}
4688 		m = reclaim_pv_chunk(pmap, lockp);
4689 		if (m == NULL)
4690 			goto retry;
4691 	}
4692 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
4693 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
4694 	dump_add_page(m->phys_addr);
4695 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
4696 	pc->pc_pmap = pmap;
4697 	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
4698 	pc->pc_map[1] = PC_FREE1;
4699 	pc->pc_map[2] = PC_FREE2;
4700 	pvc = &pv_chunks[_vm_phys_domain(m->phys_addr)];
4701 	mtx_lock(&pvc->pvc_lock);
4702 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
4703 	mtx_unlock(&pvc->pvc_lock);
4704 	pv = &pc->pc_pventry[0];
4705 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
4706 	PV_STAT(atomic_add_long(&pv_entry_count, 1));
4707 	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
4708 	return (pv);
4709 }
4710 
4711 /*
4712  * Returns the number of one bits within the given PV chunk map.
4713  *
4714  * The errata for Intel processors state that "POPCNT Instruction May
4715  * Take Longer to Execute Than Expected".  It is believed that the
4716  * issue is the spurious dependency on the destination register.
4717  * Provide a hint to the register rename logic that the destination
4718  * value is overwritten, by clearing it, as suggested in the
4719  * optimization manual.  It should be cheap for unaffected processors
4720  * as well.
4721  *
4722  * Reference numbers for the errata are:
4723  * 4th Gen Core: HSD146
4724  * 5th Gen Core: BDM85
4725  * 6th Gen Core: SKL029
4726  */
4727 static int
4728 popcnt_pc_map_pq(uint64_t *map)
4729 {
4730 	u_long result, tmp;
4731 
4732 	__asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
4733 	    "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
4734 	    "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
4735 	    : "=&r" (result), "=&r" (tmp)
4736 	    : "m" (map[0]), "m" (map[1]), "m" (map[2]));
4737 	return (result);
4738 }
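
/*
 * Functionally the above is just a three-word population count; a plain
 * C equivalent, ignoring the erratum workaround, would be roughly:
 *
 *	return (__builtin_popcountl(map[0]) + __builtin_popcountl(map[1]) +
 *	    __builtin_popcountl(map[2]));
 */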
4739 
4740 /*
4741  * Ensure that the number of spare PV entries in the specified pmap meets or
4742  * exceeds the given count, "needed".
4743  *
4744  * The given PV list lock may be released.
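 *
 * For example, pmap_demote_pde_locked() calls this routine with
 * needed = NPTEPG - 1 = 511; starting with no spare entries, that can
 * require allocating up to four new chunks, since 3 * _NPCPV = 504 <
 * 511 <= 4 * _NPCPV.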
4745  */
4746 static void
4747 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
4748 {
4749 	struct pv_chunks_list *pvc;
4750 	struct pch new_tail[PMAP_MEMDOM];
4751 	struct pv_chunk *pc;
4752 	vm_page_t m;
4753 	int avail, free, i;
4754 	bool reclaimed;
4755 
4756 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4757 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
4758 
4759 	/*
4760 	 * Newly allocated PV chunks must be stored in a private list until
4761 	 * the required number of PV chunks has been allocated.  Otherwise,
4762 	 * reclaim_pv_chunk() could recycle one of these chunks.  In
4763 	 * contrast, these chunks must be added to the pmap upon allocation.
4764 	 */
4765 	for (i = 0; i < PMAP_MEMDOM; i++)
4766 		TAILQ_INIT(&new_tail[i]);
4767 retry:
4768 	avail = 0;
4769 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
4770 #ifndef __POPCNT__
4771 		if ((cpu_feature2 & CPUID2_POPCNT) == 0)
4772 			bit_count((bitstr_t *)pc->pc_map, 0,
4773 			    sizeof(pc->pc_map) * NBBY, &free);
4774 		else
4775 #endif
4776 		free = popcnt_pc_map_pq(pc->pc_map);
4777 		if (free == 0)
4778 			break;
4779 		avail += free;
4780 		if (avail >= needed)
4781 			break;
4782 	}
4783 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
4784 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
4785 		    VM_ALLOC_WIRED);
4786 		if (m == NULL) {
4787 			m = reclaim_pv_chunk(pmap, lockp);
4788 			if (m == NULL)
4789 				goto retry;
4790 			reclaimed = true;
4791 		}
4792 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
4793 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
4794 		dump_add_page(m->phys_addr);
4795 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
4796 		pc->pc_pmap = pmap;
4797 		pc->pc_map[0] = PC_FREE0;
4798 		pc->pc_map[1] = PC_FREE1;
4799 		pc->pc_map[2] = PC_FREE2;
4800 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
4801 		TAILQ_INSERT_TAIL(&new_tail[pc_to_domain(pc)], pc, pc_lru);
4802 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
4803 
4804 		/*
4805 		 * The reclaim might have freed a chunk from the current pmap.
4806 		 * If that chunk contained available entries, we need to
4807 		 * re-count the number of available entries.
4808 		 */
4809 		if (reclaimed)
4810 			goto retry;
4811 	}
4812 	for (i = 0; i < vm_ndomains; i++) {
4813 		if (TAILQ_EMPTY(&new_tail[i]))
4814 			continue;
4815 		pvc = &pv_chunks[i];
4816 		mtx_lock(&pvc->pvc_lock);
4817 		TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru);
4818 		mtx_unlock(&pvc->pvc_lock);
4819 	}
4820 }
4821 
4822 /*
4823  * First find and then remove the pv entry for the specified pmap and virtual
4824  * address from the specified pv list.  Returns the pv entry if found and NULL
4825  * otherwise.  This operation can be performed on pv lists for either 4KB or
4826  * 2MB page mappings.
4827  */
4828 static __inline pv_entry_t
4829 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
4830 {
4831 	pv_entry_t pv;
4832 
4833 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4834 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
4835 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4836 			pvh->pv_gen++;
4837 			break;
4838 		}
4839 	}
4840 	return (pv);
4841 }
4842 
4843 /*
4844  * After demotion from a 2MB page mapping to 512 4KB page mappings,
4845  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
4846  * entries for each of the 4KB page mappings.
4847  */
4848 static void
4849 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
4850     struct rwlock **lockp)
4851 {
4852 	struct md_page *pvh;
4853 	struct pv_chunk *pc;
4854 	pv_entry_t pv;
4855 	vm_offset_t va_last;
4856 	vm_page_t m;
4857 	int bit, field;
4858 
4859 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4860 	KASSERT((pa & PDRMASK) == 0,
4861 	    ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
4862 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4863 
4864 	/*
4865 	 * Transfer the 2mpage's pv entry for this mapping to the first
4866 	 * page's pv list.  Once this transfer begins, the pv list lock
4867 	 * must not be released until the last pv entry is reinstantiated.
4868 	 */
4869 	pvh = pa_to_pvh(pa);
4870 	va = trunc_2mpage(va);
4871 	pv = pmap_pvh_remove(pvh, pmap, va);
4872 	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
4873 	m = PHYS_TO_VM_PAGE(pa);
4874 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4875 	m->md.pv_gen++;
4876 	/* Instantiate the remaining NPTEPG - 1 pv entries. */
4877 	PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
4878 	va_last = va + NBPDR - PAGE_SIZE;
4879 	for (;;) {
4880 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
4881 		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
4882 		    pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare"));
4883 		for (field = 0; field < _NPCM; field++) {
4884 			while (pc->pc_map[field]) {
4885 				bit = bsfq(pc->pc_map[field]);
4886 				pc->pc_map[field] &= ~(1ul << bit);
4887 				pv = &pc->pc_pventry[field * 64 + bit];
4888 				va += PAGE_SIZE;
4889 				pv->pv_va = va;
4890 				m++;
4891 				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4892 			    ("pmap_pv_demote_pde: page %p is not managed", m));
4893 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4894 				m->md.pv_gen++;
4895 				if (va == va_last)
4896 					goto out;
4897 			}
4898 		}
4899 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4900 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
4901 	}
4902 out:
4903 	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
4904 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4905 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
4906 	}
4907 	PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
4908 	PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
4909 }
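
/*
 * The loop above hands out the NPTEPG - 1 spare pv entries that the
 * caller reserved via reserve_pv_entries(); it never calls
 * get_pv_entry() and therefore cannot sleep or trigger reclamation in
 * the middle of the demotion.
 */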
4910 
4911 #if VM_NRESERVLEVEL > 0
4912 /*
4913  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
4914  * replace the many pv entries for the 4KB page mappings by a single pv entry
4915  * for the 2MB page mapping.
4916  */
4917 static void
4918 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
4919     struct rwlock **lockp)
4920 {
4921 	struct md_page *pvh;
4922 	pv_entry_t pv;
4923 	vm_offset_t va_last;
4924 	vm_page_t m;
4925 
4926 	KASSERT((pa & PDRMASK) == 0,
4927 	    ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
4928 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4929 
4930 	/*
4931 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
4932 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
4933 	 * a transfer avoids the possibility that get_pv_entry() calls
4934 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
4935 	 * mappings that is being promoted.
4936 	 */
4937 	m = PHYS_TO_VM_PAGE(pa);
4938 	va = trunc_2mpage(va);
4939 	pv = pmap_pvh_remove(&m->md, pmap, va);
4940 	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
4941 	pvh = pa_to_pvh(pa);
4942 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4943 	pvh->pv_gen++;
4944 	/* Free the remaining NPTEPG - 1 pv entries. */
4945 	va_last = va + NBPDR - PAGE_SIZE;
4946 	do {
4947 		m++;
4948 		va += PAGE_SIZE;
4949 		pmap_pvh_free(&m->md, pmap, va);
4950 	} while (va < va_last);
4951 }
4952 #endif /* VM_NRESERVLEVEL > 0 */
4953 
4954 /*
4955  * First find and then destroy the pv entry for the specified pmap and virtual
4956  * address.  This operation can be performed on pv lists for either 4KB or 2MB
4957  * page mappings.
4958  */
4959 static void
4960 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
4961 {
4962 	pv_entry_t pv;
4963 
4964 	pv = pmap_pvh_remove(pvh, pmap, va);
4965 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
4966 	free_pv_entry(pmap, pv);
4967 }
4968 
4969 /*
4970  * Conditionally create the PV entry for a 4KB page mapping if the required
4971  * memory can be allocated without resorting to reclamation.
4972  */
4973 static boolean_t
4974 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
4975     struct rwlock **lockp)
4976 {
4977 	pv_entry_t pv;
4978 
4979 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4980 	/* Pass NULL instead of the lock pointer to disable reclamation. */
4981 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
4982 		pv->pv_va = va;
4983 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
4984 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4985 		m->md.pv_gen++;
4986 		return (TRUE);
4987 	} else
4988 		return (FALSE);
4989 }
4990 
4991 /*
4992  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
4993  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
4994  * false if the PV entry cannot be allocated without resorting to reclamation.
4995  */
4996 static bool
4997 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
4998     struct rwlock **lockp)
4999 {
5000 	struct md_page *pvh;
5001 	pv_entry_t pv;
5002 	vm_paddr_t pa;
5003 
5004 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5005 	/* Pass NULL instead of the lock pointer to disable reclamation. */
5006 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
5007 	    NULL : lockp)) == NULL)
5008 		return (false);
5009 	pv->pv_va = va;
5010 	pa = pde & PG_PS_FRAME;
5011 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5012 	pvh = pa_to_pvh(pa);
5013 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5014 	pvh->pv_gen++;
5015 	return (true);
5016 }
5017 
5018 /*
5019  * Fills a page table page with mappings to consecutive physical pages.
5020  */
5021 static void
5022 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
5023 {
5024 	pt_entry_t *pte;
5025 
5026 	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
5027 		*pte = newpte;
5028 		newpte += PAGE_SIZE;
5029 	}
5030 }
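
/*
 * For instance, when called from pmap_demote_pde_locked() below with a
 * "newpte" derived from a 2MB mapping, this produces NPTEPG (512)
 * consecutive 4KB mappings covering pa, pa + PAGE_SIZE, ...,
 * pa + NBPDR - PAGE_SIZE, all with identical permission and cache bits.
 */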
5031 
5032 /*
5033  * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
5034  * mapping is invalidated.
5035  */
5036 static boolean_t
5037 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
5038 {
5039 	struct rwlock *lock;
5040 	boolean_t rv;
5041 
5042 	lock = NULL;
5043 	rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
5044 	if (lock != NULL)
5045 		rw_wunlock(lock);
5046 	return (rv);
5047 }
5048 
5049 static void
5050 pmap_demote_pde_check(pt_entry_t *firstpte __unused, pt_entry_t newpte __unused)
5051 {
5052 #ifdef INVARIANTS
5053 #ifdef DIAGNOSTIC
5054 	pt_entry_t *xpte, *ypte;
5055 
5056 	for (xpte = firstpte; xpte < firstpte + NPTEPG;
5057 	    xpte++, newpte += PAGE_SIZE) {
5058 		if ((*xpte & PG_FRAME) != (newpte & PG_FRAME)) {
5059 			printf("pmap_demote_pde: xpte %zd and newpte map "
5060 			    "different pages: found %#lx, expected %#lx\n",
5061 			    xpte - firstpte, *xpte, newpte);
5062 			printf("page table dump\n");
5063 			for (ypte = firstpte; ypte < firstpte + NPTEPG; ypte++)
5064 				printf("%zd %#lx\n", ypte - firstpte, *ypte);
5065 			panic("firstpte");
5066 		}
5067 	}
5068 #else
5069 	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
5070 	    ("pmap_demote_pde: firstpte and newpte map different physical"
5071 	    " addresses"));
5072 #endif
5073 #endif
5074 }
5075 
5076 static void
5077 pmap_demote_pde_abort(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
5078     pd_entry_t oldpde, struct rwlock **lockp)
5079 {
5080 	struct spglist free;
5081 	vm_offset_t sva;
5082 
5083 	SLIST_INIT(&free);
5084 	sva = trunc_2mpage(va);
5085 	pmap_remove_pde(pmap, pde, sva, &free, lockp);
5086 	if ((oldpde & pmap_global_bit(pmap)) == 0)
5087 		pmap_invalidate_pde_page(pmap, sva, oldpde);
5088 	vm_page_free_pages_toq(&free, true);
5089 	CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx in pmap %p",
5090 	    va, pmap);
5091 }
5092 
5093 static boolean_t
5094 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
5095     struct rwlock **lockp)
5096 {
5097 	pd_entry_t newpde, oldpde;
5098 	pt_entry_t *firstpte, newpte;
5099 	pt_entry_t PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
5100 	vm_paddr_t mptepa;
5101 	vm_page_t mpte;
5102 	int PG_PTE_CACHE;
5103 	bool in_kernel;
5104 
5105 	PG_A = pmap_accessed_bit(pmap);
5106 	PG_G = pmap_global_bit(pmap);
5107 	PG_M = pmap_modified_bit(pmap);
5108 	PG_RW = pmap_rw_bit(pmap);
5109 	PG_V = pmap_valid_bit(pmap);
5110 	PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
5111 	PG_PKU_MASK = pmap_pku_mask_bit(pmap);
5112 
5113 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5114 	in_kernel = va >= VM_MAXUSER_ADDRESS;
5115 	oldpde = *pde;
5116 	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
5117 	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
5118 
5119 	/*
5120 	 * Invalidate the 2MB page mapping and return "failure" if the
5121 	 * mapping was never accessed.
5122 	 */
5123 	if ((oldpde & PG_A) == 0) {
5124 		KASSERT((oldpde & PG_W) == 0,
5125 		    ("pmap_demote_pde: a wired mapping is missing PG_A"));
5126 		pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
5127 		return (FALSE);
5128 	}
5129 
5130 	mpte = pmap_remove_pt_page(pmap, va);
5131 	if (mpte == NULL) {
5132 		KASSERT((oldpde & PG_W) == 0,
5133 		    ("pmap_demote_pde: page table page for a wired mapping"
5134 		    " is missing"));
5135 
5136 		/*
5137 		 * If the page table page is missing and the mapping
5138 		 * is for a kernel address, the mapping must belong to
5139 		 * the direct map.  Page table pages are preallocated
5140 		 * for every other part of the kernel address space,
5141 		 * so the direct map region is the only part of the
5142 		 * kernel address space that must be handled here.
5143 		 */
5144 		KASSERT(!in_kernel || (va >= DMAP_MIN_ADDRESS &&
5145 		    va < DMAP_MAX_ADDRESS),
5146 		    ("pmap_demote_pde: No saved mpte for va %#lx", va));
5147 
5148 		/*
5149 		 * If the 2MB page mapping belongs to the direct map
5150 		 * region of the kernel's address space, then the page
5151 		 * allocation request specifies the highest possible
5152 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
5153 		 * priority is normal.
5154 		 */
5155 		mpte = vm_page_alloc(NULL, pmap_pde_pindex(va),
5156 		    (in_kernel ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
5157 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5158 
5159 		/*
5160 		 * If the allocation of the new page table page fails,
5161 		 * invalidate the 2MB page mapping and return "failure".
5162 		 */
5163 		if (mpte == NULL) {
5164 			pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
5165 			return (FALSE);
5166 		}
5167 
5168 		if (!in_kernel) {
5169 			mpte->ref_count = NPTEPG;
5170 			pmap_resident_count_inc(pmap, 1);
5171 		}
5172 	}
5173 	mptepa = VM_PAGE_TO_PHYS(mpte);
5174 	firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
5175 	newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
5176 	KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
5177 	    ("pmap_demote_pde: oldpde is missing PG_M"));
5178 	newpte = oldpde & ~PG_PS;
5179 	newpte = pmap_swap_pat(pmap, newpte);
5180 
5181 	/*
5182 	 * If the page table page is not leftover from an earlier promotion,
5183 	 * initialize it.
5184 	 */
5185 	if (mpte->valid == 0)
5186 		pmap_fill_ptp(firstpte, newpte);
5187 
5188 	pmap_demote_pde_check(firstpte, newpte);
5189 
5190 	/*
5191 	 * If the mapping has changed attributes, update the page table
5192 	 * entries.
5193 	 */
5194 	if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
5195 		pmap_fill_ptp(firstpte, newpte);
5196 
5197 	/*
5198 	 * The spare PV entries must be reserved prior to demoting the
5199 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
5200 	 * of the PDE and the PV lists will be inconsistent, which can result
5201 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
5202 	 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
5203 	 * PV entry for the 2MB page mapping that is being demoted.
5204 	 */
5205 	if ((oldpde & PG_MANAGED) != 0)
5206 		reserve_pv_entries(pmap, NPTEPG - 1, lockp);
5207 
5208 	/*
5209 	 * Demote the mapping.  This pmap is locked.  The old PDE has
5210 	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
5211 	 * set.  Thus, there is no danger of a race with another
5212 	 * processor changing the setting of PG_A and/or PG_M between
5213 	 * the read above and the store below.
5214 	 */
5215 	if (workaround_erratum383)
5216 		pmap_update_pde(pmap, va, pde, newpde);
5217 	else
5218 		pde_store(pde, newpde);
5219 
5220 	/*
5221 	 * Invalidate a stale recursive mapping of the page table page.
5222 	 */
5223 	if (in_kernel)
5224 		pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
5225 
5226 	/*
5227 	 * Demote the PV entry.
5228 	 */
5229 	if ((oldpde & PG_MANAGED) != 0)
5230 		pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
5231 
5232 	atomic_add_long(&pmap_pde_demotions, 1);
5233 	CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx in pmap %p",
5234 	    va, pmap);
5235 	return (TRUE);
5236 }
5237 
5238 /*
5239  * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
5240  */
5241 static void
5242 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
5243 {
5244 	pd_entry_t newpde;
5245 	vm_paddr_t mptepa;
5246 	vm_page_t mpte;
5247 
5248 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
5249 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5250 	mpte = pmap_remove_pt_page(pmap, va);
5251 	if (mpte == NULL)
5252 		panic("pmap_remove_kernel_pde: Missing pt page.");
5253 
5254 	mptepa = VM_PAGE_TO_PHYS(mpte);
5255 	newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
5256 
5257 	/*
5258 	 * If this page table page was unmapped by a promotion, then it
5259 	 * contains valid mappings.  Zero it to invalidate those mappings.
5260 	 */
5261 	if (mpte->valid != 0)
5262 		pagezero((void *)PHYS_TO_DMAP(mptepa));
5263 
5264 	/*
5265 	 * Demote the mapping.
5266 	 */
5267 	if (workaround_erratum383)
5268 		pmap_update_pde(pmap, va, pde, newpde);
5269 	else
5270 		pde_store(pde, newpde);
5271 
5272 	/*
5273 	 * Invalidate a stale recursive mapping of the page table page.
5274 	 */
5275 	pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
5276 }
5277 
5278 /*
5279  * pmap_remove_pde: Remove a 2MB superpage mapping from the given pmap.
5280  */
5281 static int
5282 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
5283     struct spglist *free, struct rwlock **lockp)
5284 {
5285 	struct md_page *pvh;
5286 	pd_entry_t oldpde;
5287 	vm_offset_t eva, va;
5288 	vm_page_t m, mpte;
5289 	pt_entry_t PG_G, PG_A, PG_M, PG_RW;
5290 
5291 	PG_G = pmap_global_bit(pmap);
5292 	PG_A = pmap_accessed_bit(pmap);
5293 	PG_M = pmap_modified_bit(pmap);
5294 	PG_RW = pmap_rw_bit(pmap);
5295 
5296 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5297 	KASSERT((sva & PDRMASK) == 0,
5298 	    ("pmap_remove_pde: sva is not 2mpage aligned"));
5299 	oldpde = pte_load_clear(pdq);
5300 	if (oldpde & PG_W)
5301 		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
5302 	if ((oldpde & PG_G) != 0)
5303 		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
5304 	pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
5305 	if (oldpde & PG_MANAGED) {
5306 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
5307 		pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
5308 		pmap_pvh_free(pvh, pmap, sva);
5309 		eva = sva + NBPDR;
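		/*
		 * Update each of the 512 constituent 4KB pages to reflect
		 * the removed superpage mapping's modified and accessed bits.
		 */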
5310 		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
5311 		    va < eva; va += PAGE_SIZE, m++) {
5312 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
5313 				vm_page_dirty(m);
5314 			if (oldpde & PG_A)
5315 				vm_page_aflag_set(m, PGA_REFERENCED);
5316 			if (TAILQ_EMPTY(&m->md.pv_list) &&
5317 			    TAILQ_EMPTY(&pvh->pv_list))
5318 				vm_page_aflag_clear(m, PGA_WRITEABLE);
5319 			pmap_delayed_invl_page(m);
5320 		}
5321 	}
5322 	if (pmap == kernel_pmap) {
5323 		pmap_remove_kernel_pde(pmap, pdq, sva);
5324 	} else {
5325 		mpte = pmap_remove_pt_page(pmap, sva);
5326 		if (mpte != NULL) {
5327 			KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
5328 			    ("pmap_remove_pde: pte page not promoted"));
5329 			pmap_resident_count_dec(pmap, 1);
5330 			KASSERT(mpte->ref_count == NPTEPG,
5331 			    ("pmap_remove_pde: pte page ref count error"));
5332 			mpte->ref_count = 0;
5333 			pmap_add_delayed_free_list(mpte, free, FALSE);
5334 		}
5335 	}
5336 	return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
5337 }
5338 
5339 /*
5340  * pmap_remove_pte: Remove a 4KB page mapping from the given pmap.
5341  */
5342 static int
5343 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5344     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5345 {
5346 	struct md_page *pvh;
5347 	pt_entry_t oldpte, PG_A, PG_M, PG_RW;
5348 	vm_page_t m;
5349 
5350 	PG_A = pmap_accessed_bit(pmap);
5351 	PG_M = pmap_modified_bit(pmap);
5352 	PG_RW = pmap_rw_bit(pmap);
5353 
5354 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5355 	oldpte = pte_load_clear(ptq);
5356 	if (oldpte & PG_W)
5357 		pmap->pm_stats.wired_count -= 1;
5358 	pmap_resident_count_dec(pmap, 1);
5359 	if (oldpte & PG_MANAGED) {
5360 		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5361 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5362 			vm_page_dirty(m);
5363 		if (oldpte & PG_A)
5364 			vm_page_aflag_set(m, PGA_REFERENCED);
5365 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5366 		pmap_pvh_free(&m->md, pmap, va);
5367 		if (TAILQ_EMPTY(&m->md.pv_list) &&
5368 		    (m->flags & PG_FICTITIOUS) == 0) {
5369 			pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5370 			if (TAILQ_EMPTY(&pvh->pv_list))
5371 				vm_page_aflag_clear(m, PGA_WRITEABLE);
5372 		}
5373 		pmap_delayed_invl_page(m);
5374 	}
5375 	return (pmap_unuse_pt(pmap, va, ptepde, free));
5376 }
5377 
5378 /*
5379  * Remove a single page from a process address space
5380  */
5381 static void
5382 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
5383     struct spglist *free)
5384 {
5385 	struct rwlock *lock;
5386 	pt_entry_t *pte, PG_V;
5387 
5388 	PG_V = pmap_valid_bit(pmap);
5389 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5390 	if ((*pde & PG_V) == 0)
5391 		return;
5392 	pte = pmap_pde_to_pte(pde, va);
5393 	if ((*pte & PG_V) == 0)
5394 		return;
5395 	lock = NULL;
5396 	pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
5397 	if (lock != NULL)
5398 		rw_wunlock(lock);
5399 	pmap_invalidate_page(pmap, va);
5400 }
5401 
5402 /*
5403  * Removes the specified range of addresses from the page table page.
5404  */
5405 static bool
5406 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5407     pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
5408 {
5409 	pt_entry_t PG_G, *pte;
5410 	vm_offset_t va;
5411 	bool anyvalid;
5412 
5413 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5414 	PG_G = pmap_global_bit(pmap);
5415 	anyvalid = false;
5416 	va = eva;
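	/*
	 * "va" tracks the start of a run of global (PG_G) mappings, which
	 * must be invalidated here with a ranged invalidation; non-global
	 * removals only set "anyvalid", leaving invalidation to the caller.
	 */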
5417 	for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
5418 	    sva += PAGE_SIZE) {
5419 		if (*pte == 0) {
5420 			if (va != eva) {
5421 				pmap_invalidate_range(pmap, va, sva);
5422 				va = eva;
5423 			}
5424 			continue;
5425 		}
5426 		if ((*pte & PG_G) == 0)
5427 			anyvalid = true;
5428 		else if (va == eva)
5429 			va = sva;
5430 		if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
5431 			sva += PAGE_SIZE;
5432 			break;
5433 		}
5434 	}
5435 	if (va != eva)
5436 		pmap_invalidate_range(pmap, va, sva);
5437 	return (anyvalid);
5438 }
5439 
5440 /*
5441  *	Remove the given range of addresses from the specified map.
5442  *
5443  *	It is assumed that the start and end are properly
5444  *	rounded to the page size.
5445  */
5446 void
5447 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5448 {
5449 	struct rwlock *lock;
5450 	vm_offset_t va_next;
5451 	pml4_entry_t *pml4e;
5452 	pdp_entry_t *pdpe;
5453 	pd_entry_t ptpaddr, *pde;
5454 	pt_entry_t PG_G, PG_V;
5455 	struct spglist free;
5456 	int anyvalid;
5457 
5458 	PG_G = pmap_global_bit(pmap);
5459 	PG_V = pmap_valid_bit(pmap);
5460 
5461 	/*
5462 	 * Perform an unsynchronized read.  This is, however, safe.
5463 	 */
5464 	if (pmap->pm_stats.resident_count == 0)
5465 		return;
5466 
5467 	anyvalid = 0;
5468 	SLIST_INIT(&free);
5469 
5470 	pmap_delayed_invl_start();
5471 	PMAP_LOCK(pmap);
5472 	pmap_pkru_on_remove(pmap, sva, eva);
5473 
5474 	/*
5475 	 * Special handling for removing a single page.  This is a very
5476 	 * common operation, so it is worth short-circuiting the general
5477 	 * code path.
5478 	 */
5479 	if (sva + PAGE_SIZE == eva) {
5480 		pde = pmap_pde(pmap, sva);
5481 		if (pde && (*pde & PG_PS) == 0) {
5482 			pmap_remove_page(pmap, sva, pde, &free);
5483 			goto out;
5484 		}
5485 	}
5486 
5487 	lock = NULL;
5488 	for (; sva < eva; sva = va_next) {
5489 
5490 		if (pmap->pm_stats.resident_count == 0)
5491 			break;
5492 
5493 		pml4e = pmap_pml4e(pmap, sva);
5494 		if ((*pml4e & PG_V) == 0) {
5495 			va_next = (sva + NBPML4) & ~PML4MASK;
5496 			if (va_next < sva)
5497 				va_next = eva;
5498 			continue;
5499 		}
5500 
5501 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
5502 		if ((*pdpe & PG_V) == 0) {
5503 			va_next = (sva + NBPDP) & ~PDPMASK;
5504 			if (va_next < sva)
5505 				va_next = eva;
5506 			continue;
5507 		}
5508 
5509 		/*
5510 		 * Calculate index for next page table.
5511 		 */
5512 		va_next = (sva + NBPDR) & ~PDRMASK;
5513 		if (va_next < sva)
5514 			va_next = eva;
5515 
5516 		pde = pmap_pdpe_to_pde(pdpe, sva);
5517 		ptpaddr = *pde;
5518 
5519 		/*
5520 		 * Weed out invalid mappings.
5521 		 */
5522 		if (ptpaddr == 0)
5523 			continue;
5524 
5525 		/*
5526 		 * Check for large page.
5527 		 */
5528 		if ((ptpaddr & PG_PS) != 0) {
5529 			/*
5530 			 * Are we removing the entire large page?  If not,
5531 			 * demote the mapping and fall through.
5532 			 */
5533 			if (sva + NBPDR == va_next && eva >= va_next) {
5534 				/*
5535 				 * The TLB entry for a PG_G mapping is
5536 				 * invalidated by pmap_remove_pde().
5537 				 */
5538 				if ((ptpaddr & PG_G) == 0)
5539 					anyvalid = 1;
5540 				pmap_remove_pde(pmap, pde, sva, &free, &lock);
5541 				continue;
5542 			} else if (!pmap_demote_pde_locked(pmap, pde, sva,
5543 			    &lock)) {
5544 				/* The large page mapping was destroyed. */
5545 				continue;
5546 			} else
5547 				ptpaddr = *pde;
5548 		}
5549 
5550 		/*
5551 		 * Limit our scan to either the end of the va represented
5552 		 * by the current page table page, or to the end of the
5553 		 * range being removed.
5554 		 */
5555 		if (va_next > eva)
5556 			va_next = eva;
5557 
5558 		if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
5559 			anyvalid = 1;
5560 	}
5561 	if (lock != NULL)
5562 		rw_wunlock(lock);
5563 out:
5564 	if (anyvalid)
5565 		pmap_invalidate_all(pmap);
5566 	PMAP_UNLOCK(pmap);
5567 	pmap_delayed_invl_finish();
5568 	vm_page_free_pages_toq(&free, true);
5569 }
5570 
5571 /*
5572  *	Routine:	pmap_remove_all
5573  *	Function:
5574  *		Removes this physical page from
5575  *		all physical maps in which it resides.
5576  *		Reflects back modify bits to the pager.
5577  *
5578  *	Notes:
5579  *		Original versions of this routine were very
5580  *		inefficient because they iteratively called
5581  *		pmap_remove (slow...)
5582  */
5583 
5584 void
5585 pmap_remove_all(vm_page_t m)
5586 {
5587 	struct md_page *pvh;
5588 	pv_entry_t pv;
5589 	pmap_t pmap;
5590 	struct rwlock *lock;
5591 	pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
5592 	pd_entry_t *pde;
5593 	vm_offset_t va;
5594 	struct spglist free;
5595 	int pvh_gen, md_gen;
5596 
5597 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5598 	    ("pmap_remove_all: page %p is not managed", m));
5599 	SLIST_INIT(&free);
5600 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5601 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5602 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
5603 retry:
5604 	rw_wlock(lock);
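	/*
	 * First demote any 2MB page mappings of the page, so that only 4KB
	 * mappings remain to be removed below.
	 */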
5605 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
5606 		pmap = PV_PMAP(pv);
5607 		if (!PMAP_TRYLOCK(pmap)) {
5608 			pvh_gen = pvh->pv_gen;
5609 			rw_wunlock(lock);
5610 			PMAP_LOCK(pmap);
5611 			rw_wlock(lock);
5612 			if (pvh_gen != pvh->pv_gen) {
5613 				rw_wunlock(lock);
5614 				PMAP_UNLOCK(pmap);
5615 				goto retry;
5616 			}
5617 		}
5618 		va = pv->pv_va;
5619 		pde = pmap_pde(pmap, va);
5620 		(void)pmap_demote_pde_locked(pmap, pde, va, &lock);
5621 		PMAP_UNLOCK(pmap);
5622 	}
5623 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
5624 		pmap = PV_PMAP(pv);
5625 		if (!PMAP_TRYLOCK(pmap)) {
5626 			pvh_gen = pvh->pv_gen;
5627 			md_gen = m->md.pv_gen;
5628 			rw_wunlock(lock);
5629 			PMAP_LOCK(pmap);
5630 			rw_wlock(lock);
5631 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5632 				rw_wunlock(lock);
5633 				PMAP_UNLOCK(pmap);
5634 				goto retry;
5635 			}
5636 		}
5637 		PG_A = pmap_accessed_bit(pmap);
5638 		PG_M = pmap_modified_bit(pmap);
5639 		PG_RW = pmap_rw_bit(pmap);
5640 		pmap_resident_count_dec(pmap, 1);
5641 		pde = pmap_pde(pmap, pv->pv_va);
5642 		KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
5643 		    " a 2mpage in page %p's pv list", m));
5644 		pte = pmap_pde_to_pte(pde, pv->pv_va);
5645 		tpte = pte_load_clear(pte);
5646 		if (tpte & PG_W)
5647 			pmap->pm_stats.wired_count--;
5648 		if (tpte & PG_A)
5649 			vm_page_aflag_set(m, PGA_REFERENCED);
5650 
5651 		/*
5652 		 * Update the vm_page_t clean and reference bits.
5653 		 */
5654 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5655 			vm_page_dirty(m);
5656 		pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
5657 		pmap_invalidate_page(pmap, pv->pv_va);
5658 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5659 		m->md.pv_gen++;
5660 		free_pv_entry(pmap, pv);
5661 		PMAP_UNLOCK(pmap);
5662 	}
5663 	vm_page_aflag_clear(m, PGA_WRITEABLE);
5664 	rw_wunlock(lock);
5665 	pmap_delayed_invl_wait(m);
5666 	vm_page_free_pages_toq(&free, true);
5667 }
5668 
5669 /*
5670  * pmap_protect_pde: do the things to protect a 2mpage in a process
5671  */
5672 static boolean_t
5673 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
5674 {
5675 	pd_entry_t newpde, oldpde;
5676 	vm_page_t m, mt;
5677 	boolean_t anychanged;
5678 	pt_entry_t PG_G, PG_M, PG_RW;
5679 
5680 	PG_G = pmap_global_bit(pmap);
5681 	PG_M = pmap_modified_bit(pmap);
5682 	PG_RW = pmap_rw_bit(pmap);
5683 
5684 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5685 	KASSERT((sva & PDRMASK) == 0,
5686 	    ("pmap_protect_pde: sva is not 2mpage aligned"));
5687 	anychanged = FALSE;
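	/* Retry the compare-and-swap below if the PDE changes concurrently. */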
5688 retry:
5689 	oldpde = newpde = *pde;
5690 	if ((prot & VM_PROT_WRITE) == 0) {
5691 		if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
5692 		    (PG_MANAGED | PG_M | PG_RW)) {
5693 			m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
5694 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
5695 				vm_page_dirty(mt);
5696 		}
5697 		newpde &= ~(PG_RW | PG_M);
5698 	}
5699 	if ((prot & VM_PROT_EXECUTE) == 0)
5700 		newpde |= pg_nx;
5701 	if (newpde != oldpde) {
5702 		/*
5703 		 * As an optimization to future operations on this PDE, clear
5704 		 * PG_PROMOTED.  The impending invalidation will remove any
5705 		 * lingering 4KB page mappings from the TLB.
5706 		 */
5707 		if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
5708 			goto retry;
5709 		if ((oldpde & PG_G) != 0)
5710 			pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
5711 		else
5712 			anychanged = TRUE;
5713 	}
5714 	return (anychanged);
5715 }
5716 
5717 /*
5718  *	Set the physical protection on the
5719  *	specified range of this map as requested.
5720  */
5721 void
5722 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
5723 {
5724 	vm_offset_t va_next;
5725 	pml4_entry_t *pml4e;
5726 	pdp_entry_t *pdpe;
5727 	pd_entry_t ptpaddr, *pde;
5728 	pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
5729 	boolean_t anychanged;
5730 
5731 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
5732 	if (prot == VM_PROT_NONE) {
5733 		pmap_remove(pmap, sva, eva);
5734 		return;
5735 	}
5736 
5737 	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
5738 	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
5739 		return;
5740 
5741 	PG_G = pmap_global_bit(pmap);
5742 	PG_M = pmap_modified_bit(pmap);
5743 	PG_V = pmap_valid_bit(pmap);
5744 	PG_RW = pmap_rw_bit(pmap);
5745 	anychanged = FALSE;
5746 
5747 	/*
5748 	 * Although this function delays and batches the invalidation
5749 	 * of stale TLB entries, it does not need to call
5750 	 * pmap_delayed_invl_start() and
5751 	 * pmap_delayed_invl_finish(), because it does not
5752 	 * ordinarily destroy mappings.  Stale TLB entries from
5753 	 * protection-only changes need only be invalidated before the
5754 	 * pmap lock is released, because protection-only changes do
5755 	 * not destroy PV entries.  Even operations that iterate over
5756 	 * a physical page's PV list of mappings, like
5757 	 * pmap_remove_write(), acquire the pmap lock for each
5758 	 * mapping.  Consequently, for protection-only changes, the
5759 	 * pmap lock suffices to synchronize both page table and TLB
5760 	 * updates.
5761 	 *
5762 	 * This function only destroys a mapping if pmap_demote_pde()
5763 	 * fails.  In that case, stale TLB entries are immediately
5764 	 * invalidated.
5765 	 */
5766 
5767 	PMAP_LOCK(pmap);
5768 	for (; sva < eva; sva = va_next) {
5769 
5770 		pml4e = pmap_pml4e(pmap, sva);
5771 		if ((*pml4e & PG_V) == 0) {
5772 			va_next = (sva + NBPML4) & ~PML4MASK;
5773 			if (va_next < sva)
5774 				va_next = eva;
5775 			continue;
5776 		}
5777 
5778 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
5779 		if ((*pdpe & PG_V) == 0) {
5780 			va_next = (sva + NBPDP) & ~PDPMASK;
5781 			if (va_next < sva)
5782 				va_next = eva;
5783 			continue;
5784 		}
5785 
5786 		va_next = (sva + NBPDR) & ~PDRMASK;
5787 		if (va_next < sva)
5788 			va_next = eva;
5789 
5790 		pde = pmap_pdpe_to_pde(pdpe, sva);
5791 		ptpaddr = *pde;
5792 
5793 		/*
5794 		 * Weed out invalid mappings.
5795 		 */
5796 		if (ptpaddr == 0)
5797 			continue;
5798 
5799 		/*
5800 		 * Check for large page.
5801 		 */
5802 		if ((ptpaddr & PG_PS) != 0) {
5803 			/*
5804 			 * Are we protecting the entire large page?  If not,
5805 			 * demote the mapping and fall through.
5806 			 */
5807 			if (sva + NBPDR == va_next && eva >= va_next) {
5808 				/*
5809 				 * The TLB entry for a PG_G mapping is
5810 				 * invalidated by pmap_protect_pde().
5811 				 */
5812 				if (pmap_protect_pde(pmap, pde, sva, prot))
5813 					anychanged = TRUE;
5814 				continue;
5815 			} else if (!pmap_demote_pde(pmap, pde, sva)) {
5816 				/*
5817 				 * The large page mapping was destroyed.
5818 				 */
5819 				continue;
5820 			}
5821 		}
5822 
5823 		if (va_next > eva)
5824 			va_next = eva;
5825 
5826 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
5827 		    sva += PAGE_SIZE) {
5828 			pt_entry_t obits, pbits;
5829 			vm_page_t m;
5830 
5831 retry:
5832 			obits = pbits = *pte;
5833 			if ((pbits & PG_V) == 0)
5834 				continue;
5835 
5836 			if ((prot & VM_PROT_WRITE) == 0) {
5837 				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
5838 				    (PG_MANAGED | PG_M | PG_RW)) {
5839 					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
5840 					vm_page_dirty(m);
5841 				}
5842 				pbits &= ~(PG_RW | PG_M);
5843 			}
5844 			if ((prot & VM_PROT_EXECUTE) == 0)
5845 				pbits |= pg_nx;
5846 
5847 			if (pbits != obits) {
5848 				if (!atomic_cmpset_long(pte, obits, pbits))
5849 					goto retry;
5850 				if (obits & PG_G)
5851 					pmap_invalidate_page(pmap, sva);
5852 				else
5853 					anychanged = TRUE;
5854 			}
5855 		}
5856 	}
5857 	if (anychanged)
5858 		pmap_invalidate_all(pmap);
5859 	PMAP_UNLOCK(pmap);
5860 }
5861 
5862 #if VM_NRESERVLEVEL > 0
5863 static bool
5864 pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
5865 {
5866 
5867 	if (pmap->pm_type != PT_EPT)
5868 		return (false);
5869 	return ((pde & EPT_PG_EXECUTE) != 0);
5870 }
5871 
5872 /*
5873  * Tries to promote the 512, contiguous 4KB page mappings that are within a
5874  * single page table page (PTP) to a single 2MB page mapping.  For promotion
5875  * to occur, two conditions must be met: (1) the 4KB page mappings must map
5876  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
5877  * identical characteristics.
5878  */
5879 static void
5880 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
5881     struct rwlock **lockp)
5882 {
5883 	pd_entry_t newpde;
5884 	pt_entry_t *firstpte, oldpte, pa, *pte;
5885 	pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V, PG_PKU_MASK;
5886 	vm_page_t mpte;
5887 	int PG_PTE_CACHE;
5888 
5889 	PG_A = pmap_accessed_bit(pmap);
5890 	PG_G = pmap_global_bit(pmap);
5891 	PG_M = pmap_modified_bit(pmap);
5892 	PG_V = pmap_valid_bit(pmap);
5893 	PG_RW = pmap_rw_bit(pmap);
5894 	PG_PKU_MASK = pmap_pku_mask_bit(pmap);
5895 	PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
5896 
5897 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5898 
5899 	/*
5900 	 * Examine the first PTE in the specified PTP.  Abort if this PTE is
5901 	 * either invalid, unused, or does not map the first 4KB physical page
5902 	 * within a 2MB page.
5903 	 */
5904 	firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
5905 setpde:
5906 	newpde = *firstpte;
5907 	if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V) ||
5908 	    !pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
5909 	    newpde))) {
5910 		atomic_add_long(&pmap_pde_p_failures, 1);
5911 		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
5912 		    " in pmap %p", va, pmap);
5913 		return;
5914 	}
5915 	if ((newpde & (PG_M | PG_RW)) == PG_RW) {
5916 		/*
5917 		 * When PG_M is already clear, PG_RW can be cleared without
5918 		 * a TLB invalidation.
5919 		 */
5920 		if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
5921 			goto setpde;
5922 		newpde &= ~PG_RW;
5923 	}
5924 
5925 	/*
5926 	 * Examine each of the other PTEs in the specified PTP.  Abort if this
5927 	 * PTE maps an unexpected 4KB physical page or does not have identical
5928 	 * characteristics to the first PTE.
5929 	 */
5930 	pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
5931 	for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
5932 setpte:
5933 		oldpte = *pte;
5934 		if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
5935 			atomic_add_long(&pmap_pde_p_failures, 1);
5936 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
5937 			    " in pmap %p", va, pmap);
5938 			return;
5939 		}
5940 		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
5941 			/*
5942 			 * When PG_M is already clear, PG_RW can be cleared
5943 			 * without a TLB invalidation.
5944 			 */
5945 			if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
5946 				goto setpte;
5947 			oldpte &= ~PG_RW;
5948 			CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
5949 			    " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
5950 			    (va & ~PDRMASK), pmap);
5951 		}
5952 		if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
5953 			atomic_add_long(&pmap_pde_p_failures, 1);
5954 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
5955 			    " in pmap %p", va, pmap);
5956 			return;
5957 		}
5958 		pa -= PAGE_SIZE;
5959 	}
5960 
5961 	/*
5962 	 * Save the page table page in its current state until the PDE
5963 	 * mapping the superpage is demoted by pmap_demote_pde() or
5964 	 * destroyed by pmap_remove_pde().
5965 	 */
5966 	mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
5967 	KASSERT(mpte >= vm_page_array &&
5968 	    mpte < &vm_page_array[vm_page_array_size],
5969 	    ("pmap_promote_pde: page table page is out of range"));
5970 	KASSERT(mpte->pindex == pmap_pde_pindex(va),
5971 	    ("pmap_promote_pde: page table page's pindex is wrong"));
5972 	if (pmap_insert_pt_page(pmap, mpte, true)) {
5973 		atomic_add_long(&pmap_pde_p_failures, 1);
5974 		CTR2(KTR_PMAP,
5975 		    "pmap_promote_pde: failure for va %#lx in pmap %p", va,
5976 		    pmap);
5977 		return;
5978 	}
5979 
5980 	/*
5981 	 * Promote the pv entries.
5982 	 */
5983 	if ((newpde & PG_MANAGED) != 0)
5984 		pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
5985 
5986 	/*
5987 	 * Propagate the PAT index to its proper position.
5988 	 */
5989 	newpde = pmap_swap_pat(pmap, newpde);
5990 
5991 	/*
5992 	 * Map the superpage.
5993 	 */
5994 	if (workaround_erratum383)
5995 		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
5996 	else
5997 		pde_store(pde, PG_PROMOTED | PG_PS | newpde);
5998 
5999 	atomic_add_long(&pmap_pde_promotions, 1);
6000 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
6001 	    " in pmap %p", va, pmap);
6002 }
6003 #endif /* VM_NRESERVLEVEL > 0 */
6004 
6005 /*
6006  *	Insert the given physical page (p) at
6007  *	the specified virtual address (v) in the
6008  *	target physical map with the protection requested.
6009  *
6010  *	If specified, the page will be wired down, meaning
6011  *	that the related pte can not be reclaimed.
6012  *
6013  *	NB:  This is the only routine which MAY NOT lazy-evaluate
6014  *	or lose information.  That is, this routine must actually
6015  *	insert this page into the given map NOW.
6016  *
6017  *	When destroying both a page table and PV entry, this function
6018  *	performs the TLB invalidation before releasing the PV list
6019  *	lock, so we do not need pmap_delayed_invl_page() calls here.
6020  */
6021 int
6022 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
6023     u_int flags, int8_t psind)
6024 {
6025 	struct rwlock *lock;
6026 	pd_entry_t *pde;
6027 	pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
6028 	pt_entry_t newpte, origpte;
6029 	pv_entry_t pv;
6030 	vm_paddr_t opa, pa;
6031 	vm_page_t mpte, om;
6032 	int rv;
6033 	boolean_t nosleep;
6034 
6035 	PG_A = pmap_accessed_bit(pmap);
6036 	PG_G = pmap_global_bit(pmap);
6037 	PG_M = pmap_modified_bit(pmap);
6038 	PG_V = pmap_valid_bit(pmap);
6039 	PG_RW = pmap_rw_bit(pmap);
6040 
6041 	va = trunc_page(va);
6042 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
6043 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
6044 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
6045 	    va));
6046 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
6047 	    va >= kmi.clean_eva,
6048 	    ("pmap_enter: managed mapping within the clean submap"));
6049 	if ((m->oflags & VPO_UNMANAGED) == 0)
6050 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
6051 	KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
6052 	    ("pmap_enter: flags %u has reserved bits set", flags));
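	/*
	 * Construct the new PTE from the page's physical address, the
	 * requested protection and flags, and the page's cache attributes.
	 */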
6053 	pa = VM_PAGE_TO_PHYS(m);
6054 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
6055 	if ((flags & VM_PROT_WRITE) != 0)
6056 		newpte |= PG_M;
6057 	if ((prot & VM_PROT_WRITE) != 0)
6058 		newpte |= PG_RW;
6059 	KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
6060 	    ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
6061 	if ((prot & VM_PROT_EXECUTE) == 0)
6062 		newpte |= pg_nx;
6063 	if ((flags & PMAP_ENTER_WIRED) != 0)
6064 		newpte |= PG_W;
6065 	if (va < VM_MAXUSER_ADDRESS)
6066 		newpte |= PG_U;
6067 	if (pmap == kernel_pmap)
6068 		newpte |= PG_G;
6069 	newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
6070 
6071 	/*
6072 	 * Set modified bit gratuitously for writeable mappings if
6073 	 * the page is unmanaged. We do not want to take a fault
6074 	 * to do the dirty bit accounting for these mappings.
6075 	 */
6076 	if ((m->oflags & VPO_UNMANAGED) != 0) {
6077 		if ((newpte & PG_RW) != 0)
6078 			newpte |= PG_M;
6079 	} else
6080 		newpte |= PG_MANAGED;
6081 
6082 	lock = NULL;
6083 	PMAP_LOCK(pmap);
6084 	if (psind == 1) {
6085 		/* Assert the required virtual and physical alignment. */
6086 		KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
6087 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
6088 		rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
6089 		goto out;
6090 	}
6091 	mpte = NULL;
6092 
6093 	/*
6094 	 * In the case that a page table page is not
6095 	 * resident, we are creating it here.
6096 	 */
6097 retry:
6098 	pde = pmap_pde(pmap, va);
6099 	if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
6100 	    pmap_demote_pde_locked(pmap, pde, va, &lock))) {
6101 		pte = pmap_pde_to_pte(pde, va);
6102 		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
6103 			mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
6104 			mpte->ref_count++;
6105 		}
6106 	} else if (va < VM_MAXUSER_ADDRESS) {
6107 		/*
6108 		 * Here if the pte page isn't mapped, or if it has been
6109 		 * deallocated.
6110 		 */
6111 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
6112 		mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
6113 		    nosleep ? NULL : &lock);
6114 		if (mpte == NULL && nosleep) {
6115 			rv = KERN_RESOURCE_SHORTAGE;
6116 			goto out;
6117 		}
6118 		goto retry;
6119 	} else
6120 		panic("pmap_enter: invalid page directory va=%#lx", va);
6121 
6122 	origpte = *pte;
6123 	pv = NULL;
6124 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86)
6125 		newpte |= pmap_pkru_get(pmap, va);
6126 
6127 	/*
6128 	 * Is the specified virtual address already mapped?
6129 	 */
6130 	if ((origpte & PG_V) != 0) {
6131 		/*
6132 		 * Wiring change, just update stats. We don't worry about
6133 		 * wiring PT pages as they remain resident as long as there
6134 		 * are valid mappings in them. Hence, if a user page is wired,
6135 		 * the PT page will be also.
6136 		 */
6137 		if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
6138 			pmap->pm_stats.wired_count++;
6139 		else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
6140 			pmap->pm_stats.wired_count--;
6141 
6142 		/*
6143 		 * Remove the extra PT page reference.
6144 		 */
6145 		if (mpte != NULL) {
6146 			mpte->ref_count--;
6147 			KASSERT(mpte->ref_count > 0,
6148 			    ("pmap_enter: missing reference to page table page,"
6149 			     " va: 0x%lx", va));
6150 		}
6151 
6152 		/*
6153 		 * Has the physical page changed?
6154 		 */
6155 		opa = origpte & PG_FRAME;
6156 		if (opa == pa) {
6157 			/*
6158 			 * No, might be a protection or wiring change.
6159 			 */
6160 			if ((origpte & PG_MANAGED) != 0 &&
6161 			    (newpte & PG_RW) != 0)
6162 				vm_page_aflag_set(m, PGA_WRITEABLE);
6163 			if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
6164 				goto unchanged;
6165 			goto validate;
6166 		}
6167 
6168 		/*
6169 		 * The physical page has changed.  Temporarily invalidate
6170 		 * the mapping.  This ensures that all threads sharing the
6171 		 * pmap keep a consistent view of the mapping, which is
6172 		 * necessary for the correct handling of COW faults.  It
6173 		 * also permits reuse of the old mapping's PV entry,
6174 		 * avoiding an allocation.
6175 		 *
6176 		 * For consistency, handle unmanaged mappings the same way.
6177 		 */
6178 		origpte = pte_load_clear(pte);
6179 		KASSERT((origpte & PG_FRAME) == opa,
6180 		    ("pmap_enter: unexpected pa update for %#lx", va));
6181 		if ((origpte & PG_MANAGED) != 0) {
6182 			om = PHYS_TO_VM_PAGE(opa);
6183 
6184 			/*
6185 			 * The pmap lock is sufficient to synchronize with
6186 			 * concurrent calls to pmap_page_test_mappings() and
6187 			 * pmap_ts_referenced().
6188 			 */
6189 			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6190 				vm_page_dirty(om);
6191 			if ((origpte & PG_A) != 0) {
6192 				pmap_invalidate_page(pmap, va);
6193 				vm_page_aflag_set(om, PGA_REFERENCED);
6194 			}
6195 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
6196 			pv = pmap_pvh_remove(&om->md, pmap, va);
6197 			KASSERT(pv != NULL,
6198 			    ("pmap_enter: no PV entry for %#lx", va));
6199 			if ((newpte & PG_MANAGED) == 0)
6200 				free_pv_entry(pmap, pv);
6201 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
6202 			    TAILQ_EMPTY(&om->md.pv_list) &&
6203 			    ((om->flags & PG_FICTITIOUS) != 0 ||
6204 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
6205 				vm_page_aflag_clear(om, PGA_WRITEABLE);
6206 		} else {
6207 			/*
6208 			 * Since this mapping is unmanaged, assume that PG_A
6209 			 * is set.
6210 			 */
6211 			pmap_invalidate_page(pmap, va);
6212 		}
6213 		origpte = 0;
6214 	} else {
6215 		/*
6216 		 * Increment the counters.
6217 		 */
6218 		if ((newpte & PG_W) != 0)
6219 			pmap->pm_stats.wired_count++;
6220 		pmap_resident_count_inc(pmap, 1);
6221 	}
6222 
6223 	/*
6224 	 * Enter on the PV list if part of our managed memory.
6225 	 */
6226 	if ((newpte & PG_MANAGED) != 0) {
6227 		if (pv == NULL) {
6228 			pv = get_pv_entry(pmap, &lock);
6229 			pv->pv_va = va;
6230 		}
6231 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
6232 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
6233 		m->md.pv_gen++;
6234 		if ((newpte & PG_RW) != 0)
6235 			vm_page_aflag_set(m, PGA_WRITEABLE);
6236 	}
6237 
6238 	/*
6239 	 * Update the PTE.
6240 	 */
6241 	if ((origpte & PG_V) != 0) {
6242 validate:
6243 		origpte = pte_load_store(pte, newpte);
6244 		KASSERT((origpte & PG_FRAME) == pa,
6245 		    ("pmap_enter: unexpected pa update for %#lx", va));
6246 		if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
6247 		    (PG_M | PG_RW)) {
6248 			if ((origpte & PG_MANAGED) != 0)
6249 				vm_page_dirty(m);
6250 
6251 			/*
6252 			 * Although the PTE may still have PG_RW set, TLB
6253 			 * invalidation may nonetheless be required because
6254 			 * the PTE no longer has PG_M set.
6255 			 */
6256 		} else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
6257 			/*
6258 			 * This PTE change does not require TLB invalidation.
6259 			 */
6260 			goto unchanged;
6261 		}
6262 		if ((origpte & PG_A) != 0)
6263 			pmap_invalidate_page(pmap, va);
6264 	} else
6265 		pte_store(pte, newpte);
6266 
6267 unchanged:
6268 
6269 #if VM_NRESERVLEVEL > 0
6270 	/*
6271 	 * If both the page table page and the reservation are fully
6272 	 * populated, then attempt promotion.
6273 	 */
6274 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
6275 	    pmap_ps_enabled(pmap) &&
6276 	    (m->flags & PG_FICTITIOUS) == 0 &&
6277 	    vm_reserv_level_iffullpop(m) == 0)
6278 		pmap_promote_pde(pmap, pde, va, &lock);
6279 #endif
6280 
6281 	rv = KERN_SUCCESS;
6282 out:
6283 	if (lock != NULL)
6284 		rw_wunlock(lock);
6285 	PMAP_UNLOCK(pmap);
6286 	return (rv);
6287 }
6288 
6289 /*
6290  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
6291  * if successful.  Returns false if (1) a page table page cannot be allocated
6292  * without sleeping, (2) a mapping already exists at the specified virtual
6293  * address, or (3) a PV entry cannot be allocated without reclaiming another
6294  * PV entry.
6295  */
6296 static bool
6297 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
6298     struct rwlock **lockp)
6299 {
6300 	pd_entry_t newpde;
6301 	pt_entry_t PG_V;
6302 
6303 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6304 	PG_V = pmap_valid_bit(pmap);
6305 	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
6306 	    PG_PS | PG_V;
6307 	if ((m->oflags & VPO_UNMANAGED) == 0)
6308 		newpde |= PG_MANAGED;
6309 	if ((prot & VM_PROT_EXECUTE) == 0)
6310 		newpde |= pg_nx;
6311 	if (va < VM_MAXUSER_ADDRESS)
6312 		newpde |= PG_U;
6313 	return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
6314 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
6315 	    KERN_SUCCESS);
6316 }
6317 
6318 /*
6319  * Returns true if every page table entry in the specified page table page is
6320  * zero.
6321  */
6322 static bool
6323 pmap_every_pte_zero(vm_paddr_t pa)
6324 {
6325 	pt_entry_t *pt_end, *pte;
6326 
6327 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
6328 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
6329 	for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
6330 		if (*pte != 0)
6331 			return (false);
6332 	}
6333 	return (true);
6334 }
6335 
6336 /*
6337  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
6338  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
6339  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
6340  * a mapping already exists at the specified virtual address.  Returns
6341  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
6342  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
6343  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
6344  *
6345  * The parameter "m" is only used when creating a managed, writeable mapping.
6346  */
6347 static int
6348 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
6349     vm_page_t m, struct rwlock **lockp)
6350 {
6351 	struct spglist free;
6352 	pd_entry_t oldpde, *pde;
6353 	pt_entry_t PG_G, PG_RW, PG_V;
6354 	vm_page_t mt, pdpg;
6355 
6356 	KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0,
6357 	    ("pmap_enter_pde: cannot create wired user mapping"));
6358 	PG_G = pmap_global_bit(pmap);
6359 	PG_RW = pmap_rw_bit(pmap);
6360 	KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
6361 	    ("pmap_enter_pde: newpde is missing PG_M"));
6362 	PG_V = pmap_valid_bit(pmap);
6363 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6364 
6365 	if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
6366 	    newpde))) {
6367 		CTR2(KTR_PMAP, "pmap_enter_pde: 2m x blocked for va %#lx"
6368 		    " in pmap %p", va, pmap);
6369 		return (KERN_FAILURE);
6370 	}
6371 	if ((pde = pmap_alloc_pde(pmap, va, &pdpg, (flags &
6372 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
6373 		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
6374 		    " in pmap %p", va, pmap);
6375 		return (KERN_RESOURCE_SHORTAGE);
6376 	}
6377 
6378 	/*
6379 	 * If pkru is not same for the whole pde range, return failure
6380 	 * and let vm_fault() cope.  Check after pde allocation, since
6381 	 * it could sleep.
6382 	 */
6383 	if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
6384 		pmap_abort_ptp(pmap, va, pdpg);
6385 		return (KERN_FAILURE);
6386 	}
6387 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
6388 		newpde &= ~X86_PG_PKU_MASK;
6389 		newpde |= pmap_pkru_get(pmap, va);
6390 	}
6391 
6392 	/*
6393 	 * If there are existing mappings, either abort or remove them.
6394 	 */
6395 	oldpde = *pde;
6396 	if ((oldpde & PG_V) != 0) {
6397 		KASSERT(pdpg == NULL || pdpg->ref_count > 1,
6398 		    ("pmap_enter_pde: pdpg's reference count is too low"));
6399 		if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
6400 		    VM_MAXUSER_ADDRESS || (oldpde & PG_PS) != 0 ||
6401 		    !pmap_every_pte_zero(oldpde & PG_FRAME))) {
6402 			if (pdpg != NULL)
6403 				pdpg->ref_count--;
6404 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
6405 			    " in pmap %p", va, pmap);
6406 			return (KERN_FAILURE);
6407 		}
6408 		/* Break the existing mapping(s). */
6409 		SLIST_INIT(&free);
6410 		if ((oldpde & PG_PS) != 0) {
6411 			/*
6412 			 * The reference to the PD page that was acquired by
6413 			 * pmap_alloc_pde() ensures that it won't be freed.
6414 			 * However, if the PDE resulted from a promotion, then
6415 			 * a reserved PT page could be freed.
6416 			 */
6417 			(void)pmap_remove_pde(pmap, pde, va, &free, lockp);
6418 			if ((oldpde & PG_G) == 0)
6419 				pmap_invalidate_pde_page(pmap, va, oldpde);
6420 		} else {
6421 			pmap_delayed_invl_start();
6422 			if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
6423 			    lockp))
6424 				pmap_invalidate_all(pmap);
6425 			pmap_delayed_invl_finish();
6426 		}
6427 		if (va < VM_MAXUSER_ADDRESS) {
6428 			vm_page_free_pages_toq(&free, true);
6429 			KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
6430 			    pde));
6431 		} else {
6432 			KASSERT(SLIST_EMPTY(&free),
6433 			    ("pmap_enter_pde: freed kernel page table page"));
6434 
6435 			/*
6436 			 * Both pmap_remove_pde() and pmap_remove_ptes() will
6437 			 * leave the kernel page table page zero filled.
6438 			 */
6439 			mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
6440 			if (pmap_insert_pt_page(pmap, mt, false))
6441 				panic("pmap_enter_pde: trie insert failed");
6442 		}
6443 	}
6444 
6445 	if ((newpde & PG_MANAGED) != 0) {
6446 		/*
6447 		 * Abort this mapping if its PV entry could not be created.
6448 		 */
6449 		if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
6450 			if (pdpg != NULL)
6451 				pmap_abort_ptp(pmap, va, pdpg);
6452 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
6453 			    " in pmap %p", va, pmap);
6454 			return (KERN_RESOURCE_SHORTAGE);
6455 		}
6456 		if ((newpde & PG_RW) != 0) {
6457 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
6458 				vm_page_aflag_set(mt, PGA_WRITEABLE);
6459 		}
6460 	}
6461 
6462 	/*
6463 	 * Increment counters.
6464 	 */
6465 	if ((newpde & PG_W) != 0)
6466 		pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
6467 	pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
6468 
6469 	/*
6470 	 * Map the superpage.  (This is not a promoted mapping; there will not
6471 	 * be any lingering 4KB page mappings in the TLB.)
6472 	 */
6473 	pde_store(pde, newpde);
6474 
6475 	atomic_add_long(&pmap_pde_mappings, 1);
6476 	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
6477 	    va, pmap);
6478 	return (KERN_SUCCESS);
6479 }
6480 
6481 /*
6482  * Maps a sequence of resident pages belonging to the same object.
6483  * The sequence begins with the given page m_start.  This page is
6484  * mapped at the given virtual address start.  Each subsequent page is
6485  * mapped at a virtual address that is offset from start by the same
6486  * amount as the page is offset from m_start within the object.  The
6487  * last page in the sequence is the page with the largest offset from
6488  * m_start that can be mapped at a virtual address less than the given
6489  * virtual address end.  Not every virtual page between start and end
6490  * is mapped; only those for which a resident page exists with the
6491  * corresponding offset from m_start are mapped.
6492  */
6493 void
6494 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
6495     vm_page_t m_start, vm_prot_t prot)
6496 {
6497 	struct rwlock *lock;
6498 	vm_offset_t va;
6499 	vm_page_t m, mpte;
6500 	vm_pindex_t diff, psize;
6501 
6502 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
6503 
6504 	psize = atop(end - start);
6505 	mpte = NULL;
6506 	m = m_start;
6507 	lock = NULL;
6508 	PMAP_LOCK(pmap);
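	/*
	 * For each resident page, create a 2MB mapping when the address
	 * alignment, the page's size index, and the pmap's policy allow it;
	 * otherwise fall back to a 4KB mapping.
	 */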
6509 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
6510 		va = start + ptoa(diff);
6511 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
6512 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
6513 		    pmap_allow_2m_x_page(pmap, (prot & VM_PROT_EXECUTE) != 0) &&
6514 		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
6515 			m = &m[NBPDR / PAGE_SIZE - 1];
6516 		else
6517 			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
6518 			    mpte, &lock);
6519 		m = TAILQ_NEXT(m, listq);
6520 	}
6521 	if (lock != NULL)
6522 		rw_wunlock(lock);
6523 	PMAP_UNLOCK(pmap);
6524 }
6525 
6526 /*
6527  * This code makes some *MAJOR* assumptions:
6528  * 1. The current pmap and the given pmap exist.
6529  * 2. Not wired.
6530  * 3. Read access.
6531  * 4. No page table pages.
6532  * It is, however, *MUCH* faster than pmap_enter...
6533  */
6534 
6535 void
6536 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
6537 {
6538 	struct rwlock *lock;
6539 
6540 	lock = NULL;
6541 	PMAP_LOCK(pmap);
6542 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
6543 	if (lock != NULL)
6544 		rw_wunlock(lock);
6545 	PMAP_UNLOCK(pmap);
6546 }
6547 
6548 static vm_page_t
6549 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
6550     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
6551 {
6552 	pt_entry_t newpte, *pte, PG_V;
6553 
6554 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
6555 	    (m->oflags & VPO_UNMANAGED) != 0,
6556 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
6557 	PG_V = pmap_valid_bit(pmap);
6558 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6559 
6560 	/*
6561 	 * In the case that a page table page is not
6562 	 * resident, we are creating it here.
6563 	 */
6564 	if (va < VM_MAXUSER_ADDRESS) {
6565 		vm_pindex_t ptepindex;
6566 		pd_entry_t *ptepa;
6567 
6568 		/*
6569 		 * Calculate pagetable page index
6570 		 */
6571 		ptepindex = pmap_pde_pindex(va);
6572 		if (mpte && (mpte->pindex == ptepindex)) {
6573 			mpte->ref_count++;
6574 		} else {
6575 			/*
6576 			 * Get the page directory entry
6577 			 */
6578 			ptepa = pmap_pde(pmap, va);
6579 
6580 			/*
6581 			 * If the page table page is mapped, we just increment
6582 			 * the hold count, and activate it.  Otherwise, we
6583 			 * attempt to allocate a page table page.  If this
6584 			 * attempt fails, we don't retry.  Instead, we give up.
6585 			 */
6586 			if (ptepa && (*ptepa & PG_V) != 0) {
6587 				if (*ptepa & PG_PS)
6588 					return (NULL);
6589 				mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
6590 				mpte->ref_count++;
6591 			} else {
6592 				/*
6593 				 * Pass NULL instead of the PV list lock
6594 				 * pointer, because we don't intend to sleep.
6595 				 */
6596 				mpte = _pmap_allocpte(pmap, ptepindex, NULL);
6597 				if (mpte == NULL)
6598 					return (mpte);
6599 			}
6600 		}
6601 		pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
6602 		pte = &pte[pmap_pte_index(va)];
6603 	} else {
6604 		mpte = NULL;
6605 		pte = vtopte(va);
6606 	}
6607 	if (*pte) {
6608 		if (mpte != NULL)
6609 			mpte->ref_count--;
6610 		return (NULL);
6611 	}
6612 
6613 	/*
6614 	 * Enter on the PV list if part of our managed memory.
6615 	 */
6616 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
6617 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
6618 		if (mpte != NULL)
6619 			pmap_abort_ptp(pmap, va, mpte);
6620 		return (NULL);
6621 	}
6622 
6623 	/*
6624 	 * Increment counters
6625 	 */
6626 	pmap_resident_count_inc(pmap, 1);
6627 
6628 	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
6629 	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
6630 	if ((m->oflags & VPO_UNMANAGED) == 0)
6631 		newpte |= PG_MANAGED;
6632 	if ((prot & VM_PROT_EXECUTE) == 0)
6633 		newpte |= pg_nx;
6634 	if (va < VM_MAXUSER_ADDRESS)
6635 		newpte |= PG_U | pmap_pkru_get(pmap, va);
6636 	pte_store(pte, newpte);
6637 	return (mpte);
6638 }
6639 
6640 /*
6641  * Make a temporary mapping for a physical address.  This is only intended
6642  * to be used for panic dumps.
6643  */
6644 void *
6645 pmap_kenter_temporary(vm_paddr_t pa, int i)
6646 {
6647 	vm_offset_t va;
6648 
6649 	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
6650 	pmap_kenter(va, pa);
6651 	invlpg(va);
6652 	return ((void *)crashdumpmap);
6653 }
6654 
6655 /*
6656  * This code maps large physical mmap regions into the
6657  * processor address space.  Note that some shortcuts
6658  * are taken, but the code works.
6659  */
6660 void
6661 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
6662     vm_pindex_t pindex, vm_size_t size)
6663 {
6664 	pd_entry_t *pde;
6665 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
6666 	vm_paddr_t pa, ptepa;
6667 	vm_page_t p, pdpg;
6668 	int pat_mode;
6669 
6670 	PG_A = pmap_accessed_bit(pmap);
6671 	PG_M = pmap_modified_bit(pmap);
6672 	PG_V = pmap_valid_bit(pmap);
6673 	PG_RW = pmap_rw_bit(pmap);
6674 
6675 	VM_OBJECT_ASSERT_WLOCKED(object);
6676 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
6677 	    ("pmap_object_init_pt: non-device object"));
6678 	if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
6679 		if (!pmap_ps_enabled(pmap))
6680 			return;
6681 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
6682 			return;
6683 		p = vm_page_lookup(object, pindex);
6684 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
6685 		    ("pmap_object_init_pt: invalid page %p", p));
6686 		pat_mode = p->md.pat_mode;
6687 
6688 		/*
6689 		 * Abort the mapping if the first page is not physically
6690 		 * aligned to a 2MB page boundary.
6691 		 */
6692 		ptepa = VM_PAGE_TO_PHYS(p);
6693 		if (ptepa & (NBPDR - 1))
6694 			return;
6695 
6696 		/*
6697 		 * Skip the first page.  Abort the mapping if the rest of
6698 		 * the pages are not physically contiguous or have differing
6699 		 * memory attributes.
6700 		 */
6701 		p = TAILQ_NEXT(p, listq);
6702 		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
6703 		    pa += PAGE_SIZE) {
6704 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
6705 			    ("pmap_object_init_pt: invalid page %p", p));
6706 			if (pa != VM_PAGE_TO_PHYS(p) ||
6707 			    pat_mode != p->md.pat_mode)
6708 				return;
6709 			p = TAILQ_NEXT(p, listq);
6710 		}
6711 
6712 		/*
6713 		 * Map using 2MB pages.  Since "ptepa" is 2M aligned and
6714 		 * "size" is a multiple of 2M, adding the PAT setting to "pa"
6715 		 * will not affect the termination of this loop.
6716 		 */
6717 		PMAP_LOCK(pmap);
6718 		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
6719 		    pa < ptepa + size; pa += NBPDR) {
6720 			pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL);
6721 			if (pde == NULL) {
6722 				/*
6723 				 * The creation of mappings below is only an
6724 				 * optimization.  If a page directory page
6725 				 * cannot be allocated without blocking,
6726 				 * continue on to the next mapping rather than
6727 				 * blocking.
6728 				 */
6729 				addr += NBPDR;
6730 				continue;
6731 			}
6732 			if ((*pde & PG_V) == 0) {
6733 				pde_store(pde, pa | PG_PS | PG_M | PG_A |
6734 				    PG_U | PG_RW | PG_V);
6735 				pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
6736 				atomic_add_long(&pmap_pde_mappings, 1);
6737 			} else {
6738 				/* Continue on if the PDE is already valid. */
6739 				pdpg->ref_count--;
6740 				KASSERT(pdpg->ref_count > 0,
6741 				    ("pmap_object_init_pt: missing reference "
6742 				    "to page directory page, va: 0x%lx", addr));
6743 			}
6744 			addr += NBPDR;
6745 		}
6746 		PMAP_UNLOCK(pmap);
6747 	}
6748 }
6749 
6750 /*
6751  *	Clear the wired attribute from the mappings for the specified range of
6752  *	addresses in the given pmap.  Every valid mapping within that range
6753  *	must have the wired attribute set.  In contrast, invalid mappings
6754  *	cannot have the wired attribute set, so they are ignored.
6755  *
6756  *	The wired attribute of the page table entry is not a hardware
6757  *	feature, so there is no need to invalidate any TLB entries.
6758  *	Since pmap_demote_pde() for the wired entry must never fail,
6759  *	pmap_delayed_invl_start()/finish() calls around the
6760  *	function are not needed.
6761  */
6762 void
6763 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6764 {
6765 	vm_offset_t va_next;
6766 	pml4_entry_t *pml4e;
6767 	pdp_entry_t *pdpe;
6768 	pd_entry_t *pde;
6769 	pt_entry_t *pte, PG_V;
6770 
6771 	PG_V = pmap_valid_bit(pmap);
6772 	PMAP_LOCK(pmap);
6773 	for (; sva < eva; sva = va_next) {
6774 		pml4e = pmap_pml4e(pmap, sva);
6775 		if ((*pml4e & PG_V) == 0) {
6776 			va_next = (sva + NBPML4) & ~PML4MASK;
6777 			if (va_next < sva)
6778 				va_next = eva;
6779 			continue;
6780 		}
6781 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6782 		if ((*pdpe & PG_V) == 0) {
6783 			va_next = (sva + NBPDP) & ~PDPMASK;
6784 			if (va_next < sva)
6785 				va_next = eva;
6786 			continue;
6787 		}
6788 		va_next = (sva + NBPDR) & ~PDRMASK;
6789 		if (va_next < sva)
6790 			va_next = eva;
6791 		pde = pmap_pdpe_to_pde(pdpe, sva);
6792 		if ((*pde & PG_V) == 0)
6793 			continue;
6794 		if ((*pde & PG_PS) != 0) {
6795 			if ((*pde & PG_W) == 0)
6796 				panic("pmap_unwire: pde %#jx is missing PG_W",
6797 				    (uintmax_t)*pde);
6798 
6799 			/*
6800 			 * Are we unwiring the entire large page?  If not,
6801 			 * demote the mapping and fall through.
6802 			 */
6803 			if (sva + NBPDR == va_next && eva >= va_next) {
6804 				atomic_clear_long(pde, PG_W);
6805 				pmap->pm_stats.wired_count -= NBPDR /
6806 				    PAGE_SIZE;
6807 				continue;
6808 			} else if (!pmap_demote_pde(pmap, pde, sva))
6809 				panic("pmap_unwire: demotion failed");
6810 		}
6811 		if (va_next > eva)
6812 			va_next = eva;
6813 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
6814 		    sva += PAGE_SIZE) {
6815 			if ((*pte & PG_V) == 0)
6816 				continue;
6817 			if ((*pte & PG_W) == 0)
6818 				panic("pmap_unwire: pte %#jx is missing PG_W",
6819 				    (uintmax_t)*pte);
6820 
6821 			/*
6822 			 * PG_W must be cleared atomically.  Although the pmap
6823 			 * lock synchronizes access to PG_W, another processor
6824 			 * could be setting PG_M and/or PG_A concurrently.
6825 			 */
6826 			atomic_clear_long(pte, PG_W);
6827 			pmap->pm_stats.wired_count--;
6828 		}
6829 	}
6830 	PMAP_UNLOCK(pmap);
6831 }
6832 
6833 /*
6834  *	Copy the range specified by src_addr/len
6835  *	from the source map to the range dst_addr/len
6836  *	in the destination map.
6837  *
6838  *	This routine is only advisory and need not do anything.
6839  */
6840 void
6841 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
6842     vm_offset_t src_addr)
6843 {
6844 	struct rwlock *lock;
6845 	pml4_entry_t *pml4e;
6846 	pdp_entry_t *pdpe;
6847 	pd_entry_t *pde, srcptepaddr;
6848 	pt_entry_t *dst_pte, PG_A, PG_M, PG_V, ptetemp, *src_pte;
6849 	vm_offset_t addr, end_addr, va_next;
6850 	vm_page_t dst_pdpg, dstmpte, srcmpte;
6851 
6852 	if (dst_addr != src_addr)
6853 		return;
6854 
6855 	if (dst_pmap->pm_type != src_pmap->pm_type)
6856 		return;
6857 
6858 	/*
6859 	 * EPT page table entries that require emulation of A/D bits are
6860 	 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
6861 	 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
6862 	 * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
6863 	 * implementations flag an EPT misconfiguration for exec-only
6864 	 * mappings we skip this function entirely for emulated pmaps.
6865 	 */
6866 	if (pmap_emulate_ad_bits(dst_pmap))
6867 		return;
6868 
6869 	end_addr = src_addr + len;
6870 	lock = NULL;
6871 	if (dst_pmap < src_pmap) {
6872 		PMAP_LOCK(dst_pmap);
6873 		PMAP_LOCK(src_pmap);
6874 	} else {
6875 		PMAP_LOCK(src_pmap);
6876 		PMAP_LOCK(dst_pmap);
6877 	}
6878 
6879 	PG_A = pmap_accessed_bit(dst_pmap);
6880 	PG_M = pmap_modified_bit(dst_pmap);
6881 	PG_V = pmap_valid_bit(dst_pmap);
6882 
6883 	for (addr = src_addr; addr < end_addr; addr = va_next) {
6884 		KASSERT(addr < UPT_MIN_ADDRESS,
6885 		    ("pmap_copy: invalid to pmap_copy page tables"));
6886 
6887 		pml4e = pmap_pml4e(src_pmap, addr);
6888 		if ((*pml4e & PG_V) == 0) {
6889 			va_next = (addr + NBPML4) & ~PML4MASK;
6890 			if (va_next < addr)
6891 				va_next = end_addr;
6892 			continue;
6893 		}
6894 
6895 		pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
6896 		if ((*pdpe & PG_V) == 0) {
6897 			va_next = (addr + NBPDP) & ~PDPMASK;
6898 			if (va_next < addr)
6899 				va_next = end_addr;
6900 			continue;
6901 		}
6902 
6903 		va_next = (addr + NBPDR) & ~PDRMASK;
6904 		if (va_next < addr)
6905 			va_next = end_addr;
6906 
6907 		pde = pmap_pdpe_to_pde(pdpe, addr);
6908 		srcptepaddr = *pde;
6909 		if (srcptepaddr == 0)
6910 			continue;
6911 
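		/*
		 * Copy a 2MB mapping only if it lies entirely within the
		 * range being copied; partial superpages are skipped.
		 */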
6912 		if (srcptepaddr & PG_PS) {
6913 			if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
6914 				continue;
6915 			pde = pmap_alloc_pde(dst_pmap, addr, &dst_pdpg, NULL);
6916 			if (pde == NULL)
6917 				break;
6918 			if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
6919 			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
6920 			    PMAP_ENTER_NORECLAIM, &lock))) {
6921 				*pde = srcptepaddr & ~PG_W;
6922 				pmap_resident_count_inc(dst_pmap, NBPDR /
6923 				    PAGE_SIZE);
6924 				atomic_add_long(&pmap_pde_mappings, 1);
6925 			} else
6926 				pmap_abort_ptp(dst_pmap, addr, dst_pdpg);
6927 			continue;
6928 		}
6929 
6930 		srcptepaddr &= PG_FRAME;
6931 		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
6932 		KASSERT(srcmpte->ref_count > 0,
6933 		    ("pmap_copy: source page table page is unused"));
6934 
6935 		if (va_next > end_addr)
6936 			va_next = end_addr;
6937 
6938 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
6939 		src_pte = &src_pte[pmap_pte_index(addr)];
6940 		dstmpte = NULL;
6941 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
6942 			ptetemp = *src_pte;
6943 
6944 			/*
6945 			 * We only virtual copy managed pages.
6946 			 */
6947 			if ((ptetemp & PG_MANAGED) == 0)
6948 				continue;
6949 
6950 			if (dstmpte != NULL) {
6951 				KASSERT(dstmpte->pindex ==
6952 				    pmap_pde_pindex(addr),
6953 				    ("dstmpte pindex/addr mismatch"));
6954 				dstmpte->ref_count++;
6955 			} else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
6956 			    NULL)) == NULL)
6957 				goto out;
6958 			dst_pte = (pt_entry_t *)
6959 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
6960 			dst_pte = &dst_pte[pmap_pte_index(addr)];
6961 			if (*dst_pte == 0 &&
6962 			    pmap_try_insert_pv_entry(dst_pmap, addr,
6963 			    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), &lock)) {
6964 				/*
6965 				 * Clear the wired, modified, and accessed
6966 				 * (referenced) bits during the copy.
6967 				 */
6968 				*dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
6969 				pmap_resident_count_inc(dst_pmap, 1);
6970 			} else {
6971 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
6972 				goto out;
6973 			}
6974 			/* Have we copied all of the valid mappings? */
6975 			if (dstmpte->ref_count >= srcmpte->ref_count)
6976 				break;
6977 		}
6978 	}
6979 out:
6980 	if (lock != NULL)
6981 		rw_wunlock(lock);
6982 	PMAP_UNLOCK(src_pmap);
6983 	PMAP_UNLOCK(dst_pmap);
6984 }
6985 
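/*
 * Copy the protection-key (PKRU) ranges from the source pmap to the
 * destination pmap.  This is only done for native (PT_X86) pmaps on CPUs
 * with PKU support; on memory shortage the partial copy is undone and the
 * operation is retried after vm_wait().
 */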
6986 int
6987 pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap)
6988 {
6989 	int error;
6990 
6991 	if (dst_pmap->pm_type != src_pmap->pm_type ||
6992 	    dst_pmap->pm_type != PT_X86 ||
6993 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
6994 		return (0);
6995 	for (;;) {
6996 		if (dst_pmap < src_pmap) {
6997 			PMAP_LOCK(dst_pmap);
6998 			PMAP_LOCK(src_pmap);
6999 		} else {
7000 			PMAP_LOCK(src_pmap);
7001 			PMAP_LOCK(dst_pmap);
7002 		}
7003 		error = pmap_pkru_copy(dst_pmap, src_pmap);
7004 		/* Clean up partial copy on failure due to no memory. */
7005 		if (error == ENOMEM)
7006 			pmap_pkru_deassign_all(dst_pmap);
7007 		PMAP_UNLOCK(src_pmap);
7008 		PMAP_UNLOCK(dst_pmap);
7009 		if (error != ENOMEM)
7010 			break;
7011 		vm_wait(NULL);
7012 	}
7013 	return (error);
7014 }
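
/*
 * Illustrative sketch (not part of the original source): pmap_vmspace_copy()
 * above always takes the two pmap locks in ascending pointer order, so
 * concurrent copies between the same pair of pmaps cannot deadlock.  In
 * outline, the retry protocol it implements is:
 *
 *	for (;;) {
 *		lock the lower-addressed pmap, then the higher-addressed one;
 *		error = pmap_pkru_copy(dst_pmap, src_pmap);
 *		if (error == ENOMEM)
 *			pmap_pkru_deassign_all(dst_pmap); // drop partial copy
 *		unlock both pmaps;
 *		if (error != ENOMEM)
 *			break;
 *		vm_wait(NULL);		// wait for free pages, then retry
 *	}
 */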
7015 
7016 /*
7017  * Zero the specified hardware page.
7018  */
7019 void
7020 pmap_zero_page(vm_page_t m)
7021 {
7022 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
7023 
7024 	pagezero((void *)va);
7025 }
7026 
7027 /*
7028  * Zero an area within a single hardware page.  off and size must not
7029  * cover an area beyond a single hardware page.
7030  */
7031 void
7032 pmap_zero_page_area(vm_page_t m, int off, int size)
7033 {
7034 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
7035 
7036 	if (off == 0 && size == PAGE_SIZE)
7037 		pagezero((void *)va);
7038 	else
7039 		bzero((char *)va + off, size);
7040 }
7041 
7042 /*
7043  * Copy 1 specified hardware page to another.
7044  */
7045 void
7046 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
7047 {
7048 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
7049 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
7050 
7051 	pagecopy((void *)src, (void *)dst);
7052 }
7053 
7054 int unmapped_buf_allowed = 1;
7055 
7056 void
7057 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
7058     vm_offset_t b_offset, int xfersize)
7059 {
7060 	void *a_cp, *b_cp;
7061 	vm_page_t pages[2];
7062 	vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
7063 	int cnt;
7064 	boolean_t mapped;
7065 
7066 	while (xfersize > 0) {
7067 		a_pg_offset = a_offset & PAGE_MASK;
7068 		pages[0] = ma[a_offset >> PAGE_SHIFT];
7069 		b_pg_offset = b_offset & PAGE_MASK;
7070 		pages[1] = mb[b_offset >> PAGE_SHIFT];
7071 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
7072 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
7073 		mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
7074 		a_cp = (char *)vaddr[0] + a_pg_offset;
7075 		b_cp = (char *)vaddr[1] + b_pg_offset;
7076 		bcopy(a_cp, b_cp, cnt);
7077 		if (__predict_false(mapped))
7078 			pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
7079 		a_offset += cnt;
7080 		b_offset += cnt;
7081 		xfersize -= cnt;
7082 	}
7083 }
7084 
7085 /*
7086  * Returns true if the pmap's pv is one of the first
7087  * 16 pvs linked to from this page.  This count may
7088  * be changed upwards or downwards in the future; it
7089  * is only necessary that true be returned for a small
7090  * subset of pmaps for proper page aging.
7091  */
7092 boolean_t
7093 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
7094 {
7095 	struct md_page *pvh;
7096 	struct rwlock *lock;
7097 	pv_entry_t pv;
7098 	int loops = 0;
7099 	boolean_t rv;
7100 
7101 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7102 	    ("pmap_page_exists_quick: page %p is not managed", m));
7103 	rv = FALSE;
7104 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7105 	rw_rlock(lock);
7106 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7107 		if (PV_PMAP(pv) == pmap) {
7108 			rv = TRUE;
7109 			break;
7110 		}
7111 		loops++;
7112 		if (loops >= 16)
7113 			break;
7114 	}
7115 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
7116 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
7117 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7118 			if (PV_PMAP(pv) == pmap) {
7119 				rv = TRUE;
7120 				break;
7121 			}
7122 			loops++;
7123 			if (loops >= 16)
7124 				break;
7125 		}
7126 	}
7127 	rw_runlock(lock);
7128 	return (rv);
7129 }
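
/*
 * Usage sketch (hypothetical caller, not from this file): because only the
 * first 16 PV entries are examined, a FALSE result from
 * pmap_page_exists_quick() means "no mapping found among the first few PV
 * entries", not "no mapping exists", so callers should treat it as a hint:
 *
 *	if (pmap_page_exists_quick(pmap, m)) {
 *		// pmap definitely has at least one mapping of m
 *	} else {
 *		// inconclusive: pmap may still map m further down the list
 *	}
 */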
7130 
7131 /*
7132  *	pmap_page_wired_mappings:
7133  *
7134  *	Return the number of managed mappings to the given physical page
7135  *	that are wired.
7136  */
7137 int
7138 pmap_page_wired_mappings(vm_page_t m)
7139 {
7140 	struct rwlock *lock;
7141 	struct md_page *pvh;
7142 	pmap_t pmap;
7143 	pt_entry_t *pte;
7144 	pv_entry_t pv;
7145 	int count, md_gen, pvh_gen;
7146 
7147 	if ((m->oflags & VPO_UNMANAGED) != 0)
7148 		return (0);
7149 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7150 	rw_rlock(lock);
7151 restart:
7152 	count = 0;
7153 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7154 		pmap = PV_PMAP(pv);
7155 		if (!PMAP_TRYLOCK(pmap)) {
7156 			md_gen = m->md.pv_gen;
7157 			rw_runlock(lock);
7158 			PMAP_LOCK(pmap);
7159 			rw_rlock(lock);
7160 			if (md_gen != m->md.pv_gen) {
7161 				PMAP_UNLOCK(pmap);
7162 				goto restart;
7163 			}
7164 		}
7165 		pte = pmap_pte(pmap, pv->pv_va);
7166 		if ((*pte & PG_W) != 0)
7167 			count++;
7168 		PMAP_UNLOCK(pmap);
7169 	}
7170 	if ((m->flags & PG_FICTITIOUS) == 0) {
7171 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
7172 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7173 			pmap = PV_PMAP(pv);
7174 			if (!PMAP_TRYLOCK(pmap)) {
7175 				md_gen = m->md.pv_gen;
7176 				pvh_gen = pvh->pv_gen;
7177 				rw_runlock(lock);
7178 				PMAP_LOCK(pmap);
7179 				rw_rlock(lock);
7180 				if (md_gen != m->md.pv_gen ||
7181 				    pvh_gen != pvh->pv_gen) {
7182 					PMAP_UNLOCK(pmap);
7183 					goto restart;
7184 				}
7185 			}
7186 			pte = pmap_pde(pmap, pv->pv_va);
7187 			if ((*pte & PG_W) != 0)
7188 				count++;
7189 			PMAP_UNLOCK(pmap);
7190 		}
7191 	}
7192 	rw_runlock(lock);
7193 	return (count);
7194 }
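
/*
 * Note on the locking pattern above (illustrative summary, also used by
 * several functions below): the pmap lock must be acquired before the PV
 * list lock, so while holding the list lock the code may only try-lock the
 * pmap.  When PMAP_TRYLOCK() fails, the PV list generation count is
 * recorded, the list lock is dropped, the pmap lock is taken the slow way,
 * the list lock is retaken, and the scan restarts if the generation count
 * changed in the meantime:
 *
 *	if (!PMAP_TRYLOCK(pmap)) {
 *		md_gen = m->md.pv_gen;
 *		rw_runlock(lock);
 *		PMAP_LOCK(pmap);
 *		rw_rlock(lock);
 *		if (md_gen != m->md.pv_gen) {
 *			PMAP_UNLOCK(pmap);
 *			goto restart;	// the PV list changed underneath us
 *		}
 *	}
 */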
7195 
7196 /*
7197  * Returns TRUE if the given page is mapped individually or as part of
7198  * a 2mpage.  Otherwise, returns FALSE.
7199  */
7200 boolean_t
7201 pmap_page_is_mapped(vm_page_t m)
7202 {
7203 	struct rwlock *lock;
7204 	boolean_t rv;
7205 
7206 	if ((m->oflags & VPO_UNMANAGED) != 0)
7207 		return (FALSE);
7208 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7209 	rw_rlock(lock);
7210 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
7211 	    ((m->flags & PG_FICTITIOUS) == 0 &&
7212 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
7213 	rw_runlock(lock);
7214 	return (rv);
7215 }
7216 
7217 /*
7218  * Destroy all managed, non-wired mappings in the given user-space
7219  * pmap.  This pmap cannot be active on any processor other than the
7220  * one executing the caller.
7221  *
7222  * This function cannot be applied to the kernel pmap.  Moreover, it
7223  * is not intended for general use.  It is only to be used during
7224  * process termination.  Consequently, it can be implemented in ways
7225  * that make it faster than pmap_remove().  First, it can more quickly
7226  * destroy mappings by iterating over the pmap's collection of PV
7227  * entries, rather than searching the page table.  Second, it doesn't
7228  * have to test and clear the page table entries atomically, because
7229  * no processor is currently accessing the user address space.  In
7230  * particular, a page table entry's dirty bit won't change state once
7231  * this function starts.
7232  *
7233  * Although this function destroys all of the pmap's managed,
7234  * non-wired mappings, it can delay and batch the invalidation of TLB
7235  * entries without calling pmap_delayed_invl_start() and
7236  * pmap_delayed_invl_finish().  Because the pmap is not active on
7237  * any other processor, none of these TLB entries will ever be used
7238  * before their eventual invalidation.  Consequently, there is no need
7239  * for either pmap_remove_all() or pmap_remove_write() to wait for
7240  * that eventual TLB invalidation.
7241  */
7242 void
7243 pmap_remove_pages(pmap_t pmap)
7244 {
7245 	pd_entry_t ptepde;
7246 	pt_entry_t *pte, tpte;
7247 	pt_entry_t PG_M, PG_RW, PG_V;
7248 	struct spglist free;
7249 	struct pv_chunklist free_chunks[PMAP_MEMDOM];
7250 	vm_page_t m, mpte, mt;
7251 	pv_entry_t pv;
7252 	struct md_page *pvh;
7253 	struct pv_chunk *pc, *npc;
7254 	struct rwlock *lock;
7255 	int64_t bit;
7256 	uint64_t inuse, bitmask;
7257 	int allfree, field, freed, i, idx;
7258 	boolean_t superpage;
7259 	vm_paddr_t pa;
7260 
7261 	/*
7262 	 * Assert that the given pmap is only active on the current
7263 	 * CPU.  Unfortunately, we cannot block another CPU from
7264 	 * activating the pmap while this function is executing.
7265 	 */
7266 	KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
7267 #ifdef INVARIANTS
7268 	{
7269 		cpuset_t other_cpus;
7270 
7271 		other_cpus = all_cpus;
7272 		critical_enter();
7273 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
7274 		CPU_AND(&other_cpus, &pmap->pm_active);
7275 		critical_exit();
7276 		KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
7277 	}
7278 #endif
7279 
7280 	lock = NULL;
7281 	PG_M = pmap_modified_bit(pmap);
7282 	PG_V = pmap_valid_bit(pmap);
7283 	PG_RW = pmap_rw_bit(pmap);
7284 
7285 	for (i = 0; i < PMAP_MEMDOM; i++)
7286 		TAILQ_INIT(&free_chunks[i]);
7287 	SLIST_INIT(&free);
7288 	PMAP_LOCK(pmap);
7289 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
7290 		allfree = 1;
7291 		freed = 0;
7292 		for (field = 0; field < _NPCM; field++) {
7293 			inuse = ~pc->pc_map[field] & pc_freemask[field];
7294 			while (inuse != 0) {
7295 				bit = bsfq(inuse);
7296 				bitmask = 1UL << bit;
7297 				idx = field * 64 + bit;
7298 				pv = &pc->pc_pventry[idx];
7299 				inuse &= ~bitmask;
7300 
7301 				pte = pmap_pdpe(pmap, pv->pv_va);
7302 				ptepde = *pte;
7303 				pte = pmap_pdpe_to_pde(pte, pv->pv_va);
7304 				tpte = *pte;
7305 				if ((tpte & (PG_PS | PG_V)) == PG_V) {
7306 					superpage = FALSE;
7307 					ptepde = tpte;
7308 					pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
7309 					    PG_FRAME);
7310 					pte = &pte[pmap_pte_index(pv->pv_va)];
7311 					tpte = *pte;
7312 				} else {
7313 					/*
7314 					 * Keep track of whether 'tpte' is a
7315 					 * superpage explicitly instead of
7316 					 * relying on PG_PS being set.
7317 					 *
7318 					 * This is because PG_PS is numerically
7319 					 * identical to PG_PTE_PAT and thus a
7320 					 * regular page could be mistaken for
7321 					 * a superpage.
7322 					 */
7323 					superpage = TRUE;
7324 				}
7325 
7326 				if ((tpte & PG_V) == 0) {
7327 					panic("bad pte va %lx pte %lx",
7328 					    pv->pv_va, tpte);
7329 				}
7330 
7331 /*
7332  * We cannot remove wired pages from a process' mapping at this time.
7333  */
7334 				if (tpte & PG_W) {
7335 					allfree = 0;
7336 					continue;
7337 				}
7338 
7339 				if (superpage)
7340 					pa = tpte & PG_PS_FRAME;
7341 				else
7342 					pa = tpte & PG_FRAME;
7343 
7344 				m = PHYS_TO_VM_PAGE(pa);
7345 				KASSERT(m->phys_addr == pa,
7346 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
7347 				    m, (uintmax_t)m->phys_addr,
7348 				    (uintmax_t)tpte));
7349 
7350 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
7351 				    m < &vm_page_array[vm_page_array_size],
7352 				    ("pmap_remove_pages: bad tpte %#jx",
7353 				    (uintmax_t)tpte));
7354 
7355 				pte_clear(pte);
7356 
7357 				/*
7358 				 * Update the vm_page_t clean/reference bits.
7359 				 */
7360 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
7361 					if (superpage) {
7362 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
7363 							vm_page_dirty(mt);
7364 					} else
7365 						vm_page_dirty(m);
7366 				}
7367 
7368 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
7369 
7370 				/* Mark free */
7371 				pc->pc_map[field] |= bitmask;
7372 				if (superpage) {
7373 					pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
7374 					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
7375 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
7376 					pvh->pv_gen++;
7377 					if (TAILQ_EMPTY(&pvh->pv_list)) {
7378 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
7379 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
7380 							    TAILQ_EMPTY(&mt->md.pv_list))
7381 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
7382 					}
7383 					mpte = pmap_remove_pt_page(pmap, pv->pv_va);
7384 					if (mpte != NULL) {
7385 						KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
7386 						    ("pmap_remove_pages: pte page not promoted"));
7387 						pmap_resident_count_dec(pmap, 1);
7388 						KASSERT(mpte->ref_count == NPTEPG,
7389 						    ("pmap_remove_pages: pte page reference count error"));
7390 						mpte->ref_count = 0;
7391 						pmap_add_delayed_free_list(mpte, &free, FALSE);
7392 					}
7393 				} else {
7394 					pmap_resident_count_dec(pmap, 1);
7395 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
7396 					m->md.pv_gen++;
7397 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
7398 					    TAILQ_EMPTY(&m->md.pv_list) &&
7399 					    (m->flags & PG_FICTITIOUS) == 0) {
7400 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
7401 						if (TAILQ_EMPTY(&pvh->pv_list))
7402 							vm_page_aflag_clear(m, PGA_WRITEABLE);
7403 					}
7404 				}
7405 				pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
7406 				freed++;
7407 			}
7408 		}
7409 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
7410 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
7411 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
7412 		if (allfree) {
7413 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
7414 			TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc, pc_list);
7415 		}
7416 	}
7417 	if (lock != NULL)
7418 		rw_wunlock(lock);
7419 	pmap_invalidate_all(pmap);
7420 	pmap_pkru_deassign_all(pmap);
7421 	free_pv_chunk_batch((struct pv_chunklist *)&free_chunks);
7422 	PMAP_UNLOCK(pmap);
7423 	vm_page_free_pages_toq(&free, true);
7424 }
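
/*
 * Usage sketch (hypothetical, not from this file): pmap_remove_pages() is
 * intended for late process teardown, operating on the caller's own pmap:
 *
 *	pmap = vmspace_pmap(p->p_vmspace);
 *	// pmap must equal PCPU_GET(curpmap) and must not be active on any
 *	// other CPU, as asserted at the top of the function.
 *	pmap_remove_pages(pmap);
 */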
7425 
7426 static boolean_t
7427 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
7428 {
7429 	struct rwlock *lock;
7430 	pv_entry_t pv;
7431 	struct md_page *pvh;
7432 	pt_entry_t *pte, mask;
7433 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
7434 	pmap_t pmap;
7435 	int md_gen, pvh_gen;
7436 	boolean_t rv;
7437 
7438 	rv = FALSE;
7439 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7440 	rw_rlock(lock);
7441 restart:
7442 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7443 		pmap = PV_PMAP(pv);
7444 		if (!PMAP_TRYLOCK(pmap)) {
7445 			md_gen = m->md.pv_gen;
7446 			rw_runlock(lock);
7447 			PMAP_LOCK(pmap);
7448 			rw_rlock(lock);
7449 			if (md_gen != m->md.pv_gen) {
7450 				PMAP_UNLOCK(pmap);
7451 				goto restart;
7452 			}
7453 		}
7454 		pte = pmap_pte(pmap, pv->pv_va);
7455 		mask = 0;
7456 		if (modified) {
7457 			PG_M = pmap_modified_bit(pmap);
7458 			PG_RW = pmap_rw_bit(pmap);
7459 			mask |= PG_RW | PG_M;
7460 		}
7461 		if (accessed) {
7462 			PG_A = pmap_accessed_bit(pmap);
7463 			PG_V = pmap_valid_bit(pmap);
7464 			mask |= PG_V | PG_A;
7465 		}
7466 		rv = (*pte & mask) == mask;
7467 		PMAP_UNLOCK(pmap);
7468 		if (rv)
7469 			goto out;
7470 	}
7471 	if ((m->flags & PG_FICTITIOUS) == 0) {
7472 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
7473 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7474 			pmap = PV_PMAP(pv);
7475 			if (!PMAP_TRYLOCK(pmap)) {
7476 				md_gen = m->md.pv_gen;
7477 				pvh_gen = pvh->pv_gen;
7478 				rw_runlock(lock);
7479 				PMAP_LOCK(pmap);
7480 				rw_rlock(lock);
7481 				if (md_gen != m->md.pv_gen ||
7482 				    pvh_gen != pvh->pv_gen) {
7483 					PMAP_UNLOCK(pmap);
7484 					goto restart;
7485 				}
7486 			}
7487 			pte = pmap_pde(pmap, pv->pv_va);
7488 			mask = 0;
7489 			if (modified) {
7490 				PG_M = pmap_modified_bit(pmap);
7491 				PG_RW = pmap_rw_bit(pmap);
7492 				mask |= PG_RW | PG_M;
7493 			}
7494 			if (accessed) {
7495 				PG_A = pmap_accessed_bit(pmap);
7496 				PG_V = pmap_valid_bit(pmap);
7497 				mask |= PG_V | PG_A;
7498 			}
7499 			rv = (*pte & mask) == mask;
7500 			PMAP_UNLOCK(pmap);
7501 			if (rv)
7502 				goto out;
7503 		}
7504 	}
7505 out:
7506 	rw_runlock(lock);
7507 	return (rv);
7508 }
7509 
7510 /*
7511  *	pmap_is_modified:
7512  *
7513  *	Return whether or not the specified physical page was modified
7514  *	in any physical maps.
7515  */
7516 boolean_t
7517 pmap_is_modified(vm_page_t m)
7518 {
7519 
7520 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7521 	    ("pmap_is_modified: page %p is not managed", m));
7522 
7523 	/*
7524 	 * If the page is not busied then this check is racy.
7525 	 */
7526 	if (!pmap_page_is_write_mapped(m))
7527 		return (FALSE);
7528 	return (pmap_page_test_mappings(m, FALSE, TRUE));
7529 }
7530 
7531 /*
7532  *	pmap_is_prefaultable:
7533  *
7534  *	Return whether or not the specified virtual address is eligible
7535  *	for prefault.
7536  */
7537 boolean_t
7538 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
7539 {
7540 	pd_entry_t *pde;
7541 	pt_entry_t *pte, PG_V;
7542 	boolean_t rv;
7543 
7544 	PG_V = pmap_valid_bit(pmap);
7545 	rv = FALSE;
7546 	PMAP_LOCK(pmap);
7547 	pde = pmap_pde(pmap, addr);
7548 	if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
7549 		pte = pmap_pde_to_pte(pde, addr);
7550 		rv = (*pte & PG_V) == 0;
7551 	}
7552 	PMAP_UNLOCK(pmap);
7553 	return (rv);
7554 }
7555 
7556 /*
7557  *	pmap_is_referenced:
7558  *
7559  *	Return whether or not the specified physical page was referenced
7560  *	in any physical maps.
7561  */
7562 boolean_t
7563 pmap_is_referenced(vm_page_t m)
7564 {
7565 
7566 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7567 	    ("pmap_is_referenced: page %p is not managed", m));
7568 	return (pmap_page_test_mappings(m, TRUE, FALSE));
7569 }
7570 
7571 /*
7572  * Clear the write and modified bits in each of the given page's mappings.
7573  */
7574 void
7575 pmap_remove_write(vm_page_t m)
7576 {
7577 	struct md_page *pvh;
7578 	pmap_t pmap;
7579 	struct rwlock *lock;
7580 	pv_entry_t next_pv, pv;
7581 	pd_entry_t *pde;
7582 	pt_entry_t oldpte, *pte, PG_M, PG_RW;
7583 	vm_offset_t va;
7584 	int pvh_gen, md_gen;
7585 
7586 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7587 	    ("pmap_remove_write: page %p is not managed", m));
7588 
7589 	vm_page_assert_busied(m);
7590 	if (!pmap_page_is_write_mapped(m))
7591 		return;
7592 
7593 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7594 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
7595 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
7596 retry_pv_loop:
7597 	rw_wlock(lock);
7598 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
7599 		pmap = PV_PMAP(pv);
7600 		if (!PMAP_TRYLOCK(pmap)) {
7601 			pvh_gen = pvh->pv_gen;
7602 			rw_wunlock(lock);
7603 			PMAP_LOCK(pmap);
7604 			rw_wlock(lock);
7605 			if (pvh_gen != pvh->pv_gen) {
7606 				PMAP_UNLOCK(pmap);
7607 				rw_wunlock(lock);
7608 				goto retry_pv_loop;
7609 			}
7610 		}
7611 		PG_RW = pmap_rw_bit(pmap);
7612 		va = pv->pv_va;
7613 		pde = pmap_pde(pmap, va);
7614 		if ((*pde & PG_RW) != 0)
7615 			(void)pmap_demote_pde_locked(pmap, pde, va, &lock);
7616 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
7617 		    ("inconsistent pv lock %p %p for page %p",
7618 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
7619 		PMAP_UNLOCK(pmap);
7620 	}
7621 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7622 		pmap = PV_PMAP(pv);
7623 		if (!PMAP_TRYLOCK(pmap)) {
7624 			pvh_gen = pvh->pv_gen;
7625 			md_gen = m->md.pv_gen;
7626 			rw_wunlock(lock);
7627 			PMAP_LOCK(pmap);
7628 			rw_wlock(lock);
7629 			if (pvh_gen != pvh->pv_gen ||
7630 			    md_gen != m->md.pv_gen) {
7631 				PMAP_UNLOCK(pmap);
7632 				rw_wunlock(lock);
7633 				goto retry_pv_loop;
7634 			}
7635 		}
7636 		PG_M = pmap_modified_bit(pmap);
7637 		PG_RW = pmap_rw_bit(pmap);
7638 		pde = pmap_pde(pmap, pv->pv_va);
7639 		KASSERT((*pde & PG_PS) == 0,
7640 		    ("pmap_remove_write: found a 2mpage in page %p's pv list",
7641 		    m));
7642 		pte = pmap_pde_to_pte(pde, pv->pv_va);
7643 retry:
7644 		oldpte = *pte;
7645 		if (oldpte & PG_RW) {
7646 			if (!atomic_cmpset_long(pte, oldpte, oldpte &
7647 			    ~(PG_RW | PG_M)))
7648 				goto retry;
7649 			if ((oldpte & PG_M) != 0)
7650 				vm_page_dirty(m);
7651 			pmap_invalidate_page(pmap, pv->pv_va);
7652 		}
7653 		PMAP_UNLOCK(pmap);
7654 	}
7655 	rw_wunlock(lock);
7656 	vm_page_aflag_clear(m, PGA_WRITEABLE);
7657 	pmap_delayed_invl_wait(m);
7658 }
7659 
7660 static __inline boolean_t
7661 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
7662 {
7663 
7664 	if (!pmap_emulate_ad_bits(pmap))
7665 		return (TRUE);
7666 
7667 	KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
7668 
7669 	/*
7670 	 * XWR = 010 or 110 will cause an unconditional EPT misconfiguration,
7671 	 * so we do not allow the referenced (aka EPT_PG_READ) bit to be
7672 	 * cleared when the EPT_PG_WRITE bit is set.
7673 	 */
7674 	if ((pte & EPT_PG_WRITE) != 0)
7675 		return (FALSE);
7676 
7677 	/*
7678 	 * XWR = 100 is allowed only if PMAP_SUPPORTS_EXEC_ONLY is set.
7679 	 */
7680 	if ((pte & EPT_PG_EXECUTE) == 0 ||
7681 	    ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
7682 		return (TRUE);
7683 	else
7684 		return (FALSE);
7685 }
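
/*
 * Summary (illustrative) of the checks above for EPT pmaps that emulate
 * A/D bits, where X, W and R stand for EPT_PG_EXECUTE, EPT_PG_WRITE and
 * EPT_PG_READ (the emulated referenced bit):
 *
 *	W set                          -> not safe: clearing R would produce
 *	                                  XWR = 010 or 110, an EPT misconfig
 *	W clear, X clear               -> safe: XWR becomes 000
 *	W clear, X set, exec-only
 *	    supported                  -> safe: XWR = 100 is valid
 *	W clear, X set, exec-only
 *	    not supported              -> not safe
 */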
7686 
7687 /*
7688  *	pmap_ts_referenced:
7689  *
7690  *	Return a count of reference bits for a page, clearing those bits.
7691  *	It is not necessary for every reference bit to be cleared, but it
7692  *	is necessary that 0 only be returned when there are truly no
7693  *	reference bits set.
7694  *
7695  *	As an optimization, update the page's dirty field if a modified bit is
7696  *	found while counting reference bits.  This opportunistic update can be
7697  *	performed at low cost and can eliminate the need for some future calls
7698  *	to pmap_is_modified().  However, since this function stops after
7699  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
7700  *	dirty pages.  Those dirty pages will only be detected by a future call
7701  *	to pmap_is_modified().
7702  *
7703  *	A DI block is not needed within this function, because
7704  *	invalidations are performed before the PV list lock is
7705  *	released.
7706  */
7707 int
7708 pmap_ts_referenced(vm_page_t m)
7709 {
7710 	struct md_page *pvh;
7711 	pv_entry_t pv, pvf;
7712 	pmap_t pmap;
7713 	struct rwlock *lock;
7714 	pd_entry_t oldpde, *pde;
7715 	pt_entry_t *pte, PG_A, PG_M, PG_RW;
7716 	vm_offset_t va;
7717 	vm_paddr_t pa;
7718 	int cleared, md_gen, not_cleared, pvh_gen;
7719 	struct spglist free;
7720 	boolean_t demoted;
7721 
7722 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7723 	    ("pmap_ts_referenced: page %p is not managed", m));
7724 	SLIST_INIT(&free);
7725 	cleared = 0;
7726 	pa = VM_PAGE_TO_PHYS(m);
7727 	lock = PHYS_TO_PV_LIST_LOCK(pa);
7728 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
7729 	rw_wlock(lock);
7730 retry:
7731 	not_cleared = 0;
7732 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
7733 		goto small_mappings;
7734 	pv = pvf;
7735 	do {
7736 		if (pvf == NULL)
7737 			pvf = pv;
7738 		pmap = PV_PMAP(pv);
7739 		if (!PMAP_TRYLOCK(pmap)) {
7740 			pvh_gen = pvh->pv_gen;
7741 			rw_wunlock(lock);
7742 			PMAP_LOCK(pmap);
7743 			rw_wlock(lock);
7744 			if (pvh_gen != pvh->pv_gen) {
7745 				PMAP_UNLOCK(pmap);
7746 				goto retry;
7747 			}
7748 		}
7749 		PG_A = pmap_accessed_bit(pmap);
7750 		PG_M = pmap_modified_bit(pmap);
7751 		PG_RW = pmap_rw_bit(pmap);
7752 		va = pv->pv_va;
7753 		pde = pmap_pde(pmap, pv->pv_va);
7754 		oldpde = *pde;
7755 		if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
7756 			/*
7757 			 * Although "oldpde" is mapping a 2MB page, because
7758 			 * this function is called at a 4KB page granularity,
7759 			 * we only update the 4KB page under test.
7760 			 */
7761 			vm_page_dirty(m);
7762 		}
7763 		if ((oldpde & PG_A) != 0) {
7764 			/*
7765 			 * Since this reference bit is shared by 512 4KB
7766 			 * pages, it should not be cleared every time it is
7767 			 * tested.  Apply a simple "hash" function on the
7768 			 * physical page number, the virtual superpage number,
7769 			 * and the pmap address to select one 4KB page out of
7770 			 * the 512 on which testing the reference bit will
7771 			 * result in clearing that reference bit.  This
7772 			 * function is designed to avoid the selection of the
7773 			 * same 4KB page for every 2MB page mapping.
7774 			 *
7775 			 * On demotion, a mapping that hasn't been referenced
7776 			 * is simply destroyed.  To avoid the possibility of a
7777 			 * subsequent page fault on a demoted wired mapping,
7778 			 * always leave its reference bit set.  Moreover,
7779 			 * since the superpage is wired, the current state of
7780 			 * its reference bit won't affect page replacement.
7781 			 */
7782 			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
7783 			    (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
7784 			    (oldpde & PG_W) == 0) {
7785 				if (safe_to_clear_referenced(pmap, oldpde)) {
7786 					atomic_clear_long(pde, PG_A);
7787 					pmap_invalidate_page(pmap, pv->pv_va);
7788 					demoted = FALSE;
7789 				} else if (pmap_demote_pde_locked(pmap, pde,
7790 				    pv->pv_va, &lock)) {
7791 					/*
7792 					 * Remove the mapping to a single page
7793 					 * so that a subsequent access may
7794 					 * repromote.  Since the underlying
7795 					 * page table page is fully populated,
7796 					 * this removal never frees a page
7797 					 * table page.
7798 					 */
7799 					demoted = TRUE;
7800 					va += VM_PAGE_TO_PHYS(m) - (oldpde &
7801 					    PG_PS_FRAME);
7802 					pte = pmap_pde_to_pte(pde, va);
7803 					pmap_remove_pte(pmap, pte, va, *pde,
7804 					    NULL, &lock);
7805 					pmap_invalidate_page(pmap, va);
7806 				} else
7807 					demoted = TRUE;
7808 
7809 				if (demoted) {
7810 					/*
7811 					 * The superpage mapping was removed
7812 					 * entirely and therefore 'pv' is no
7813 					 * longer valid.
7814 					 */
7815 					if (pvf == pv)
7816 						pvf = NULL;
7817 					pv = NULL;
7818 				}
7819 				cleared++;
7820 				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
7821 				    ("inconsistent pv lock %p %p for page %p",
7822 				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
7823 			} else
7824 				not_cleared++;
7825 		}
7826 		PMAP_UNLOCK(pmap);
7827 		/* Rotate the PV list if it has more than one entry. */
7828 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
7829 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
7830 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
7831 			pvh->pv_gen++;
7832 		}
7833 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
7834 			goto out;
7835 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
7836 small_mappings:
7837 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
7838 		goto out;
7839 	pv = pvf;
7840 	do {
7841 		if (pvf == NULL)
7842 			pvf = pv;
7843 		pmap = PV_PMAP(pv);
7844 		if (!PMAP_TRYLOCK(pmap)) {
7845 			pvh_gen = pvh->pv_gen;
7846 			md_gen = m->md.pv_gen;
7847 			rw_wunlock(lock);
7848 			PMAP_LOCK(pmap);
7849 			rw_wlock(lock);
7850 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
7851 				PMAP_UNLOCK(pmap);
7852 				goto retry;
7853 			}
7854 		}
7855 		PG_A = pmap_accessed_bit(pmap);
7856 		PG_M = pmap_modified_bit(pmap);
7857 		PG_RW = pmap_rw_bit(pmap);
7858 		pde = pmap_pde(pmap, pv->pv_va);
7859 		KASSERT((*pde & PG_PS) == 0,
7860 		    ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
7861 		    m));
7862 		pte = pmap_pde_to_pte(pde, pv->pv_va);
7863 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
7864 			vm_page_dirty(m);
7865 		if ((*pte & PG_A) != 0) {
7866 			if (safe_to_clear_referenced(pmap, *pte)) {
7867 				atomic_clear_long(pte, PG_A);
7868 				pmap_invalidate_page(pmap, pv->pv_va);
7869 				cleared++;
7870 			} else if ((*pte & PG_W) == 0) {
7871 				/*
7872 				 * Wired pages cannot be paged out, so
7873 				 * doing accessed-bit emulation for
7874 				 * them is wasted effort.  We do the
7875 				 * hard work for unwired pages only.
7876 				 */
7877 				pmap_remove_pte(pmap, pte, pv->pv_va,
7878 				    *pde, &free, &lock);
7879 				pmap_invalidate_page(pmap, pv->pv_va);
7880 				cleared++;
7881 				if (pvf == pv)
7882 					pvf = NULL;
7883 				pv = NULL;
7884 				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
7885 				    ("inconsistent pv lock %p %p for page %p",
7886 				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
7887 			} else
7888 				not_cleared++;
7889 		}
7890 		PMAP_UNLOCK(pmap);
7891 		/* Rotate the PV list if it has more than one entry. */
7892 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
7893 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
7894 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
7895 			m->md.pv_gen++;
7896 		}
7897 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
7898 	    not_cleared < PMAP_TS_REFERENCED_MAX);
7899 out:
7900 	rw_wunlock(lock);
7901 	vm_page_free_pages_toq(&free, true);
7902 	return (cleared + not_cleared);
7903 }
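
/*
 * Worked example (illustrative) of the selection "hash" used for 2MB
 * mappings in pmap_ts_referenced() above: the reference bit of a superpage
 * mapping is cleared only when
 *
 *	(((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ (uintptr_t)pmap) &
 *	    (NPTEPG - 1)) == 0
 *
 * which holds for exactly one of the 512 4KB pages backing the 2MB
 * mapping.  Folding in the superpage's virtual page number and the pmap
 * address makes different mappings of the same physical superpage select
 * different 4KB pages, so repeated scans do not keep clearing the bit on
 * behalf of the same page.
 */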
7904 
7905 /*
7906  *	Apply the given advice to the specified range of addresses within the
7907  *	given pmap.  Depending on the advice, clear the referenced and/or
7908  *	modified flags in each mapping and set the mapped page's dirty field.
7909  */
7910 void
7911 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
7912 {
7913 	struct rwlock *lock;
7914 	pml4_entry_t *pml4e;
7915 	pdp_entry_t *pdpe;
7916 	pd_entry_t oldpde, *pde;
7917 	pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
7918 	vm_offset_t va, va_next;
7919 	vm_page_t m;
7920 	bool anychanged;
7921 
7922 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
7923 		return;
7924 
7925 	/*
7926 	 * A/D bit emulation requires an alternate code path when clearing
7927 	 * the modified and accessed bits below. Since this function is
7928 	 * advisory in nature, we skip it entirely for pmaps that require
7929 	 * A/D bit emulation.
7930 	 */
7931 	if (pmap_emulate_ad_bits(pmap))
7932 		return;
7933 
7934 	PG_A = pmap_accessed_bit(pmap);
7935 	PG_G = pmap_global_bit(pmap);
7936 	PG_M = pmap_modified_bit(pmap);
7937 	PG_V = pmap_valid_bit(pmap);
7938 	PG_RW = pmap_rw_bit(pmap);
7939 	anychanged = false;
7940 	pmap_delayed_invl_start();
7941 	PMAP_LOCK(pmap);
7942 	for (; sva < eva; sva = va_next) {
7943 		pml4e = pmap_pml4e(pmap, sva);
7944 		if ((*pml4e & PG_V) == 0) {
7945 			va_next = (sva + NBPML4) & ~PML4MASK;
7946 			if (va_next < sva)
7947 				va_next = eva;
7948 			continue;
7949 		}
7950 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
7951 		if ((*pdpe & PG_V) == 0) {
7952 			va_next = (sva + NBPDP) & ~PDPMASK;
7953 			if (va_next < sva)
7954 				va_next = eva;
7955 			continue;
7956 		}
7957 		va_next = (sva + NBPDR) & ~PDRMASK;
7958 		if (va_next < sva)
7959 			va_next = eva;
7960 		pde = pmap_pdpe_to_pde(pdpe, sva);
7961 		oldpde = *pde;
7962 		if ((oldpde & PG_V) == 0)
7963 			continue;
7964 		else if ((oldpde & PG_PS) != 0) {
7965 			if ((oldpde & PG_MANAGED) == 0)
7966 				continue;
7967 			lock = NULL;
7968 			if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
7969 				if (lock != NULL)
7970 					rw_wunlock(lock);
7971 
7972 				/*
7973 				 * The large page mapping was destroyed.
7974 				 */
7975 				continue;
7976 			}
7977 
7978 			/*
7979 			 * Unless the page mappings are wired, remove the
7980 			 * mapping to a single page so that a subsequent
7981 			 * access may repromote.  Choosing the last page
7982 			 * within the address range [sva, min(va_next, eva))
7983 			 * generally results in more repromotions.  Since the
7984 			 * underlying page table page is fully populated, this
7985 			 * removal never frees a page table page.
7986 			 */
7987 			if ((oldpde & PG_W) == 0) {
7988 				va = eva;
7989 				if (va > va_next)
7990 					va = va_next;
7991 				va -= PAGE_SIZE;
7992 				KASSERT(va >= sva,
7993 				    ("pmap_advise: no address gap"));
7994 				pte = pmap_pde_to_pte(pde, va);
7995 				KASSERT((*pte & PG_V) != 0,
7996 				    ("pmap_advise: invalid PTE"));
7997 				pmap_remove_pte(pmap, pte, va, *pde, NULL,
7998 				    &lock);
7999 				anychanged = true;
8000 			}
8001 			if (lock != NULL)
8002 				rw_wunlock(lock);
8003 		}
8004 		if (va_next > eva)
8005 			va_next = eva;
8006 		va = va_next;
8007 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
8008 		    sva += PAGE_SIZE) {
8009 			if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
8010 				goto maybe_invlrng;
8011 			else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8012 				if (advice == MADV_DONTNEED) {
8013 					/*
8014 					 * Future calls to pmap_is_modified()
8015 					 * can be avoided by making the page
8016 					 * dirty now.
8017 					 */
8018 					m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
8019 					vm_page_dirty(m);
8020 				}
8021 				atomic_clear_long(pte, PG_M | PG_A);
8022 			} else if ((*pte & PG_A) != 0)
8023 				atomic_clear_long(pte, PG_A);
8024 			else
8025 				goto maybe_invlrng;
8026 
8027 			if ((*pte & PG_G) != 0) {
8028 				if (va == va_next)
8029 					va = sva;
8030 			} else
8031 				anychanged = true;
8032 			continue;
8033 maybe_invlrng:
8034 			if (va != va_next) {
8035 				pmap_invalidate_range(pmap, va, sva);
8036 				va = va_next;
8037 			}
8038 		}
8039 		if (va != va_next)
8040 			pmap_invalidate_range(pmap, va, sva);
8041 	}
8042 	if (anychanged)
8043 		pmap_invalidate_all(pmap);
8044 	PMAP_UNLOCK(pmap);
8045 	pmap_delayed_invl_finish();
8046 }
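
/*
 * Illustrative userland example (hypothetical, not from this file):
 * pmap_advise() is the pmap-level backend for madvise(2) advice that acts
 * on existing mappings, so a call such as
 *
 *	#include <sys/mman.h>
 *	...
 *	(void)madvise(addr, len, MADV_FREE);
 *
 * is expected to reach this function with advice == MADV_FREE for the
 * range [addr, addr + len), clearing the accessed/modified bits and
 * demoting any covering 2MB mappings as described above.
 */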
8047 
8048 /*
8049  *	Clear the modify bits on the specified physical page.
8050  */
8051 void
8052 pmap_clear_modify(vm_page_t m)
8053 {
8054 	struct md_page *pvh;
8055 	pmap_t pmap;
8056 	pv_entry_t next_pv, pv;
8057 	pd_entry_t oldpde, *pde;
8058 	pt_entry_t *pte, PG_M, PG_RW;
8059 	struct rwlock *lock;
8060 	vm_offset_t va;
8061 	int md_gen, pvh_gen;
8062 
8063 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8064 	    ("pmap_clear_modify: page %p is not managed", m));
8065 	vm_page_assert_busied(m);
8066 
8067 	if (!pmap_page_is_write_mapped(m))
8068 		return;
8069 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
8070 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
8071 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8072 	rw_wlock(lock);
8073 restart:
8074 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
8075 		pmap = PV_PMAP(pv);
8076 		if (!PMAP_TRYLOCK(pmap)) {
8077 			pvh_gen = pvh->pv_gen;
8078 			rw_wunlock(lock);
8079 			PMAP_LOCK(pmap);
8080 			rw_wlock(lock);
8081 			if (pvh_gen != pvh->pv_gen) {
8082 				PMAP_UNLOCK(pmap);
8083 				goto restart;
8084 			}
8085 		}
8086 		PG_M = pmap_modified_bit(pmap);
8087 		PG_RW = pmap_rw_bit(pmap);
8088 		va = pv->pv_va;
8089 		pde = pmap_pde(pmap, va);
8090 		oldpde = *pde;
8091 		/* If oldpde has PG_RW set, then it also has PG_M set. */
8092 		if ((oldpde & PG_RW) != 0 &&
8093 		    pmap_demote_pde_locked(pmap, pde, va, &lock) &&
8094 		    (oldpde & PG_W) == 0) {
8095 			/*
8096 			 * Write protect the mapping to a single page so that
8097 			 * a subsequent write access may repromote.
8098 			 */
8099 			va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
8100 			pte = pmap_pde_to_pte(pde, va);
8101 			atomic_clear_long(pte, PG_M | PG_RW);
8102 			vm_page_dirty(m);
8103 			pmap_invalidate_page(pmap, va);
8104 		}
8105 		PMAP_UNLOCK(pmap);
8106 	}
8107 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8108 		pmap = PV_PMAP(pv);
8109 		if (!PMAP_TRYLOCK(pmap)) {
8110 			md_gen = m->md.pv_gen;
8111 			pvh_gen = pvh->pv_gen;
8112 			rw_wunlock(lock);
8113 			PMAP_LOCK(pmap);
8114 			rw_wlock(lock);
8115 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
8116 				PMAP_UNLOCK(pmap);
8117 				goto restart;
8118 			}
8119 		}
8120 		PG_M = pmap_modified_bit(pmap);
8121 		PG_RW = pmap_rw_bit(pmap);
8122 		pde = pmap_pde(pmap, pv->pv_va);
8123 		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
8124 		    " a 2mpage in page %p's pv list", m));
8125 		pte = pmap_pde_to_pte(pde, pv->pv_va);
8126 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8127 			atomic_clear_long(pte, PG_M);
8128 			pmap_invalidate_page(pmap, pv->pv_va);
8129 		}
8130 		PMAP_UNLOCK(pmap);
8131 	}
8132 	rw_wunlock(lock);
8133 }
8134 
8135 /*
8136  * Miscellaneous support routines follow
8137  */
8138 
8139 /* Adjust the properties for a leaf page table entry. */
8140 static __inline void
8141 pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask)
8142 {
8143 	u_long opte, npte;
8144 
8145 	opte = *(u_long *)pte;
8146 	do {
8147 		npte = opte & ~mask;
8148 		npte |= bits;
8149 	} while (npte != opte && !atomic_fcmpset_long((u_long *)pte, &opte,
8150 	    npte));
8151 }
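
/*
 * Note (illustrative): the loop above is a standard lock-free
 * read-modify-write.  On failure, atomic_fcmpset_long() reloads "opte"
 * with the current PTE contents, so the loop terminates either when the
 * swap succeeds or when the recomputed value already equals the current
 * one (npte == opte), meaning no update is needed.
 */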
8152 
8153 /*
8154  * Map a set of physical memory pages into the kernel virtual
8155  * address space. Return a pointer to where the pages are mapped. This
8156  * routine is intended to be used for mapping device memory,
8157  * NOT real memory.
8158  */
8159 static void *
8160 pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
8161 {
8162 	struct pmap_preinit_mapping *ppim;
8163 	vm_offset_t va, offset;
8164 	vm_size_t tmpsize;
8165 	int i;
8166 
8167 	offset = pa & PAGE_MASK;
8168 	size = round_page(offset + size);
8169 	pa = trunc_page(pa);
8170 
8171 	if (!pmap_initialized) {
8172 		va = 0;
8173 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8174 			ppim = pmap_preinit_mapping + i;
8175 			if (ppim->va == 0) {
8176 				ppim->pa = pa;
8177 				ppim->sz = size;
8178 				ppim->mode = mode;
8179 				ppim->va = virtual_avail;
8180 				virtual_avail += size;
8181 				va = ppim->va;
8182 				break;
8183 			}
8184 		}
8185 		if (va == 0)
8186 			panic("%s: too many preinit mappings", __func__);
8187 	} else {
8188 		/*
8189 		 * If we have a preinit mapping, re-use it.
8190 		 */
8191 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8192 			ppim = pmap_preinit_mapping + i;
8193 			if (ppim->pa == pa && ppim->sz == size &&
8194 			    (ppim->mode == mode ||
8195 			    (flags & MAPDEV_SETATTR) == 0))
8196 				return ((void *)(ppim->va + offset));
8197 		}
8198 		/*
8199 		 * If the specified range of physical addresses fits within
8200 		 * the direct map window, use the direct map.
8201 		 */
8202 		if (pa < dmaplimit && pa + size <= dmaplimit) {
8203 			va = PHYS_TO_DMAP(pa);
8204 			if ((flags & MAPDEV_SETATTR) != 0) {
8205 				PMAP_LOCK(kernel_pmap);
8206 				i = pmap_change_props_locked(va, size,
8207 				    PROT_NONE, mode, flags);
8208 				PMAP_UNLOCK(kernel_pmap);
8209 			} else
8210 				i = 0;
8211 			if (!i)
8212 				return ((void *)(va + offset));
8213 		}
8214 		va = kva_alloc(size);
8215 		if (va == 0)
8216 			panic("%s: Couldn't allocate KVA", __func__);
8217 	}
8218 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
8219 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
8220 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
8221 	if ((flags & MAPDEV_FLUSHCACHE) != 0)
8222 		pmap_invalidate_cache_range(va, va + tmpsize);
8223 	return ((void *)(va + offset));
8224 }
8225 
8226 void *
8227 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
8228 {
8229 
8230 	return (pmap_mapdev_internal(pa, size, mode, MAPDEV_FLUSHCACHE |
8231 	    MAPDEV_SETATTR));
8232 }
8233 
8234 void *
8235 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
8236 {
8237 
8238 	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
8239 }
8240 
8241 void *
8242 pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
8243 {
8244 
8245 	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE,
8246 	    MAPDEV_SETATTR));
8247 }
8248 
8249 void *
8250 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
8251 {
8252 
8253 	return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK,
8254 	    MAPDEV_FLUSHCACHE));
8255 }
8256 
8257 void
8258 pmap_unmapdev(vm_offset_t va, vm_size_t size)
8259 {
8260 	struct pmap_preinit_mapping *ppim;
8261 	vm_offset_t offset;
8262 	int i;
8263 
8264 	/* If pmap_mapdev() gave out a direct map address, do nothing. */
8265 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
8266 		return;
8267 	offset = va & PAGE_MASK;
8268 	size = round_page(offset + size);
8269 	va = trunc_page(va);
8270 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8271 		ppim = pmap_preinit_mapping + i;
8272 		if (ppim->va == va && ppim->sz == size) {
8273 			if (pmap_initialized)
8274 				return;
8275 			ppim->pa = 0;
8276 			ppim->va = 0;
8277 			ppim->sz = 0;
8278 			ppim->mode = 0;
8279 			if (va + size == virtual_avail)
8280 				virtual_avail = va;
8281 			return;
8282 		}
8283 	}
8284 	if (pmap_initialized) {
8285 		pmap_qremove(va, atop(size));
8286 		kva_free(va, size);
8287 	}
8288 }
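
/*
 * Usage sketch (hypothetical driver code, not from this file): device
 * register ranges are typically mapped and unmapped in pairs:
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(bar_pa, bar_size);	// uncacheable by default
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, bar_size);
 *
 * When the physical range fits within the direct map, pmap_mapdev()
 * returns a direct map address and pmap_unmapdev() is a no-op for it, as
 * the check at the top of pmap_unmapdev() shows.
 */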
8289 
8290 /*
8291  * Tries to demote a 1GB page mapping.
8292  */
8293 static boolean_t
8294 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
8295 {
8296 	pdp_entry_t newpdpe, oldpdpe;
8297 	pd_entry_t *firstpde, newpde, *pde;
8298 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
8299 	vm_paddr_t pdpgpa;
8300 	vm_page_t pdpg;
8301 
8302 	PG_A = pmap_accessed_bit(pmap);
8303 	PG_M = pmap_modified_bit(pmap);
8304 	PG_V = pmap_valid_bit(pmap);
8305 	PG_RW = pmap_rw_bit(pmap);
8306 
8307 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8308 	oldpdpe = *pdpe;
8309 	KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
8310 	    ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
8311 	if ((pdpg = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
8312 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
8313 		CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
8314 		    " in pmap %p", va, pmap);
8315 		return (FALSE);
8316 	}
8317 	pdpgpa = VM_PAGE_TO_PHYS(pdpg);
8318 	firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
8319 	newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
8320 	KASSERT((oldpdpe & PG_A) != 0,
8321 	    ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
8322 	KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
8323 	    ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
8324 	newpde = oldpdpe;
8325 
8326 	/*
8327 	 * Initialize the page directory page.
8328 	 */
8329 	for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
8330 		*pde = newpde;
8331 		newpde += NBPDR;
8332 	}
8333 
8334 	/*
8335 	 * Demote the mapping.
8336 	 */
8337 	*pdpe = newpdpe;
8338 
8339 	/*
8340 	 * Invalidate a stale recursive mapping of the page directory page.
8341 	 */
8342 	pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
8343 
8344 	pmap_pdpe_demotions++;
8345 	CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
8346 	    " in pmap %p", va, pmap);
8347 	return (TRUE);
8348 }
8349 
8350 /*
8351  * Sets the memory attribute for the specified page.
8352  */
8353 void
8354 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
8355 {
8356 
8357 	m->md.pat_mode = ma;
8358 
8359 	/*
8360 	 * If "m" is a normal page, update its direct mapping.  This update
8361 	 * can be relied upon to perform any cache operations that are
8362 	 * required for data coherence.
8363 	 */
8364 	if ((m->flags & PG_FICTITIOUS) == 0 &&
8365 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
8366 	    m->md.pat_mode))
8367 		panic("memory attribute change on the direct map failed");
8368 }
8369 
8370 /*
8371  * Changes the specified virtual address range's memory type to that given by
8372  * the parameter "mode".  The specified virtual address range must be
8373  * completely contained within either the direct map or the kernel map.  If
8374  * the virtual address range is contained within the kernel map, then the
8375  * memory type for each of the corresponding ranges of the direct map is also
8376  * changed.  (The corresponding ranges of the direct map are those ranges that
8377  * map the same physical pages as the specified virtual address range.)  These
8378  * changes to the direct map are necessary because Intel describes the
8379  * behavior of their processors as "undefined" if two or more mappings to the
8380  * same physical page have different memory types.
8381  *
8382  * Returns zero if the change completed successfully, and either EINVAL or
8383  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
8384  * of the virtual address range was not mapped, and ENOMEM is returned if
8385  * there was insufficient memory available to complete the change.  In the
8386  * latter case, the memory type may have been changed on some part of the
8387  * virtual address range or the direct map.
8388  */
8389 int
8390 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
8391 {
8392 	int error;
8393 
8394 	PMAP_LOCK(kernel_pmap);
8395 	error = pmap_change_props_locked(va, size, PROT_NONE, mode,
8396 	    MAPDEV_FLUSHCACHE);
8397 	PMAP_UNLOCK(kernel_pmap);
8398 	return (error);
8399 }
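
/*
 * Usage sketch (hypothetical, not from this file): a driver that wants
 * write-combining access to an already mapped frame buffer might use:
 *
 *	error = pmap_change_attr(va, size, PAT_WRITE_COMBINING);
 *	if (error != 0)
 *		...	// EINVAL: part of the range was not mapped;
 *			// ENOMEM: a large page demotion failed and the
 *			// change may have been applied only partially
 */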
8400 
8401 /*
8402  * Changes the specified virtual address range's protections to those
8403  * specified by "prot".  Like pmap_change_attr(), protections for aliases
8404  * in the direct map are updated as well.  Protections on aliasing mappings may
8405  * be a subset of the requested protections; for example, mappings in the direct
8406  * map are never executable.
8407  */
8408 int
8409 pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
8410 {
8411 	int error;
8412 
8413 	/* Only supported within the kernel map. */
8414 	if (va < VM_MIN_KERNEL_ADDRESS)
8415 		return (EINVAL);
8416 
8417 	PMAP_LOCK(kernel_pmap);
8418 	error = pmap_change_props_locked(va, size, prot, -1,
8419 	    MAPDEV_ASSERTVALID);
8420 	PMAP_UNLOCK(kernel_pmap);
8421 	return (error);
8422 }
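
/*
 * Usage sketch (hypothetical, not from this file): write-protecting a
 * kernel data range after initialization might look like:
 *
 *	error = pmap_change_prot((vm_offset_t)start,
 *	    (vm_size_t)(end - start), VM_PROT_READ);
 *
 * As described above, the direct map aliases of the same physical pages
 * are updated as well, and they are never made executable.
 */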
8423 
8424 static int
8425 pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
8426     int mode, int flags)
8427 {
8428 	vm_offset_t base, offset, tmpva;
8429 	vm_paddr_t pa_start, pa_end, pa_end1;
8430 	pdp_entry_t *pdpe;
8431 	pd_entry_t *pde, pde_bits, pde_mask;
8432 	pt_entry_t *pte, pte_bits, pte_mask;
8433 	int error;
8434 	bool changed;
8435 
8436 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
8437 	base = trunc_page(va);
8438 	offset = va & PAGE_MASK;
8439 	size = round_page(offset + size);
8440 
8441 	/*
8442 	 * Only supported on kernel virtual addresses, including the direct
8443 	 * map but excluding the recursive map.
8444 	 */
8445 	if (base < DMAP_MIN_ADDRESS)
8446 		return (EINVAL);
8447 
8448 	/*
8449 	 * Construct our flag sets and masks.  "bits" is the subset of
8450 	 * "mask" that will be set in each modified PTE.
8451 	 *
8452 	 * Mappings in the direct map are never allowed to be executable.
8453 	 */
8454 	pde_bits = pte_bits = 0;
8455 	pde_mask = pte_mask = 0;
8456 	if (mode != -1) {
8457 		pde_bits |= pmap_cache_bits(kernel_pmap, mode, true);
8458 		pde_mask |= X86_PG_PDE_CACHE;
8459 		pte_bits |= pmap_cache_bits(kernel_pmap, mode, false);
8460 		pte_mask |= X86_PG_PTE_CACHE;
8461 	}
8462 	if (prot != VM_PROT_NONE) {
8463 		if ((prot & VM_PROT_WRITE) != 0) {
8464 			pde_bits |= X86_PG_RW;
8465 			pte_bits |= X86_PG_RW;
8466 		}
8467 		if ((prot & VM_PROT_EXECUTE) == 0 ||
8468 		    va < VM_MIN_KERNEL_ADDRESS) {
8469 			pde_bits |= pg_nx;
8470 			pte_bits |= pg_nx;
8471 		}
8472 		pde_mask |= X86_PG_RW | pg_nx;
8473 		pte_mask |= X86_PG_RW | pg_nx;
8474 	}
8475 
8476 	/*
8477 	 * Pages that aren't mapped aren't supported.  Also break down 2MB pages
8478 	 * into 4KB pages if required.
8479 	 */
8480 	for (tmpva = base; tmpva < base + size; ) {
8481 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
8482 		if (pdpe == NULL || *pdpe == 0) {
8483 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
8484 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
8485 			return (EINVAL);
8486 		}
8487 		if (*pdpe & PG_PS) {
8488 			/*
8489 			 * If the current 1GB page already has the required
8490 			 * properties, then we need not demote this page.  Just
8491 			 * increment tmpva to the next 1GB page frame.
8492 			 */
8493 			if ((*pdpe & pde_mask) == pde_bits) {
8494 				tmpva = trunc_1gpage(tmpva) + NBPDP;
8495 				continue;
8496 			}
8497 
8498 			/*
8499 			 * If the current offset aligns with a 1GB page frame
8500 			 * and there is at least 1GB left within the range, then
8501 			 * we need not break down this page into 2MB pages.
8502 			 */
8503 			if ((tmpva & PDPMASK) == 0 &&
8504 			    tmpva + PDPMASK < base + size) {
8505 				tmpva += NBPDP;
8506 				continue;
8507 			}
8508 			if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
8509 				return (ENOMEM);
8510 		}
8511 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
8512 		if (*pde == 0) {
8513 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
8514 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
8515 			return (EINVAL);
8516 		}
8517 		if (*pde & PG_PS) {
8518 			/*
8519 			 * If the current 2MB page already has the required
8520 			 * properties, then we need not demote this page.  Just
8521 			 * increment tmpva to the next 2MB page frame.
8522 			 */
8523 			if ((*pde & pde_mask) == pde_bits) {
8524 				tmpva = trunc_2mpage(tmpva) + NBPDR;
8525 				continue;
8526 			}
8527 
8528 			/*
8529 			 * If the current offset aligns with a 2MB page frame
8530 			 * and there is at least 2MB left within the range, then
8531 			 * we need not break down this page into 4KB pages.
8532 			 */
8533 			if ((tmpva & PDRMASK) == 0 &&
8534 			    tmpva + PDRMASK < base + size) {
8535 				tmpva += NBPDR;
8536 				continue;
8537 			}
8538 			if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
8539 				return (ENOMEM);
8540 		}
8541 		pte = pmap_pde_to_pte(pde, tmpva);
8542 		if (*pte == 0) {
8543 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
8544 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
8545 			return (EINVAL);
8546 		}
8547 		tmpva += PAGE_SIZE;
8548 	}
8549 	error = 0;
8550 
8551 	/*
8552 	 * Ok, all the pages exist, so run through them updating their
8553 	 * properties if required.
8554 	 */
8555 	changed = false;
8556 	pa_start = pa_end = 0;
8557 	for (tmpva = base; tmpva < base + size; ) {
8558 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
8559 		if (*pdpe & PG_PS) {
8560 			if ((*pdpe & pde_mask) != pde_bits) {
8561 				pmap_pte_props(pdpe, pde_bits, pde_mask);
8562 				changed = true;
8563 			}
8564 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
8565 			    (*pdpe & PG_PS_FRAME) < dmaplimit) {
8566 				if (pa_start == pa_end) {
8567 					/* Start physical address run. */
8568 					pa_start = *pdpe & PG_PS_FRAME;
8569 					pa_end = pa_start + NBPDP;
8570 				} else if (pa_end == (*pdpe & PG_PS_FRAME))
8571 					pa_end += NBPDP;
8572 				else {
8573 					/* Run ended, update direct map. */
8574 					error = pmap_change_props_locked(
8575 					    PHYS_TO_DMAP(pa_start),
8576 					    pa_end - pa_start, prot, mode,
8577 					    flags);
8578 					if (error != 0)
8579 						break;
8580 					/* Start physical address run. */
8581 					pa_start = *pdpe & PG_PS_FRAME;
8582 					pa_end = pa_start + NBPDP;
8583 				}
8584 			}
8585 			tmpva = trunc_1gpage(tmpva) + NBPDP;
8586 			continue;
8587 		}
8588 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
8589 		if (*pde & PG_PS) {
8590 			if ((*pde & pde_mask) != pde_bits) {
8591 				pmap_pte_props(pde, pde_bits, pde_mask);
8592 				changed = true;
8593 			}
8594 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
8595 			    (*pde & PG_PS_FRAME) < dmaplimit) {
8596 				if (pa_start == pa_end) {
8597 					/* Start physical address run. */
8598 					pa_start = *pde & PG_PS_FRAME;
8599 					pa_end = pa_start + NBPDR;
8600 				} else if (pa_end == (*pde & PG_PS_FRAME))
8601 					pa_end += NBPDR;
8602 				else {
8603 					/* Run ended, update direct map. */
8604 					error = pmap_change_props_locked(
8605 					    PHYS_TO_DMAP(pa_start),
8606 					    pa_end - pa_start, prot, mode,
8607 					    flags);
8608 					if (error != 0)
8609 						break;
8610 					/* Start physical address run. */
8611 					pa_start = *pde & PG_PS_FRAME;
8612 					pa_end = pa_start + NBPDR;
8613 				}
8614 			}
8615 			tmpva = trunc_2mpage(tmpva) + NBPDR;
8616 		} else {
8617 			pte = pmap_pde_to_pte(pde, tmpva);
8618 			if ((*pte & pte_mask) != pte_bits) {
8619 				pmap_pte_props(pte, pte_bits, pte_mask);
8620 				changed = true;
8621 			}
8622 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
8623 			    (*pte & PG_FRAME) < dmaplimit) {
8624 				if (pa_start == pa_end) {
8625 					/* Start physical address run. */
8626 					pa_start = *pte & PG_FRAME;
8627 					pa_end = pa_start + PAGE_SIZE;
8628 				} else if (pa_end == (*pte & PG_FRAME))
8629 					pa_end += PAGE_SIZE;
8630 				else {
8631 					/* Run ended, update direct map. */
8632 					error = pmap_change_props_locked(
8633 					    PHYS_TO_DMAP(pa_start),
8634 					    pa_end - pa_start, prot, mode,
8635 					    flags);
8636 					if (error != 0)
8637 						break;
8638 					/* Start physical address run. */
8639 					pa_start = *pte & PG_FRAME;
8640 					pa_end = pa_start + PAGE_SIZE;
8641 				}
8642 			}
8643 			tmpva += PAGE_SIZE;
8644 		}
8645 	}
8646 	if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
8647 		pa_end1 = MIN(pa_end, dmaplimit);
8648 		if (pa_start != pa_end1)
8649 			error = pmap_change_props_locked(PHYS_TO_DMAP(pa_start),
8650 			    pa_end1 - pa_start, prot, mode, flags);
8651 	}
8652 
8653 	/*
8654 	 * Flush the CPU caches if required, so that no data remains cached
8655 	 * with the old memory attributes.
8656 	 */
8657 	if (changed) {
8658 		pmap_invalidate_range(kernel_pmap, base, tmpva);
8659 		if ((flags & MAPDEV_FLUSHCACHE) != 0)
8660 			pmap_invalidate_cache_range(base, tmpva);
8661 	}
8662 	return (error);
8663 }
8664 
8665 /*
8666  * Demotes any mapping within the direct map region that covers more than the
8667  * specified range of physical addresses.  This range's size must be a power
8668  * of two and its starting address must be a multiple of its size.  Since the
8669  * demotion does not change any attributes of the mapping, a TLB invalidation
8670  * is not mandatory.  The caller may, however, request a TLB invalidation.
8671  */
8672 void
8673 pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
8674 {
8675 	pdp_entry_t *pdpe;
8676 	pd_entry_t *pde;
8677 	vm_offset_t va;
8678 	boolean_t changed;
8679 
8680 	if (len == 0)
8681 		return;
8682 	KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
8683 	KASSERT((base & (len - 1)) == 0,
8684 	    ("pmap_demote_DMAP: base is not a multiple of len"));
8685 	if (len < NBPDP && base < dmaplimit) {
8686 		va = PHYS_TO_DMAP(base);
8687 		changed = FALSE;
8688 		PMAP_LOCK(kernel_pmap);
8689 		pdpe = pmap_pdpe(kernel_pmap, va);
8690 		if ((*pdpe & X86_PG_V) == 0)
8691 			panic("pmap_demote_DMAP: invalid PDPE");
8692 		if ((*pdpe & PG_PS) != 0) {
8693 			if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
8694 				panic("pmap_demote_DMAP: PDPE failed");
8695 			changed = TRUE;
8696 		}
8697 		if (len < NBPDR) {
8698 			pde = pmap_pdpe_to_pde(pdpe, va);
8699 			if ((*pde & X86_PG_V) == 0)
8700 				panic("pmap_demote_DMAP: invalid PDE");
8701 			if ((*pde & PG_PS) != 0) {
8702 				if (!pmap_demote_pde(kernel_pmap, pde, va))
8703 					panic("pmap_demote_DMAP: PDE failed");
8704 				changed = TRUE;
8705 			}
8706 		}
8707 		if (changed && invalidate)
8708 			pmap_invalidate_page(kernel_pmap, va);
8709 		PMAP_UNLOCK(kernel_pmap);
8710 	}
8711 }
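
/*
 * Usage sketch (hypothetical, not from this file): before changing the
 * attributes of a single 4KB frame through its direct map address, a
 * caller may pre-demote the covering large mapping:
 *
 *	pmap_demote_DMAP(pa, PAGE_SIZE, TRUE);
 *
 * Here len = PAGE_SIZE is a power of two and pa must be aligned to it,
 * satisfying the KASSERTs above; no TLB invalidation is strictly required,
 * but one is requested via the third argument.
 */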
8712 
8713 /*
8714  * Perform the pmap work for mincore(2).  If the page is not both referenced
8715  * and modified by this pmap, its physical address is returned so that the
8716  * caller can find other mappings.
8717  */
8718 int
8719 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
8720 {
8721 	pd_entry_t *pdep;
8722 	pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
8723 	vm_paddr_t pa;
8724 	int val;
8725 
8726 	PG_A = pmap_accessed_bit(pmap);
8727 	PG_M = pmap_modified_bit(pmap);
8728 	PG_V = pmap_valid_bit(pmap);
8729 	PG_RW = pmap_rw_bit(pmap);
8730 
8731 	PMAP_LOCK(pmap);
8732 	pdep = pmap_pde(pmap, addr);
8733 	if (pdep != NULL && (*pdep & PG_V)) {
8734 		if (*pdep & PG_PS) {
8735 			pte = *pdep;
8736 			/* Compute the physical address of the 4KB page. */
8737 			pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
8738 			    PG_FRAME;
8739 			val = MINCORE_SUPER;
8740 		} else {
8741 			pte = *pmap_pde_to_pte(pdep, addr);
8742 			pa = pte & PG_FRAME;
8743 			val = 0;
8744 		}
8745 	} else {
8746 		pte = 0;
8747 		pa = 0;
8748 		val = 0;
8749 	}
8750 	if ((pte & PG_V) != 0) {
8751 		val |= MINCORE_INCORE;
8752 		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
8753 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
8754 		if ((pte & PG_A) != 0)
8755 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
8756 	}
8757 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
8758 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
8759 	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
8760 		*pap = pa;
8761 	}
8762 	PMAP_UNLOCK(pmap);
8763 	return (val);
8764 }
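
/*
 * Illustrative userland example (hypothetical, not from this file): the
 * MINCORE_* bits computed above are ultimately reported by mincore(2):
 *
 *	#include <sys/mman.h>
 *	char vec[1];
 *	...
 *	if (mincore(addr, PAGE_SIZE, vec) == 0 &&
 *	    (vec[0] & MINCORE_INCORE) != 0)
 *		...	// resident; MINCORE_SUPER additionally indicates
 *			// that a 2MB (or larger) mapping backs the address
 */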
8765 
8766 static uint64_t
8767 pmap_pcid_alloc(pmap_t pmap, u_int cpuid)
8768 {
8769 	uint32_t gen, new_gen, pcid_next;
8770 
8771 	CRITICAL_ASSERT(curthread);
8772 	gen = PCPU_GET(pcid_gen);
8773 	if (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN)
8774 		return (pti ? 0 : CR3_PCID_SAVE);
8775 	if (pmap->pm_pcids[cpuid].pm_gen == gen)
8776 		return (CR3_PCID_SAVE);
8777 	pcid_next = PCPU_GET(pcid_next);
8778 	KASSERT((!pti && pcid_next <= PMAP_PCID_OVERMAX) ||
8779 	    (pti && pcid_next <= PMAP_PCID_OVERMAX_KERN),
8780 	    ("cpu %d pcid_next %#x", cpuid, pcid_next));
8781 	if ((!pti && pcid_next == PMAP_PCID_OVERMAX) ||
8782 	    (pti && pcid_next == PMAP_PCID_OVERMAX_KERN)) {
8783 		new_gen = gen + 1;
8784 		if (new_gen == 0)
8785 			new_gen = 1;
8786 		PCPU_SET(pcid_gen, new_gen);
8787 		pcid_next = PMAP_PCID_KERN + 1;
8788 	} else {
8789 		new_gen = gen;
8790 	}
8791 	pmap->pm_pcids[cpuid].pm_pcid = pcid_next;
8792 	pmap->pm_pcids[cpuid].pm_gen = new_gen;
8793 	PCPU_SET(pcid_next, pcid_next + 1);
8794 	return (0);
8795 }
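
/*
 * Worked scenario (illustrative) for the PCID allocator above: each CPU
 * hands out PCIDs sequentially from its per-CPU pcid_next.  When the
 * space is exhausted, pcid_gen is bumped and allocation restarts at
 * PMAP_PCID_KERN + 1.  A pmap whose cached pm_gen no longer matches
 * pcid_gen is treated as having no valid PCID on this CPU: it is given a
 * fresh PCID and the function returns 0 rather than CR3_PCID_SAVE, so the
 * subsequent CR3 load discards any stale TLB entries for that PCID.
 */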
8796 
8797 static uint64_t
8798 pmap_pcid_alloc_checked(pmap_t pmap, u_int cpuid)
8799 {
8800 	uint64_t cached;
8801 
8802 	cached = pmap_pcid_alloc(pmap, cpuid);
8803 	KASSERT(pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
8804 	    ("pmap %p cpu %d pcid %#x", pmap, cpuid,
8805 	    pmap->pm_pcids[cpuid].pm_pcid));
8806 	KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
8807 	    pmap == kernel_pmap,
8808 	    ("non-kernel pmap pmap %p cpu %d pcid %#x",
8809 	    pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));
8810 	return (cached);
8811 }
8812 
8813 static void
8814 pmap_activate_sw_pti_post(struct thread *td, pmap_t pmap)
8815 {
8816 
8817 	PCPU_GET(tssp)->tss_rsp0 = pmap->pm_ucr3 != PMAP_NO_CR3 ?
8818 	    PCPU_GET(pti_rsp0) : (uintptr_t)td->td_md.md_stack_base;
8819 }
8820 
8821 static void
8822 pmap_activate_sw_pcid_pti(struct thread *td, pmap_t pmap, u_int cpuid)
8823 {
8824 	pmap_t old_pmap;
8825 	uint64_t cached, cr3, kcr3, ucr3;
8826 
8827 	KASSERT((read_rflags() & PSL_I) == 0,
8828 	    ("PCID needs interrupts disabled in pmap_activate_sw()"));
8829 
8830 	/* See the comment in pmap_invalidate_page_pcid(). */
8831 	if (PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK) {
8832 		PCPU_SET(ucr3_load_mask, PMAP_UCR3_NOMASK);
8833 		old_pmap = PCPU_GET(curpmap);
8834 		MPASS(old_pmap->pm_ucr3 != PMAP_NO_CR3);
8835 		old_pmap->pm_pcids[cpuid].pm_gen = 0;
8836 	}
8837 
8838 	cached = pmap_pcid_alloc_checked(pmap, cpuid);
8839 	cr3 = rcr3();
8840 	if ((cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
8841 		load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid);
8842 	PCPU_SET(curpmap, pmap);
8843 	kcr3 = pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid;
8844 	ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[cpuid].pm_pcid |
8845 	    PMAP_PCID_USER_PT;
8846 
8847 	if (!cached && pmap->pm_ucr3 != PMAP_NO_CR3)
8848 		PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
8849 
8850 	PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
8851 	PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
8852 	if (cached)
8853 		PCPU_INC(pm_save_cnt);
8854 
8855 	pmap_activate_sw_pti_post(td, pmap);
8856 }
8857 
8858 static void
8859 pmap_activate_sw_pcid_nopti(struct thread *td __unused, pmap_t pmap,
8860     u_int cpuid)
8861 {
8862 	uint64_t cached, cr3;
8863 
8864 	KASSERT((read_rflags() & PSL_I) == 0,
8865 	    ("PCID needs interrupts disabled in pmap_activate_sw()"));
8866 
8867 	cached = pmap_pcid_alloc_checked(pmap, cpuid);
8868 	cr3 = rcr3();
8869 	if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
8870 		load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
8871 		    cached);
8872 	PCPU_SET(curpmap, pmap);
8873 	if (cached)
8874 		PCPU_INC(pm_save_cnt);
8875 }
8876 
8877 static void
8878 pmap_activate_sw_nopcid_nopti(struct thread *td __unused, pmap_t pmap,
8879     u_int cpuid __unused)
8880 {
8881 
8882 	load_cr3(pmap->pm_cr3);
8883 	PCPU_SET(curpmap, pmap);
8884 }
8885 
8886 static void
8887 pmap_activate_sw_nopcid_pti(struct thread *td, pmap_t pmap,
8888     u_int cpuid __unused)
8889 {
8890 
8891 	pmap_activate_sw_nopcid_nopti(td, pmap, cpuid);
8892 	PCPU_SET(kcr3, pmap->pm_cr3);
8893 	PCPU_SET(ucr3, pmap->pm_ucr3);
8894 	pmap_activate_sw_pti_post(td, pmap);
8895 }
8896 
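/*
 * The ifunc below selects, at boot, the pmap activation helper that
 * matches the PCID and PTI configuration of the machine.
 */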
8897 DEFINE_IFUNC(static, void, pmap_activate_sw_mode, (struct thread *, pmap_t,
8898     u_int))
8899 {
8900 
8901 	if (pmap_pcid_enabled && pti)
8902 		return (pmap_activate_sw_pcid_pti);
8903 	else if (pmap_pcid_enabled && !pti)
8904 		return (pmap_activate_sw_pcid_nopti);
8905 	else if (!pmap_pcid_enabled && pti)
8906 		return (pmap_activate_sw_nopcid_pti);
8907 	else /* if (!pmap_pcid_enabled && !pti) */
8908 		return (pmap_activate_sw_nopcid_nopti);
8909 }
8910 
8911 void
8912 pmap_activate_sw(struct thread *td)
8913 {
8914 	pmap_t oldpmap, pmap;
8915 	u_int cpuid;
8916 
8917 	oldpmap = PCPU_GET(curpmap);
8918 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
8919 	if (oldpmap == pmap) {
8920 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
8921 			mfence();
8922 		return;
8923 	}
8924 	cpuid = PCPU_GET(cpuid);
8925 #ifdef SMP
8926 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
8927 #else
8928 	CPU_SET(cpuid, &pmap->pm_active);
8929 #endif
8930 	pmap_activate_sw_mode(td, pmap, cpuid);
8931 #ifdef SMP
8932 	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
8933 #else
8934 	CPU_CLR(cpuid, &oldpmap->pm_active);
8935 #endif
8936 }
8937 
8938 void
8939 pmap_activate(struct thread *td)
8940 {
8941 	/*
8942 	 * invltlb_{invpcid,}_pcid_handler() is used to handle an
8943 	 * invalidate_all IPI, which checks for curpmap ==
8944 	 * smp_tlb_pmap.  The below sequence of operations has a
8945 	 * window where %CR3 is loaded with the new pmap's PML4
8946 	 * address, but the curpmap value has not yet been updated.
8947 	 * This causes the invltlb IPI handler, which is called
8948 	 * between the updates, to execute as a NOP, which leaves
8949 	 * stale TLB entries.
8950 	 *
8951 	 * Note that the most common use of pmap_activate_sw(), from
8952 	 * a context switch, is immune to this race, because
8953 	 * interrupts are disabled (while the thread lock is owned),
8954 	 * so the IPI is delayed until after curpmap is updated.  Protect
8955 	 * other callers in a similar way, by disabling interrupts
8956 	 * around the %cr3 register reload and curpmap assignment.
8957 	 */
8958 	spinlock_enter();
8959 	pmap_activate_sw(td);
8960 	spinlock_exit();
8961 }
8962 
8963 void
8964 pmap_activate_boot(pmap_t pmap)
8965 {
8966 	uint64_t kcr3;
8967 	u_int cpuid;
8968 
8969 	/*
8970 	 * kernel_pmap must never be deactivated, and we ensure that
8971 	 * by never activating it at all.
8972 	 */
8973 	MPASS(pmap != kernel_pmap);
8974 
8975 	cpuid = PCPU_GET(cpuid);
8976 #ifdef SMP
8977 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
8978 #else
8979 	CPU_SET(cpuid, &pmap->pm_active);
8980 #endif
8981 	PCPU_SET(curpmap, pmap);
8982 	if (pti) {
8983 		kcr3 = pmap->pm_cr3;
8984 		if (pmap_pcid_enabled)
8985 			kcr3 |= pmap->pm_pcids[cpuid].pm_pcid | CR3_PCID_SAVE;
8986 	} else {
8987 		kcr3 = PMAP_NO_CR3;
8988 	}
8989 	PCPU_SET(kcr3, kcr3);
8990 	PCPU_SET(ucr3, PMAP_NO_CR3);
8991 }
8992 
8993 void
8994 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
8995 {
8996 }
8997 
8998 /*
8999  *	Increase the starting virtual address of the given mapping if a
9000  *	different alignment might result in more superpage mappings.
9001  */
9002 void
9003 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
9004     vm_offset_t *addr, vm_size_t size)
9005 {
9006 	vm_offset_t superpage_offset;
9007 
9008 	if (size < NBPDR)
9009 		return;
9010 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
9011 		offset += ptoa(object->pg_color);
9012 	superpage_offset = offset & PDRMASK;
9013 	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
9014 	    (*addr & PDRMASK) == superpage_offset)
9015 		return;
9016 	if ((*addr & PDRMASK) < superpage_offset)
9017 		*addr = (*addr & ~PDRMASK) + superpage_offset;
9018 	else
9019 		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
9020 }
9021 
9022 #ifdef INVARIANTS
9023 static unsigned long num_dirty_emulations;
9024 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
9025 	     &num_dirty_emulations, 0, NULL);
9026 
9027 static unsigned long num_accessed_emulations;
9028 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
9029 	     &num_accessed_emulations, 0, NULL);
9030 
9031 static unsigned long num_superpage_accessed_emulations;
9032 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
9033 	     &num_superpage_accessed_emulations, 0, NULL);
9034 
9035 static unsigned long ad_emulation_superpage_promotions;
9036 SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
9037 	     &ad_emulation_superpage_promotions, 0, NULL);
9038 #endif	/* INVARIANTS */
9039 
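/*
 * Emulate the hardware accessed/dirty bit updates for pmaps, such as
 * EPT pmaps without A/D bit support, that track these bits in
 * software.  Returns 0 when the fault was resolved by setting the
 * bits and -1 when the fault must be handled by the regular fault
 * path.
 */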
9040 int
9041 pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
9042 {
9043 	int rv;
9044 	struct rwlock *lock;
9045 #if VM_NRESERVLEVEL > 0
9046 	vm_page_t m, mpte;
9047 #endif
9048 	pd_entry_t *pde;
9049 	pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
9050 
9051 	KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
9052 	    ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));
9053 
9054 	if (!pmap_emulate_ad_bits(pmap))
9055 		return (-1);
9056 
9057 	PG_A = pmap_accessed_bit(pmap);
9058 	PG_M = pmap_modified_bit(pmap);
9059 	PG_V = pmap_valid_bit(pmap);
9060 	PG_RW = pmap_rw_bit(pmap);
9061 
9062 	rv = -1;
9063 	lock = NULL;
9064 	PMAP_LOCK(pmap);
9065 
9066 	pde = pmap_pde(pmap, va);
9067 	if (pde == NULL || (*pde & PG_V) == 0)
9068 		goto done;
9069 
9070 	if ((*pde & PG_PS) != 0) {
9071 		if (ftype == VM_PROT_READ) {
9072 #ifdef INVARIANTS
9073 			atomic_add_long(&num_superpage_accessed_emulations, 1);
9074 #endif
9075 			*pde |= PG_A;
9076 			rv = 0;
9077 		}
9078 		goto done;
9079 	}
9080 
9081 	pte = pmap_pde_to_pte(pde, va);
9082 	if ((*pte & PG_V) == 0)
9083 		goto done;
9084 
9085 	if (ftype == VM_PROT_WRITE) {
9086 		if ((*pte & PG_RW) == 0)
9087 			goto done;
9088 		/*
9089 		 * Set the modified and accessed bits simultaneously.
9090 		 *
9091 		 * Intel EPT PTEs that do software emulation of A/D bits map
9092 		 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
9093 		 * An EPT misconfiguration is triggered if the PTE is writable
9094 		 * but not readable (WR=10). This is avoided by setting PG_A
9095 		 * and PG_M simultaneously.
9096 		 */
9097 		*pte |= PG_M | PG_A;
9098 	} else {
9099 		*pte |= PG_A;
9100 	}
9101 
9102 #if VM_NRESERVLEVEL > 0
9103 	/* try to promote the mapping */
9104 	if (va < VM_MAXUSER_ADDRESS)
9105 		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
9106 	else
9107 		mpte = NULL;
9108 
9109 	m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
9110 
9111 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
9112 	    pmap_ps_enabled(pmap) &&
9113 	    (m->flags & PG_FICTITIOUS) == 0 &&
9114 	    vm_reserv_level_iffullpop(m) == 0) {
9115 		pmap_promote_pde(pmap, pde, va, &lock);
9116 #ifdef INVARIANTS
9117 		atomic_add_long(&ad_emulation_superpage_promotions, 1);
9118 #endif
9119 	}
9120 #endif
9121 
9122 #ifdef INVARIANTS
9123 	if (ftype == VM_PROT_WRITE)
9124 		atomic_add_long(&num_dirty_emulations, 1);
9125 	else
9126 		atomic_add_long(&num_accessed_emulations, 1);
9127 #endif
9128 	rv = 0;		/* success */
9129 done:
9130 	if (lock != NULL)
9131 		rw_wunlock(lock);
9132 	PMAP_UNLOCK(pmap);
9133 	return (rv);
9134 }
9135 
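/*
 * Copy the page table entries that translate va, from the pml4 entry
 * down to the lowest valid level, into ptr[] and return the number of
 * entries copied in *num.
 */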
9136 void
9137 pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
9138 {
9139 	pml4_entry_t *pml4;
9140 	pdp_entry_t *pdp;
9141 	pd_entry_t *pde;
9142 	pt_entry_t *pte, PG_V;
9143 	int idx;
9144 
9145 	idx = 0;
9146 	PG_V = pmap_valid_bit(pmap);
9147 	PMAP_LOCK(pmap);
9148 
9149 	pml4 = pmap_pml4e(pmap, va);
9150 	ptr[idx++] = *pml4;
9151 	if ((*pml4 & PG_V) == 0)
9152 		goto done;
9153 
9154 	pdp = pmap_pml4e_to_pdpe(pml4, va);
9155 	ptr[idx++] = *pdp;
9156 	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
9157 		goto done;
9158 
9159 	pde = pmap_pdpe_to_pde(pdp, va);
9160 	ptr[idx++] = *pde;
9161 	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
9162 		goto done;
9163 
9164 	pte = pmap_pde_to_pte(pde, va);
9165 	ptr[idx++] = *pte;
9166 
9167 done:
9168 	PMAP_UNLOCK(pmap);
9169 	*num = idx;
9170 }
9171 
9172 /**
9173  * Get the kernel virtual address of a set of physical pages. If there are
9174  * physical addresses not covered by the DMAP perform a transient mapping
9175  * that will be removed when calling pmap_unmap_io_transient.
9176  *
9177  * \param page        The pages for which the caller wishes to obtain
9178  *                    kernel virtual addresses.
9179  * \param vaddr       On return contains the kernel virtual memory address
9180  *                    of the pages passed in the page parameter.
9181  * \param count       Number of pages passed in.
9182  * \param can_fault   TRUE if the thread using the mapped pages can take
9183  *                    page faults, FALSE otherwise.
9184  *
9185  * \returns TRUE if the caller must call pmap_unmap_io_transient when
9186  *          finished or FALSE otherwise.
9187  *
9188  */
9189 boolean_t
9190 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
9191     boolean_t can_fault)
9192 {
9193 	vm_paddr_t paddr;
9194 	boolean_t needs_mapping;
9195 	pt_entry_t *pte;
9196 	int cache_bits, error __unused, i;
9197 
9198 	/*
9199 	 * Allocate any KVA space that we need; this is done in a separate
9200 	 * loop to prevent calling vmem_alloc while pinned.
9201 	 */
9202 	needs_mapping = FALSE;
9203 	for (i = 0; i < count; i++) {
9204 		paddr = VM_PAGE_TO_PHYS(page[i]);
9205 		if (__predict_false(paddr >= dmaplimit)) {
9206 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
9207 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
9208 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
9209 			needs_mapping = TRUE;
9210 		} else {
9211 			vaddr[i] = PHYS_TO_DMAP(paddr);
9212 		}
9213 	}
9214 
9215 	/* Exit early if everything is covered by the DMAP */
9216 	if (!needs_mapping)
9217 		return (FALSE);
9218 
9219 	/*
9220 	 * NB:  The sequence of updating a page table followed by accesses
9221 	 * to the corresponding pages used in the !DMAP case is subject to
9222 	 * the situation described in the "AMD64 Architecture Programmer's
9223 	 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
9224 	 * Coherency Considerations".  Therefore, issuing the INVLPG right
9225 	 * after modifying the PTE bits is crucial.
9226 	 */
9227 	if (!can_fault)
9228 		sched_pin();
9229 	for (i = 0; i < count; i++) {
9230 		paddr = VM_PAGE_TO_PHYS(page[i]);
9231 		if (paddr >= dmaplimit) {
9232 			if (can_fault) {
9233 				/*
9234 				 * Slow path: since we can take page faults
9235 				 * while mappings are active, don't pin the
9236 				 * thread to the CPU; instead add a global
9237 				 * mapping visible to all CPUs.
9238 				 */
9239 				pmap_qenter(vaddr[i], &page[i], 1);
9240 			} else {
9241 				pte = vtopte(vaddr[i]);
9242 				cache_bits = pmap_cache_bits(kernel_pmap,
9243 				    page[i]->md.pat_mode, 0);
9244 				pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
9245 				    cache_bits);
9246 				invlpg(vaddr[i]);
9247 			}
9248 		}
9249 	}
9250 
9251 	return (needs_mapping);
9252 }
9253 
9254 void
9255 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
9256     boolean_t can_fault)
9257 {
9258 	vm_paddr_t paddr;
9259 	int i;
9260 
9261 	if (!can_fault)
9262 		sched_unpin();
9263 	for (i = 0; i < count; i++) {
9264 		paddr = VM_PAGE_TO_PHYS(page[i]);
9265 		if (paddr >= dmaplimit) {
9266 			if (can_fault)
9267 				pmap_qremove(vaddr[i], 1);
9268 			vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
9269 		}
9270 	}
9271 }
9272 
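/*
 * Temporarily map a single page.  Pages covered by the DMAP are
 * returned directly; any other page is mapped at the dedicated qframe
 * address, which is serialized by qframe_mtx until
 * pmap_quick_remove_page() releases it.
 */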
9273 vm_offset_t
9274 pmap_quick_enter_page(vm_page_t m)
9275 {
9276 	vm_paddr_t paddr;
9277 
9278 	paddr = VM_PAGE_TO_PHYS(m);
9279 	if (paddr < dmaplimit)
9280 		return (PHYS_TO_DMAP(paddr));
9281 	mtx_lock_spin(&qframe_mtx);
9282 	KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
9283 	pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
9284 	    X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));
9285 	return (qframe);
9286 }
9287 
9288 void
9289 pmap_quick_remove_page(vm_offset_t addr)
9290 {
9291 
9292 	if (addr != qframe)
9293 		return;
9294 	pte_store(vtopte(qframe), 0);
9295 	invlpg(qframe);
9296 	mtx_unlock_spin(&qframe_mtx);
9297 }
9298 
9299 /*
9300  * Pdp pages from the large map are managed differently from either
9301  * kernel or user page table pages.  They are permanently allocated at
9302  * initialization time, and their reference count is permanently set to
9303  * zero.  The pml4 entries pointing to those pages are copied into
9304  * each allocated pmap.
9305  *
9306  * In contrast, pd and pt pages are managed like user page table
9307  * pages.  They are dynamically allocated, and their reference count
9308  * represents the number of valid entries within the page.
9309  */
9310 static vm_page_t
9311 pmap_large_map_getptp_unlocked(void)
9312 {
9313 	vm_page_t m;
9314 
9315 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
9316 	    VM_ALLOC_ZERO);
9317 	if (m != NULL && (m->flags & PG_ZERO) == 0)
9318 		pmap_zero_page(m);
9319 	return (m);
9320 }
9321 
9322 static vm_page_t
9323 pmap_large_map_getptp(void)
9324 {
9325 	vm_page_t m;
9326 
9327 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
9328 	m = pmap_large_map_getptp_unlocked();
9329 	if (m == NULL) {
9330 		PMAP_UNLOCK(kernel_pmap);
9331 		vm_wait(NULL);
9332 		PMAP_LOCK(kernel_pmap);
9333 		/* Callers retry. */
9334 	}
9335 	return (m);
9336 }
9337 
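/*
 * Return a pointer to the pdp entry that covers va in the large map.
 * The pml4 entries for the large map are created at initialization
 * time, so no allocation is needed at this level.
 */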
9338 static pdp_entry_t *
9339 pmap_large_map_pdpe(vm_offset_t va)
9340 {
9341 	vm_pindex_t pml4_idx;
9342 	vm_paddr_t mphys;
9343 
9344 	pml4_idx = pmap_pml4e_index(va);
9345 	KASSERT(LMSPML4I <= pml4_idx && pml4_idx < LMSPML4I + lm_ents,
9346 	    ("pmap_large_map_pdpe: va %#jx out of range idx %#jx LMSPML4I "
9347 	    "%#jx lm_ents %d",
9348 	    (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
9349 	KASSERT((kernel_pmap->pm_pml4[pml4_idx] & X86_PG_V) != 0,
9350 	    ("pmap_large_map_pdpe: invalid pml4 for va %#jx idx %#jx "
9351 	    "LMSPML4I %#jx lm_ents %d",
9352 	    (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
9353 	mphys = kernel_pmap->pm_pml4[pml4_idx] & PG_FRAME;
9354 	return ((pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va));
9355 }
9356 
9357 static pd_entry_t *
9358 pmap_large_map_pde(vm_offset_t va)
9359 {
9360 	pdp_entry_t *pdpe;
9361 	vm_page_t m;
9362 	vm_paddr_t mphys;
9363 
9364 retry:
9365 	pdpe = pmap_large_map_pdpe(va);
9366 	if (*pdpe == 0) {
9367 		m = pmap_large_map_getptp();
9368 		if (m == NULL)
9369 			goto retry;
9370 		mphys = VM_PAGE_TO_PHYS(m);
9371 		*pdpe = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
9372 	} else {
9373 		MPASS((*pdpe & X86_PG_PS) == 0);
9374 		mphys = *pdpe & PG_FRAME;
9375 	}
9376 	return ((pd_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pde_index(va));
9377 }
9378 
9379 static pt_entry_t *
9380 pmap_large_map_pte(vm_offset_t va)
9381 {
9382 	pd_entry_t *pde;
9383 	vm_page_t m;
9384 	vm_paddr_t mphys;
9385 
9386 retry:
9387 	pde = pmap_large_map_pde(va);
9388 	if (*pde == 0) {
9389 		m = pmap_large_map_getptp();
9390 		if (m == NULL)
9391 			goto retry;
9392 		mphys = VM_PAGE_TO_PHYS(m);
9393 		*pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
9394 		PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
9395 	} else {
9396 		MPASS((*pde & X86_PG_PS) == 0);
9397 		mphys = *pde & PG_FRAME;
9398 	}
9399 	return ((pt_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pte_index(va));
9400 }
9401 
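/*
 * Translate a large map virtual address to a physical address,
 * handling 1GB, 2MB, and 4KB mappings.
 */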
9402 static vm_paddr_t
9403 pmap_large_map_kextract(vm_offset_t va)
9404 {
9405 	pdp_entry_t *pdpe, pdp;
9406 	pd_entry_t *pde, pd;
9407 	pt_entry_t *pte, pt;
9408 
9409 	KASSERT(PMAP_ADDRESS_IN_LARGEMAP(va),
9410 	    ("not largemap range %#lx", (u_long)va));
9411 	pdpe = pmap_large_map_pdpe(va);
9412 	pdp = *pdpe;
9413 	KASSERT((pdp & X86_PG_V) != 0,
9414 	    ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
9415 	    (u_long)pdpe, pdp));
9416 	if ((pdp & X86_PG_PS) != 0) {
9417 		KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
9418 		    ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
9419 		    (u_long)pdpe, pdp));
9420 		return ((pdp & PG_PS_PDP_FRAME) | (va & PDPMASK));
9421 	}
9422 	pde = pmap_pdpe_to_pde(pdpe, va);
9423 	pd = *pde;
9424 	KASSERT((pd & X86_PG_V) != 0,
9425 	    ("invalid pd va %#lx pde %#lx pd %#lx", va, (u_long)pde, pd));
9426 	if ((pd & X86_PG_PS) != 0)
9427 		return ((pd & PG_PS_FRAME) | (va & PDRMASK));
9428 	pte = pmap_pde_to_pte(pde, va);
9429 	pt = *pte;
9430 	KASSERT((pt & X86_PG_V) != 0,
9431 	    ("invalid pte va %#lx pte %#lx pt %#lx", va, (u_long)pte, pt));
9432 	return ((pt & PG_FRAME) | (va & PAGE_MASK));
9433 }
9434 
9435 static int
9436 pmap_large_map_getva(vm_size_t len, vm_offset_t align, vm_offset_t phase,
9437     vmem_addr_t *vmem_res)
9438 {
9439 
9440 	/*
9441 	 * Large mappings are all but static.  Consequently, there
9442 	 * is no point in waiting for an earlier allocation to be
9443 	 * freed.
9444 	 */
9445 	return (vmem_xalloc(large_vmem, len, align, phase, 0, VMEM_ADDR_MIN,
9446 	    VMEM_ADDR_MAX, M_NOWAIT | M_BESTFIT, vmem_res));
9447 }
9448 
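/*
 * Create a kernel mapping for the physical range [spa, spa + len) with
 * the requested memory attribute.  The DMAP is used when it already
 * covers the range; otherwise KVA is allocated from the large map and
 * filled with the largest page size that the range's alignment allows.
 * On success *addr receives the mapped virtual address.
 *
 * A sketch of a hypothetical caller mapping persistent memory at
 * physical address "pa" of size "sz" might look like:
 *
 *	void *va;
 *	error = pmap_large_map(pa, sz, &va, VM_MEMATTR_DEFAULT);
 *	...
 *	pmap_large_map_wb(va, sz);
 *	pmap_large_unmap(va, sz);
 */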
9449 int
9450 pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
9451     vm_memattr_t mattr)
9452 {
9453 	pdp_entry_t *pdpe;
9454 	pd_entry_t *pde;
9455 	pt_entry_t *pte;
9456 	vm_offset_t va, inc;
9457 	vmem_addr_t vmem_res;
9458 	vm_paddr_t pa;
9459 	int error;
9460 
9461 	if (len == 0 || spa + len < spa)
9462 		return (EINVAL);
9463 
9464 	/* See if DMAP can serve. */
9465 	if (spa + len <= dmaplimit) {
9466 		va = PHYS_TO_DMAP(spa);
9467 		*addr = (void *)va;
9468 		return (pmap_change_attr(va, len, mattr));
9469 	}
9470 
9471 	/*
9472 	 * No, allocate KVA.  Fit the address with best possible
9473 	 * alignment for superpages.  Fall back to a lesser alignment
9474 	 * if that fails.
9475 	 */
9476 	error = ENOMEM;
9477 	if ((amd_feature & AMDID_PAGE1GB) != 0 && rounddown2(spa + len,
9478 	    NBPDP) >= roundup2(spa, NBPDP) + NBPDP)
9479 		error = pmap_large_map_getva(len, NBPDP, spa & PDPMASK,
9480 		    &vmem_res);
9481 	if (error != 0 && rounddown2(spa + len, NBPDR) >= roundup2(spa,
9482 	    NBPDR) + NBPDR)
9483 		error = pmap_large_map_getva(len, NBPDR, spa & PDRMASK,
9484 		    &vmem_res);
9485 	if (error != 0)
9486 		error = pmap_large_map_getva(len, PAGE_SIZE, 0, &vmem_res);
9487 	if (error != 0)
9488 		return (error);
9489 
9490 	/*
9491 	 * Fill pagetable.  PG_M is not pre-set; we scan modified bits
9492 	 * in the pagetable to minimize flushing.  No need to
9493 	 * invalidate TLB, since we only update invalid entries.
9494 	 */
9495 	PMAP_LOCK(kernel_pmap);
9496 	for (pa = spa, va = vmem_res; len > 0; pa += inc, va += inc,
9497 	    len -= inc) {
9498 		if ((amd_feature & AMDID_PAGE1GB) != 0 && len >= NBPDP &&
9499 		    (pa & PDPMASK) == 0 && (va & PDPMASK) == 0) {
9500 			pdpe = pmap_large_map_pdpe(va);
9501 			MPASS(*pdpe == 0);
9502 			*pdpe = pa | pg_g | X86_PG_PS | X86_PG_RW |
9503 			    X86_PG_V | X86_PG_A | pg_nx |
9504 			    pmap_cache_bits(kernel_pmap, mattr, TRUE);
9505 			inc = NBPDP;
9506 		} else if (len >= NBPDR && (pa & PDRMASK) == 0 &&
9507 		    (va & PDRMASK) == 0) {
9508 			pde = pmap_large_map_pde(va);
9509 			MPASS(*pde == 0);
9510 			*pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
9511 			    X86_PG_V | X86_PG_A | pg_nx |
9512 			    pmap_cache_bits(kernel_pmap, mattr, TRUE);
9513 			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
9514 			    ref_count++;
9515 			inc = NBPDR;
9516 		} else {
9517 			pte = pmap_large_map_pte(va);
9518 			MPASS(*pte == 0);
9519 			*pte = pa | pg_g | X86_PG_RW | X86_PG_V |
9520 			    X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
9521 			    mattr, FALSE);
9522 			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
9523 			    ref_count++;
9524 			inc = PAGE_SIZE;
9525 		}
9526 	}
9527 	PMAP_UNLOCK(kernel_pmap);
9528 	MPASS(len == 0);
9529 
9530 	*addr = (void *)vmem_res;
9531 	return (0);
9532 }
9533 
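/*
 * Tear down a mapping created by pmap_large_map().  Requests covering
 * only DMAP addresses are ignored.  Page table pages whose reference
 * counts drop to zero are unlinked and freed after the TLB
 * invalidation.
 */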
9534 void
9535 pmap_large_unmap(void *svaa, vm_size_t len)
9536 {
9537 	vm_offset_t sva, va;
9538 	vm_size_t inc;
9539 	pdp_entry_t *pdpe, pdp;
9540 	pd_entry_t *pde, pd;
9541 	pt_entry_t *pte;
9542 	vm_page_t m;
9543 	struct spglist spgf;
9544 
9545 	sva = (vm_offset_t)svaa;
9546 	if (len == 0 || sva + len < sva || (sva >= DMAP_MIN_ADDRESS &&
9547 	    sva + len <= DMAP_MIN_ADDRESS + dmaplimit))
9548 		return;
9549 
9550 	SLIST_INIT(&spgf);
9551 	KASSERT(PMAP_ADDRESS_IN_LARGEMAP(sva) &&
9552 	    PMAP_ADDRESS_IN_LARGEMAP(sva + len - 1),
9553 	    ("not largemap range %#lx %#lx", (u_long)svaa, (u_long)svaa + len));
9554 	PMAP_LOCK(kernel_pmap);
9555 	for (va = sva; va < sva + len; va += inc) {
9556 		pdpe = pmap_large_map_pdpe(va);
9557 		pdp = *pdpe;
9558 		KASSERT((pdp & X86_PG_V) != 0,
9559 		    ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
9560 		    (u_long)pdpe, pdp));
9561 		if ((pdp & X86_PG_PS) != 0) {
9562 			KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
9563 			    ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
9564 			    (u_long)pdpe, pdp));
9565 			KASSERT((va & PDPMASK) == 0,
9566 			    ("PDPMASK bit set, va %#lx pdpe %#lx pdp %#lx", va,
9567 			    (u_long)pdpe, pdp));
9568 			KASSERT(va + NBPDP <= sva + len,
9569 			    ("unmap covers partial 1GB page, sva %#lx va %#lx "
9570 			    "pdpe %#lx pdp %#lx len %#lx", sva, va,
9571 			    (u_long)pdpe, pdp, len));
9572 			*pdpe = 0;
9573 			inc = NBPDP;
9574 			continue;
9575 		}
9576 		pde = pmap_pdpe_to_pde(pdpe, va);
9577 		pd = *pde;
9578 		KASSERT((pd & X86_PG_V) != 0,
9579 		    ("invalid pd va %#lx pde %#lx pd %#lx", va,
9580 		    (u_long)pde, pd));
9581 		if ((pd & X86_PG_PS) != 0) {
9582 			KASSERT((va & PDRMASK) == 0,
9583 			    ("PDRMASK bit set, va %#lx pde %#lx pd %#lx", va,
9584 			    (u_long)pde, pd));
9585 			KASSERT(va + NBPDR <= sva + len,
9586 			    ("unmap covers partial 2MB page, sva %#lx va %#lx "
9587 			    "pde %#lx pd %#lx len %#lx", sva, va, (u_long)pde,
9588 			    pd, len));
9589 			pde_store(pde, 0);
9590 			inc = NBPDR;
9591 			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
9592 			m->ref_count--;
9593 			if (m->ref_count == 0) {
9594 				*pdpe = 0;
9595 				SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
9596 			}
9597 			continue;
9598 		}
9599 		pte = pmap_pde_to_pte(pde, va);
9600 		KASSERT((*pte & X86_PG_V) != 0,
9601 		    ("invalid pte va %#lx pte %#lx pt %#lx", va,
9602 		    (u_long)pte, *pte));
9603 		pte_clear(pte);
9604 		inc = PAGE_SIZE;
9605 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
9606 		m->ref_count--;
9607 		if (m->ref_count == 0) {
9608 			*pde = 0;
9609 			SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
9610 			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
9611 			m->ref_count--;
9612 			if (m->ref_count == 0) {
9613 				*pdpe = 0;
9614 				SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
9615 			}
9616 		}
9617 	}
9618 	pmap_invalidate_range(kernel_pmap, sva, sva + len);
9619 	PMAP_UNLOCK(kernel_pmap);
9620 	vm_page_free_pages_toq(&spgf, false);
9621 	vmem_free(large_vmem, sva, len);
9622 }
9623 
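/*
 * Fences issued before and after a large map cache write-back.  The
 * ifunc below selects the cheapest fence that is sufficient for the
 * flush instructions available on the CPU.
 */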
9624 static void
9625 pmap_large_map_wb_fence_mfence(void)
9626 {
9627 
9628 	mfence();
9629 }
9630 
9631 static void
9632 pmap_large_map_wb_fence_atomic(void)
9633 {
9634 
9635 	atomic_thread_fence_seq_cst();
9636 }
9637 
9638 static void
9639 pmap_large_map_wb_fence_nop(void)
9640 {
9641 }
9642 
9643 DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (void))
9644 {
9645 
9646 	if (cpu_vendor_id != CPU_VENDOR_INTEL)
9647 		return (pmap_large_map_wb_fence_mfence);
9648 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
9649 	    CPUID_STDEXT_CLFLUSHOPT)) == 0)
9650 		return (pmap_large_map_wb_fence_atomic);
9651 	else
9652 		/* clflush is strongly enough ordered */
9653 		return (pmap_large_map_wb_fence_nop);
9654 }
9655 
9656 static void
9657 pmap_large_map_flush_range_clwb(vm_offset_t va, vm_size_t len)
9658 {
9659 
9660 	for (; len > 0; len -= cpu_clflush_line_size,
9661 	    va += cpu_clflush_line_size)
9662 		clwb(va);
9663 }
9664 
9665 static void
9666 pmap_large_map_flush_range_clflushopt(vm_offset_t va, vm_size_t len)
9667 {
9668 
9669 	for (; len > 0; len -= cpu_clflush_line_size,
9670 	    va += cpu_clflush_line_size)
9671 		clflushopt(va);
9672 }
9673 
9674 static void
9675 pmap_large_map_flush_range_clflush(vm_offset_t va, vm_size_t len)
9676 {
9677 
9678 	for (; len > 0; len -= cpu_clflush_line_size,
9679 	    va += cpu_clflush_line_size)
9680 		clflush(va);
9681 }
9682 
9683 static void
9684 pmap_large_map_flush_range_nop(vm_offset_t sva __unused, vm_size_t len __unused)
9685 {
9686 }
9687 
9688 DEFINE_IFUNC(static, void, pmap_large_map_flush_range, (vm_offset_t, vm_size_t))
9689 {
9690 
9691 	if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) != 0)
9692 		return (pmap_large_map_flush_range_clwb);
9693 	else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0)
9694 		return (pmap_large_map_flush_range_clflushopt);
9695 	else if ((cpu_feature & CPUID_CLFSH) != 0)
9696 		return (pmap_large_map_flush_range_clflush);
9697 	else
9698 		return (pmap_large_map_flush_range_nop);
9699 }
9700 
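/*
 * Write back the cache for a large map range.  The PG_M bit of each
 * mapping is used to skip clean pages, and the PG_AVAIL1 bit marks a
 * write-back in progress so that concurrent callers for the same range
 * coordinate rather than flush twice.
 */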
9701 static void
9702 pmap_large_map_wb_large(vm_offset_t sva, vm_offset_t eva)
9703 {
9704 	volatile u_long *pe;
9705 	u_long p;
9706 	vm_offset_t va;
9707 	vm_size_t inc;
9708 	bool seen_other;
9709 
9710 	for (va = sva; va < eva; va += inc) {
9711 		inc = 0;
9712 		if ((amd_feature & AMDID_PAGE1GB) != 0) {
9713 			pe = (volatile u_long *)pmap_large_map_pdpe(va);
9714 			p = *pe;
9715 			if ((p & X86_PG_PS) != 0)
9716 				inc = NBPDP;
9717 		}
9718 		if (inc == 0) {
9719 			pe = (volatile u_long *)pmap_large_map_pde(va);
9720 			p = *pe;
9721 			if ((p & X86_PG_PS) != 0)
9722 				inc = NBPDR;
9723 		}
9724 		if (inc == 0) {
9725 			pe = (volatile u_long *)pmap_large_map_pte(va);
9726 			p = *pe;
9727 			inc = PAGE_SIZE;
9728 		}
9729 		seen_other = false;
9730 		for (;;) {
9731 			if ((p & X86_PG_AVAIL1) != 0) {
9732 				/*
9733 				 * Spin-wait for the end of a parallel
9734 				 * write-back.
9735 				 */
9736 				cpu_spinwait();
9737 				p = *pe;
9738 
9739 				/*
9740 				 * If we saw other write-back
9741 				 * occurring, we cannot rely on PG_M to
9742 				 * indicate state of the cache.  The
9743 				 * PG_M bit is cleared before the
9744 				 * flush to avoid ignoring new writes,
9745 				 * and writes which are relevant for
9746 				 * us might happen after.
9747 				 */
9748 				seen_other = true;
9749 				continue;
9750 			}
9751 
9752 			if ((p & X86_PG_M) != 0 || seen_other) {
9753 				if (!atomic_fcmpset_long(pe, &p,
9754 				    (p & ~X86_PG_M) | X86_PG_AVAIL1))
9755 					/*
9756 					 * If we saw PG_M without
9757 					 * PG_AVAIL1, and then on the
9758 					 * next attempt we do not
9759 					 * observe either PG_M or
9760 					 * PG_AVAIL1, the other
9761 					 * write-back started after us
9762 					 * and finished before us.  We
9763 					 * can rely on it doing our
9764 					 * work.
9765 					 */
9766 					continue;
9767 				pmap_large_map_flush_range(va, inc);
9768 				atomic_clear_long(pe, X86_PG_AVAIL1);
9769 			}
9770 			break;
9771 		}
9772 		maybe_yield();
9773 	}
9774 }
9775 
9776 /*
9777  * Write-back cache lines for the given address range.
9778  *
9779  * Must be called only on the range or sub-range returned from
9780  * pmap_large_map().  Must not be called on the coalesced ranges.
9781  *
9782  * Does nothing on CPUs without CLWB, CLFLUSHOPT, or CLFLUSH
9783  * instruction support.
9784  */
9785 void
9786 pmap_large_map_wb(void *svap, vm_size_t len)
9787 {
9788 	vm_offset_t eva, sva;
9789 
9790 	sva = (vm_offset_t)svap;
9791 	eva = sva + len;
9792 	pmap_large_map_wb_fence();
9793 	if (sva >= DMAP_MIN_ADDRESS && eva <= DMAP_MIN_ADDRESS + dmaplimit) {
9794 		pmap_large_map_flush_range(sva, len);
9795 	} else {
9796 		KASSERT(sva >= LARGEMAP_MIN_ADDRESS &&
9797 		    eva <= LARGEMAP_MIN_ADDRESS + lm_ents * NBPML4,
9798 		    ("pmap_large_map_wb: not largemap %#lx %#lx", sva, len));
9799 		pmap_large_map_wb_large(sva, eva);
9800 	}
9801 	pmap_large_map_wb_fence();
9802 }
9803 
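/*
 * Allocate a wired, zeroed page for the PTI page tables, backed by
 * pti_obj.
 */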
9804 static vm_page_t
9805 pmap_pti_alloc_page(void)
9806 {
9807 	vm_page_t m;
9808 
9809 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
9810 	m = vm_page_grab(pti_obj, pti_pg_idx++, VM_ALLOC_NOBUSY |
9811 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
9812 	return (m);
9813 }
9814 
9815 static bool
9816 pmap_pti_free_page(vm_page_t m)
9817 {
9818 
9819 	KASSERT(m->ref_count > 0, ("page %p not referenced", m));
9820 	if (!vm_page_unwire_noq(m))
9821 		return (false);
9822 	vm_page_free_zero(m);
9823 	return (true);
9824 }
9825 
9826 static void
9827 pmap_pti_init(void)
9828 {
9829 	vm_page_t pml4_pg;
9830 	pdp_entry_t *pdpe;
9831 	vm_offset_t va;
9832 	int i;
9833 
9834 	if (!pti)
9835 		return;
9836 	pti_obj = vm_pager_allocate(OBJT_PHYS, NULL, 0, VM_PROT_ALL, 0, NULL);
9837 	VM_OBJECT_WLOCK(pti_obj);
9838 	pml4_pg = pmap_pti_alloc_page();
9839 	pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
9840 	for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
9841 	    va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
9842 		pdpe = pmap_pti_pdpe(va);
9843 		pmap_pti_wire_pte(pdpe);
9844 	}
9845 	pmap_pti_add_kva_locked((vm_offset_t)&__pcpu[0],
9846 	    (vm_offset_t)&__pcpu[0] + sizeof(__pcpu[0]) * MAXCPU, false);
9847 	pmap_pti_add_kva_locked((vm_offset_t)idt, (vm_offset_t)idt +
9848 	    sizeof(struct gate_descriptor) * NIDT, false);
9849 	CPU_FOREACH(i) {
9850 		/* Doublefault stack IST 1 */
9851 		va = __pcpu[i].pc_common_tss.tss_ist1;
9852 		pmap_pti_add_kva_locked(va - PAGE_SIZE, va, false);
9853 		/* NMI stack IST 2 */
9854 		va = __pcpu[i].pc_common_tss.tss_ist2 + sizeof(struct nmi_pcpu);
9855 		pmap_pti_add_kva_locked(va - PAGE_SIZE, va, false);
9856 		/* MC# stack IST 3 */
9857 		va = __pcpu[i].pc_common_tss.tss_ist3 +
9858 		    sizeof(struct nmi_pcpu);
9859 		pmap_pti_add_kva_locked(va - PAGE_SIZE, va, false);
9860 		/* DB# stack IST 4 */
9861 		va = __pcpu[i].pc_common_tss.tss_ist4 + sizeof(struct nmi_pcpu);
9862 		pmap_pti_add_kva_locked(va - PAGE_SIZE, va, false);
9863 	}
9864 	pmap_pti_add_kva_locked((vm_offset_t)kernphys + KERNBASE,
9865 	    (vm_offset_t)etext, true);
9866 	pti_finalized = true;
9867 	VM_OBJECT_WUNLOCK(pti_obj);
9868 }
9869 SYSINIT(pmap_pti, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_pti_init, NULL);
9870 
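/*
 * Return a pointer to the PTI pdp entry for va, instantiating the pdp
 * page if necessary.  New pml4 entries may only be created before PTI
 * initialization is finalized.
 */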
9871 static pdp_entry_t *
9872 pmap_pti_pdpe(vm_offset_t va)
9873 {
9874 	pml4_entry_t *pml4e;
9875 	pdp_entry_t *pdpe;
9876 	vm_page_t m;
9877 	vm_pindex_t pml4_idx;
9878 	vm_paddr_t mphys;
9879 
9880 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
9881 
9882 	pml4_idx = pmap_pml4e_index(va);
9883 	pml4e = &pti_pml4[pml4_idx];
9884 	m = NULL;
9885 	if (*pml4e == 0) {
9886 		if (pti_finalized)
9887 			panic("pml4 alloc after finalization\n");
9888 		m = pmap_pti_alloc_page();
9889 		if (*pml4e != 0) {
9890 			pmap_pti_free_page(m);
9891 			mphys = *pml4e & ~PAGE_MASK;
9892 		} else {
9893 			mphys = VM_PAGE_TO_PHYS(m);
9894 			*pml4e = mphys | X86_PG_RW | X86_PG_V;
9895 		}
9896 	} else {
9897 		mphys = *pml4e & ~PAGE_MASK;
9898 	}
9899 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va);
9900 	return (pdpe);
9901 }
9902 
9903 static void
9904 pmap_pti_wire_pte(void *pte)
9905 {
9906 	vm_page_t m;
9907 
9908 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
9909 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
9910 	m->ref_count++;
9911 }
9912 
9913 static void
9914 pmap_pti_unwire_pde(void *pde, bool only_ref)
9915 {
9916 	vm_page_t m;
9917 
9918 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
9919 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
9920 	MPASS(m->ref_count > 0);
9921 	MPASS(only_ref || m->ref_count > 1);
9922 	pmap_pti_free_page(m);
9923 }
9924 
9925 static void
9926 pmap_pti_unwire_pte(void *pte, vm_offset_t va)
9927 {
9928 	vm_page_t m;
9929 	pd_entry_t *pde;
9930 
9931 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
9932 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
9933 	MPASS(m->ref_count > 0);
9934 	if (pmap_pti_free_page(m)) {
9935 		pde = pmap_pti_pde(va);
9936 		MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
9937 		*pde = 0;
9938 		pmap_pti_unwire_pde(pde, false);
9939 	}
9940 }
9941 
9942 static pd_entry_t *
9943 pmap_pti_pde(vm_offset_t va)
9944 {
9945 	pdp_entry_t *pdpe;
9946 	pd_entry_t *pde;
9947 	vm_page_t m;
9948 	vm_pindex_t pd_idx;
9949 	vm_paddr_t mphys;
9950 
9951 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
9952 
9953 	pdpe = pmap_pti_pdpe(va);
9954 	if (*pdpe == 0) {
9955 		m = pmap_pti_alloc_page();
9956 		if (*pdpe != 0) {
9957 			pmap_pti_free_page(m);
9958 			MPASS((*pdpe & X86_PG_PS) == 0);
9959 			mphys = *pdpe & ~PAGE_MASK;
9960 		} else {
9961 			mphys = VM_PAGE_TO_PHYS(m);
9962 			*pdpe = mphys | X86_PG_RW | X86_PG_V;
9963 		}
9964 	} else {
9965 		MPASS((*pdpe & X86_PG_PS) == 0);
9966 		mphys = *pdpe & ~PAGE_MASK;
9967 	}
9968 
9969 	pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
9970 	pd_idx = pmap_pde_index(va);
9971 	pde += pd_idx;
9972 	return (pde);
9973 }
9974 
9975 static pt_entry_t *
9976 pmap_pti_pte(vm_offset_t va, bool *unwire_pde)
9977 {
9978 	pd_entry_t *pde;
9979 	pt_entry_t *pte;
9980 	vm_page_t m;
9981 	vm_paddr_t mphys;
9982 
9983 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
9984 
9985 	pde = pmap_pti_pde(va);
9986 	if (unwire_pde != NULL) {
9987 		*unwire_pde = true;
9988 		pmap_pti_wire_pte(pde);
9989 	}
9990 	if (*pde == 0) {
9991 		m = pmap_pti_alloc_page();
9992 		if (*pde != 0) {
9993 			pmap_pti_free_page(m);
9994 			MPASS((*pde & X86_PG_PS) == 0);
9995 			mphys = *pde & ~(PAGE_MASK | pg_nx);
9996 		} else {
9997 			mphys = VM_PAGE_TO_PHYS(m);
9998 			*pde = mphys | X86_PG_RW | X86_PG_V;
9999 			if (unwire_pde != NULL)
10000 				*unwire_pde = false;
10001 		}
10002 	} else {
10003 		MPASS((*pde & X86_PG_PS) == 0);
10004 		mphys = *pde & ~(PAGE_MASK | pg_nx);
10005 	}
10006 
10007 	pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
10008 	pte += pmap_pte_index(va);
10009 
10010 	return (pte);
10011 }
10012 
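/*
 * Enter the kernel virtual address range [sva, eva) into the PTI page
 * table, one page at a time, optionally allowing execution.  The
 * pti_obj lock must be held.
 */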
10013 static void
10014 pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, bool exec)
10015 {
10016 	vm_paddr_t pa;
10017 	pd_entry_t *pde;
10018 	pt_entry_t *pte, ptev;
10019 	bool unwire_pde;
10020 
10021 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10022 
10023 	sva = trunc_page(sva);
10024 	MPASS(sva > VM_MAXUSER_ADDRESS);
10025 	eva = round_page(eva);
10026 	MPASS(sva < eva);
10027 	for (; sva < eva; sva += PAGE_SIZE) {
10028 		pte = pmap_pti_pte(sva, &unwire_pde);
10029 		pa = pmap_kextract(sva);
10030 		ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_G |
10031 		    (exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap,
10032 		    VM_MEMATTR_DEFAULT, FALSE);
10033 		if (*pte == 0) {
10034 			pte_store(pte, ptev);
10035 			pmap_pti_wire_pte(pte);
10036 		} else {
10037 			KASSERT(!pti_finalized,
10038 			    ("pti overlap after fin %#lx %#lx %#lx",
10039 			    sva, *pte, ptev));
10040 			KASSERT(*pte == ptev,
10041 			    ("pti non-identical pte after fin %#lx %#lx %#lx",
10042 			    sva, *pte, ptev));
10043 		}
10044 		if (unwire_pde) {
10045 			pde = pmap_pti_pde(sva);
10046 			pmap_pti_unwire_pde(pde, true);
10047 		}
10048 	}
10049 }
10050 
10051 void
10052 pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec)
10053 {
10054 
10055 	if (!pti)
10056 		return;
10057 	VM_OBJECT_WLOCK(pti_obj);
10058 	pmap_pti_add_kva_locked(sva, eva, exec);
10059 	VM_OBJECT_WUNLOCK(pti_obj);
10060 }
10061 
10062 void
10063 pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva)
10064 {
10065 	pt_entry_t *pte;
10066 	vm_offset_t va;
10067 
10068 	if (!pti)
10069 		return;
10070 	sva = rounddown2(sva, PAGE_SIZE);
10071 	MPASS(sva > VM_MAXUSER_ADDRESS);
10072 	eva = roundup2(eva, PAGE_SIZE);
10073 	MPASS(sva < eva);
10074 	VM_OBJECT_WLOCK(pti_obj);
10075 	for (va = sva; va < eva; va += PAGE_SIZE) {
10076 		pte = pmap_pti_pte(va, NULL);
10077 		KASSERT((*pte & X86_PG_V) != 0,
10078 		    ("invalid pte va %#lx pte %#lx pt %#lx", va,
10079 		    (u_long)pte, *pte));
10080 		pte_clear(pte);
10081 		pmap_pti_unwire_pte(pte, va);
10082 	}
10083 	pmap_invalidate_range(kernel_pmap, sva, eva);
10084 	VM_OBJECT_WUNLOCK(pti_obj);
10085 }
10086 
10087 static void *
10088 pkru_dup_range(void *ctx __unused, void *data)
10089 {
10090 	struct pmap_pkru_range *node, *new_node;
10091 
10092 	new_node = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
10093 	if (new_node == NULL)
10094 		return (NULL);
10095 	node = data;
10096 	memcpy(new_node, node, sizeof(*node));
10097 	return (new_node);
10098 }
10099 
10100 static void
10101 pkru_free_range(void *ctx __unused, void *node)
10102 {
10103 
10104 	uma_zfree(pmap_pkru_ranges_zone, node);
10105 }
10106 
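/*
 * Record a protection key assignment for [sva, eva) in the pmap's
 * pkru rangeset.  With AMD64_PKRU_EXCL the range must not overlap an
 * existing assignment.
 */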
10107 static int
10108 pmap_pkru_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
10109     int flags)
10110 {
10111 	struct pmap_pkru_range *ppr;
10112 	int error;
10113 
10114 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10115 	MPASS(pmap->pm_type == PT_X86);
10116 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
10117 	if ((flags & AMD64_PKRU_EXCL) != 0 &&
10118 	    !rangeset_check_empty(&pmap->pm_pkru, sva, eva))
10119 		return (EBUSY);
10120 	ppr = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
10121 	if (ppr == NULL)
10122 		return (ENOMEM);
10123 	ppr->pkru_keyidx = keyidx;
10124 	ppr->pkru_flags = flags & AMD64_PKRU_PERSIST;
10125 	error = rangeset_insert(&pmap->pm_pkru, sva, eva, ppr);
10126 	if (error != 0)
10127 		uma_zfree(pmap_pkru_ranges_zone, ppr);
10128 	return (error);
10129 }
10130 
10131 static int
10132 pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
10133 {
10134 
10135 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10136 	MPASS(pmap->pm_type == PT_X86);
10137 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
10138 	return (rangeset_remove(&pmap->pm_pkru, sva, eva));
10139 }
10140 
10141 static void
10142 pmap_pkru_deassign_all(pmap_t pmap)
10143 {
10144 
10145 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10146 	if (pmap->pm_type == PT_X86 &&
10147 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
10148 		rangeset_remove_all(&pmap->pm_pkru);
10149 }
10150 
10151 static bool
10152 pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
10153 {
10154 	struct pmap_pkru_range *ppr, *prev_ppr;
10155 	vm_offset_t va;
10156 
10157 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10158 	if (pmap->pm_type != PT_X86 ||
10159 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
10160 	    sva >= VM_MAXUSER_ADDRESS)
10161 		return (true);
10162 	MPASS(eva <= VM_MAXUSER_ADDRESS);
10163 	for (va = sva, prev_ppr = NULL; va < eva;) {
10164 		ppr = rangeset_lookup(&pmap->pm_pkru, va);
10165 		if ((ppr == NULL) ^ (prev_ppr == NULL))
10166 			return (false);
10167 		if (ppr == NULL) {
10168 			va += PAGE_SIZE;
10169 			continue;
10170 		}
10171 		if (prev_ppr->pkru_keyidx != ppr->pkru_keyidx)
10172 			return (false);
10173 		va = ppr->pkru_rs_el.re_end;
10174 	}
10175 	return (true);
10176 }
10177 
10178 static pt_entry_t
10179 pmap_pkru_get(pmap_t pmap, vm_offset_t va)
10180 {
10181 	struct pmap_pkru_range *ppr;
10182 
10183 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10184 	if (pmap->pm_type != PT_X86 ||
10185 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
10186 	    va >= VM_MAXUSER_ADDRESS)
10187 		return (0);
10188 	ppr = rangeset_lookup(&pmap->pm_pkru, va);
10189 	if (ppr != NULL)
10190 		return (X86_PG_PKU(ppr->pkru_keyidx));
10191 	return (0);
10192 }
10193 
10194 static bool
10195 pred_pkru_on_remove(void *ctx __unused, void *r)
10196 {
10197 	struct pmap_pkru_range *ppr;
10198 
10199 	ppr = r;
10200 	return ((ppr->pkru_flags & AMD64_PKRU_PERSIST) == 0);
10201 }
10202 
10203 static void
10204 pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
10205 {
10206 
10207 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10208 	if (pmap->pm_type == PT_X86 &&
10209 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
10210 		rangeset_remove_pred(&pmap->pm_pkru, sva, eva,
10211 		    pred_pkru_on_remove);
10212 	}
10213 }
10214 
10215 static int
10216 pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap)
10217 {
10218 
10219 	PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED);
10220 	PMAP_LOCK_ASSERT(src_pmap, MA_OWNED);
10221 	MPASS(dst_pmap->pm_type == PT_X86);
10222 	MPASS(src_pmap->pm_type == PT_X86);
10223 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
10224 	if (src_pmap->pm_pkru.rs_data_ctx == NULL)
10225 		return (0);
10226 	return (rangeset_copy(&dst_pmap->pm_pkru, &src_pmap->pm_pkru));
10227 }
10228 
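/*
 * Rewrite the PKU key index in every valid user page table entry
 * covering [sva, eva), demoting 2MB mappings that only partially
 * overlap the range, and invalidate the TLB for the range if anything
 * changed.
 */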
10229 static void
10230 pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
10231     u_int keyidx)
10232 {
10233 	pml4_entry_t *pml4e;
10234 	pdp_entry_t *pdpe;
10235 	pd_entry_t newpde, ptpaddr, *pde;
10236 	pt_entry_t newpte, *ptep, pte;
10237 	vm_offset_t va, va_next;
10238 	bool changed;
10239 
10240 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10241 	MPASS(pmap->pm_type == PT_X86);
10242 	MPASS(keyidx <= PMAP_MAX_PKRU_IDX);
10243 
10244 	for (changed = false, va = sva; va < eva; va = va_next) {
10245 		pml4e = pmap_pml4e(pmap, va);
10246 		if ((*pml4e & X86_PG_V) == 0) {
10247 			va_next = (va + NBPML4) & ~PML4MASK;
10248 			if (va_next < va)
10249 				va_next = eva;
10250 			continue;
10251 		}
10252 
10253 		pdpe = pmap_pml4e_to_pdpe(pml4e, va);
10254 		if ((*pdpe & X86_PG_V) == 0) {
10255 			va_next = (va + NBPDP) & ~PDPMASK;
10256 			if (va_next < va)
10257 				va_next = eva;
10258 			continue;
10259 		}
10260 
10261 		va_next = (va + NBPDR) & ~PDRMASK;
10262 		if (va_next < va)
10263 			va_next = eva;
10264 
10265 		pde = pmap_pdpe_to_pde(pdpe, va);
10266 		ptpaddr = *pde;
10267 		if (ptpaddr == 0)
10268 			continue;
10269 
10270 		MPASS((ptpaddr & X86_PG_V) != 0);
10271 		if ((ptpaddr & PG_PS) != 0) {
10272 			if (va + NBPDR == va_next && eva >= va_next) {
10273 				newpde = (ptpaddr & ~X86_PG_PKU_MASK) |
10274 				    X86_PG_PKU(keyidx);
10275 				if (newpde != ptpaddr) {
10276 					*pde = newpde;
10277 					changed = true;
10278 				}
10279 				continue;
10280 			} else if (!pmap_demote_pde(pmap, pde, va)) {
10281 				continue;
10282 			}
10283 		}
10284 
10285 		if (va_next > eva)
10286 			va_next = eva;
10287 
10288 		for (ptep = pmap_pde_to_pte(pde, va); va != va_next;
10289 		    ptep++, va += PAGE_SIZE) {
10290 			pte = *ptep;
10291 			if ((pte & X86_PG_V) == 0)
10292 				continue;
10293 			newpte = (pte & ~X86_PG_PKU_MASK) | X86_PG_PKU(keyidx);
10294 			if (newpte != pte) {
10295 				*ptep = newpte;
10296 				changed = true;
10297 			}
10298 		}
10299 	}
10300 	if (changed)
10301 		pmap_invalidate_range(pmap, sva, eva);
10302 }
10303 
10304 static int
10305 pmap_pkru_check_uargs(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
10306     u_int keyidx, int flags)
10307 {
10308 
10309 	if (pmap->pm_type != PT_X86 || keyidx > PMAP_MAX_PKRU_IDX ||
10310 	    (flags & ~(AMD64_PKRU_PERSIST | AMD64_PKRU_EXCL)) != 0)
10311 		return (EINVAL);
10312 	if (eva <= sva || eva > VM_MAXUSER_ADDRESS)
10313 		return (EFAULT);
10314 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
10315 		return (ENOTSUP);
10316 	return (0);
10317 }
10318 
10319 int
10320 pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
10321     int flags)
10322 {
10323 	int error;
10324 
10325 	sva = trunc_page(sva);
10326 	eva = round_page(eva);
10327 	error = pmap_pkru_check_uargs(pmap, sva, eva, keyidx, flags);
10328 	if (error != 0)
10329 		return (error);
10330 	for (;;) {
10331 		PMAP_LOCK(pmap);
10332 		error = pmap_pkru_assign(pmap, sva, eva, keyidx, flags);
10333 		if (error == 0)
10334 			pmap_pkru_update_range(pmap, sva, eva, keyidx);
10335 		PMAP_UNLOCK(pmap);
10336 		if (error != ENOMEM)
10337 			break;
10338 		vm_wait(NULL);
10339 	}
10340 	return (error);
10341 }
10342 
10343 int
10344 pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
10345 {
10346 	int error;
10347 
10348 	sva = trunc_page(sva);
10349 	eva = round_page(eva);
10350 	error = pmap_pkru_check_uargs(pmap, sva, eva, 0, 0);
10351 	if (error != 0)
10352 		return (error);
10353 	for (;;) {
10354 		PMAP_LOCK(pmap);
10355 		error = pmap_pkru_deassign(pmap, sva, eva);
10356 		if (error == 0)
10357 			pmap_pkru_update_range(pmap, sva, eva, 0);
10358 		PMAP_UNLOCK(pmap);
10359 		if (error != ENOMEM)
10360 			break;
10361 		vm_wait(NULL);
10362 	}
10363 	return (error);
10364 }
10365 
10366 /*
10367  * Track a range of the kernel's virtual address space that is contiguous
10368  * in various mapping attributes.
10369  */
10370 struct pmap_kernel_map_range {
10371 	vm_offset_t sva;
10372 	pt_entry_t attrs;
10373 	int ptes;
10374 	int pdes;
10375 	int pdpes;
10376 };
10377 
10378 static void
10379 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
10380     vm_offset_t eva)
10381 {
10382 	const char *mode;
10383 	int i, pat_idx;
10384 
10385 	if (eva <= range->sva)
10386 		return;
10387 
10388 	pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
10389 	for (i = 0; i < PAT_INDEX_SIZE; i++)
10390 		if (pat_index[i] == pat_idx)
10391 			break;
10392 
10393 	switch (i) {
10394 	case PAT_WRITE_BACK:
10395 		mode = "WB";
10396 		break;
10397 	case PAT_WRITE_THROUGH:
10398 		mode = "WT";
10399 		break;
10400 	case PAT_UNCACHEABLE:
10401 		mode = "UC";
10402 		break;
10403 	case PAT_UNCACHED:
10404 		mode = "U-";
10405 		break;
10406 	case PAT_WRITE_PROTECTED:
10407 		mode = "WP";
10408 		break;
10409 	case PAT_WRITE_COMBINING:
10410 		mode = "WC";
10411 		break;
10412 	default:
10413 		printf("%s: unknown PAT mode %#x for range 0x%016lx-0x%016lx\n",
10414 		    __func__, pat_idx, range->sva, eva);
10415 		mode = "??";
10416 		break;
10417 	}
10418 
10419 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %s %d %d %d\n",
10420 	    range->sva, eva,
10421 	    (range->attrs & X86_PG_RW) != 0 ? 'w' : '-',
10422 	    (range->attrs & pg_nx) != 0 ? '-' : 'x',
10423 	    (range->attrs & X86_PG_U) != 0 ? 'u' : 's',
10424 	    (range->attrs & X86_PG_G) != 0 ? 'g' : '-',
10425 	    mode, range->pdpes, range->pdes, range->ptes);
10426 
10427 	/* Reset to sentinel value. */
10428 	range->sva = KVADDR(NPML4EPG - 1, NPDPEPG - 1, NPDEPG - 1, NPTEPG - 1);
10429 }
10430 
10431 /*
10432  * Determine whether the attributes specified by a page table entry match those
10433  * being tracked by the current range.  This is not quite as simple as a direct
10434  * flag comparison since some PAT modes have multiple representations.
10435  */
10436 static bool
10437 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
10438 {
10439 	pt_entry_t diff, mask;
10440 
10441 	mask = X86_PG_G | X86_PG_RW | X86_PG_U | X86_PG_PDE_CACHE | pg_nx;
10442 	diff = (range->attrs ^ attrs) & mask;
10443 	if (diff == 0)
10444 		return (true);
10445 	if ((diff & ~X86_PG_PDE_PAT) == 0 &&
10446 	    pmap_pat_index(kernel_pmap, range->attrs, true) ==
10447 	    pmap_pat_index(kernel_pmap, attrs, true))
10448 		return (true);
10449 	return (false);
10450 }
10451 
10452 static void
10453 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
10454     pt_entry_t attrs)
10455 {
10456 
10457 	memset(range, 0, sizeof(*range));
10458 	range->sva = va;
10459 	range->attrs = attrs;
10460 }
10461 
10462 /*
10463  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
10464  * those of the current run, dump the address range and its attributes, and
10465  * begin a new run.
10466  */
10467 static void
10468 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
10469     vm_offset_t va, pml4_entry_t pml4e, pdp_entry_t pdpe, pd_entry_t pde,
10470     pt_entry_t pte)
10471 {
10472 	pt_entry_t attrs;
10473 
10474 	attrs = pml4e & (X86_PG_RW | X86_PG_U | pg_nx);
10475 
10476 	attrs |= pdpe & pg_nx;
10477 	attrs &= pg_nx | (pdpe & (X86_PG_RW | X86_PG_U));
10478 	if ((pdpe & PG_PS) != 0) {
10479 		attrs |= pdpe & (X86_PG_G | X86_PG_PDE_CACHE);
10480 	} else if (pde != 0) {
10481 		attrs |= pde & pg_nx;
10482 		attrs &= pg_nx | (pde & (X86_PG_RW | X86_PG_U));
10483 	}
10484 	if ((pde & PG_PS) != 0) {
10485 		attrs |= pde & (X86_PG_G | X86_PG_PDE_CACHE);
10486 	} else if (pte != 0) {
10487 		attrs |= pte & pg_nx;
10488 		attrs &= pg_nx | (pte & (X86_PG_RW | X86_PG_U));
10489 		attrs |= pte & (X86_PG_G | X86_PG_PTE_CACHE);
10490 
10491 		/* Canonicalize by always using the PDE PAT bit. */
10492 		if ((attrs & X86_PG_PTE_PAT) != 0)
10493 			attrs ^= X86_PG_PDE_PAT | X86_PG_PTE_PAT;
10494 	}
10495 
10496 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
10497 		sysctl_kmaps_dump(sb, range, va);
10498 		sysctl_kmaps_reinit(range, va, attrs);
10499 	}
10500 }
10501 
10502 static int
10503 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
10504 {
10505 	struct pmap_kernel_map_range range;
10506 	struct sbuf sbuf, *sb;
10507 	pml4_entry_t pml4e;
10508 	pdp_entry_t *pdp, pdpe;
10509 	pd_entry_t *pd, pde;
10510 	pt_entry_t *pt, pte;
10511 	vm_offset_t sva;
10512 	vm_paddr_t pa;
10513 	int error, i, j, k, l;
10514 
10515 	error = sysctl_wire_old_buffer(req, 0);
10516 	if (error != 0)
10517 		return (error);
10518 	sb = &sbuf;
10519 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
10520 
10521 	/* Sentinel value. */
10522 	range.sva = KVADDR(NPML4EPG - 1, NPDPEPG - 1, NPDEPG - 1, NPTEPG - 1);
10523 
10524 	/*
10525 	 * Iterate over the kernel page tables without holding the kernel pmap
10526 	 * lock.  Outside of the large map, kernel page table pages are never
10527 	 * freed, so at worst we will observe inconsistencies in the output.
10528 	 * Within the large map, ensure that PDP and PD page addresses are
10529 	 * valid before descending.
10530 	 */
10531 	for (sva = 0, i = pmap_pml4e_index(sva); i < NPML4EPG; i++) {
10532 		switch (i) {
10533 		case PML4PML4I:
10534 			sbuf_printf(sb, "\nRecursive map:\n");
10535 			break;
10536 		case DMPML4I:
10537 			sbuf_printf(sb, "\nDirect map:\n");
10538 			break;
10539 		case KPML4BASE:
10540 			sbuf_printf(sb, "\nKernel map:\n");
10541 			break;
10542 		case LMSPML4I:
10543 			sbuf_printf(sb, "\nLarge map:\n");
10544 			break;
10545 		}
10546 
10547 		/* Convert to canonical form. */
10548 		if (sva == 1ul << 47)
10549 			sva |= -1ul << 48;
10550 
10551 restart:
10552 		pml4e = kernel_pmap->pm_pml4[i];
10553 		if ((pml4e & X86_PG_V) == 0) {
10554 			sva = rounddown2(sva, NBPML4);
10555 			sysctl_kmaps_dump(sb, &range, sva);
10556 			sva += NBPML4;
10557 			continue;
10558 		}
10559 		pa = pml4e & PG_FRAME;
10560 		pdp = (pdp_entry_t *)PHYS_TO_DMAP(pa);
10561 
10562 		for (j = pmap_pdpe_index(sva); j < NPDPEPG; j++) {
10563 			pdpe = pdp[j];
10564 			if ((pdpe & X86_PG_V) == 0) {
10565 				sva = rounddown2(sva, NBPDP);
10566 				sysctl_kmaps_dump(sb, &range, sva);
10567 				sva += NBPDP;
10568 				continue;
10569 			}
10570 			pa = pdpe & PG_FRAME;
10571 			if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
10572 			    vm_phys_paddr_to_vm_page(pa) == NULL)
10573 				goto restart;
10574 			if ((pdpe & PG_PS) != 0) {
10575 				sva = rounddown2(sva, NBPDP);
10576 				sysctl_kmaps_check(sb, &range, sva, pml4e, pdpe,
10577 				    0, 0);
10578 				range.pdpes++;
10579 				sva += NBPDP;
10580 				continue;
10581 			}
10582 			pd = (pd_entry_t *)PHYS_TO_DMAP(pa);
10583 
10584 			for (k = pmap_pde_index(sva); k < NPDEPG; k++) {
10585 				pde = pd[k];
10586 				if ((pde & X86_PG_V) == 0) {
10587 					sva = rounddown2(sva, NBPDR);
10588 					sysctl_kmaps_dump(sb, &range, sva);
10589 					sva += NBPDR;
10590 					continue;
10591 				}
10592 				pa = pde & PG_FRAME;
10593 				if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
10594 				    vm_phys_paddr_to_vm_page(pa) == NULL)
10595 					goto restart;
10596 				if ((pde & PG_PS) != 0) {
10597 					sva = rounddown2(sva, NBPDR);
10598 					sysctl_kmaps_check(sb, &range, sva,
10599 					    pml4e, pdpe, pde, 0);
10600 					range.pdes++;
10601 					sva += NBPDR;
10602 					continue;
10603 				}
10604 				pt = (pt_entry_t *)PHYS_TO_DMAP(pa);
10605 
10606 				for (l = pmap_pte_index(sva); l < NPTEPG; l++,
10607 				    sva += PAGE_SIZE) {
10608 					pte = pt[l];
10609 					if ((pte & X86_PG_V) == 0) {
10610 						sysctl_kmaps_dump(sb, &range,
10611 						    sva);
10612 						continue;
10613 					}
10614 					sysctl_kmaps_check(sb, &range, sva,
10615 					    pml4e, pdpe, pde, pte);
10616 					range.ptes++;
10617 				}
10618 			}
10619 		}
10620 	}
10621 
10622 	error = sbuf_finish(sb);
10623 	sbuf_delete(sb);
10624 	return (error);
10625 }
10626 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
10627     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
10628     NULL, 0, sysctl_kmaps, "A",
10629     "Dump kernel address layout");
10630 
10631 #ifdef DDB
10632 DB_SHOW_COMMAND(pte, pmap_print_pte)
10633 {
10634 	pmap_t pmap;
10635 	pml4_entry_t *pml4;
10636 	pdp_entry_t *pdp;
10637 	pd_entry_t *pde;
10638 	pt_entry_t *pte, PG_V;
10639 	vm_offset_t va;
10640 
10641 	if (!have_addr) {
10642 		db_printf("show pte addr\n");
10643 		return;
10644 	}
10645 	va = (vm_offset_t)addr;
10646 
10647 	if (kdb_thread != NULL)
10648 		pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
10649 	else
10650 		pmap = PCPU_GET(curpmap);
10651 
10652 	PG_V = pmap_valid_bit(pmap);
10653 	pml4 = pmap_pml4e(pmap, va);
10654 	db_printf("VA 0x%016lx pml4e 0x%016lx", va, *pml4);
10655 	if ((*pml4 & PG_V) == 0) {
10656 		db_printf("\n");
10657 		return;
10658 	}
10659 	pdp = pmap_pml4e_to_pdpe(pml4, va);
10660 	db_printf(" pdpe 0x%016lx", *pdp);
10661 	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
10662 		db_printf("\n");
10663 		return;
10664 	}
10665 	pde = pmap_pdpe_to_pde(pdp, va);
10666 	db_printf(" pde 0x%016lx", *pde);
10667 	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
10668 		db_printf("\n");
10669 		return;
10670 	}
10671 	pte = pmap_pde_to_pte(pde, va);
10672 	db_printf(" pte 0x%016lx\n", *pte);
10673 }
10674 
10675 DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
10676 {
10677 	vm_paddr_t a;
10678 
10679 	if (have_addr) {
10680 		a = (vm_paddr_t)addr;
10681 		db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
10682 	} else {
10683 		db_printf("show phys2dmap addr\n");
10684 	}
10685 }
10686 #endif
10687